globalIndexTemplates.C
1 /*---------------------------------------------------------------------------*\
2  ========= |
3  \\ / F ield | OpenFOAM: The Open Source CFD Toolbox
4  \\ / O peration |
5  \\ / A nd | www.openfoam.com
6  \\/ M anipulation |
7 -------------------------------------------------------------------------------
8  Copyright (C) 2013-2017 OpenFOAM Foundation
9  Copyright (C) 2019-2023 OpenCFD Ltd.
10 -------------------------------------------------------------------------------
11 License
12  This file is part of OpenFOAM.
13 
14  OpenFOAM is free software: you can redistribute it and/or modify it
15  under the terms of the GNU General Public License as published by
16  the Free Software Foundation, either version 3 of the License, or
17  (at your option) any later version.
18 
19  OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
20  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
21  FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
22  for more details.
23 
24  You should have received a copy of the GNU General Public License
25  along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.
26 
27 \*---------------------------------------------------------------------------*/
28 
29 #include "globalIndex.H"
30 
31 // * * * * * * * * * * * * * Static Member Functions * * * * * * * * * * * * //
32 
33 template<class Addr>
34 Foam::labelList
35 Foam::globalIndex::calcOffsets
36 (
37  const IndirectListBase<label, Addr>& counts,
38  const bool checkOverflow
39 )
40 {
41  labelList values;
42 
43  const label len = counts.size();
44 
45  if (len)
46  {
47  values.resize(len+1);
48 
49  label start = 0;
50  for (label i = 0; i < len; ++i)
51  {
52  const label count = counts[i];
53 
54  values[i] = start;
55  start += count;
56 
57  if (checkOverflow && start < values[i])
58  {
59  reportOverflowAndExit(i, values[i], count);
60  }
61  }
62  values[len] = start;
63  }
64 
65  return values;
66 }
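As an illustration of the offsets layout built above (the numbers are made up): each input count becomes a half-open slot, and the final entry holds the total.

    counts:  (3 1 0 4)
    offsets: (0 3 4 4 8)   // item i occupies offsets[i] .. offsets[i+1]-1; back() is the total size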
67 
68 
69 template<class SubListType>
70 Foam::labelList
71 Foam::globalIndex::calcListOffsets
72 (
73  const List<SubListType>& lists,
74  const bool checkOverflow
75 )
76 {
77  labelList values;
78 
79  const label len = lists.size();
80 
81  if (len)
82  {
83  values.resize(len+1);
84 
85  label start = 0;
86  for (label i = 0; i < len; ++i)
87  {
88  const label count = lists[i].size();
89 
90  values[i] = start;
91  start += count;
92 
93  if (checkOverflow && start < values[i])
94  {
95  reportOverflowAndExit(i, values[i], count);
96  }
97  }
98  values[len] = start;
99  }
100 
101  return values;
102 }
103 
104 
105 template<class ProcIDsContainer, class Type>
106 void Foam::globalIndex::gatherValues
107 (
108  const label comm,
109  const ProcIDsContainer& procIDs,
110  const Type& localValue,
111  List<Type>& allValues,
112  const int tag,
113  const UPstream::commsTypes preferredCommsType
114 )
115 {
116  // low-level: no parRun guard
117 
118  // Automatically change from nonBlocking to scheduled for
119  // non-contiguous data.
120  const UPstream::commsTypes commsType =
121  (
122  (
123  !is_contiguous<Type>::value
124  && UPstream::commsTypes::nonBlocking == preferredCommsType
125  )
126  ? UPstream::commsTypes::scheduled
127  : preferredCommsType
128  );
129 
130  const label startOfRequests = UPstream::nRequests();
131 
132  const int masterProci = procIDs.size() ? procIDs[0] : 0;
133 
134  if (UPstream::myProcNo(comm) == masterProci)
135  {
136  allValues.resize_nocopy(procIDs.size());
137  allValues[0] = localValue;
138 
139  for (label i = 1; i < procIDs.size(); ++i)
140  {
141  if (is_contiguous<Type>::value)
142  {
143  UIPstream::read
144  (
145  commsType,
146  procIDs[i],
147  reinterpret_cast<char*>(&allValues[i]),
148  sizeof(Type),
149  tag,
150  comm
151  );
152  }
153  else
154  {
155  IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
156  fromProc >> allValues[i];
157  }
158  }
159  }
160  else
161  {
162  allValues.clear(); // safety: zero-size on non-master
163 
164  if (is_contiguous<Type>::value)
165  {
166  UOPstream::write
167  (
168  commsType,
169  masterProci,
170  reinterpret_cast<const char*>(&localValue),
171  sizeof(Type),
172  tag,
173  comm
174  );
175  }
176  else
177  {
178  OPstream toMaster(commsType, masterProci, 0, tag, comm);
179  toMaster << localValue;
180  }
181  }
182 
183  if (commsType == UPstream::commsTypes::nonBlocking)
184  {
185  // Wait for outstanding requests
186  UPstream::waitRequests(startOfRequests);
187  }
188 }
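A minimal usage sketch for gatherValues (comm, procIDs and nFaces are illustrative names, not taken from this file): one value per participating rank ends up on procIDs[0].

    label nFaces = mesh.nFaces();   // some per-rank value
    labelList allFaces;
    globalIndex::gatherValues(comm, procIDs, nFaces, allFaces);
    // On procIDs[0]: allFaces[i] holds the value from rank procIDs[i];
    // on all other ranks allFaces is left empty.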
189 
190 
191 template<class ProcIDsContainer, class Type>
192 void Foam::globalIndex::gather
193 (
194  const labelUList& off, // needed on master only
195  const label comm,
196  const ProcIDsContainer& procIDs,
197  const UList<Type>& fld,
198  List<Type>& allFld,
199  const int tag,
200  const UPstream::commsTypes preferredCommsType
201 )
202 {
203  // low-level: no parRun guard
204 
205  // Automatically change from nonBlocking to scheduled for
206  // non-contiguous data.
207  const UPstream::commsTypes commsType =
208  (
209  (
210  !is_contiguous<Type>::value
211  && UPstream::commsTypes::nonBlocking == preferredCommsType
212  )
213  ? UPstream::commsTypes::scheduled
214  : preferredCommsType
215  );
216 
217  const label startOfRequests = UPstream::nRequests();
218 
219  const int masterProci = procIDs.size() ? procIDs[0] : 0;
220 
221  if (UPstream::myProcNo(comm) == masterProci)
222  {
223  allFld.resize_nocopy(off.back()); // == totalSize()
224 
225  // Assign my local data - respect offset information
226  // so that we can request 0 entries to be copied.
227  // Also handle the case where we have a slice of the full
228  // list.
229 
230  SubList<Type>(allFld, off[1]-off[0], off[0]) =
231  SubList<Type>(fld, off[1]-off[0]);
232 
233  for (label i = 1; i < procIDs.size(); ++i)
234  {
235  SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
236 
237  if (procSlot.empty())
238  {
239  // Nothing to do
240  }
241  else if (is_contiguous<Type>::value)
242  {
243  UIPstream::read
244  (
245  commsType,
246  procIDs[i],
247  procSlot.data_bytes(),
248  procSlot.size_bytes(),
249  tag,
250  comm
251  );
252  }
253  else
254  {
255  IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
256  fromProc >> procSlot;
257  }
258  }
259  }
260  else
261  {
262  if (fld.empty())
263  {
264  // Nothing to do
265  }
266  else if (is_contiguous<Type>::value)
267  {
268  UOPstream::write
269  (
270  commsType,
271  masterProci,
272  fld.cdata_bytes(),
273  fld.size_bytes(),
274  tag,
275  comm
276  );
277  }
278  else
279  {
280  OPstream toMaster(commsType, masterProci, 0, tag, comm);
281  toMaster << fld;
282  }
283  }
284 
285  if (commsType == UPstream::commsTypes::nonBlocking)
286  {
287  // Wait for outstanding requests
288  UPstream::waitRequests(startOfRequests);
289  }
290 }
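To illustrate how the master-only offsets drive the receive slots above (made-up numbers for three ranks):

    off    = (0 4 9 12)
    allFld = [ procIDs[0]: 0..3 | procIDs[1]: 4..8 | procIDs[2]: 9..11 ]

The master keeps its own 4 elements, receives 5 from procIDs[1] and 3 from procIDs[2], and allFld.size() == off.back() == 12.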
291 
292 
293 template<class ProcIDsContainer, class Type, class Addr>
294 void Foam::globalIndex::gather
295 (
296  const labelUList& off, // needed on master only
297  const label comm,
298  const ProcIDsContainer& procIDs,
299  const IndirectListBase<Type, Addr>& fld,
300  List<Type>& allFld,
301  const int tag,
302  const UPstream::commsTypes preferredCommsType
303 )
304 {
305  // low-level: no parRun guard
306 
307  if (is_contiguous<Type>::value)
308  {
309  // Flatten list (locally) so that we can benefit from using direct
310  // read/write of contiguous data
311 
312  gather
313  (
314  off,
315  comm,
316  procIDs,
317  List<Type>(fld),
318  allFld,
319  tag,
320  preferredCommsType
321  );
322  return;
323  }
324 
325  // Automatically change from nonBlocking to scheduled for
326  // non-contiguous data.
327  const UPstream::commsTypes commsType =
328  (
329  (
330  !is_contiguous<Type>::value
331  && UPstream::commsTypes::nonBlocking == preferredCommsType
332  )
333  ? UPstream::commsTypes::scheduled
334  : preferredCommsType
335  );
336 
337  const label startOfRequests = UPstream::nRequests();
338 
339  const int masterProci = procIDs.size() ? procIDs[0] : 0;
340 
341  if (UPstream::myProcNo(comm) == masterProci)
342  {
343  allFld.resize_nocopy(off.back()); // == totalSize()
344 
345  // Assign my local data - respect offset information
346  // so that we can request 0 entries to be copied
347 
348  SubList<Type> localSlot(allFld, off[1]-off[0], off[0]);
349  if (!localSlot.empty())
350  {
351  localSlot = fld;
352  }
353 
354  // Already verified commsType != nonBlocking
355  for (label i = 1; i < procIDs.size(); ++i)
356  {
357  SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
358 
359  if (procSlot.empty())
360  {
361  // Nothing to do
362  }
363  else
364  {
365  IPstream fromProc(commsType, procIDs[i], 0, tag, comm);
366  fromProc >> procSlot;
367  }
368  }
369  }
370  else
371  {
372  if (fld.empty())
373  {
374  // Nothing to do
375  }
376  else
377  {
378  OPstream toMaster(commsType, masterProci, 0, tag, comm);
379  toMaster << fld;
380  }
381  }
382 
383  if (commsType == UPstream::commsTypes::nonBlocking)
384  {
385  // Wait for outstanding requests
386  UPstream::waitRequests(startOfRequests);
387  }
388 }
389 
390 
391 // * * * * * * * * * * * * * * * Member Functions * * * * * * * * * * * * * //
392 
393 template<class Type>
394 void Foam::globalIndex::gather
395 (
396  const UList<Type>& sendData,
397  List<Type>& allData,
398  const int tag,
399  const UPstream::commsTypes commsType,
400  const label comm
401 ) const
402 {
403  if (!UPstream::parRun())
404  {
405  // Serial: direct copy
406  allData = sendData;
407  return;
408  }
409 
410  {
411  gather
412  (
413  offsets_, // needed on master only
414  comm,
415  UPstream::allProcs(comm), // All communicator ranks
416  sendData,
417  allData,
418  tag,
419  commsType
420  );
421  if (!UPstream::master(comm))
422  {
423  allData.clear(); // safety: zero-size on non-master
424  }
425  }
426 }
427 
428 
429 template<class Type, class Addr>
430 void Foam::globalIndex::gather
431 (
432  const IndirectListBase<Type, Addr>& sendData,
433  List<Type>& allData,
434  const int tag,
435  const UPstream::commsTypes commsType,
436  const label comm
437 ) const
438 {
439  if (!UPstream::parRun())
440  {
441  // Serial: direct copy
442  allData = sendData;
443  return;
444  }
445 
446  {
447  gather
448  (
449  offsets_, // needed on master only
450  comm,
451  UPstream::allProcs(comm), // All communicator ranks
452  sendData,
453  allData,
454  tag,
455  commsType
456  );
457  if (!UPstream::master(comm))
458  {
459  allData.clear(); // safety: zero-size on non-master
460  }
461  }
462 }
463 
464 
465 template<class Type, class OutputContainer>
466 OutputContainer Foam::globalIndex::gather
467 (
468  const UList<Type>& sendData,
469  const int tag,
470  const UPstream::commsTypes commsType,
471  const label comm
472 ) const
473 {
474  OutputContainer allData;
475  gather(sendData, allData, tag, commsType, comm);
476  return allData;
477 }
478 
479 
480 template<class Type, class Addr, class OutputContainer>
481 OutputContainer Foam::globalIndex::gather
482 (
483  const IndirectListBase<Type, Addr>& sendData,
484  const int tag,
485  const UPstream::commsTypes commsType,
486  const label comm
487 ) const
488 {
489  OutputContainer allData;
490  gather(sendData, allData, tag, commsType, comm);
491  return allData;
492 }
493 
494 
495 template<class Type>
496 void Foam::globalIndex::gatherInplace
497 (
498  List<Type>& fld,
499  const int tag,
500  const UPstream::commsTypes commsType,
501  const label comm
502 ) const
503 {
504  if (UPstream::parRun())
505  {
506  List<Type> allData;
507  gather(fld, allData, tag, commsType, comm);
508 
509  if (UPstream::master(comm))
510  {
511  fld.transfer(allData);
512  }
513  else
514  {
515  fld.clear(); // zero-size on non-master
516  }
517  }
518  // Serial: (no-op)
519 }
520 
521 
522 template<class Type, class OutputContainer>
523 void Foam::globalIndex::mpiGather
524 (
525  const UList<Type>& sendData,
526  OutputContainer& allData,
527  const label comm,
528 
529  const UPstream::commsTypes commsType,
530  const int tag
531 ) const
532 {
533  if (!UPstream::parRun())
534  {
535  // Serial: direct copy
536  allData = sendData;
537  return;
538  }
539 
540  // MPI_Gatherv requires contiguous data, but a byte-wise transfer can
541  // quickly exceed the 'int' limits used for MPI sizes/offsets.
542  // Thus gather label/scalar components when possible to increase the
543  // effective size limit.
544  //
545  // Note: cannot rely on pTraits (cmptType, nComponents) since this method
546  // needs to compile (and work) even with things like strings etc.
547 
548  // Single char ad hoc "enum":
549  // - b(yte): gather bytes
550  // - f(loat): gather scalar components
551  // - i(nt): gather label components
552  // - 0: gather with Pstream read/write etc.
553 
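    // For example (illustrative): with Type = vector and double-precision
    // scalars the data are sent as scalar components, so nCmpts = 3 and the
    // int-valued counts/offsets stay within range up to totalSize() <= INT_MAX/3
    // elements, rather than INT_MAX/24 for a byte-wise ('b') transfer.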
554  List<int> recvCounts;
555  List<int> recvOffsets;
556 
557  char dataMode(0);
558  int nCmpts(0);
559 
560  if (is_contiguous<Type>::value)
561  {
562  if (is_contiguous_scalar<Type>::value)
563  {
564  dataMode = 'f';
565  nCmpts = static_cast<int>(sizeof(Type)/sizeof(scalar));
566  }
567  else if (is_contiguous_label<Type>::value)
568  {
569  dataMode = 'i';
570  nCmpts = static_cast<int>(sizeof(Type)/sizeof(label));
571  }
572  else
573  {
574  dataMode = 'b';
575  nCmpts = static_cast<int>(sizeof(Type));
576  }
577 
578  // Offsets must fit into int
579  if (UPstream::master(comm))
580  {
581  const globalIndex& globalAddr = *this;
582 
583  if (globalAddr.totalSize() > (INT_MAX/nCmpts))
584  {
585  // Offsets do not fit into int - revert to manual.
586  dataMode = 0;
587  }
588  else
589  {
590  // Must be same as Pstream::nProcs(comm), at least on master!
591  const label nproc = globalAddr.nProcs();
592 
593  allData.resize_nocopy(globalAddr.totalSize());
594 
595  recvCounts.resize(nproc);
596  recvOffsets.resize(nproc+1);
597 
598  for (label proci = 0; proci < nproc; ++proci)
599  {
600  recvCounts[proci] = globalAddr.localSize(proci)*nCmpts;
601  recvOffsets[proci] = globalAddr.localStart(proci)*nCmpts;
602  }
603  recvOffsets[nproc] = globalAddr.totalSize()*nCmpts;
604 
605  // Assign local data directly
606 
607  recvCounts[0] = 0; // ie, ignore for MPI_Gatherv
608  SubList<Type>(allData, globalAddr.range(0)) =
609  SubList<Type>(sendData, globalAddr.range(0));
610  }
611  }
612 
613  // Consistent information for everyone
614  UPstream::broadcast(&dataMode, 1, comm);
615  }
616 
617  // Dispatch
618  switch (dataMode)
619  {
620  case 'b': // Byte-wise
621  {
622  UPstream::gather
623  (
624  sendData.cdata_bytes(),
625  sendData.size_bytes(),
626  allData.data_bytes(),
627  recvCounts,
628  recvOffsets,
629  comm
630  );
631  break;
632  }
633  case 'f': // Float (scalar) components
634  {
635  typedef scalar cmptType;
636 
637  UPstream::gather
638  (
639  reinterpret_cast<const cmptType*>(sendData.cdata()),
640  (sendData.size()*nCmpts),
641  reinterpret_cast<cmptType*>(allData.data()),
642  recvCounts,
643  recvOffsets,
644  comm
645  );
646  break;
647  }
648  case 'i': // Int (label) components
649  {
650  typedef label cmptType;
651 
652  UPstream::gather
653  (
654  reinterpret_cast<const cmptType*>(sendData.cdata()),
655  (sendData.size()*nCmpts),
656  reinterpret_cast<cmptType*>(allData.data()),
657  recvCounts,
658  recvOffsets,
659  comm
660  );
661  break;
662  }
663  default: // Regular (manual) gathering
664  {
665  gather
666  (
667  offsets_, // needed on master only
668  comm,
669  UPstream::allProcs(comm), // All communicator ranks
670  sendData,
671  allData,
672  tag,
673  commsType
674  );
675  break;
676  }
677  }
678 
679  if (!UPstream::master(comm))
680  {
681  allData.clear(); // safety: zero-size on non-master
682  }
683 }
684 
685 
686 template<class Type, class OutputContainer>
687 OutputContainer Foam::globalIndex::mpiGather
688 (
689  const UList<Type>& sendData,
690  const label comm,
691 
692  const UPstream::commsTypes commsType,
693  const int tag
694 ) const
695 {
696  OutputContainer allData;
697  mpiGather(sendData, allData, comm, commsType, tag);
698  return allData;
699 }
700 
701 
702 template<class Type>
703 void Foam::globalIndex::mpiGatherInplace
704 (
705  List<Type>& fld,
706  const label comm,
707 
708  const UPstream::commsTypes commsType,
709  const int tag
710 ) const
711 {
712  if (UPstream::parRun())
713  {
714  List<Type> allData;
715  mpiGather(fld, allData, comm, commsType, tag);
716 
717  if (UPstream::master(comm))
718  {
719  fld.transfer(allData);
720  }
721  else
722  {
723  fld.clear(); // zero-size on non-master
724  }
725  }
726  // Serial: (no-op)
727 }
728 
729 
730 template<class Type, class OutputContainer>
731 void Foam::globalIndex::mpiGatherOp
732 (
733  const UList<Type>& sendData,
734  OutputContainer& allData,
735  const label comm,
736 
737  const UPstream::commsTypes commsType,
738  const int tag
739 )
740 {
741  if (UPstream::parRun())
742  {
743  // Gather sizes - only needed on master
744  globalIndex(globalIndex::gatherOnly{}, sendData.size(), comm)
745  .mpiGather(sendData, allData, comm, commsType, tag);
746  }
747  else
748  {
749  // Serial: direct copy
750  allData = sendData;
751  }
752 }
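A usage sketch for the static mpiGatherOp above (allPoints is an illustrative name); no pre-built globalIndex is needed since the per-rank sizes are gathered internally first:

    pointField allPoints;
    globalIndex::mpiGatherOp(mesh.points(), allPoints);
    // Complete on the master of worldComm; other ranks get a zero-sized list.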
753 
754 
755 template<class Type, class OutputContainer>
756 OutputContainer Foam::globalIndex::mpiGatherOp
757 (
758  const UList<Type>& sendData,
759  const label comm,
760 
761  const UPstream::commsTypes commsType,
762  const int tag
763 )
764 {
765  OutputContainer allData;
766  mpiGatherOp(sendData, allData, comm, commsType, tag);
767  return allData;
768 }
769 
770 
771 template<class Type>
772 void Foam::globalIndex::mpiGatherInplaceOp
773 (
774  List<Type>& fld,
775  const label comm,
776 
777  const UPstream::commsTypes commsType,
778  const int tag
779 )
780 {
781  if (UPstream::parRun())
782  {
783  List<Type> allData;
784  mpiGatherOp(fld, allData, comm, commsType, tag);
785 
786  if (UPstream::master(comm))
787  {
788  fld.transfer(allData);
789  }
790  else
791  {
792  fld.clear(); // zero-size on non-master
793  }
794  }
795  // Serial: (no-op)
796 }
797 
798 
799 template<class Type>
800 void Foam::globalIndex::gatherOp
801 (
802  const UList<Type>& sendData,
803  List<Type>& allData,
804  const int tag,
805  const UPstream::commsTypes commsType,
806  const label comm
807 )
808 {
809  if (UPstream::parRun())
810  {
811  // Gather sizes - only needed on master
812  globalIndex(globalIndex::gatherOnly{}, sendData.size(), comm)
813  .gather(sendData, allData, tag, commsType, comm);
814  }
815  else
816  {
817  // Serial: direct copy
818  allData = sendData;
819  }
820 }
821 
822 
823 template<class Type, class Addr>
824 void Foam::globalIndex::gatherOp
825 (
826  const IndirectListBase<Type, Addr>& sendData,
827  List<Type>& allData,
828  const int tag,
829  const UPstream::commsTypes commsType,
830  const label comm
831 )
832 {
833  if (UPstream::parRun())
834  {
835  // Gather sizes - only needed on master
836  globalIndex(globalIndex::gatherOnly{}, sendData.size(), comm)
837  .gather(sendData, allData, tag, commsType, comm);
838  }
839  else
840  {
841  // Serial: direct copy
842  allData = List<Type>(sendData);
843  }
844 }
845 
846 
847 template<class Type, class OutputContainer>
848 OutputContainer Foam::globalIndex::gatherOp
849 (
850  const UList<Type>& sendData,
851  const int tag,
852  const UPstream::commsTypes commsType,
853  const label comm
854 )
855 {
856  OutputContainer allData;
857  gatherOp(sendData, allData, tag, commsType, comm);
858  return allData;
859 }
860 
861 
862 template<class Type, class Addr, class OutputContainer>
863 OutputContainer Foam::globalIndex::gatherOp
864 (
865  const IndirectListBase<Type, Addr>& sendData,
866  const int tag,
867  const UPstream::commsTypes commsType,
868  const label comm
869 )
870 {
871  OutputContainer allData;
872  gatherOp(sendData, allData, tag, commsType, comm);
873  return allData;
874 }
875 
876 
877 template<class Type>
878 void Foam::globalIndex::gatherInplaceOp
879 (
880  List<Type>& fld,
881  const int tag,
882  const UPstream::commsTypes commsType,
883  const label comm
884 )
885 {
886  if (UPstream::parRun())
887  {
888  // Gather sizes - only needed on master
889  globalIndex(globalIndex::gatherOnly{}, fld.size(), comm)
890  .gather(fld, tag, commsType, comm);
891  }
892  // Serial: (no-op)
893 }
894 
895 
896 template<class ProcIDsContainer, class Type>
897 void Foam::globalIndex::scatter
898 (
899  const labelUList& off, // needed on master only
900  const label comm,
901  const ProcIDsContainer& procIDs,
902  const UList<Type>& allFld,
903  UList<Type>& fld,
904  const int tag,
905  const UPstream::commsTypes preferredCommsType
906 )
907 {
908  // low-level: no parRun guard
909 
910  // Automatically change from nonBlocking to scheduled for
911  // non-contiguous data.
912  const UPstream::commsTypes commsType =
913  (
914  (
915  !is_contiguous<Type>::value
916  && UPstream::commsTypes::nonBlocking == preferredCommsType
917  )
918  ? UPstream::commsTypes::scheduled
919  : preferredCommsType
920  );
921 
922  const label startOfRequests = UPstream::nRequests();
923 
924  const int masterProci = procIDs.size() ? procIDs[0] : 0;
925 
926  if (UPstream::myProcNo(comm) == masterProci)
927  {
928  for (label i = 1; i < procIDs.size(); ++i)
929  {
930  const SubList<Type> procSlot(allFld, off[i+1]-off[i], off[i]);
931 
932  if (procSlot.empty())
933  {
934  // Nothing to do
935  }
936  else if (is_contiguous<Type>::value)
937  {
938  UOPstream::write
939  (
940  commsType,
941  procIDs[i],
942  procSlot.cdata_bytes(),
943  procSlot.size_bytes(),
944  tag,
945  comm
946  );
947  }
948  else
949  {
950  OPstream toProc(commsType, procIDs[i], 0, tag, comm);
951  toProc << procSlot;
952  }
953  }
954 
955  // Assign my local data - respect offset information
956  // so that we can request 0 entries to be copied.
957  // Also handle the case where we have a slice of the full
958  // list.
959 
960  SubList<Type>(fld, off[1]-off[0]) =
961  SubList<Type>(allFld, off[1]-off[0], off[0]);
962  }
963  else
964  {
965  // Note: we are receiving into UList, so sizes MUST match or we
966  // have a problem. Can therefore reasonably assume that a zero-sized
967  // send matches a zero-sized receive, and we can skip that.
968 
969  if (fld.empty())
970  {
971  // Nothing to do
972  }
973  else if (is_contiguous<Type>::value)
974  {
975  UIPstream::read
976  (
977  commsType,
978  masterProci,
979  fld.data_bytes(),
980  fld.size_bytes(),
981  tag,
982  comm
983  );
984  }
985  else
986  {
987  IPstream fromMaster(commsType, masterProci, 0, tag, comm);
988  fromMaster >> fld;
989  }
990  }
991 
992  if (commsType == UPstream::commsTypes::nonBlocking)
993  {
994  // Wait for outstanding requests
995  UPstream::waitRequests(startOfRequests);
996  }
997 }
998 
999 
1000 template<class Type>
1001 void Foam::globalIndex::scatter
1002 (
1003  const UList<Type>& allData,
1004  UList<Type>& localData,
1005  const int tag,
1006  const UPstream::commsTypes commsType,
1007  const label comm
1008 ) const
1009 {
1010  if (UPstream::parRun())
1011  {
1012  scatter
1013  (
1014  offsets_, // needed on master only
1015  comm,
1016  UPstream::allProcs(comm), // All communicator ranks
1017  allData,
1018  localData,
1019  tag,
1020  commsType
1021  );
1022  }
1023  else
1024  {
1025  // Serial: direct copy
1026  // - fails miserably if incorrectly dimensioned!
1027  localData.deepCopy(allData);
1028  }
1029 }
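A sketch of the usual round trip with the member scatter above (gi, allVals and localVals are illustrative names). Because the data are received into a UList, localVals must already have the local size:

    globalIndex gi(localVals.size());     // offsets gathered from the per-rank sizes
    // ... master fills allVals with gi.totalSize() entries in rank order ...
    gi.scatter(allVals, localVals);       // every rank receives its own slice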
1030 
1031 
1032 template<class Type, class OutputContainer>
1033 OutputContainer Foam::globalIndex::scatter
1034 (
1035  const UList<Type>& allData,
1036  const int tag,
1037  const UPstream::commsTypes commsType,
1038  const label comm
1039 ) const
1040 {
1041  if (UPstream::parRun())
1042  {
1043  // The globalIndex might be correct on master only,
1044  // so scatter local sizes to ensure consistency
1045 
1046  const label count
1047  (
1048  UPstream::listScatterValues<label>(this->localSizes(), comm)
1049  );
1050 
1051  OutputContainer localData(count);
1052  this->scatter(allData, localData, tag, commsType, comm);
1053 
1054  return localData;
1055  }
1056  else
1057  {
1058  // Serial: direct copy
1059  return OutputContainer(allData);
1060  }
1061 }
1062 
1063 
1064 template<class Type, class CombineOp>
1065 void Foam::globalIndex::get
1066 (
1067  List<Type>& allFld,
1068  const labelUList& globalIds,
1069  const CombineOp& cop,
1070  const label comm,
1071  const int tag
1072 ) const
1073 {
1074  allFld.resize_nocopy(globalIds.size());
1075 
1076  if (globalIds.size())
1077  {
1078  // Sort according to processor
1079  labelList order;
1080  DynamicList<label> validBins(Pstream::nProcs());
1081 
1082  CompactListList<label> bins
1083  (
1084  bin(offsets(), globalIds, order, validBins)
1085  );
1086 
1087  // Send local indices to individual processors as local index
1088  PstreamBuffers sendBufs(UPstream::commsTypes::nonBlocking, tag, comm);
1089 
1090  for (const auto proci : validBins)
1091  {
1092  labelList localIDs(bins[proci]);
1093 
1094  for (label& val : localIDs)
1095  {
1096  val = toLocal(proci, val);
1097  }
1098 
1099  UOPstream os(proci, sendBufs);
1100  os << localIDs;
1101  }
1102  sendBufs.finishedSends();
1103 
1104 
1105  PstreamBuffers returnBufs(UPstream::commsTypes::nonBlocking, tag, comm);
1106 
1107  for (const int proci : sendBufs.allProcs())
1108  {
1109  if (sendBufs.recvDataCount(proci))
1110  {
1111  UIPstream is(proci, sendBufs);
1112  labelList localIDs(is);
1113 
1114  // Collect entries
1115  List<Type> fld(localIDs.size());
1116  cop(fld, localIDs);
1117 
1118  UOPstream os(proci, returnBufs);
1119  os << fld;
1120  }
1121  }
1122  returnBufs.finishedSends();
1123 
1124  // Slot back
1125  for (const auto proci : validBins)
1126  {
1127  label start = bins.offsets()[proci];
1128  const SubList<label> es
1129  (
1130  order,
1131  bins.offsets()[proci+1]-start, // start
1132  start
1133  );
1134  UIPstream is(proci, returnBufs);
1135  List<Type> fld(is);
1136 
1137  UIndirectList<Type>(allFld, es) = fld;
1138  }
1139  }
1140 }
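A sketch of a combine-op for get() above (gi, cellField and globalCellIds are illustrative names): the callback runs on the rank owning the requested indices and fills in the values, which are then routed back to the requester in the original order:

    List<scalar> remoteVals;
    gi.get
    (
        remoteVals,
        globalCellIds,    // global indices, possibly owned by other ranks
        [&](List<scalar>& vals, const labelList& cells)
        {
            forAll(cells, i)
            {
                vals[i] = cellField[cells[i]];   // purely local lookup
            }
        }
    );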
1141 
1142 
1143 // ************************************************************************* //