PstreamExchange.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2016 OpenFOAM Foundation
    Copyright (C) 2016-2023 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "Pstream.H"
#include "contiguous.H"
#include "PstreamReduceOps.H"

// * * * * * * * * * * * * * * * * * Details * * * * * * * * * * * * * * * * //

namespace Foam
{
namespace PstreamDetail
{

//- Setup sends and receives, each specified as [rank, span] tuple.
//  The serial list of tuples can be populated from other lists, from maps
//  of data or subsets of lists/maps etc.
template<class Type>
void exchangeBuf
(
    const UList<std::pair<int, stdFoam::span<const Type>>>& sends,
    const UList<std::pair<int, stdFoam::span<Type>>>& recvs,

    const int tag,
    const label comm,
    const bool wait
)
{
    const label startOfRequests = UPstream::nRequests();
    const int myProci = UPstream::myProcNo(comm);

    // Set up receives
    // ~~~~~~~~~~~~~~~

    for (auto& slot : recvs)
    {
        // [rank, span]
        const auto proci = slot.first;
        auto& payload = slot.second;

        if (proci != myProci && !payload.empty())
        {
            UIPstream::read
            (
                UPstream::commsTypes::nonBlocking,
                proci,
                payload.data_bytes(),
                payload.size_bytes(),
                tag,
                comm
            );
        }
    }

    // Set up sends
    // ~~~~~~~~~~~~

    for (const auto& slot : sends)
    {
        // [rank, span]
        const auto proci = slot.first;
        const auto& payload = slot.second;

        if (proci != myProci && !payload.empty())
        {
            if
            (
                !UOPstream::write
                (
                    UPstream::commsTypes::nonBlocking,
                    proci,
                    payload.cdata_bytes(),
                    payload.size_bytes(),
                    tag,
                    comm
                )
            )
            {
                FatalErrorInFunction
                    << "Cannot send outgoing message to:"
                    << proci << " nBytes:"
                    << label(payload.size_bytes())
                    << Foam::abort(FatalError);
            }
        }
    }

    // Wait for all to finish
    // ~~~~~~~~~~~~~~~~~~~~~~

    if (wait)
    {
        UPstream::waitRequests(startOfRequests);
    }
}

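// Usage sketch (illustrative only, not part of the original source):
// the [rank, span] tuples simply view pre-sized contiguous buffers.
// The buffer names and the 'tag'/'comm' variables below are hypothetical.
//
//     DynamicList<std::pair<int, stdFoam::span<const scalar>>> sends;
//     DynamicList<std::pair<int, stdFoam::span<scalar>>> recvs;
//
//     forAll(sendBufs, proci)
//     {
//         const auto& buf = sendBufs[proci];
//         if (!buf.empty())
//         {
//             sends.push_back
//             (
//                 std::pair<int, stdFoam::span<const scalar>>
//                 (
//                     proci,
//                     { buf.cdata(), std::size_t(buf.size()) }
//                 )
//             );
//         }
//     }
//     // ... populate recvs likewise from pre-sized receive buffers ...
//
//     PstreamDetail::exchangeBuf<scalar>(sends, recvs, tag, comm, true);
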
//- Chunked exchange of \em contiguous data.
//- Setup sends and receives, each specified as [rank, span] tuple.
//  The serial list of tuples can be populated from other lists, from
//  maps of data or subsets of lists/maps etc.
template<class Type>
void exchangeChunkedBuf
(
    const UList<std::pair<int, stdFoam::span<const Type>>>& sends,
    const UList<std::pair<int, stdFoam::span<Type>>>& recvs,

    const int tag,
    const label comm,
    const bool wait
)
{
    typedef std::pair<int, stdFoam::span<const Type>> sendTuple;
    typedef std::pair<int, stdFoam::span<Type>> recvTuple;

    // Caller already checked for parRun and maxChunkSize > 0

    {
        // Determine the number of chunks to send. Note that we
        // only have to look at the sending data since we are
        // guaranteed that some processor's sending size is some other
        // processor's receive size. Also we can ignore any local comms.
        //
        // We need to send chunks so the number of iterations:
        //     maxChunkSize                        iterations
        //     ------------                        ----------
        //     0                                   0
        //     1..maxChunkSize                     1
        //     maxChunkSize+1..2*maxChunkSize      2
        //     ...

        const label maxChunkSize =
        (
            max
            (
                static_cast<label>(1),
                static_cast<label>(UPstream::maxCommsSize/sizeof(Type))
            )
        );

        const int myProci = UPstream::myProcNo(comm);

        label nChunks(0);
        {
            // Get max send count (elements)
            auto maxCount = static_cast<stdFoam::span<char>::size_type>(0);

            for (const auto& slot : sends)
            {
                // [rank, span]
                const auto proci = slot.first;
                const auto count = slot.second.size();

                if (proci != myProci && count > maxCount)
                {
                    // Note: using max() can be ambiguous
                    maxCount = count;
                }
            }

            // Convert from send count (elements) to number of chunks.
            // Can normally calculate with (count-1), but add some safety
            if (maxCount)
            {
                nChunks = 1 + label(maxCount/maxChunkSize);
            }

            // MPI reduce (message tag is irrelevant)
            reduce(nChunks, maxOp<label>(), UPstream::msgType(), comm);
        }


        // Dispatch the exchanges chunk-wise
        List<sendTuple> sendChunks(sends);
        List<recvTuple> recvChunks(recvs);

        // Dispatch
        for (label iter = 0; iter < nChunks; ++iter)
        {
            // The begin/end for the data window
            const auto beg = static_cast<std::size_t>(iter*maxChunkSize);
            const auto end = static_cast<std::size_t>((iter+1)*maxChunkSize);

            forAll(sendChunks, sloti)
            {
                const auto& baseline = sends[sloti].second;
                auto& payload = sendChunks[sloti].second;

                // Window the data
                if (beg < baseline.size())
                {
                    payload =
                    (
                        (end < baseline.size())
                      ? baseline.subspan(beg, end - beg)
                      : baseline.subspan(beg)
                    );
                }
                else
                {
                    payload = baseline.first(0);  // zero-sized
                }
            }

            forAll(recvChunks, sloti)
            {
                const auto& baseline = recvs[sloti].second;
                auto& payload = recvChunks[sloti].second;

                // Window the data
                if (beg < baseline.size())
                {
                    payload =
                    (
                        (end < baseline.size())
                      ? baseline.subspan(beg, end - beg)
                      : baseline.subspan(beg)
                    );
                }
                else
                {
                    payload = baseline.first(0);  // zero-sized
                }
            }


            // Exchange data chunks
            PstreamDetail::exchangeBuf<Type>
            (
                sendChunks,
                recvChunks,
                tag,
                comm,
                wait
            );

            // Debugging output - can report on master only...
            #if 0  // ifdef Foam_PstreamExchange_debug_chunks
            do
            {
                labelList sendStarts(sends.size());
                labelList sendCounts(sends.size());

                forAll(sendChunks, sloti)
                {
                    const auto& baseline = sends[sloti].second;
                    const auto& payload = sendChunks[sloti].second;

                    sendStarts[sloti] = (payload.data() - baseline.data());
                    sendCounts[sloti] = (payload.size());
                }

                Info<< "iter " << iter
                    << ": beg=" << flatOutput(sendStarts)
                    << " len=" << flatOutput(sendCounts) << endl;
            } while (false);
            #endif
        }
    }
}

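// Worked example (illustrative, with assumed values): for
// UPstream::maxCommsSize = 8192 bytes and Type = double (8 bytes),
// maxChunkSize = 1024 elements. A largest non-local send of 2500 elements
// then gives nChunks = 1 + 2500/1024 = 3, with data windows
// [0,1024), [1024,2048) and [2048,2500) on successive iterations.
// The reduce() above makes all ranks agree on the iteration count, since
// every send is some other rank's receive.
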
//- Exchange \em contiguous data using point-to-point communication.
//- Sends sendBufs, receives into recvBufs.
//  Data are provided and received as containers, all of which have been
//  properly sized before calling.
//
//  No internal guards or resizing.
template<class Container, class Type>
void exchangeContainer
(
    const UList<Container>& sendBufs,
    UList<Container>& recvBufs,
    const int tag,
    const label comm,
    const bool wait
)
{
    const label startOfRequests = UPstream::nRequests();
    const label myProci = UPstream::myProcNo(comm);

    // Set up receives
    // ~~~~~~~~~~~~~~~

    forAll(recvBufs, proci)
    {
        auto& recvData = recvBufs[proci];

        if (proci != myProci && !recvData.empty())
        {
            UIPstream::read
            (
                UPstream::commsTypes::nonBlocking,
                proci,
                recvData.data_bytes(),
                recvData.size_bytes(),
                tag,
                comm
            );
        }
    }


    // Set up sends
    // ~~~~~~~~~~~~

    forAll(sendBufs, proci)
    {
        const auto& sendData = sendBufs[proci];

        if (proci != myProci && !sendData.empty())
        {
            if
            (
                !UOPstream::write
                (
                    UPstream::commsTypes::nonBlocking,
                    proci,
                    sendData.cdata_bytes(),
                    sendData.size_bytes(),
                    tag,
                    comm
                )
            )
            {
                FatalErrorInFunction
                    << "Cannot send outgoing message to:"
                    << proci << " nBytes:"
                    << label(sendData.size_bytes())
                    << Foam::abort(FatalError);
            }
        }
    }

    // Wait for all to finish
    // ~~~~~~~~~~~~~~~~~~~~~~

    if (wait)
    {
        UPstream::waitRequests(startOfRequests);
    }
}

//- Exchange \em contiguous data using point-to-point communication.
//- Sends sendBufs, receives into recvBufs.
//  Data are provided and received as containers, all of which have been
//  properly sized before calling.
//
//  No internal guards or resizing.
template<class Container, class Type>
void exchangeContainer
(
    const Map<Container>& sendBufs,
    Map<Container>& recvBufs,
    const int tag,
    const label comm,
    const bool wait
)
{
    const label startOfRequests = UPstream::nRequests();
    const label myProci = UPstream::myProcNo(comm);

    // Set up receives
    // ~~~~~~~~~~~~~~~

    forAllIters(recvBufs, iter)
    {
        const label proci = iter.key();
        auto& recvData = iter.val();

        if (proci != myProci && !recvData.empty())
        {
            UIPstream::read
            (
                UPstream::commsTypes::nonBlocking,
                proci,
                recvData.data_bytes(),
                recvData.size_bytes(),
                tag,
                comm
            );
        }
    }


    // Set up sends
    // ~~~~~~~~~~~~

    forAllConstIters(sendBufs, iter)
    {
        const label proci = iter.key();
        const auto& sendData = iter.val();

        if (proci != myProci && !sendData.empty())
        {
            if
            (
                !UOPstream::write
                (
                    UPstream::commsTypes::nonBlocking,
                    proci,
                    sendData.cdata_bytes(),
                    sendData.size_bytes(),
                    tag,
                    comm
                )
            )
            {
                FatalErrorInFunction
                    << "Cannot send outgoing message to:"
                    << proci << " nBytes:"
                    << label(sendData.size_bytes())
                    << Foam::abort(FatalError);
            }
        }
    }

    // Wait for all to finish
    // ~~~~~~~~~~~~~~~~~~~~~~

    if (wait)
    {
        UPstream::waitRequests(startOfRequests);
    }
}

} // namespace PstreamDetail
} // namespace Foam


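// Note (illustrative, not part of the original source): the helpers above
// post non-blocking reads/writes and only block when 'wait' is true.
// A caller passing wait = false must complete the requests itself, e.g.
//
//     const label startOfRequests = UPstream::nRequests();
//
//     PstreamDetail::exchangeContainer<labelList, label>
//     (
//         sendBufs, recvBufs, tag, comm, false  // defer waiting
//     );
//
//     // ... unrelated work overlapped with communication ...
//
//     UPstream::waitRequests(startOfRequests);
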
// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

template<class Container, class Type>
void Foam::Pstream::exchange
(
    const UList<Container>& sendBufs,
    const labelUList& recvSizes,
    List<Container>& recvBufs,
    const int tag,
    const label comm,
    const bool wait
)
{
    static_assert(is_contiguous<Type>::value, "Contiguous data only!");

    if (!UPstream::is_rank(comm))
    {
        return;  // Process not in communicator
    }

    const label myProci = UPstream::myProcNo(comm);
    const label numProcs = UPstream::nProcs(comm);

    if (sendBufs.size() != numProcs)
    {
        FatalErrorInFunction
            << "Size of list " << sendBufs.size()
            << " does not equal the number of processors " << numProcs
            << Foam::abort(FatalError);
    }

    recvBufs.resize_nocopy(numProcs);

    if (UPstream::is_parallel(comm))
    {
        // Presize all receive buffers
        forAll(recvSizes, proci)
        {
            const label count = recvSizes[proci];

            if (proci != myProci && count > 0)
            {
                recvBufs[proci].resize_nocopy(count);
            }
            else
            {
                recvBufs[proci].clear();
            }
        }

        typedef std::pair<int, stdFoam::span<const Type>> sendTuple;
        typedef std::pair<int, stdFoam::span<Type>> recvTuple;

        if (UPstream::maxCommsSize <= 0)
        {
            // Do the exchanging in one go
            PstreamDetail::exchangeContainer<Container, Type>
            (
                sendBufs,
                recvBufs,
                tag,
                comm,
                wait
            );
        }
        else
        {
            // Dispatch using chunk-wise exchanges
            // Populate send sequence
            DynamicList<sendTuple> sends(sendBufs.size());
            forAll(sendBufs, proci)
            {
                const auto& sendData = sendBufs[proci];

                if (proci != myProci && !sendData.empty())
                {
                    sends.push_back
                    (
                        sendTuple
                        (
                            proci,
                            { sendData.cdata(), std::size_t(sendData.size()) }
                        )
                    );
                }
            }

            // Populate recv sequence
            DynamicList<recvTuple> recvs(recvBufs.size());
            forAll(recvBufs, proci)
            {
                auto& recvData = recvBufs[proci];

                if (proci != myProci && !recvData.empty())
                {
                    recvs.push_back
                    (
                        recvTuple
                        (
                            proci,
                            { recvData.data(), std::size_t(recvData.size()) }
                        )
                    );
                }
            }

            // Exchange buffers in chunks
            PstreamDetail::exchangeChunkedBuf<Type>
            (
                sends,
                recvs,
                tag,
                comm,
                wait
            );
        }
    }

    // Do myself. Already checked if in communicator
    recvBufs[myProci] = sendBufs[myProci];
}

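// Usage sketch (illustrative only): dense all-to-all exchange with
// predetermined receive sizes. The buffer contents are hypothetical.
//
//     const label comm = UPstream::worldComm;
//
//     List<labelList> sendBufs(UPstream::nProcs(comm));
//     // ... fill sendBufs[proci] with data destined for rank proci ...
//
//     labelList recvSizes;
//     Pstream::exchangeSizes(sendBufs, recvSizes, comm);
//
//     List<labelList> recvBufs;
//     Pstream::exchange<labelList, label>
//     (
//         sendBufs, recvSizes, recvBufs  // tag/comm/wait use defaults
//     );
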
template<class Container, class Type>
void Foam::Pstream::exchange
(
    const Map<Container>& sendBufs,
    const Map<label>& recvSizes,
    Map<Container>& recvBufs,
    const int tag,
    const label comm,
    const bool wait
)
{
    static_assert(is_contiguous<Type>::value, "Contiguous data only!");

    const int myProci = UPstream::myProcNo(comm);

    // Initial: clear out receive 'slots'
    // Preferable to clear out the map entries instead of the map itself
    // since this can potentially preserve allocated space
    // (eg DynamicList entries) between calls

    forAllIters(recvBufs, iter)
    {
        iter.val().clear();
    }

    if (UPstream::is_parallel(comm))
    {
        // Presize all receive buffers
        forAllIters(recvSizes, iter)
        {
            const label proci = iter.key();
            const label count = iter.val();

            if (proci != myProci && count > 0)
            {
                recvBufs(proci).resize_nocopy(count);
            }
        }

        // Define the exchange sequences as a flattened list.
        // We add an additional step of ordering the send/recv list
        // by message size, which can help with transfer speeds.

        typedef std::pair<int, stdFoam::span<const Type>> sendTuple;
        typedef std::pair<int, stdFoam::span<Type>> recvTuple;

        // Populate send sequences
        DynamicList<sendTuple> sends(sendBufs.size());
        forAllConstIters(sendBufs, iter)
        {
            const auto proci = iter.key();
            const auto& sendData = iter.val();

            if (proci != myProci && !sendData.empty())
            {
                sends.push_back
                (
                    sendTuple
                    (
                        proci,
                        { sendData.cdata(), std::size_t(sendData.size()) }
                    )
                );
            }
        }

        // Shorter messages first
        std::sort
        (
            sends.begin(),
            sends.end(),
            [=](const sendTuple& a, const sendTuple& b)
            {
                return (a.second.size() < b.second.size());
            }
        );

        // Populate recv sequences
        DynamicList<recvTuple> recvs(recvBufs.size());
        forAllIters(recvBufs, iter)
        {
            const auto proci = iter.key();
            auto& recvData = recvBufs[proci];

            if (proci != myProci && !recvData.empty())
            {
                recvs.push_back
                (
                    recvTuple
                    (
                        proci,
                        { recvData.data(), std::size_t(recvData.size()) }
                    )
                );
            }
        }

        // Shorter messages first
        std::sort
        (
            recvs.begin(),
            recvs.end(),
            [=](const recvTuple& a, const recvTuple& b)
            {
                return (a.second.size() < b.second.size());
            }
        );


        if (UPstream::maxCommsSize <= 0)
        {
            // Do the exchanging in a single go
            PstreamDetail::exchangeBuf<Type>
            (
                sends,
                recvs,
                tag,
                comm,
                wait
            );
        }
        else
        {
            // Exchange buffers in chunks
            PstreamDetail::exchangeChunkedBuf<Type>
            (
                sends,
                recvs,
                tag,
                comm,
                wait
            );
        }
    }

    // Do myself (if actually in the communicator)
    if (UPstream::is_rank(comm))
    {
        const auto iter = sendBufs.find(myProci);

        bool needsCopy = iter.good();

        if (needsCopy)
        {
            const auto& sendData = iter.val();

            needsCopy = !sendData.empty();
            if (needsCopy)
            {
                // insert_or_assign
                recvBufs(myProci) = sendData;
            }
        }

        if (!needsCopy)
        {
            recvBufs.erase(myProci);
        }
    }
}

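// Usage sketch (illustrative only): sparse exchange keyed by rank, where
// each process only communicates with a few peers. The peer rank, payload
// and 'tag'/'comm' values below are hypothetical.
//
//     Map<labelList> sendBufs;
//     sendBufs.insert(peerRank, labelList({0, 1, 2}));
//
//     Map<label> recvSizes;
//     Pstream::exchangeSizes(sendBufs, recvSizes, tag, comm);
//
//     Map<labelList> recvBufs;
//     Pstream::exchange<labelList, label>
//     (
//         sendBufs, recvSizes, recvBufs, tag, comm
//     );
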
template<class Container>
void Foam::Pstream::exchangeSizes
(
    const labelUList& sendProcs,
    const labelUList& recvProcs,
    const Container& sendBufs,
    labelList& recvSizes,
    const label tag,
    const label comm
)
{
    if (!UPstream::is_rank(comm))
    {
        recvSizes.clear();
        return;  // Process not in communicator
    }

    const label myProci = UPstream::myProcNo(comm);
    const label numProcs = UPstream::nProcs(comm);

    if (sendBufs.size() != numProcs)
    {
        FatalErrorInFunction
            << "Size of container " << sendBufs.size()
            << " does not equal the number of processors " << numProcs
            << Foam::abort(FatalError);
    }

    labelList sendSizes(numProcs);
    for (label proci = 0; proci < numProcs; ++proci)
    {
        sendSizes[proci] = sendBufs[proci].size();
    }

    recvSizes.resize_nocopy(numProcs);
    recvSizes = 0;  // Ensure non-received entries are properly zeroed

    // Preserve self-send, even if not described by neighbourhood
    recvSizes[myProci] = sendSizes[myProci];

    const label startOfRequests = UPstream::nRequests();

    for (const label proci : recvProcs)
    {
        if (proci != myProci)
        {
            UIPstream::read
            (
                UPstream::commsTypes::nonBlocking,
                proci,
                reinterpret_cast<char*>(&recvSizes[proci]),
                sizeof(label),
                tag,
                comm
            );
        }
    }

    for (const label proci : sendProcs)
    {
        if (proci != myProci)
        {
            UOPstream::write
            (
                UPstream::commsTypes::nonBlocking,
                proci,
                reinterpret_cast<char*>(&sendSizes[proci]),
                sizeof(label),
                tag,
                comm
            );
        }
    }

    UPstream::waitRequests(startOfRequests);
}

template<class Container>
void Foam::Pstream::exchangeSizes
(
    const labelUList& neighProcs,
    const Container& sendBufs,
    labelList& recvSizes,
    const label tag,
    const label comm
)
{
    if (!UPstream::is_rank(comm))
    {
        recvSizes.clear();
        return;  // Process not in communicator
    }

    Pstream::exchangeSizes<Container>
    (
        neighProcs,  // send
        neighProcs,  // recv
        sendBufs,
        recvSizes,
        tag,
        comm
    );
}

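// Usage sketch (illustrative only): sizes exchange restricted to a known
// neighbourhood (eg, processor-patch neighbours) instead of all ranks.
// Here 'neighProcs' is a hypothetical list of peer ranks and 'sendBufs'
// is a per-rank container sized to nProcs(comm).
//
//     labelList recvSizes;
//     Pstream::exchangeSizes(neighProcs, sendBufs, recvSizes, tag, comm);
//
//     // Non-neighbour entries remain zero; the self-send size is
//     // preserved even if myProcNo is not listed in neighProcs.
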
// Sparse sending
template<class Container>
void Foam::Pstream::exchangeSizes
(
    const Map<Container>& sendBufs,
    Map<label>& recvSizes,
    const label tag,
    const label comm
)
{
    Map<label> sendSizes(2*sendBufs.size());
    recvSizes.clear();  // Done in allToAllConsensus too, but be explicit here

    if (!UPstream::is_rank(comm))
    {
        return;  // Process not in communicator
    }

    forAllConstIters(sendBufs, iter)
    {
        const label proci = iter.key();
        const label count = iter.val().size();

        if (count)
        {
            sendSizes.emplace(proci, count);
        }
    }

    UPstream::allToAllConsensus
    (
        sendSizes,
        recvSizes,
        (tag + 314159),  // some unique tag?
        comm
    );
}

template<class Container>
void Foam::Pstream::exchangeSizes
(
    const Container& sendBufs,
    labelList& recvSizes,
    const label comm
)
{
    if (!UPstream::is_rank(comm))
    {
        recvSizes.clear();
        return;  // Process not in communicator
    }

    const label numProcs = UPstream::nProcs(comm);

    if (sendBufs.size() != numProcs)
    {
        FatalErrorInFunction
            << "Size of container " << sendBufs.size()
            << " does not equal the number of processors " << numProcs
            << Foam::abort(FatalError);
    }

    labelList sendSizes(numProcs);
    forAll(sendBufs, proci)
    {
        sendSizes[proci] = sendBufs[proci].size();
    }
    recvSizes.resize_nocopy(sendSizes.size());

    if
    (
        UPstream::nProcsNonblockingExchange > 1
     && UPstream::nProcsNonblockingExchange <= numProcs
    )
    {
        // Use algorithm NBX: Nonblocking Consensus Exchange

        UPstream::allToAllConsensus
        (
            sendSizes,
            recvSizes,
            (UPstream::msgType() + 314159),  // some unique tag?
            comm
        );
        return;
    }

    UPstream::allToAll(sendSizes, recvSizes, comm);
}

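// Note (illustrative, with an assumed threshold): if
// UPstream::nProcsNonblockingExchange = 64, a 32-rank communicator takes
// the dense UPstream::allToAll() branch (since 64 > 32), whereas a
// 128-rank communicator uses the NBX consensus exchange (1 < 64 <= 128),
// where only the non-zero sizes are actually communicated.
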
template<class Container, class Type>
void Foam::Pstream::exchange
(
    const UList<Container>& sendBufs,
    List<Container>& recvBufs,
    const int tag,
    const label comm,
    const bool wait
)
{
    // Algorithm PEX: Personalized Exchange
    // - Step 1: each process writes the data sizes to each peer and
    //   redistributes the vector (eg, MPI_Alltoall or non-blocking consensus)
    // - Step 2: size receive buffers and setup receives for all
    //   non-zero sendcounts. Post all sends and wait.

    labelList recvSizes;
    exchangeSizes(sendBufs, recvSizes, comm);

    exchange<Container, Type>(sendBufs, recvSizes, recvBufs, tag, comm, wait);
}


template<class Container, class Type>
void Foam::Pstream::exchange
(
    const Map<Container>& sendBufs,
    Map<Container>& recvBufs,
    const int tag,
    const label comm,
    const bool wait
)
{
    // Algorithm PEX: Personalized Exchange
    // but using nonblocking consensus exchange for the sizes

    Map<label> recvSizes;
    exchangeSizes(sendBufs, recvSizes, tag, comm);

    exchange<Container, Type>(sendBufs, recvSizes, recvBufs, tag, comm, wait);
}


// ************************************************************************* //