UPstream.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2016-2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "Pstream.H"
#include "PstreamReduceOps.H"
#include "PstreamGlobals.H"
#include "profilingPstream.H"
#include "int.H"
#include "SubList.H"
#include "UPstreamWrapping.H"
#include "collatedFileOperation.H"

#include <mpi.h>
#include <cstring>
#include <cstdlib>
#include <csignal>

// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //

// The min value and default for the MPI buffer length
constexpr int minBufLen = 20000000;

// Track if we have attached MPI buffers
static bool ourBuffers = false;

// Track if we initialized MPI
static bool ourMpi = false;

// * * * * * * * * * * * * * * * Local Functions * * * * * * * * * * * * * * //

static void attachOurBuffers()
{
    if (ourBuffers)
    {
        return;  // Already attached
    }
    ourBuffers = true;

    // Use UPstream::mpiBufferSize (optimisationSwitch),
    // but allow override with MPI_BUFFER_SIZE env variable (int value)

#ifndef SGIMPI
    int len = 0;

    const std::string str(Foam::getEnv("MPI_BUFFER_SIZE"));
    if (str.empty() || !Foam::read(str, len) || len <= 0)
    {
        len = Foam::UPstream::mpiBufferSize;
    }

    if (len < minBufLen)
    {
        len = minBufLen;
    }

    if (Foam::UPstream::debug)
    {
        Foam::Pout<< "UPstream::init : buffer-size " << len << '\n';
    }

    char* buf = new char[len];

    if (MPI_SUCCESS != MPI_Buffer_attach(buf, len))
    {
        delete[] buf;
        Foam::Pout<< "UPstream::init : could not attach buffer\n";
    }
#endif
}


static void detachOurBuffers()
{
    if (!ourBuffers)
    {
        return;  // Nothing to detach
    }
    ourBuffers = false;

    // Some MPI notes suggest that the return code is MPI_SUCCESS when
    // no buffer is attached.
    // Be extra careful and require a non-zero size as well.

#ifndef SGIMPI
    int len = 0;
    char* buf = nullptr;

    if (MPI_SUCCESS == MPI_Buffer_detach(&buf, &len) && len)
    {
        delete[] buf;
    }
#endif
}
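
// Example (editorial sketch, not part of the original source): the attached
// buffer is what allows MPI buffered sends to complete locally. A standalone
// program using the same attach/send/detach pattern as the helpers above:
//
// \code
// #include <mpi.h>
//
// int main(int argc, char** argv)
// {
//     MPI_Init(&argc, &argv);
//
//     // Attach a buffer (cf. attachOurBuffers)
//     int len = 20000000;
//     char* buf = new char[len];
//     MPI_Buffer_attach(buf, len);
//
//     // A buffered send returns once the message is copied into the buffer
//     char out[8] = "hello";
//     char in[8];
//     MPI_Request req;
//     MPI_Irecv(in, 8, MPI_CHAR, 0, 0, MPI_COMM_WORLD, &req);
//     MPI_Bsend(out, 8, MPI_CHAR, 0, 0, MPI_COMM_WORLD);
//     MPI_Wait(&req, MPI_STATUS_IGNORE);
//
//     // Detach blocks until buffered messages are delivered
//     // (cf. detachOurBuffers)
//     MPI_Buffer_detach(&buf, &len);
//     delete[] buf;
//
//     MPI_Finalize();
//     return 0;
// }
// \endcode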


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// NOTE:
// Valid parallel options vary between implementations, but flag common ones.
// If they are not removed by MPI_Init(), the subsequent argument processing
// will notice that they are wrong.
void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("machinefile", "machine file");
}


bool Foam::UPstream::initNull()
{
    int flag = 0;

    MPI_Finalized(&flag);
    if (flag)
    {
        // Already finalized - this is an error
        FatalErrorInFunction
            << "MPI was already finalized - cannot perform MPI_Init\n"
            << Foam::abort(FatalError);

        return false;
    }

    MPI_Initialized(&flag);
    if (flag)
    {
        if (debug)
        {
            Pout<< "UPstream::initNull : was already initialized\n";
        }
    }
    else
    {
        // Not already initialized

        MPI_Init_thread
        (
            nullptr,    // argc
            nullptr,    // argv
            MPI_THREAD_SINGLE,
            &flag       // provided_thread_support
        );

        ourMpi = true;
    }

    // Could also attach buffers etc.

    return true;
}


bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
{
    int numprocs = 0, myRank = 0;
    int provided_thread_support = 0;
    int flag = 0;

    MPI_Finalized(&flag);
    if (flag)
    {
        // Already finalized - this is an error
        FatalErrorInFunction
            << "MPI was already finalized - cannot perform MPI_Init" << endl
            << Foam::abort(FatalError);

        return false;
    }

    MPI_Initialized(&flag);
    if (flag)
    {
        // Already initialized.
        // Warn if we've called twice, but skip if initialized externally

        if (ourMpi)
        {
            WarningInFunction
                << "MPI was already initialized - cannot perform MPI_Init" << nl
                << "This could indicate an application programming error!"
                << endl;

            return true;
        }
        else if (debug)
        {
            Pout<< "UPstream::init : was already initialized\n";
        }
    }
    else
    {
        MPI_Init_thread
        (
            &argc,
            &argv,
            (
                needsThread
              ? MPI_THREAD_MULTIPLE
              : MPI_THREAD_SINGLE
            ),
            &provided_thread_support
        );

        ourMpi = true;
    }

    // Check argument list for local world
    label worldIndex = -1;
    word world;
    for (int argi = 1; argi < argc; ++argi)
    {
        if (strcmp(argv[argi], "-world") == 0)
        {
            worldIndex = argi++;
            if (argi >= argc)
            {
                FatalErrorInFunction
                    << "Missing world name to argument \"world\""
                    << Foam::abort(FatalError);
            }
            world = argv[argi];
            break;
        }
    }

    // Filter 'world' option
    if (worldIndex != -1)
    {
        for (label i = worldIndex+2; i < argc; i++)
        {
            argv[i-2] = argv[i];
        }
        argc -= 2;
    }

    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    if (debug)
    {
        Pout<< "UPstream::init :"
            << " thread-support : wanted:" << needsThread
            << " obtained:"
            << (
                   provided_thread_support == MPI_THREAD_MULTIPLE
                 ? "MPI_THREAD_MULTIPLE"
                 : "MPI_THREAD_SINGLE"
               )
            << " procs:" << numprocs
            << " rank:" << myRank
            << " world:" << world << endl;
    }

    if (worldIndex == -1 && numprocs <= 1)
    {
        FatalErrorInFunction
            << "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    // Initialise parallel structure
    setParRun(numprocs, provided_thread_support == MPI_THREAD_MULTIPLE);

    if (worldIndex != -1)
    {
        // During startup, so worldComm == globalComm

        wordList worlds(numprocs);
        worlds[Pstream::myProcNo(UPstream::globalComm)] = world;
        Pstream::gatherList(worlds, Pstream::msgType(), UPstream::globalComm);

        // Compact
        if (Pstream::master(UPstream::globalComm))
        {
            DynamicList<word> worldNames(numprocs);
            worldIDs_.resize_nocopy(numprocs);

            forAll(worlds, proci)
            {
                const word& world = worlds[proci];

                worldIDs_[proci] = worldNames.find(world);

                if (worldIDs_[proci] == -1)
                {
                    worldIDs_[proci] = worldNames.size();
                    worldNames.push_back(world);
                }
            }

            allWorlds_.transfer(worldNames);
        }
        Pstream::broadcasts(UPstream::globalComm, allWorlds_, worldIDs_);

        const label myWorldId =
            worldIDs_[Pstream::myProcNo(UPstream::globalComm)];

        DynamicList<label> subRanks;
        forAll(worldIDs_, proci)
        {
            if (worldIDs_[proci] == myWorldId)
            {
                subRanks.push_back(proci);
            }
        }

        // Allocate new communicator with globalComm as its parent
        const label subComm =
            UPstream::allocateCommunicator
            (
                UPstream::globalComm,   // parent
                subRanks,
                true
            );


        // Override worldComm
        UPstream::worldComm = subComm;
        // For testing: warn use of non-worldComm
        UPstream::warnComm = UPstream::worldComm;

        if (debug)
        {
            // Check
            int subNumProcs, subRank;
            MPI_Comm_size
            (
                PstreamGlobals::MPICommunicators_[subComm],
                &subNumProcs
            );
            MPI_Comm_rank
            (
                PstreamGlobals::MPICommunicators_[subComm],
                &subRank
            );

            Pout<< "UPstream::init : in world:" << world
                << " using local communicator:" << subComm
                << " rank " << subRank
                << " of " << subNumProcs
                << endl;
        }

        // Override Pout prefix (move to setParRun?)
        Pout.prefix() = '[' + world + '/' + name(myProcNo(subComm)) + "] ";
        Perr.prefix() = Pout.prefix();
    }
    else
    {
        // All processors use world 0
        worldIDs_.resize_nocopy(numprocs);
        worldIDs_ = 0;
    }

    attachOurBuffers();

    return true;
}
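
// Example (editorial sketch, not part of the original source): the "-world"
// handling above supports coupled multi-world runs, where several solvers
// share one MPI job but each sees its own worldComm. A hypothetical launch:
//
// \verbatim
//     mpirun -np 4 solverA -world left : -np 2 solverB -world right
// \endverbatim
//
// Here globalComm spans all 6 ranks, while each world receives a
// sub-communicator covering only its own ranks (allocated above via
// subRanks).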


void Foam::UPstream::shutdown(int errNo)
{
    if (debug)
    {
        Pout<< "UPstream::shutdown\n";
    }

    int flag = 0;

    MPI_Initialized(&flag);
    if (!flag)
    {
        // No MPI initialized - we are done
        return;
    }

    MPI_Finalized(&flag);
    if (flag)
    {
        // Already finalized elsewhere?
        if (ourMpi)
        {
            WarningInFunction
                << "MPI was already finalized (by a connected program?)\n";
        }
        else if (debug)
        {
            Pout<< "UPstream::shutdown : was already finalized\n";
        }
    }
    else
    {
        detachOurBuffers();
    }


    // Warn about any outstanding requests
    {
        label nOutstanding = 0;

        forAll(PstreamGlobals::outstandingRequests_, requestID)
        {
            if (!PstreamGlobals::freedRequests_.found(requestID))
            {
                ++nOutstanding;
            }
        }

        PstreamGlobals::outstandingRequests_.clear();

        if (nOutstanding)
        {
            WarningInFunction
                << "There were still " << nOutstanding
                << " outstanding MPI_Requests." << nl
                << "Which means your code exited before doing a "
                << " UPstream::waitRequests()." << nl
                << "This should not happen for a normal code exit."
                << nl;
        }
    }

    // Clean mpi communicators
    forAll(myProcNo_, communicator)
    {
        if (myProcNo_[communicator] >= 0)
        {
            freePstreamCommunicator(communicator);
        }
    }

    if (!flag)
    {
        // MPI not already finalized

        if (!ourMpi)
        {
            WarningInFunction
                << "Finalizing MPI, but was initialized elsewhere\n";
        }

        if (errNo == 0)
        {
            MPI_Finalize();
        }
        else
        {
            // Abort only locally or world?
            MPI_Abort(MPI_COMM_WORLD, errNo);
        }
    }
}


void Foam::UPstream::exit(int errNo)
{
    UPstream::shutdown(errNo);
    std::exit(errNo);
}


void Foam::UPstream::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

void Foam::UPstream::allocatePstreamCommunicator
(
    const label parentIndex,
    const label index
)
{
    if (index == PstreamGlobals::MPIGroups_.size())
    {
        // Extend storage with dummy values
        MPI_Comm newComm = MPI_COMM_NULL;
        MPI_Group newGroup = MPI_GROUP_NULL;
        PstreamGlobals::MPIGroups_.push_back(newGroup);
        PstreamGlobals::MPICommunicators_.push_back(newComm);
    }
    else if (index > PstreamGlobals::MPIGroups_.size())
    {
        FatalErrorInFunction
            << "PstreamGlobals out of sync with UPstream data. Problem."
            << Foam::exit(FatalError);
    }


    if (parentIndex == -1)
    {
        // Global communicator. Same as world communicator for single-world

        if (index != UPstream::globalComm)
        {
            FatalErrorInFunction
                << "world communicator should always be index "
                << UPstream::globalComm
                << Foam::exit(FatalError);
        }

        PstreamGlobals::MPICommunicators_[index] = MPI_COMM_WORLD;
        MPI_Comm_group(MPI_COMM_WORLD, &PstreamGlobals::MPIGroups_[index]);
        MPI_Comm_rank(MPI_COMM_WORLD, &myProcNo_[index]);

        // Set the number of ranks to the actual number
        int numProcs;
        MPI_Comm_size(MPI_COMM_WORLD, &numProcs);

        //procIDs_[index] = identity(numProcs);
        procIDs_[index].resize_nocopy(numProcs);
        forAll(procIDs_[index], i)
        {
            procIDs_[index][i] = i;
        }
    }
    else if (parentIndex == -2)
    {
        // Self communicator

        PstreamGlobals::MPICommunicators_[index] = MPI_COMM_SELF;
        MPI_Comm_group(MPI_COMM_SELF, &PstreamGlobals::MPIGroups_[index]);
        MPI_Comm_rank(MPI_COMM_SELF, &myProcNo_[index]);

        // Number of ranks is always 1 (self communicator)

        #ifdef FULLDEBUG
        int numProcs;
        MPI_Comm_size(MPI_COMM_SELF, &numProcs);

        if (numProcs != 1)
        {
            FatalErrorInFunction
                << "MPI_COMM_SELF had " << numProcs << " != 1 ranks!\n"
                << Foam::abort(FatalError);
        }
        #endif

        procIDs_[index].resize_nocopy(1);
        procIDs_[index] = 0;
    }
    else
    {
        // Create new group
        MPI_Group_incl
        (
            PstreamGlobals::MPIGroups_[parentIndex],
            procIDs_[index].size(),
            procIDs_[index].cdata(),
            &PstreamGlobals::MPIGroups_[index]
        );

        #if defined(MSMPI_VER)
        // ms-mpi (10.0 and others?) does not have MPI_Comm_create_group
        MPI_Comm_create
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            PstreamGlobals::MPIGroups_[index],
            &PstreamGlobals::MPICommunicators_[index]
        );
        #else
        // Create new communicator for this group
        MPI_Comm_create_group
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            PstreamGlobals::MPIGroups_[index],
            UPstream::msgType(),
            &PstreamGlobals::MPICommunicators_[index]
        );
        #endif

        if (PstreamGlobals::MPICommunicators_[index] == MPI_COMM_NULL)
        {
            myProcNo_[index] = -1;
        }
        else
        {
            if
            (
                MPI_Comm_rank
                (
                    PstreamGlobals::MPICommunicators_[index],
                    &myProcNo_[index]
                )
            )
            {
                FatalErrorInFunction
                    << "Problem :"
                    << " when allocating communicator at " << index
                    << " from ranks " << procIDs_[index]
                    << " of parent " << parentIndex
                    << " cannot find my own rank"
                    << Foam::exit(FatalError);
            }
        }
    }
}
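
// Example (editorial sketch, not part of the original source): the
// group-based creation in the final branch above, reduced to raw MPI for a
// communicator over the even ranks of MPI_COMM_WORLD. Names such as
// 'evenRanks' are illustrative only.
//
// \code
// #include <mpi.h>
// #include <vector>
//
// MPI_Group worldGroup, evenGroup;
// MPI_Comm evenComm = MPI_COMM_NULL;
//
// MPI_Comm_group(MPI_COMM_WORLD, &worldGroup);
//
// int numProcs = 0, myRank = 0;
// MPI_Comm_size(MPI_COMM_WORLD, &numProcs);
// MPI_Comm_rank(MPI_COMM_WORLD, &myRank);
//
// std::vector<int> evenRanks;
// for (int i = 0; i < numProcs; i += 2) evenRanks.push_back(i);
//
// MPI_Group_incl
// (
//     worldGroup,
//     int(evenRanks.size()),
//     evenRanks.data(),
//     &evenGroup
// );
//
// if (myRank % 2 == 0)
// {
//     // Collective over group members only (unlike MPI_Comm_create,
//     // which is collective over the entire parent communicator)
//     MPI_Comm_create_group(MPI_COMM_WORLD, evenGroup, 1, &evenComm);
// }
// // Ranks outside the group keep MPI_COMM_NULL, corresponding to
// // myProcNo_[index] = -1 above
// \endcode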


void Foam::UPstream::freePstreamCommunicator(const label communicator)
{
    // Skip placeholders and pre-defined (not allocated) communicators

    if (UPstream::debug)
    {
        Pout<< "freePstreamCommunicator: " << communicator
            << " from " << PstreamGlobals::MPICommunicators_.size() << endl;
    }

    // Not touching the first two communicators (SELF, WORLD)
    if (communicator > 1)
    {
        if (MPI_COMM_NULL != PstreamGlobals::MPICommunicators_[communicator])
        {
            // Free communicator. Sets communicator to MPI_COMM_NULL
            MPI_Comm_free(&PstreamGlobals::MPICommunicators_[communicator]);
        }

        if (MPI_GROUP_NULL != PstreamGlobals::MPIGroups_[communicator])
        {
            // Free group. Sets group to MPI_GROUP_NULL
            MPI_Group_free(&PstreamGlobals::MPIGroups_[communicator]);
        }
    }
}


Foam::label Foam::UPstream::nRequests() noexcept
{
    return PstreamGlobals::outstandingRequests_.size();
}


void Foam::UPstream::resetRequests(const label n)
{
    if (n >= 0 && n < PstreamGlobals::outstandingRequests_.size())
    {
        PstreamGlobals::outstandingRequests_.resize(n);
    }
}


void Foam::UPstream::waitRequests(const label start)
{
    if (!UPstream::parRun())
    {
        return;  // No-op for non-parallel
    }

    if (UPstream::debug)
    {
        Pout<< "UPstream::waitRequests : starting wait for "
            << PstreamGlobals::outstandingRequests_.size() - start
            << " outstanding requests starting at " << start << endl;
    }

    // TBD: check for
    // (start < 0 || start > PstreamGlobals::outstandingRequests_.size())

    if (PstreamGlobals::outstandingRequests_.size())
    {
        SubList<MPI_Request> waitRequests
        (
            PstreamGlobals::outstandingRequests_,
            PstreamGlobals::outstandingRequests_.size() - start,
            start
        );

        profilingPstream::beginTiming();

        // On success: sets each request to MPI_REQUEST_NULL
        if
        (
            MPI_Waitall
            (
                waitRequests.size(),
                waitRequests.data(),
                MPI_STATUSES_IGNORE
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Waitall returned with error" << Foam::endl;
        }

        profilingPstream::addWaitTime();

        PstreamGlobals::outstandingRequests_.resize(start);
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequests : finished wait." << endl;
    }
}
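
// Example (editorial sketch, not part of the original source): the intended
// caller-side pattern is to record nRequests() before starting non-blocking
// transfers, then wait only on the requests issued since that mark:
//
// \code
//     const label nReq = UPstream::nRequests();
//
//     // ... start non-blocking sends/receives, each of which appends
//     // an MPI_Request to PstreamGlobals::outstandingRequests_ ...
//
//     UPstream::waitRequests(nReq);  // waits from the recorded mark onwards
// \endcode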


void Foam::UPstream::waitRequest(const label i)
{
    if (!UPstream::parRun() || i < 0)
    {
        return;  // No-op for non-parallel, or placeholder indices
    }

    if (debug)
    {
        Pout<< "UPstream::waitRequest : starting wait for request:" << i
            << endl;
    }

    if (i >= PstreamGlobals::outstandingRequests_.size())
    {
        FatalErrorInFunction
            << "You asked for request=" << i
            << " from " << PstreamGlobals::outstandingRequests_.size()
            << " outstanding requests!" << nl
            << "Mixing use of blocking/non-blocking comms?"
            << Foam::abort(FatalError);
    }

    profilingPstream::beginTiming();

    // On success: sets request to MPI_REQUEST_NULL
    if
    (
        MPI_Wait
        (
            &PstreamGlobals::outstandingRequests_[i],
            MPI_STATUS_IGNORE
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Wait returned with error" << Foam::endl;
    }

    profilingPstream::addWaitTime();
    // Push index onto free cache
    PstreamGlobals::freedRequests_.push_back(i);

    if (debug)
    {
        Pout<< "UPstream::waitRequest : finished wait for request:" << i
            << endl;
    }
}


bool Foam::UPstream::finishedRequest(const label i)
{
    if (!UPstream::parRun() || i < 0)
    {
        return true;  // No-op for non-parallel, or placeholder indices
    }

    if (debug)
    {
        Pout<< "UPstream::finishedRequest : checking request:" << i
            << endl;
    }

    if (i >= PstreamGlobals::outstandingRequests_.size())
    {
        FatalErrorInFunction
            << "You asked for request=" << i
            << " from " << PstreamGlobals::outstandingRequests_.size()
            << " outstanding requests!" << nl
            << "Mixing use of blocking/non-blocking comms?"
            << Foam::abort(FatalError);
    }

    // On success: sets request to MPI_REQUEST_NULL
    int flag;
    MPI_Test
    (
        &PstreamGlobals::outstandingRequests_[i],
        &flag,
        MPI_STATUS_IGNORE
    );

    if (debug)
    {
        Pout<< "UPstream::finishedRequest : finished request:" << i
            << endl;
    }

    return flag != 0;
}


int Foam::UPstream::allocateTag(const char* const msg)
{
    int tag;
    if (PstreamGlobals::freedTags_.size())
    {
        tag = PstreamGlobals::freedTags_.back();
        PstreamGlobals::freedTags_.pop_back();
    }
    else
    {
        tag = ++PstreamGlobals::nTags_;
    }

    if (debug)
    {
        Pout<< "UPstream::allocateTag";
        if (msg) Pout<< ' ' << msg;
        Pout<< " : tag:" << tag << endl;
    }

    return tag;
}


void Foam::UPstream::freeTag(const int tag, const char* const msg)
{
    if (debug)
    {
        Pout<< "UPstream::freeTag ";
        if (msg) Pout<< ' ' << msg;
        Pout<< " : tag:" << tag << endl;
    }
    PstreamGlobals::freedTags_.push_back(tag);
}
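
// Example (editorial sketch, not part of the original source): allocateTag()
// and freeTag() are intended to be used as a pair, so that concurrent
// point-to-point exchanges get distinct message tags and cannot intercept
// each other's traffic:
//
// \code
//     const int uniqTag = UPstream::allocateTag("myExchange");
//
//     // ... sends/receives using 'uniqTag' as the message tag ...
//
//     UPstream::freeTag(uniqTag, "myExchange");  // recycle for later reuse
// \endcode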


// ************************************************************************* //