UPstream.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2016-2023 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "Pstream.H"
#include "PstreamReduceOps.H"
#include "PstreamGlobals.H"
#include "profilingPstream.H"
#include "int.H"
#include "UPstreamWrapping.H"
#include "collatedFileOperation.H"

#include <cstdlib>
#include <cstring>
#include <memory>
#include <numeric>
#include <string>

// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //

// The min value and default for MPI buffer length
constexpr int minBufLen = 20000000;

// Track size of attached MPI buffer
static int attachedBufLen = 0;

// Track if we initialized MPI
static bool ourMpi = false;


// * * * * * * * * * * * * * * * Local Functions * * * * * * * * * * * * * * //

// Attach user-defined send buffer
static void attachOurBuffers()
{
#ifndef SGIMPI
    if (attachedBufLen)
    {
        return;  // Already attached
    }

    // Use UPstream::mpiBufferSize (optimisationSwitch),
    // but allow override with MPI_BUFFER_SIZE env variable (int value)

    int len = 0;

    const std::string str(Foam::getEnv("MPI_BUFFER_SIZE"));
    if (str.empty() || !Foam::read(str, len) || len <= 0)
    {
        len = Foam::UPstream::mpiBufferSize;
    }

    if (len < minBufLen)
    {
        len = minBufLen;
    }

    char* buf = new char[len];

    if (MPI_SUCCESS == MPI_Buffer_attach(buf, len))
    {
        // Properly attached
        attachedBufLen = len;

        if (Foam::UPstream::debug)
        {
            Foam::Pout<< "UPstream::init : buffer-size " << len << '\n';
        }
    }
    else
    {
        delete[] buf;
        Foam::Pout<< "UPstream::init : could not attach buffer\n";
    }
#endif
}


// Remove an existing user-defined send buffer
// IMPORTANT:
//   This operation will block until all messages currently in the
//   buffer have been transmitted.
static void detachOurBuffers()
{
#ifndef SGIMPI
    if (!attachedBufLen)
    {
        return;  // Nothing to detach
    }

    // Some MPI notes suggest that the return code is MPI_SUCCESS when
    // no buffer is attached.
    // Be extra careful and require a non-zero size as well.

    char* buf = nullptr;
    int len = 0;

    if (MPI_SUCCESS == MPI_Buffer_detach(&buf, &len) && len)
    {
        // This was presumably the buffer that we attached
        // and not someone else.
        delete[] buf;
    }

    // Nothing attached
    attachedBufLen = 0;
#endif
}


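// NOTE:
// The buffer handled by attachOurBuffers()/detachOurBuffers() backs MPI's
// buffered-send mode: the "blocking" commsType uses MPI_Bsend/MPI_Recv, and
// MPI_Bsend copies outgoing messages into this buffer. Sketch of the raw MPI
// mechanism being wrapped (illustration only, not part of this file):
//
//     char* buf = new char[len];
//     MPI_Buffer_attach(buf, len);     // MPI_Bsend now copies into 'buf'
//     ...
//     MPI_Buffer_detach(&buf, &len);   // blocks until buffered sends complete
//     delete[] buf;
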
// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// NOTE:
// valid parallel options vary between implementations, but flag common ones.
// if they are not removed by MPI_Init(), the subsequent argument processing
// will notice that they are wrong
void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("machinefile", "machine file");
}


bool Foam::UPstream::initNull()
{
    int flag = 0;

    MPI_Finalized(&flag);
    if (flag)
    {
        // Already finalized - this is an error
        FatalErrorInFunction
            << "MPI was already finalized - cannot perform MPI_Init\n"
            << Foam::abort(FatalError);

        return false;
    }

    MPI_Initialized(&flag);
    if (flag)
    {
        if (UPstream::debug)
        {
            Pout<< "UPstream::initNull : was already initialized\n";
        }
    }
    else
    {
        // Not already initialized

        MPI_Init_thread
        (
            nullptr,    // argc
            nullptr,    // argv
            MPI_THREAD_SINGLE,
            &flag       // provided_thread_support
        );

        ourMpi = true;
    }

    // Could also attach buffers etc.

    return true;
}


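// NOTE:
// initNull() above brings up MPI itself (single-threaded) without
// establishing OpenFOAM's parallel-run structures (no setParRun, no
// sub-communicators) - presumably for cases where MPI must be available
// even though the run itself is not a distributed parallel run.
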
bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
{
    int numprocs = 0, myRank = 0;
    int provided_thread_support = 0;
    int flag = 0;

    MPI_Finalized(&flag);
    if (flag)
    {
        // Already finalized - this is an error
        FatalErrorInFunction
            << "MPI was already finalized - cannot perform MPI_Init" << endl
            << Foam::abort(FatalError);

        return false;
    }

    MPI_Initialized(&flag);
    if (flag)
    {
        // Already initialized.
        // Warn if we've called twice, but skip if initialized externally

        if (ourMpi)
        {
            WarningInFunction
                << "MPI was already initialized - cannot perform MPI_Init" << nl
                << "This could indicate an application programming error!"
                << endl;

            return true;
        }
        else if (UPstream::debug)
        {
            Pout<< "UPstream::init : was already initialized\n";
        }
    }
    else
    {
        MPI_Init_thread
        (
            &argc,
            &argv,
            (
                needsThread
              ? MPI_THREAD_MULTIPLE
              : MPI_THREAD_SINGLE
            ),
            &provided_thread_support
        );

        ourMpi = true;
    }

    // Check argument list for local world
    label worldIndex = -1;
    word world;
    for (int argi = 1; argi < argc; ++argi)
    {
        if (strcmp(argv[argi], "-world") == 0)
        {
            worldIndex = argi++;
            if (argi >= argc)
            {
                FatalErrorInFunction
                    << "Missing world name to argument \"world\""
                    << Foam::abort(FatalError);
            }
            world = argv[argi];
            break;
        }
    }

    // Filter 'world' option
    if (worldIndex != -1)
    {
        for (label i = worldIndex+2; i < argc; i++)
        {
            argv[i-2] = argv[i];
        }
        argc -= 2;
    }

    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    if (UPstream::debug)
    {
        Pout<< "UPstream::init :"
            << " thread-support : requested:" << needsThread
            << " obtained:"
            << (
                   (provided_thread_support == MPI_THREAD_SINGLE)
                 ? "SINGLE"
                 : (provided_thread_support == MPI_THREAD_SERIALIZED)
                 ? "SERIALIZED"
                 : (provided_thread_support == MPI_THREAD_MULTIPLE)
                 ? "MULTIPLE"
                 : "other"
               )
            << " procs:" << numprocs
            << " rank:" << myRank
            << " world:" << world << endl;
    }

    if (worldIndex == -1 && numprocs <= 1)
    {
        FatalErrorInFunction
            << "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    // Initialise parallel structure
    setParRun(numprocs, provided_thread_support == MPI_THREAD_MULTIPLE);

    if (worldIndex != -1)
    {
        // During startup, so commWorld() == commGlobal()

        wordList worlds(numprocs);
        worlds[UPstream::myProcNo(UPstream::commGlobal())] = world;
        Pstream::gatherList
        (
            worlds,
            UPstream::msgType(),
            UPstream::commGlobal()
        );

        // Compact
        if (UPstream::master(UPstream::commGlobal()))
        {
            DynamicList<word> worldNames(numprocs);
            worldIDs_.resize_nocopy(numprocs);

            forAll(worlds, proci)
            {
                const word& world = worlds[proci];

                worldIDs_[proci] = worldNames.find(world);

                if (worldIDs_[proci] == -1)
                {
                    worldIDs_[proci] = worldNames.size();
                    worldNames.push_back(world);
                }
            }

            allWorlds_.transfer(worldNames);
        }
        Pstream::broadcasts(UPstream::commGlobal(), allWorlds_, worldIDs_);

        const label myWorldId =
            worldIDs_[UPstream::myProcNo(UPstream::commGlobal())];

        DynamicList<label> subRanks;
        forAll(worldIDs_, proci)
        {
            if (worldIDs_[proci] == myWorldId)
            {
                subRanks.push_back(proci);
            }
        }

        // Allocate new communicator with comm-global as its parent
        const label subComm =
            UPstream::allocateCommunicator(UPstream::commGlobal(), subRanks);


        // Override worldComm
        UPstream::worldComm = subComm;
        // For testing: warn use of non-worldComm
        UPstream::warnComm = UPstream::worldComm;

        // MPI_COMM_SELF : the processor number wrt the new world communicator
        if (procIDs_[UPstream::commSelf()].size())
        {
            procIDs_[UPstream::commSelf()].front() =
                UPstream::myProcNo(subComm);
        }

        if (UPstream::debug)
        {
            // Check
            int subNumProcs, subRank;
            MPI_Comm_size
            (
                PstreamGlobals::MPICommunicators_[subComm],
                &subNumProcs
            );
            MPI_Comm_rank
            (
                PstreamGlobals::MPICommunicators_[subComm],
                &subRank
            );

            Pout<< "UPstream::init : in world:" << world
                << " using local communicator:" << subComm
                << " rank " << subRank
                << " of " << subNumProcs
                << endl;
        }

        // Override Pout prefix (move to setParRun?)
        Pout.prefix() = '[' + world + '/' + name(myProcNo(subComm)) + "] ";
        Perr.prefix() = Pout.prefix();
    }
    else
    {
        // All processors use world 0
        worldIDs_.resize_nocopy(numprocs);
        worldIDs_ = 0;
    }

    attachOurBuffers();

    return true;
}


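// NOTE:
// The "-world <name>" handling above implements multi-world runs: global
// ranks are gathered by world name, each world is given its own
// sub-communicator of commGlobal(), and UPstream::worldComm is overridden so
// that world-local communication stays within that sub-communicator.
// Schematically (illustration only; actual solver options omitted), two
// coupled solvers might be launched along the lines of
//
//     mpirun -np 4 solverA -world left : -np 2 solverB -world right
//
// giving a 4-rank and a 2-rank worldComm on top of a 6-rank commGlobal().
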
void Foam::UPstream::shutdown(int errNo)
{
    int flag = 0;

    MPI_Initialized(&flag);
    if (!flag)
    {
        // MPI not initialized - we have nothing to do
        return;
    }

    MPI_Finalized(&flag);
    if (flag)
    {
        // MPI already finalized - we have nothing to do
        if (ourMpi)
        {
            WarningInFunction
                << "MPI was already finalized (by a connected program?)\n";
        }
        else if (UPstream::debug && errNo == 0)
        {
            Pout<< "UPstream::shutdown : was already finalized\n";
        }
        ourMpi = false;
        return;
    }

    if (!ourMpi)
    {
        WarningInFunction
            << "Finalizing MPI, but was initialized elsewhere\n";
    }
    ourMpi = false;


    // Abort - stop now, without any final synchronization steps!
    // -----

    if (errNo != 0)
    {
        MPI_Abort(MPI_COMM_WORLD, errNo);
        return;
    }


    // Regular cleanup
    // ---------------

    if (UPstream::debug)
    {
        Pout<< "UPstream::shutdown\n";
    }

    // Check for any outstanding requests
    {
        label nOutstanding = 0;

        for (MPI_Request request : PstreamGlobals::outstandingRequests_)
        {
            if (MPI_REQUEST_NULL != request)
            {
                // TBD: MPI_Cancel(&request); MPI_Request_free(&request);
                ++nOutstanding;
            }
        }

        if (nOutstanding)
        {
            WarningInFunction
                << "Still have " << nOutstanding
                << " outstanding MPI requests."
                << " Should not happen for a normal code exit."
                << endl;
        }

        PstreamGlobals::outstandingRequests_.clear();
    }


    {
        detachOurBuffers();

        forAllReverse(myProcNo_, communicator)
        {
            freeCommunicatorComponents(communicator);
        }
    }


    MPI_Finalize();
}


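// NOTE:
// Shutdown ordering above: a non-zero errNo aborts immediately via MPI_Abort
// (no cleanup), otherwise outstanding requests are checked, the attached send
// buffer is detached, allocated communicators are freed in reverse order, and
// only then is MPI_Finalize() called.
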
void Foam::UPstream::exit(int errNo)
{
    UPstream::shutdown(errNo);
    std::exit(errNo);
}


void Foam::UPstream::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

void Foam::UPstream::allocateCommunicatorComponents
(
    const label parentIndex,
    const label index
)
{
    if (index == PstreamGlobals::MPICommunicators_.size())
    {
        // Extend storage with null values
        PstreamGlobals::pendingMPIFree_.emplace_back(false);
        PstreamGlobals::MPICommunicators_.emplace_back(MPI_COMM_NULL);
    }
    else if (index > PstreamGlobals::MPICommunicators_.size())
    {
        FatalErrorInFunction
            << "PstreamGlobals out of sync with UPstream data. Problem."
            << Foam::exit(FatalError);
    }


    if (parentIndex == -1)
    {
        // Global communicator. Same as world communicator for single-world

        if (index != UPstream::commGlobal())
        {
            FatalErrorInFunction
                << "world communicator should always be index "
                << UPstream::commGlobal()
                << Foam::exit(FatalError);
        }

        PstreamGlobals::pendingMPIFree_[index] = false;
        PstreamGlobals::MPICommunicators_[index] = MPI_COMM_WORLD;

        // TBD: MPI_Comm_dup(MPI_COMM_WORLD, ...);
        // with pendingMPIFree_[index] = true
        // Note: freeCommunicatorComponents() may need an update

        MPI_Comm_rank
        (
            PstreamGlobals::MPICommunicators_[index],
            &myProcNo_[index]
        );

        // Set the number of ranks to the actual number
        int numProcs;
        MPI_Comm_size
        (
            PstreamGlobals::MPICommunicators_[index],
            &numProcs
        );

        // identity [0-numProcs], as 'int'
        procIDs_[index].resize_nocopy(numProcs);
        std::iota(procIDs_[index].begin(), procIDs_[index].end(), 0);
    }
    else if (parentIndex == -2)
    {
        // MPI_COMM_SELF

        PstreamGlobals::pendingMPIFree_[index] = false;
        PstreamGlobals::MPICommunicators_[index] = MPI_COMM_SELF;

        MPI_Comm_rank(MPI_COMM_SELF, &myProcNo_[index]);

        // Number of ranks is always 1 (self communicator)

        #ifdef FULLDEBUG
        int numProcs;
        MPI_Comm_size(MPI_COMM_SELF, &numProcs);

        if (numProcs != 1)
        {
            // Already finalized - this is an error
            FatalErrorInFunction
                << "MPI_COMM_SELF had " << numProcs << " != 1 ranks!\n"
                << Foam::abort(FatalError);
        }
        #endif

        // For MPI_COMM_SELF : the process IDs within the world communicator.
        // Uses MPI_COMM_WORLD in case called before UPstream::commGlobal()
        // was initialized

        procIDs_[index].resize_nocopy(1);
        MPI_Comm_rank(MPI_COMM_WORLD, &procIDs_[index].front());
    }
    else
    {
        // General sub-communicator

        PstreamGlobals::pendingMPIFree_[index] = true;

        // Starting from parent
        MPI_Group parent_group;
        MPI_Comm_group
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            &parent_group
        );

        MPI_Group active_group;
        MPI_Group_incl
        (
            parent_group,
            procIDs_[index].size(),
            procIDs_[index].cdata(),
            &active_group
        );

        #if defined(MSMPI_VER)
        // ms-mpi (10.0 and others?) does not have MPI_Comm_create_group
        MPI_Comm_create
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            active_group,
            &PstreamGlobals::MPICommunicators_[index]
        );
        #else
        // Create new communicator for this group
        MPI_Comm_create_group
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            active_group,
            UPstream::msgType(),
            &PstreamGlobals::MPICommunicators_[index]
        );
        #endif

        // Groups not needed after this...
        MPI_Group_free(&parent_group);
        MPI_Group_free(&active_group);

        if (PstreamGlobals::MPICommunicators_[index] == MPI_COMM_NULL)
        {
            // No communicator created
            myProcNo_[index] = -1;
            PstreamGlobals::pendingMPIFree_[index] = false;
        }
        else
        {
            if
            (
                MPI_Comm_rank
                (
                    PstreamGlobals::MPICommunicators_[index],
                    &myProcNo_[index]
                )
            )
            {
                FatalErrorInFunction
                    << "Problem :"
                    << " when allocating communicator at " << index
                    << " from ranks " << procIDs_[index]
                    << " of parent " << parentIndex
                    << " cannot find my own rank"
                    << Foam::exit(FatalError);
            }
        }
    }
}


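// NOTE:
// The general sub-communicator branch above follows the usual MPI group-based
// pattern: take the parent communicator's group, restrict it to the member
// ranks, then create a communicator for that group. A minimal standalone
// sketch in plain MPI (illustration only, outside OpenFOAM):
//
//     MPI_Group worldGroup, subGroup;
//     MPI_Comm subComm = MPI_COMM_NULL;
//     const int ranks[] = {0, 2, 4};   // hypothetical member ranks
//
//     MPI_Comm_group(MPI_COMM_WORLD, &worldGroup);
//     MPI_Group_incl(worldGroup, 3, ranks, &subGroup);
//
//     // Collective over MPI_COMM_WORLD; non-members receive MPI_COMM_NULL
//     MPI_Comm_create(MPI_COMM_WORLD, subGroup, &subComm);
//
//     MPI_Group_free(&worldGroup);
//     MPI_Group_free(&subGroup);
//
// MPI_Comm_create_group (preferred above when available) additionally takes
// a tag and only needs to be called by the ranks belonging to the group.
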
void Foam::UPstream::freeCommunicatorComponents(const label index)
{
    // Skip placeholders and pre-defined (not allocated) communicators
    if (UPstream::debug)
    {
        Pout<< "freeCommunicatorComponents: " << index
            << " from " << PstreamGlobals::MPICommunicators_.size() << endl;
    }

    // Not touching the first two communicators (SELF, WORLD)
    // or anything out-of-bounds.
    //
    // No UPstream communicator indices when MPI is initialized outside
    // of OpenFOAM - thus needs a bounds check too!

    if
    (
        index > 1
     && index < PstreamGlobals::MPICommunicators_.size()
    )
    {
        if
        (
            PstreamGlobals::pendingMPIFree_[index]
         && (MPI_COMM_NULL != PstreamGlobals::MPICommunicators_[index])
        )
        {
            // Free communicator. Sets communicator to MPI_COMM_NULL
            MPI_Comm_free(&PstreamGlobals::MPICommunicators_[index]);
        }

        PstreamGlobals::pendingMPIFree_[index] = false;
    }
}


void Foam::UPstream::barrier(const label communicator, UPstream::Request* req)
{
    // No-op for non-parallel or not on communicator
    if (!UPstream::parRun() || !UPstream::is_rank(communicator))
    {
        PstreamGlobals::reset_request(req);
        return;
    }

    if (req)
    {
        MPI_Request request;

        // Non-blocking
        if
        (
            MPI_Ibarrier
            (
                PstreamGlobals::MPICommunicators_[communicator],
                &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Ibarrier returned with error"
                << Foam::abort(FatalError);
        }

        *req = UPstream::Request(request);
    }
    else
    {
        // Blocking
        if
        (
            MPI_Barrier
            (
                PstreamGlobals::MPICommunicators_[communicator]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Barrier returned with error"
                << Foam::abort(FatalError);
        }
    }
}


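// NOTE:
// When a request pointer is supplied, barrier() above starts a non-blocking
// MPI_Ibarrier and returns the MPI_Request wrapped as UPstream::Request; the
// caller is responsible for completing it later (e.g. via the UPstream
// request/wait handling). Without a request pointer it is a plain blocking
// MPI_Barrier.
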
std::pair<int,int>
Foam::UPstream::probeMessage
(
    const UPstream::commsTypes commsType,
    const int fromProcNo,
    const int tag,
    const label communicator
)
{
    std::pair<int,int> result(-1, 0);

    // No-op for non-parallel or not on communicator
    if (!UPstream::parRun() || !UPstream::is_rank(communicator))
    {
        return result;
    }

    const int source = (fromProcNo < 0) ? MPI_ANY_SOURCE : fromProcNo;
    // Supporting MPI_ANY_TAG is not particularly useful...

    int flag = 0;
    MPI_Status status;

    if (UPstream::commsTypes::blocking == commsType)
    {
        // Blocking
        profilingPstream::beginTiming();

        if
        (
            MPI_Probe
            (
                source,
                tag,
                PstreamGlobals::MPICommunicators_[communicator],
                &status
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Probe returned with error"
                << Foam::abort(FatalError);
        }

        profilingPstream::addProbeTime();

        flag = 1;
    }
    else
    {
        // Non-blocking
        profilingPstream::beginTiming();

        if
        (
            MPI_Iprobe
            (
                source,
                tag,
                PstreamGlobals::MPICommunicators_[communicator],
                &flag,
                &status
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Iprobe returned with error"
                << Foam::abort(FatalError);
        }

        profilingPstream::addProbeTime();
    }

    if (flag)
    {
        result.first = status.MPI_SOURCE;
        MPI_Get_count(&status, MPI_BYTE, &result.second);
    }

    return result;
}

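// Typical use of probeMessage() (illustration only): probe for any incoming
// message, then size a receive buffer from the reported byte count before
// receiving. Roughly:
//
//     // (source rank, message size in bytes); source is -1 if nothing found
//     std::pair<int,int> probed =
//         UPstream::probeMessage
//         (
//             UPstream::commsTypes::nonBlocking,
//             -1,                      // any source rank
//             UPstream::msgType(),
//             UPstream::worldComm
//         );
//
//     if (probed.first >= 0)
//     {
//         DynamicList<char> buf(probed.second);
//         // ... receive probed.second bytes from rank probed.first
//     }
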

// ************************************************************************* //