UPstream.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2016-2024 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "Pstream.H"
#include "PstreamReduceOps.H"
#include "PstreamGlobals.H"
#include "profilingPstream.H"
#include "int.H"
#include "UPstreamWrapping.H"
#include "collatedFileOperation.H"

#include <cstdlib>
#include <cstring>
#include <memory>
#include <numeric>
#include <string>

#undef Pstream_use_MPI_Get_count

// * * * * * * * * * * * * * * Static Data Members * * * * * * * * * * * * * //

// The min value and default for MPI buffer length
constexpr int minBufLen = 20000000;

// Track size of attached MPI buffer
static int attachedBufLen = 0;

// Track if we initialized MPI
static bool ourMpi = false;


// * * * * * * * * * * * * * * * Local Functions * * * * * * * * * * * * * * //

// Attach user-defined send buffer
static void attachOurBuffers()
{
#ifndef SGIMPI
    if (attachedBufLen)
    {
        return;  // Already attached
    }

    // Use UPstream::mpiBufferSize (optimisationSwitch),
    // but allow override with MPI_BUFFER_SIZE env variable (int value)

    int len = 0;

    const std::string str(Foam::getEnv("MPI_BUFFER_SIZE"));
    if (str.empty() || !Foam::read(str, len) || len <= 0)
    {
        len = Foam::UPstream::mpiBufferSize;
    }

    if (len < minBufLen)
    {
        len = minBufLen;
    }

    char* buf = new char[len];

    if (MPI_SUCCESS == MPI_Buffer_attach(buf, len))
    {
        // Properly attached
        attachedBufLen = len;

        if (Foam::UPstream::debug)
        {
            Foam::Perr<< "UPstream::init : buffer-size " << len << '\n';
        }
    }
    else
    {
        delete[] buf;
        Foam::Perr<< "UPstream::init : could not attach buffer\n";
    }
#endif
}
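
// Illustration (a sketch, not part of this file): the buffer attached above
// is what backs MPI_Bsend(), used by the commsTypes::buffered send path.
// A buffered send copies the message into the attached buffer and returns
// immediately, but fails once the remaining buffer space is exhausted:
//
//     char msg[64];
//     // toProcNo, tag and comm are hypothetical placeholders
//     MPI_Bsend(msg, sizeof(msg), MPI_BYTE, toProcNo, tag, comm);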


// Remove an existing user-defined send buffer
// IMPORTANT:
// This operation will block until all messages currently in the
// buffer have been transmitted.
static void detachOurBuffers()
{
#ifndef SGIMPI
    if (!attachedBufLen)
    {
        return;  // Nothing to detach
    }

    // Some MPI notes suggest that the return code is MPI_SUCCESS when
    // no buffer is attached.
    // Be extra careful and require a non-zero size as well.

    char* buf = nullptr;
    int len = 0;

    if (MPI_SUCCESS == MPI_Buffer_detach(&buf, &len) && len)
    {
        // This was presumably the buffer that we attached
        // and not someone else.
        delete[] buf;
    }

    // Nothing attached
    attachedBufLen = 0;
#endif
}
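
// Illustration (a sketch, not part of this file): MPI supports only one
// attached buffer at a time, so changing the size means detach-then-attach.
// Since MPI_Buffer_detach() blocks until pending buffered sends complete,
// the swap is safe between communication phases:
//
//     detachOurBuffers();   // blocks until buffered messages have drained
//     attachOurBuffers();   // re-attach with the currently configured size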


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

// NOTE:
// Valid parallel options vary between implementations, but flag common ones.
// If they are not removed by MPI_Init(), the subsequent argument processing
// will notice that they are wrong.
void Foam::UPstream::addValidParOptions(HashTable<string>& validParOptions)
{
    validParOptions.insert("np", "");
    validParOptions.insert("p4pg", "PI file");
    validParOptions.insert("p4wd", "directory");
    validParOptions.insert("p4amslave", "");
    validParOptions.insert("p4yourname", "hostname");
    validParOptions.insert("machinefile", "machine file");
}
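
// Illustration (a sketch, not part of this file): the tokens above cover the
// classic mpich/p4-style launchers, e.g. a hypothetical command line such as
//
//     solverName -p4pg procgroupFile -machinefile hostsFile -parallel
//
// Whatever MPI_Init() does not strip out is later rejected by the normal
// argList option validation.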


bool Foam::UPstream::initNull()
{
    int flag = 0;

    MPI_Finalized(&flag);
    if (flag)
    {
        // Already finalized - this is an error
        FatalErrorInFunction
            << "MPI was already finalized - cannot perform MPI_Init\n"
            << Foam::abort(FatalError);

        return false;
    }

    MPI_Initialized(&flag);
    if (flag)
    {
        if (UPstream::debug)
        {
            Perr<< "UPstream::initNull : was already initialized\n";
        }
    }
    else
    {
        // Not already initialized

        MPI_Init_thread
        (
            nullptr,    // argc
            nullptr,    // argv
            MPI_THREAD_SINGLE,
            &flag       // provided_thread_support
        );

        ourMpi = true;
    }

    // Could also attach buffers etc.

    return true;
}
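
// Illustration (a sketch, not part of this file): initNull() serves
// processes that need a live MPI layer without a distributed run, e.g. a
// serial program still linked against MPI-based collated file I/O (note the
// collatedFileOperation include above). Hypothetical call sequence:
//
//     if (!Foam::UPstream::initNull())
//     {
//         // MPI was already finalized - cannot continue
//     }
//     // ... serial work ...
//     Foam::UPstream::shutdown();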


bool Foam::UPstream::init(int& argc, char**& argv, const bool needsThread)
{
    int numprocs = 0, myRank = 0;
    int provided_thread_support = 0;
    int flag = 0;

    MPI_Finalized(&flag);
    if (flag)
    {
        // Already finalized - this is an error
        FatalErrorInFunction
            << "MPI was already finalized - cannot perform MPI_Init" << endl
            << Foam::abort(FatalError);

        return false;
    }

    MPI_Initialized(&flag);
    if (flag)
    {
        // Already initialized.
        // Warn if we've called twice, but skip if initialized externally

        if (ourMpi)
        {
            WarningInFunction
                << "MPI was already initialized - cannot perform MPI_Init" << nl
                << "This could indicate an application programming error!"
                << endl;

            return true;
        }
        else if (UPstream::debug)
        {
            Perr<< "UPstream::init : was already initialized\n";
        }
    }
    else
    {
        MPI_Init_thread
        (
            &argc,
            &argv,
            (
                needsThread
              ? MPI_THREAD_MULTIPLE
              : MPI_THREAD_SINGLE
            ),
            &provided_thread_support
        );

        ourMpi = true;
    }

    // Check argument list for local world
    label worldIndex = -1;
    word world;
    for (int argi = 1; argi < argc; ++argi)
    {
        if (strcmp(argv[argi], "-world") == 0)
        {
            worldIndex = argi++;
            if (argi >= argc)
            {
                FatalErrorInFunction
                    << "Missing world name to argument \"world\""
                    << Foam::abort(FatalError);
            }
            world = argv[argi];
            break;
        }
    }

    // Filter 'world' option
    if (worldIndex != -1)
    {
        for (label i = worldIndex+2; i < argc; i++)
        {
            argv[i-2] = argv[i];
        }
        argc -= 2;
    }

    MPI_Comm_size(MPI_COMM_WORLD, &numprocs);
    MPI_Comm_rank(MPI_COMM_WORLD, &myRank);

    if (UPstream::debug)
    {
        Perr<< "UPstream::init :"
            << " thread-support : requested:" << needsThread
            << " obtained:"
            << (
                   (provided_thread_support == MPI_THREAD_SINGLE)
                 ? "SINGLE"
                 : (provided_thread_support == MPI_THREAD_SERIALIZED)
                 ? "SERIALIZED"
                 : (provided_thread_support == MPI_THREAD_MULTIPLE)
                 ? "MULTIPLE"
                 : "other"
               )
            << " procs:" << numprocs
            << " rank:" << myRank
            << " world:" << world << endl;
    }

    if (worldIndex == -1 && numprocs <= 1)
    {
        FatalErrorInFunction
            << "attempt to run parallel on 1 processor"
            << Foam::abort(FatalError);
    }

    // Initialise parallel structure
    setParRun(numprocs, provided_thread_support == MPI_THREAD_MULTIPLE);

    if (worldIndex != -1)
    {
        // During startup, so commWorld() == commGlobal()

        wordList worlds(numprocs);
        worlds[UPstream::myProcNo(UPstream::commGlobal())] = world;
        Pstream::gatherList
        (
            worlds,
            UPstream::msgType(),
            UPstream::commGlobal()
        );

        // Compact
        if (UPstream::master(UPstream::commGlobal()))
        {
            DynamicList<word> worldNames(numprocs);
            worldIDs_.resize_nocopy(numprocs);

            forAll(worlds, proci)
            {
                const word& world = worlds[proci];

                worldIDs_[proci] = worldNames.find(world);

                if (worldIDs_[proci] == -1)
                {
                    worldIDs_[proci] = worldNames.size();
                    worldNames.push_back(world);
                }
            }

            allWorlds_.transfer(worldNames);
        }
        Pstream::broadcasts(UPstream::commGlobal(), allWorlds_, worldIDs_);

        const label myWorldId =
            worldIDs_[UPstream::myProcNo(UPstream::commGlobal())];

        DynamicList<label> subRanks;
        forAll(worldIDs_, proci)
        {
            if (worldIDs_[proci] == myWorldId)
            {
                subRanks.push_back(proci);
            }
        }

        // Allocate new communicator with comm-global as its parent
        const label subComm =
            UPstream::allocateCommunicator(UPstream::commGlobal(), subRanks);

        // Override worldComm
        UPstream::worldComm = subComm;
        // For testing: warn use of non-worldComm
        UPstream::warnComm = UPstream::worldComm;

        // MPI_COMM_SELF : the processor number wrt the new world communicator
        if (procIDs_[UPstream::commSelf()].size())
        {
            procIDs_[UPstream::commSelf()].front() =
                UPstream::myProcNo(subComm);
        }

        if (UPstream::debug)
        {
            // Check
            int subNumProcs, subRank;
            MPI_Comm_size
            (
                PstreamGlobals::MPICommunicators_[subComm],
                &subNumProcs
            );
            MPI_Comm_rank
            (
                PstreamGlobals::MPICommunicators_[subComm],
                &subRank
            );

            Perr<< "UPstream::init : in world:" << world
                << " using local communicator:" << subComm
                << " rank " << subRank
                << " of " << subNumProcs
                << endl;
        }

        // Override Pout prefix (move to setParRun?)
        Pout.prefix() = '[' + world + '/' + name(myProcNo(subComm)) + "] ";
        Perr.prefix() = Pout.prefix();
    }
    else
    {
        // All processors use world 0
        worldIDs_.resize_nocopy(numprocs);
        worldIDs_ = 0;
    }

    attachOurBuffers();

    return true;
}
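
// Illustration (a sketch, not part of this file): the "-world" filtering
// above splices two entries out of argv before OpenFOAM parses its own
// options. For a hypothetical coupled launch such as
//
//     mpirun -np 2 solverA -world left : -np 2 solverB -world right
//
// a rank of solverA starts with argv = {"solverA", "-world", "left", ...}
// and continues with argv = {"solverA", ...} after argc -= 2, while
// UPstream::worldComm now refers to the per-world sub-communicator.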


void Foam::UPstream::shutdown(int errNo)
{
    int flag = 0;

    MPI_Initialized(&flag);
    if (!flag)
    {
        // MPI not initialized - we have nothing to do
        return;
    }

    MPI_Finalized(&flag);
    if (flag)
    {
        // MPI already finalized - we have nothing to do
        if (ourMpi)
        {
            WarningInFunction
                << "MPI was already finalized (by a connected program?)\n";
        }
        else if (UPstream::debug && errNo == 0)
        {
            Perr<< "UPstream::shutdown : was already finalized\n";
        }
        ourMpi = false;
        return;
    }

    if (!ourMpi)
    {
        WarningInFunction
            << "Finalizing MPI, but was initialized elsewhere\n";
    }
    ourMpi = false;


    // Abort - stop now, without any final synchronization steps!
    // -----

    if (errNo != 0)
    {
        MPI_Abort(MPI_COMM_WORLD, errNo);
        return;
    }


    // Regular cleanup
    // ---------------

    if (UPstream::debug)
    {
        Perr<< "UPstream::shutdown\n";
    }

    // Check for any outstanding requests
    {
        label nOutstanding = 0;

        for (MPI_Request request : PstreamGlobals::outstandingRequests_)
        {
            if (MPI_REQUEST_NULL != request)
            {
                // TBD: MPI_Cancel(&request); MPI_Request_free(&request);
                ++nOutstanding;
            }
        }

        if (nOutstanding)
        {
            WarningInFunction
                << "Still have " << nOutstanding
                << " outstanding MPI requests."
                << " Should not happen for a normal code exit."
                << endl;
        }

        PstreamGlobals::outstandingRequests_.clear();
    }


    {
        detachOurBuffers();

        forAllReverse(myProcNo_, communicator)
        {
            freeCommunicatorComponents(communicator);
        }
    }


    MPI_Finalize();
}
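
// Illustration (a sketch, not part of this file): the errNo != 0 branch
// above skips all collective teardown on purpose, since MPI_Finalize()
// could deadlock while other ranks are still blocked in communication.
// A hypothetical fatal-error handler would therefore call
//
//     Foam::UPstream::shutdown(2);   // -> MPI_Abort(MPI_COMM_WORLD, 2)
//
// whereas a clean exit uses shutdown(0): drain requests, free the
// sub-communicators in reverse creation order, then MPI_Finalize().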


void Foam::UPstream::exit(int errNo)
{
    UPstream::shutdown(errNo);
    std::exit(errNo);
}


void Foam::UPstream::abort()
{
    MPI_Abort(MPI_COMM_WORLD, 1);
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

void Foam::UPstream::allocateCommunicatorComponents
(
    const label parentIndex,
    const label index
)
{
    if (index == PstreamGlobals::MPICommunicators_.size())
    {
        // Extend storage with null values
        PstreamGlobals::pendingMPIFree_.emplace_back(false);
        PstreamGlobals::MPICommunicators_.emplace_back(MPI_COMM_NULL);
    }
    else if (index > PstreamGlobals::MPICommunicators_.size())
    {
        FatalErrorInFunction
            << "PstreamGlobals out of sync with UPstream data. Problem."
            << Foam::exit(FatalError);
    }


    if (parentIndex == -1)
    {
        // Global communicator. Same as world communicator for single-world

        if (index != UPstream::commGlobal())
        {
            FatalErrorInFunction
                << "base world communicator should always be index "
                << UPstream::commGlobal()
                << Foam::exit(FatalError);
        }

        PstreamGlobals::pendingMPIFree_[index] = false;
        PstreamGlobals::MPICommunicators_[index] = MPI_COMM_WORLD;

        // TBD: MPI_Comm_dup(MPI_COMM_WORLD, ...);
        // with pendingMPIFree_[index] = true
        // Note: freeCommunicatorComponents() may need an update

        MPI_Comm_rank
        (
            PstreamGlobals::MPICommunicators_[index],
            &myProcNo_[index]
        );

        // Set the number of ranks to the actual number
        int numProcs;
        MPI_Comm_size
        (
            PstreamGlobals::MPICommunicators_[index],
            &numProcs
        );

        // identity [0..numProcs-1], as 'int'
        procIDs_[index].resize_nocopy(numProcs);
        std::iota(procIDs_[index].begin(), procIDs_[index].end(), 0);
    }
    else if (parentIndex == -2)
    {
        // MPI_COMM_SELF

        PstreamGlobals::pendingMPIFree_[index] = false;
        PstreamGlobals::MPICommunicators_[index] = MPI_COMM_SELF;

        MPI_Comm_rank(MPI_COMM_SELF, &myProcNo_[index]);

        // Number of ranks is always 1 (self communicator)

        #ifdef FULLDEBUG
        int numProcs;
        MPI_Comm_size(MPI_COMM_SELF, &numProcs);

        if (numProcs != 1)
        {
            // A self-communicator with multiple ranks - this is an error
            FatalErrorInFunction
                << "MPI_COMM_SELF had " << numProcs << " != 1 ranks!\n"
                << Foam::abort(FatalError);
        }
        #endif

        // For MPI_COMM_SELF : the process IDs within the world communicator.
        // Uses MPI_COMM_WORLD in case called before UPstream::commGlobal()
        // was initialized

        procIDs_[index].resize_nocopy(1);
        MPI_Comm_rank(MPI_COMM_WORLD, &procIDs_[index].front());
    }
    else
    {
        // General sub-communicator

        PstreamGlobals::pendingMPIFree_[index] = true;

        // Starting from parent
        MPI_Group parent_group;
        MPI_Comm_group
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            &parent_group
        );

        MPI_Group active_group;
        MPI_Group_incl
        (
            parent_group,
            procIDs_[index].size(),
            procIDs_[index].cdata(),
            &active_group
        );

        #if defined(MSMPI_VER)
        // ms-mpi (10.0 and others?) does not have MPI_Comm_create_group
        MPI_Comm_create
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            active_group,
            &PstreamGlobals::MPICommunicators_[index]
        );
        #else
        // Create new communicator for this group
        MPI_Comm_create_group
        (
            PstreamGlobals::MPICommunicators_[parentIndex],
            active_group,
            UPstream::msgType(),
            &PstreamGlobals::MPICommunicators_[index]
        );
        #endif

        // Groups not needed after this...
        MPI_Group_free(&parent_group);
        MPI_Group_free(&active_group);

        if (PstreamGlobals::MPICommunicators_[index] == MPI_COMM_NULL)
        {
            // No communicator created
            myProcNo_[index] = -1;
            PstreamGlobals::pendingMPIFree_[index] = false;
        }
        else
        {
            if
            (
                MPI_Comm_rank
                (
                    PstreamGlobals::MPICommunicators_[index],
                    &myProcNo_[index]
                )
            )
            {
                FatalErrorInFunction
                    << "Problem :"
                    << " when allocating communicator at " << index
                    << " from ranks " << procIDs_[index]
                    << " of parent " << parentIndex
                    << " cannot find my own rank"
                    << Foam::exit(FatalError);
            }
        }
    }
}
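
// Illustration (a sketch, not part of this file): the group-based creation
// used above only requires the participating ranks to make the call. A
// minimal standalone equivalent for a hypothetical sub-communicator of
// ranks {0, 2, 4}:
//
//     MPI_Group worldGroup, subGroup;
//     MPI_Comm_group(MPI_COMM_WORLD, &worldGroup);
//     const int ranks[] = {0, 2, 4};
//     MPI_Group_incl(worldGroup, 3, ranks, &subGroup);
//
//     MPI_Comm subComm = MPI_COMM_NULL;
//     // Unlike MPI_Comm_create, only the ranks in subGroup call this
//     MPI_Comm_create_group(MPI_COMM_WORLD, subGroup, 1, &subComm);
//
//     MPI_Group_free(&worldGroup);
//     MPI_Group_free(&subGroup);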


void Foam::UPstream::freeCommunicatorComponents(const label index)
{
    if (UPstream::debug)
    {
        Perr<< "freeCommunicatorComponents: " << index
            << " from " << PstreamGlobals::MPICommunicators_.size() << endl;
    }

    // Only free communicators that we have specifically allocated ourselves
    //
    // Bounds checking needed since there are no UPstream communicator indices
    // when MPI is initialized outside of OpenFOAM

    if
    (
        (index >= 0 && index < PstreamGlobals::MPICommunicators_.size())
     && PstreamGlobals::pendingMPIFree_[index]
    )
    {
        PstreamGlobals::pendingMPIFree_[index] = false;

        // Free communicator. Sets communicator to MPI_COMM_NULL
        if (MPI_COMM_NULL != PstreamGlobals::MPICommunicators_[index])
        {
            MPI_Comm_free(&PstreamGlobals::MPICommunicators_[index]);
        }
    }
}


void Foam::UPstream::barrier(const label communicator, UPstream::Request* req)
{
    // No-op for non-parallel or not on communicator
    if (!UPstream::parRun() || !UPstream::is_rank(communicator))
    {
        PstreamGlobals::reset_request(req);
        return;
    }

    if (req)
    {
        MPI_Request request;

        // Non-blocking
        if
        (
            MPI_Ibarrier
            (
                PstreamGlobals::MPICommunicators_[communicator],
                &request
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Ibarrier returned with error"
                << Foam::abort(FatalError);
        }

        *req = UPstream::Request(request);
    }
    else
    {
        // Blocking
        if
        (
            MPI_Barrier
            (
                PstreamGlobals::MPICommunicators_[communicator]
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Barrier returned with error"
                << Foam::abort(FatalError);
        }
    }
}
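
// Illustration (a sketch, not part of this file): both barrier forms in
// use. Blocking:
//
//     UPstream::barrier(UPstream::worldComm);
//
// Non-blocking, overlapping local work before completing the request
// (the waitRequest() naming here is an assumption):
//
//     UPstream::Request req;
//     UPstream::barrier(UPstream::worldComm, &req);
//     // ... unrelated local work ...
//     UPstream::waitRequest(req);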


std::pair<int,int64_t>
Foam::UPstream::probeMessage
(
    const UPstream::commsTypes commsType,
    const int fromProcNo,
    const int tag,
    const label communicator
)
{
    std::pair<int,int64_t> result(-1, 0);

    // No-op for non-parallel or not on communicator
    if (!UPstream::parRun() || !UPstream::is_rank(communicator))
    {
        return result;
    }

    const int source = (fromProcNo < 0) ? MPI_ANY_SOURCE : fromProcNo;
    // Supporting MPI_ANY_TAG is not particularly useful...

    int flag = 0;
    MPI_Status status;

    if (UPstream::commsTypes::buffered == commsType)
    {
        // Blocking
        profilingPstream::beginTiming();

        if
        (
            MPI_Probe
            (
                source,
                tag,
                PstreamGlobals::MPICommunicators_[communicator],
                &status
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Probe returned with error"
                << Foam::abort(FatalError);
        }

        profilingPstream::addProbeTime();

        flag = 1;
    }
    else
    {
        // Non-blocking
        profilingPstream::beginTiming();

        if
        (
            MPI_Iprobe
            (
                source,
                tag,
                PstreamGlobals::MPICommunicators_[communicator],
                &flag,
                &status
            )
        )
        {
            FatalErrorInFunction
                << "MPI_Iprobe returned with error"
                << Foam::abort(FatalError);
        }

        profilingPstream::addProbeTime();
    }

    if (flag)
    {
        // Unlikely to be used with large amounts of data,
        // but use MPI_Get_elements_x() instead of MPI_Get_count() anyhow

        #ifdef Pstream_use_MPI_Get_count
        int count(0);
        MPI_Get_count(&status, MPI_BYTE, &count);
        #else
        MPI_Count count(0);
        MPI_Get_elements_x(&status, MPI_BYTE, &count);
        #endif

        // Errors
        if (count == MPI_UNDEFINED || int64_t(count) < 0)
        {
            FatalErrorInFunction
                << "MPI_Get_count() or MPI_Get_elements_x() : "
                   "returned undefined or negative value"
                << Foam::abort(FatalError);
        }
        else if (int64_t(count) > int64_t(INT_MAX))
        {
            FatalErrorInFunction
                << "MPI_Get_count() or MPI_Get_elements_x() : "
                   "count is larger than INT_MAX bytes"
                << Foam::abort(FatalError);
        }


        result.first = status.MPI_SOURCE;
        result.second = int64_t(count);
    }

    return result;
}
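
// Illustration (a sketch, not part of this file): a typical probe-then-
// receive pattern built on the (sourceRank, byteCount) return value:
//
//     auto probed = UPstream::probeMessage
//     (
//         UPstream::commsTypes::buffered,   // blocking probe
//         -1,                               // any source rank
//         UPstream::msgType(),
//         UPstream::worldComm
//     );
//
//     if (probed.first >= 0)
//     {
//         // Size a receive buffer from the probed byte count, then
//         // receive from rank probed.first with a matching read call
//         List<char> buf(label(probed.second));
//         // ... receive into buf.data() ...
//     }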


// ************************************************************************* //