UPstreamRequest.C
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011 OpenFOAM Foundation
    Copyright (C) 2023 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM. If not, see <http://www.gnu.org/licenses/>.

\*---------------------------------------------------------------------------*/

#include "UPstreamWrapping.H"
#include "PstreamGlobals.H"
#include "profilingPstream.H"

// * * * * * * * * * * * * * * * * Constructors  * * * * * * * * * * * * * * //

Foam::UPstream::Request::Request() noexcept
:
    UPstream::Request(MPI_REQUEST_NULL)
{}


// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

bool Foam::UPstream::Request::good() const noexcept
{
    return MPI_REQUEST_NULL != PstreamDetail::Request::get(*this);
}


void Foam::UPstream::Request::reset() noexcept
{
    *this = UPstream::Request(MPI_REQUEST_NULL);
}


// * * * * * * * * * * * * * * * Member Functions  * * * * * * * * * * * * * //

Foam::label Foam::UPstream::nRequests() noexcept
{
    return PstreamGlobals::outstandingRequests_.size();
}


void Foam::UPstream::resetRequests(const label n)
{
    if (n >= 0 && n < PstreamGlobals::outstandingRequests_.size())
    {
        PstreamGlobals::outstandingRequests_.resize(n);
    }
}
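
// A typical bookkeeping pattern for the index-based interface above
// (sketch only; the non-blocking initiators are whatever Pstream calls
// the caller uses to populate the global list):
//
//     const label startReq = UPstream::nRequests();  // remember the mark
//     // ... initiate non-blocking operations (appended to the list) ...
//     UPstream::waitRequests(startReq, -1);  // wait for all added since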


void Foam::UPstream::addRequest(UPstream::Request& req)
{
    // No-op for non-parallel
    if (!UPstream::parRun())
    {
        return;
    }

    // Transcribe as an MPI_Request
    PstreamGlobals::outstandingRequests_.push_back
    (
        PstreamDetail::Request::get(req)
    );

    // Invalidate parameter
    req = UPstream::Request(MPI_REQUEST_NULL);
}
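
// Intended hand-off (sketch; 'startNonBlockingOp' is a hypothetical
// initiator returning a wrapped request):
//
//     UPstream::Request req = startNonBlockingOp(...);
//     UPstream::addRequest(req);   // ownership moves to the global list
//     // 'req' is now MPI_REQUEST_NULL; completion is handled through the
//     // index-based waitRequests()/finishedRequests() interface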


void Foam::UPstream::cancelRequest(const label i)
{
    // No-op for non-parallel, or out-of-range (eg, placeholder indices)
    if
    (
        !UPstream::parRun()
     || i < 0
     || i >= PstreamGlobals::outstandingRequests_.size()
    )
    {
        return;
    }

    {
        auto& request = PstreamGlobals::outstandingRequests_[i];
        if (MPI_REQUEST_NULL != request)  // Active handle is mandatory
        {
            MPI_Cancel(&request);
            MPI_Request_free(&request);  //<- Sets to MPI_REQUEST_NULL
        }
    }
}


void Foam::UPstream::cancelRequest(UPstream::Request& req)
{
    // No-op for non-parallel
    if (!UPstream::parRun())
    {
        return;
    }

    {
        MPI_Request request = PstreamDetail::Request::get(req);
        if (MPI_REQUEST_NULL != request)  // Active handle is mandatory
        {
            MPI_Cancel(&request);
            MPI_Request_free(&request);
        }
        req = UPstream::Request(MPI_REQUEST_NULL);  // Now inactive
    }
}


void Foam::UPstream::cancelRequests(UList<UPstream::Request>& requests)
{
    // No-op for non-parallel
    if (!UPstream::parRun())
    {
        return;
    }

    for (auto& req : requests)
    {
        MPI_Request request = PstreamDetail::Request::get(req);
        if (MPI_REQUEST_NULL != request)  // Active handle is mandatory
        {
            MPI_Cancel(&request);
            MPI_Request_free(&request);
        }
        req = UPstream::Request(MPI_REQUEST_NULL);  // Now inactive
    }
}


void Foam::UPstream::freeRequest(UPstream::Request& req)
{
    // No-op for non-parallel
    if (!UPstream::parRun())
    {
        return;
    }

    {
        MPI_Request request = PstreamDetail::Request::get(req);
        if (MPI_REQUEST_NULL != request)  // Active handle is mandatory
        {
            // if (cancel)
            // {
            //     MPI_Cancel(&request);
            // }
            MPI_Request_free(&request);
        }
        req = UPstream::Request(MPI_REQUEST_NULL);  // Now inactive
    }
}


void Foam::UPstream::freeRequests(UList<UPstream::Request>& requests)
{
    // No-op for non-parallel
    if (!UPstream::parRun())
    {
        return;
    }

    for (auto& req : requests)
    {
        MPI_Request request = PstreamDetail::Request::get(req);
        if (MPI_REQUEST_NULL != request)  // Active handle is mandatory
        {
            // if (cancel)
            // {
            //     MPI_Cancel(&request);
            // }
            MPI_Request_free(&request);
        }
        req = UPstream::Request(MPI_REQUEST_NULL);  // Now inactive
    }
}


void Foam::UPstream::waitRequests(const label pos, label len)
{
    // No-op for non-parallel, no pending requests or out-of-range
    if
    (
        !UPstream::parRun()
     || (pos < 0 || pos >= PstreamGlobals::outstandingRequests_.size())
     || !len
    )
    {
        return;
    }

    label count = (PstreamGlobals::outstandingRequests_.size() - pos);
    bool trim = true;  // Trim the trailing part of the list

    // Apply range-checking on slice with (len < 0) behaving like npos
    // (ie, the rest of the list)
    if (len >= 0 && len < count)
    {
        // A non-trailing slice
        count = len;
        trim = false;
    }
    // Have count >= 1

    auto* waitRequests = (PstreamGlobals::outstandingRequests_.data() + pos);

    if (UPstream::debug)
    {
        Pout<< "UPstream::waitRequests : starting wait for "
            << count << " requests starting at " << pos << endl;
    }

    profilingPstream::beginTiming();

    if (count == 1)
    {
        // On success: sets request to MPI_REQUEST_NULL
        if (MPI_Wait(waitRequests, MPI_STATUS_IGNORE))
        {
            FatalErrorInFunction
                << "MPI_Wait returned with error"
                << Foam::abort(FatalError);
        }
    }
    else if (count > 1)
    {
        // On success: sets each request to MPI_REQUEST_NULL
        if (MPI_Waitall(count, waitRequests, MPI_STATUSES_IGNORE))
        {
            FatalErrorInFunction
                << "MPI_Waitall returned with error"
                << Foam::abort(FatalError);
        }
    }

    profilingPstream::addWaitTime();

    if (trim)
    {
        // Trim the length of outstanding requests
        PstreamGlobals::outstandingRequests_.resize(pos);
    }

    if (UPstream::debug)
    {
        Pout<< "UPstream::waitRequests : finished wait." << endl;
    }
}
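
// The slice semantics illustrated (sketch): with requests occupying
// positions [start, start+2) of the global list,
//
//     UPstream::waitRequests(start, 2);   // non-trailing slice: no trimming
//     UPstream::waitRequests(start, -1);  // npos-like: rest of list, trims to 'start'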


void Foam::UPstream::waitRequests(UList<UPstream::Request>& requests)
{
    // No-op for non-parallel or no pending requests
    if (!UPstream::parRun() || requests.empty())
    {
        return;
    }

    // Looks ugly but is legitimate since UPstream::Request is an intptr_t,
    // which is always large enough to hold an MPI_Request (int or pointer)

    label count = 0;
    auto* waitRequests = reinterpret_cast<MPI_Request*>(requests.data());

    for (auto& req : requests)
    {
        MPI_Request request = PstreamDetail::Request::get(req);

        if (MPI_REQUEST_NULL != request)  // Apply some prefiltering
        {
            waitRequests[count] = request;
            ++count;
        }
    }

    if (!count)
    {
        // No active request handles
        return;
    }

    profilingPstream::beginTiming();

    // On success: sets each request to MPI_REQUEST_NULL
    if (MPI_Waitall(count, waitRequests, MPI_STATUSES_IGNORE))
    {
        FatalErrorInFunction
            << "MPI_Waitall returned with error"
            << Foam::abort(FatalError);
    }

    profilingPstream::addWaitTime();

    // Everything handled, reset all to MPI_REQUEST_NULL
    requests = UPstream::Request(MPI_REQUEST_NULL);
}
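
// Example with caller-owned requests (sketch; 'nNeighbours' and the
// per-slot initiators are assumptions):
//
//     List<UPstream::Request> requests(nNeighbours);
//     // ... fill each slot from a non-blocking call ...
//     UPstream::waitRequests(requests);  // all entries become MPI_REQUEST_NULL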


bool Foam::UPstream::waitAnyRequest(const label pos, label len)
{
    // No-op for non-parallel, no pending requests or out-of-range
    if
    (
        !UPstream::parRun()
     || (pos < 0 || pos >= PstreamGlobals::outstandingRequests_.size())
     || !len
    )
    {
        return false;
    }

    label count = (PstreamGlobals::outstandingRequests_.size() - pos);

    // Apply range-checking on slice with (len < 0) behaving like npos
    // (ie, the rest of the list)
    if (len >= 0 && len < count)
    {
        // A non-trailing slice
        count = len;
    }
    // Have count >= 1

    auto* waitRequests = (PstreamGlobals::outstandingRequests_.data() + pos);

    if (UPstream::debug)
    {
        Pout<< "UPstream::waitAnyRequest : starting wait for some of "
            << count << " requests starting at " << pos << endl;
    }

    profilingPstream::beginTiming();

    // On success: sets request to MPI_REQUEST_NULL
    int index = MPI_UNDEFINED;
    if (MPI_Waitany(count, waitRequests, &index, MPI_STATUS_IGNORE))
    {
        FatalErrorInFunction
            << "MPI_Waitany returned with error"
            << Foam::abort(FatalError);
    }

    profilingPstream::addWaitTime();

    if (index == MPI_UNDEFINED)
    {
        // No active request handles
        return false;
    }

    return true;
}


bool Foam::UPstream::waitSomeRequests
(
    const label pos,
    DynamicList<int>* indices
)
{
    // No-op for non-parallel, no pending requests or out-of-range
    if
    (
        !UPstream::parRun()
     || (pos < 0 || pos >= PstreamGlobals::outstandingRequests_.size())
        // || !len
    )
    {
        if (indices)
        {
            indices->clear();
        }
        return false;
    }

    label count = (PstreamGlobals::outstandingRequests_.size() - pos);

    // Apply range-checking on slice with (len < 0) behaving like npos
    // (ie, the rest of the list)
    // if (len >= 0 && len < count)
    // {
    //     // A non-trailing slice
    //     count = len;
    // }
    // Have count >= 1

    auto* waitRequests = (PstreamGlobals::outstandingRequests_.data() + pos);

    if (UPstream::debug)
    {
        Pout<< "UPstream::waitSomeRequests : starting wait for any of "
            << count << " requests starting at " << pos << endl;
    }


    // Local temporary storage, or return via calling parameter
    List<int> tmpIndices;

    if (indices)
    {
        indices->resize_nocopy(count);
    }
    else
    {
        tmpIndices.resize(count);
    }

    profilingPstream::beginTiming();

    // On success: sets non-blocking requests to MPI_REQUEST_NULL
    int outcount = 0;
    if
    (
        MPI_Waitsome
        (
            count,
            waitRequests,
            &outcount,
            (indices ? indices->data() : tmpIndices.data()),
            MPI_STATUSES_IGNORE
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Waitsome returned with error"
            << Foam::abort(FatalError);
    }

    profilingPstream::addWaitTime();

    if (outcount == MPI_UNDEFINED || outcount < 1)
    {
        // No active request handles
        if (indices)
        {
            indices->clear();
        }
        return false;
    }

    if (indices)
    {
        indices->resize(outcount);
    }

    return true;
}
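
// Draining completions as they arrive (sketch). As with MPI_Waitsome(),
// the returned indices are relative to 'pos':
//
//     DynamicList<int> ready;
//     while (UPstream::waitSomeRequests(startReq, &ready))
//     {
//         for (const int idx : ready)
//         {
//             // ... handle completion of request (startReq + idx) ...
//         }
//     }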


Foam::label Foam::UPstream::waitAnyRequest(UList<UPstream::Request>& requests)
{
    // No-op for non-parallel or no pending requests
    if (!UPstream::parRun() || requests.empty())
    {
        return -1;
    }

    // Looks ugly but is legitimate since UPstream::Request is an intptr_t,
    // which is always large enough to hold an MPI_Request (int or pointer)

    label count = 0;
    auto* waitRequests = reinterpret_cast<MPI_Request*>(requests.data());

    // Transcribe UPstream::Request into MPI_Request
    // - do not change locations within the list since these are relevant
    //   for the return index.
    for (auto& req : requests)
    {
        waitRequests[count] = PstreamDetail::Request::get(req);
        ++count;
    }

    profilingPstream::beginTiming();

    // On success: sets request to MPI_REQUEST_NULL
    int index = MPI_UNDEFINED;
    if (MPI_Waitany(count, waitRequests, &index, MPI_STATUS_IGNORE))
    {
        FatalErrorInFunction
            << "MPI_Waitany returned with error"
            << Foam::abort(FatalError);
    }

    profilingPstream::addWaitTime();

    if (index == MPI_UNDEFINED)
    {
        index = -1;  // No outstanding requests
    }

    // Transcribe MPI_Request back into UPstream::Request
    // - do in reverse order - see note in finishedRequests()
    {
        for (label i = count-1; i >= 0; --i)
        {
            requests[i] = UPstream::Request(waitRequests[i]);
        }

        // Trailing portion
        for (label i = count; i < requests.size(); ++i)
        {
            requests[i] = UPstream::Request(MPI_REQUEST_NULL);
        }
    }

    return index;
}
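
// Example dispatch loop (sketch): the return value is the completed
// index, or -1 once nothing is outstanding:
//
//     label idx;
//     while ((idx = UPstream::waitAnyRequest(requests)) >= 0)
//     {
//         // ... process whatever requests[idx] was tied to ...
//     }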


// FUTURE?
//


void Foam::UPstream::waitRequest(const label i)
{
    // No-op for non-parallel, or out-of-range (eg, placeholder indices)
    if
    (
        !UPstream::parRun()
     || i < 0
     || i >= PstreamGlobals::outstandingRequests_.size()
    )
    {
        return;
    }

    auto& request = PstreamGlobals::outstandingRequests_[i];

    // No-op for null request
    if (MPI_REQUEST_NULL == request)
    {
        return;
    }

    if (UPstream::debug)
    {
        Pout<< "UPstream::waitRequest : starting wait for request:"
            << i << endl;
    }

    profilingPstream::beginTiming();

    // On success: sets request to MPI_REQUEST_NULL
    if (MPI_Wait(&request, MPI_STATUS_IGNORE))
    {
        FatalErrorInFunction
            << "MPI_Wait returned with error"
            << Foam::abort(FatalError);
    }

    profilingPstream::addWaitTime();

    if (UPstream::debug)
    {
        Pout<< "UPstream::waitRequest : finished wait for request:"
            << i << endl;
    }
}


void Foam::UPstream::waitRequest(UPstream::Request& req)
{
    // No-op for non-parallel
    if (!UPstream::parRun())
    {
        return;
    }

    MPI_Request request = PstreamDetail::Request::get(req);

    // No-op for null request
    if (MPI_REQUEST_NULL == request)
    {
        return;
    }

    profilingPstream::beginTiming();

    if (MPI_Wait(&request, MPI_STATUS_IGNORE))
    {
        FatalErrorInFunction
            << "MPI_Wait returned with error"
            << Foam::abort(FatalError);
    }

    profilingPstream::addWaitTime();

    req = UPstream::Request(MPI_REQUEST_NULL);  // Now inactive
}


bool Foam::UPstream::finishedRequest(const label i)
{
    // No-op for non-parallel, or out-of-range (eg, placeholder indices)
    if
    (
        !UPstream::parRun()
     || i < 0
     || i >= PstreamGlobals::outstandingRequests_.size()
    )
    {
        return true;
    }

    if (UPstream::debug)
    {
        Pout<< "UPstream::finishedRequest : check request:"
            << i << endl;
    }

    auto& request = PstreamGlobals::outstandingRequests_[i];

    // Fast-path (no-op) for null request
    if (MPI_REQUEST_NULL == request)
    {
        return true;
    }

    // On success: sets request to MPI_REQUEST_NULL
    int flag = 0;
    MPI_Test(&request, &flag, MPI_STATUS_IGNORE);

    return flag != 0;
}
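
// Example polling pattern (sketch): test without blocking, overlapping
// other work until completion (each MPI_Test call also gives MPI a
// chance to progress the communication):
//
//     while (!UPstream::finishedRequest(reqIdx))
//     {
//         // ... do unrelated work ...
//     }
//     // the buffer associated with reqIdx may now be reused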


bool Foam::UPstream::finishedRequest(UPstream::Request& req)
{
    // No-op for non-parallel
    if (!UPstream::parRun())
    {
        return true;
    }

    MPI_Request request = PstreamDetail::Request::get(req);

    // Fast-path (no-op) for null request
    if (MPI_REQUEST_NULL == request)
    {
        return true;
    }

    int flag = 0;
    MPI_Test(&request, &flag, MPI_STATUS_IGNORE);

    if (flag)
    {
        // Success: now inactive
        req = UPstream::Request(MPI_REQUEST_NULL);
    }

    return flag != 0;
}


bool Foam::UPstream::finishedRequests(const label pos, label len)
{
    // No-op for non-parallel, or out-of-range (eg, placeholder indices)
    if
    (
        !UPstream::parRun()
     || (pos < 0 || pos >= PstreamGlobals::outstandingRequests_.size())
     || !len
    )
    {
        return true;
    }

    label count = (PstreamGlobals::outstandingRequests_.size() - pos);

    // Apply range-checking on slice with (len < 0) behaving like npos
    // (ie, the rest of the list)
    if (len >= 0 && len < count)
    {
        // A non-trailing slice
        count = len;
    }
    // Have count >= 1

    if (UPstream::debug)
    {
        Pout<< "UPstream::finishedRequests : check " << count
            << " requests starting at " << pos << endl;
    }

    auto* waitRequests = (PstreamGlobals::outstandingRequests_.data() + pos);

    int flag = 1;

    if (count == 1)
    {
        // Fast-path (no-op) for single null request
        if (MPI_REQUEST_NULL == *waitRequests)
        {
            return true;
        }

        // On success: sets request to MPI_REQUEST_NULL
        MPI_Test(waitRequests, &flag, MPI_STATUS_IGNORE);
    }
    else if (count > 1)
    {
        // On success: sets each request to MPI_REQUEST_NULL
        // On failure: no request is modified
        MPI_Testall(count, waitRequests, &flag, MPI_STATUSES_IGNORE);
    }

    return flag != 0;
}


bool Foam::UPstream::finishedRequests(UList<UPstream::Request>& requests)
{
    // No-op for non-parallel or no pending requests
    if (!UPstream::parRun() || requests.empty())
    {
        return true;
    }

    // Looks ugly but is legitimate since UPstream::Request is an intptr_t,
    // which is always large enough to hold an MPI_Request (int or pointer)

    label count = 0;
    auto* waitRequests = reinterpret_cast<MPI_Request*>(requests.data());

    for (auto& req : requests)
    {
        MPI_Request request = PstreamDetail::Request::get(req);

        if (MPI_REQUEST_NULL != request)  // Apply some prefiltering
        {
            waitRequests[count] = request;
            ++count;
        }
    }

    if (!count)
    {
        // No active handles
        return true;
    }

    // On success: sets each request to MPI_REQUEST_NULL
    // On failure: no request is modified
    int flag = 0;
    MPI_Testall(count, waitRequests, &flag, MPI_STATUSES_IGNORE);

    if (flag)
    {
        // Success: reset all requests to MPI_REQUEST_NULL
        requests = UPstream::Request(MPI_REQUEST_NULL);
    }
    else
    {
        // Not all done. Recover wrapped representation but in reverse order
        // since sizeof(MPI_Request) can be smaller than
        // sizeof(UPstream::Request::value_type)
        // eg, mpich has MPI_Request as 'int'
        //
        // This is uglier than we'd like, but much better than allocating
        // and freeing a scratch buffer each time we query things.

        for (label i = count-1; i >= 0; --i)
        {
            requests[i] = UPstream::Request(waitRequests[i]);
        }

        // Trailing portion
        for (label i = count; i < requests.size(); ++i)
        {
            requests[i] = UPstream::Request(MPI_REQUEST_NULL);
        }
    }

    return flag != 0;
}
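
// The layout assumption behind the reinterpret_cast above could be made
// explicit along these lines (sketch, not present in this file):
//
//     static_assert
//     (
//         sizeof(MPI_Request) <= sizeof(UPstream::Request::value_type),
//         "UPstream::Request must be able to hold an MPI_Request"
//     );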


bool Foam::UPstream::finishedRequestPair(label& req0, label& req1)
{
    // No-op for non-parallel
    if (!UPstream::parRun())
    {
        req0 = -1;
        req1 = -1;
        return true;
    }

    bool anyActive = false;
    MPI_Request waitRequests[2];

    // No-op for out-of-range (eg, placeholder indices)

    if (req0 >= 0 && req0 < PstreamGlobals::outstandingRequests_.size())
    {
        waitRequests[0] = PstreamGlobals::outstandingRequests_[req0];
    }
    else
    {
        waitRequests[0] = MPI_REQUEST_NULL;
    }

    if (req1 >= 0 && req1 < PstreamGlobals::outstandingRequests_.size())
    {
        waitRequests[1] = PstreamGlobals::outstandingRequests_[req1];
    }
    else
    {
        waitRequests[1] = MPI_REQUEST_NULL;
    }

    if (MPI_REQUEST_NULL != waitRequests[0])  // An active handle
    {
        anyActive = true;
    }
    else
    {
        req0 = -1;
    }

    if (MPI_REQUEST_NULL != waitRequests[1])  // An active handle
    {
        anyActive = true;
    }
    else
    {
        req1 = -1;
    }

    if (!anyActive)
    {
        // No active handles
        return true;
    }

    profilingPstream::beginTiming();

    // On success: sets each request to MPI_REQUEST_NULL
    int indices[2];
    int outcount = 0;
    if
    (
        MPI_Testsome
        (
            2,
            waitRequests,
            &outcount,
            indices,
            MPI_STATUSES_IGNORE
        )
    )
    {
        FatalErrorInFunction
            << "MPI_Testsome returned with error"
            << Foam::abort(FatalError);
    }

    profilingPstream::addWaitTime();

    if (outcount == MPI_UNDEFINED)
    {
        // No active request handles.
        // Slightly pedantic, but copy back requests in case they were altered

        if (req0 >= 0)
        {
            PstreamGlobals::outstandingRequests_[req0] = waitRequests[0];
        }

        if (req1 >= 0)
        {
            PstreamGlobals::outstandingRequests_[req1] = waitRequests[1];
        }

        // Flag indices as 'done'
        req0 = -1;
        req1 = -1;
        return true;
    }

    // Copy back requests to their 'stack' locations
    for (int i = 0; i < outcount; ++i)
    {
        const int idx = indices[i];

        if (idx == 0)
        {
            if (req0 >= 0)
            {
                PstreamGlobals::outstandingRequests_[req0] = waitRequests[0];
                req0 = -1;
            }
        }
        if (idx == 1)
        {
            if (req1 >= 0)
            {
                PstreamGlobals::outstandingRequests_[req1] = waitRequests[1];
                req1 = -1;
            }
        }
    }

    return (outcount > 0);
}
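
// Example overlap pattern (sketch): 'sendReq'/'recvReq' are indices from
// earlier non-blocking calls; each flips to -1 as its request completes:
//
//     while (sendReq >= 0 || recvReq >= 0)
//     {
//         UPstream::finishedRequestPair(sendReq, recvReq);
//         // ... overlap computation here ...
//     }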


void Foam::UPstream::waitRequestPair(label& req0, label& req1)
{
    // No-op for non-parallel. Flag indices as 'done'
    if (!UPstream::parRun())
    {
        req0 = -1;
        req1 = -1;
        return;
    }

    int count = 0;
    MPI_Request waitRequests[2];

    // No-op for out-of-range (eg, placeholder indices)
    // Prefilter inactive handles

    if (req0 >= 0 && req0 < PstreamGlobals::outstandingRequests_.size())
    {
        waitRequests[count] = PstreamGlobals::outstandingRequests_[req0];
        PstreamGlobals::outstandingRequests_[req0] = MPI_REQUEST_NULL;

        if (MPI_REQUEST_NULL != waitRequests[count])  // An active handle
        {
            ++count;
        }
    }

    if (req1 >= 0 && req1 < PstreamGlobals::outstandingRequests_.size())
    {
        waitRequests[count] = PstreamGlobals::outstandingRequests_[req1];
        PstreamGlobals::outstandingRequests_[req1] = MPI_REQUEST_NULL;

        if (MPI_REQUEST_NULL != waitRequests[count])  // An active handle
        {
            ++count;
        }
    }

    // Flag in advance as being handled
    req0 = -1;
    req1 = -1;

    if (!count)
    {
        // No active handles
        return;
    }

    profilingPstream::beginTiming();

    // On success: sets each request to MPI_REQUEST_NULL
    if (MPI_Waitall(count, waitRequests, MPI_STATUSES_IGNORE))
    {
        FatalErrorInFunction
            << "MPI_Waitall returned with error"
            << Foam::abort(FatalError);
    }

    profilingPstream::addWaitTime();
}


// ************************************************************************* //