processorTopologyNew.H
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2022-2023 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

InClass
    Foam::processorTopology

Description
    Define the processor-processor connection table by walking a list of
    patches and detecting the processor ones.

    This has been split off as a separate include from processorTopology
    to minimize dependencies.

Warning
    Does not currently correctly support multiple processor
    patches connecting two processors.

\*---------------------------------------------------------------------------*/

#ifndef Foam_processorTopologyNew_H
#define Foam_processorTopologyNew_H

#include "processorTopology.H"
#include "commSchedule.H"
#include "DynamicList.H"

// * * * * * * * * * * * * * Static Member Functions * * * * * * * * * * * * //

template<class ProcPatch, class PatchListType>
Foam::processorTopology Foam::processorTopology::New
(
    const PatchListType& patches,
    const label comm
)
{
    processorTopology topo;

    topo.comm_ = comm;  // The communicator
    auto& schedule = topo.patchSchedule_;

    schedule.resize(2*patches.size());


    // The evaluation number within the schedule
    label patchEvali = 0;

    // Number of processor patches
    label numProcPatches = 0;

    //
    // 1. Schedule all non-processor patches
    //

    forAll(patches, patchi)
    {
        if (isA<ProcPatch>(patches[patchi]))
        {
            ++numProcPatches;
        }
        else
        {
            schedule[patchEvali++].setInitEvaluate(patchi);
            schedule[patchEvali++].setEvaluate(patchi);
        }
    }


    // Assemble processor patch information
    if (UPstream::parRun() && numProcPatches)
    {
        // Create reverse map (from proc to patch)
        // - assumes single connections between processors

        auto& patchMap = topo.procPatchMap_;
        patchMap.reserve(numProcPatches);

        forAll(patches, patchi)
        {
            const auto* cpp = isA<ProcPatch>(patches[patchi]);
            if (cpp)
            {
                patchMap.set(cpp->neighbProcNo(), patchi);
            }
        }
    }


    //
    // 2. Handle processor patches
    //

    if
    (
        UPstream::parRun()
     && UPstream::defaultCommsType == UPstream::commsTypes::scheduled
    )
    {
        const label myProci = UPstream::myProcNo(comm);
        const label nProcs = UPstream::nProcs(comm);

        // Synchronized on all processors
        const auto& procToProcs = topo.procAdjacency();

        // Determine the schedule for all processor patches.
        // Insert processor pair once to determine the schedule.
        // Each processor pair stands for both send and receive.

        label nComms = 0;
        for (const labelList& nbrProcs : procToProcs)
        {
            nComms += nbrProcs.size();
        }
        DynamicList<labelPair> comms(nComms/2);

        forAll(procToProcs, proci)
        {
            for (const label nbrProci : procToProcs[proci])
            {
                if (proci < nbrProci)
                {
                    // Owner to neighbour connection
                    comms.push_back(labelPair(proci, nbrProci));
                }
            }
        }

        // Determine a schedule.

        labelList mySchedule
        (
            commSchedule(nProcs, comms).procSchedule()[myProci]
        );

        for (const label scheduleIndex : mySchedule)
        {
            // Get the other processor
            label nbrProci = comms[scheduleIndex].first();
            if (nbrProci == myProci)
            {
                nbrProci = comms[scheduleIndex].second();
            }
            const label patchi = topo.procPatchLookup(nbrProci);

            if (myProci > nbrProci)
            {
                schedule[patchEvali++].setInitEvaluate(patchi);
                schedule[patchEvali++].setEvaluate(patchi);
            }
            else
            {
                schedule[patchEvali++].setEvaluate(patchi);
                schedule[patchEvali++].setInitEvaluate(patchi);
            }
        }
    }
    else
    {
        // Non-blocking schedule for processor patches

        if (numProcPatches)
        {
            forAll(patches, patchi)
            {
                if (isA<ProcPatch>(patches[patchi]))
                {
                    schedule[patchEvali].setInitEvaluate(patchi);
                    schedule[patchEvali + numProcPatches].setEvaluate(patchi);
                    ++patchEvali;
                }
            }
        }
    }

    return topo;
}


// * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //

#endif

// ************************************************************************* //
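
A minimal usage sketch follows, for orientation only and not part of the header above. It assumes a polyMesh named mesh is in scope, that processorPolyPatch is used as the ProcPatch type for detecting processor patches, and that the accessors procAdjacency(), comm() and procPatchLookup() are declared as in processorTopology.H; the helper name printNeighbours is hypothetical.

#include "polyMesh.H"
#include "processorPolyPatch.H"
#include "processorTopologyNew.H"

using namespace Foam;

// Hypothetical helper: build the topology from the mesh boundary patches and
// report the neighbouring ranks together with the local processor patch
// attached to each one.
void printNeighbours(const polyMesh& mesh)
{
    if (!UPstream::parRun())
    {
        return;  // Nothing to report for a serial run
    }

    const processorTopology topo =
        processorTopology::New<processorPolyPatch>
        (
            mesh.boundaryMesh(),
            UPstream::worldComm
        );

    const label myProci = UPstream::myProcNo(topo.comm());

    // Globally synchronized processor-to-processor adjacency table
    const labelListList& adjacency = topo.procAdjacency();

    for (const label nbrProci : adjacency[myProci])
    {
        Info<< "rank " << myProci << " <-> rank " << nbrProci
            << " via local patch " << topo.procPatchLookup(nbrProci) << nl;
    }
}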