processorTopologyNew.H
Go to the documentation of this file.
/*---------------------------------------------------------------------------*\
  =========                 |
  \\      /  F ield         | OpenFOAM: The Open Source CFD Toolbox
   \\    /   O peration     |
    \\  /    A nd           | www.openfoam.com
     \\/     M anipulation  |
-------------------------------------------------------------------------------
    Copyright (C) 2011-2017 OpenFOAM Foundation
    Copyright (C) 2022 OpenCFD Ltd.
-------------------------------------------------------------------------------
License
    This file is part of OpenFOAM.

    OpenFOAM is free software: you can redistribute it and/or modify it
    under the terms of the GNU General Public License as published by
    the Free Software Foundation, either version 3 of the License, or
    (at your option) any later version.

    OpenFOAM is distributed in the hope that it will be useful, but WITHOUT
    ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
    FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
    for more details.

    You should have received a copy of the GNU General Public License
    along with OpenFOAM.  If not, see <http://www.gnu.org/licenses/>.

InClass
    Foam::processorTopology

Description
    Define the processor-processor connection table by walking a list of
    patches and detecting the processor ones.

    This has been split off as a separate include from processorTopology
    to minimize dependencies.

Warning
    Does not currently correctly support multiple processor
    patches connecting two processors.

\*---------------------------------------------------------------------------*/
42 
43 #ifndef Foam_processorTopologyNew_H
44 #define Foam_processorTopologyNew_H
45 
46 #include "processorTopology.H"
47 #include "bitSet.H"
48 #include "commSchedule.H"
49 #include "DynamicList.H"
50 #include "Pstream.H"
51 
52 // * * * * * * * * * * * * * Static Member Functions * * * * * * * * * * * * //
53 
54 template<class ProcPatch, class PatchListType>
56 (
57  const PatchListType& patches,
58  const label comm
59 )
60 {
61  const label myProci = Pstream::myProcNo(comm);
62  const label nProcs = Pstream::nProcs(comm);
63 
64  processorTopology procTopo;
65 
66  auto& procToProcs = procTopo.procNeighbours_;
67  auto& procToPatch = procTopo.procPatchMap_;
68  auto& schedule = procTopo.patchSchedule_;
69 
70  procToProcs.resize(nProcs);
71  schedule.resize(2*patches.size());
72 
73 
74  if (Pstream::parRun())
75  {
76  // Fill my 'slot' with my neighbours
77  auto& procSlot = procToProcs[myProci];
78 
79  bitSet isNeighbour(procToProcs.size());
80 
81  forAll(patches, patchi)
82  {
83  const auto* cpp = isA<ProcPatch>(patches[patchi]);
84  if (cpp)
85  {
86  const label nbrProci = cpp->neighbProcNo();
87 
88  isNeighbour.set(nbrProci);
89  }
90  }
91 
92  // The neighbouring procs in sorted (ascending) order
93  procSlot = isNeighbour.sortedToc();
94 
95  const label maxNbrProci = procSlot.empty() ? -1 : procSlot.last();
96 
97  // Note could also use Map<label> if desired
98  procToPatch.resize_nocopy(maxNbrProci + 1);
99  procToPatch = -1;
100 
101  forAll(patches, patchi)
102  {
103  const auto* cpp = isA<ProcPatch>(patches[patchi]);
104  if (cpp)
105  {
106  const label nbrProci = cpp->neighbProcNo();
107 
108  // Reverse map
109  procToPatch[nbrProci] = patchi;
110  }
111  }
112 
113  // Synchronize on all processors
114  Pstream::allGatherList(procToProcs, UPstream::msgType(), comm);
115  }
116 
117 
118  // Define the schedule
119 
120  label patchEvali = 0;
121 
122  // 1. All non-processor patches
123  // ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
124 
125  forAll(patches, patchi)
126  {
127  if (!isA<ProcPatch>(patches[patchi]))
128  {
129  schedule[patchEvali++].setInitEvaluate(patchi);
130  schedule[patchEvali++].setEvaluate(patchi);
131  }
132  }
133 
134 
135  // 2. All processor patches
136  // ~~~~~~~~~~~~~~~~~~~~~~~~
137 
138  if
139  (
142  )
143  {
144  // Determine the schedule for all processor patches.
145  // Insert processor pair once to determine the schedule.
146  // Each processor pair stands for both send and receive.
147 
148  label nComms = 0;
149  for (const labelList& nbrProcs : procToProcs)
150  {
151  nComms += nbrProcs.size();
152  }
153  DynamicList<labelPair> comms(nComms/2);
154 
155  forAll(procToProcs, proci)
156  {
157  for (const label nbrProci : procToProcs[proci])
158  {
159  if (proci < nbrProci)
160  {
161  comms.append(labelPair(proci, nbrProci));
162  }
163  }
164  }
165 
166  // Determine a schedule.
167  labelList mySchedule
168  (
170  (
171  nProcs,
172  comms
173  ).procSchedule()[myProci]
174  );
175 
176  for (const label scheduleIndex : mySchedule)
177  {
178  // Get the other processor
179  label nbrProci = comms[scheduleIndex][0];
180  if (nbrProci == myProci)
181  {
182  nbrProci = comms[scheduleIndex][1];
183  }
184  const label patchi = procToPatch[nbrProci];
185 
186  if (myProci > nbrProci)
187  {
188  schedule[patchEvali++].setInitEvaluate(patchi);
189  schedule[patchEvali++].setEvaluate(patchi);
190  }
191  else
192  {
193  schedule[patchEvali++].setEvaluate(patchi);
194  schedule[patchEvali++].setInitEvaluate(patchi);
195  }
196  }
197  }
198  else
199  {
200  // Non-blocking schedule for processor patches
201 
202  // initEvaluate
203  forAll(patches, patchi)
204  {
205  if (isA<ProcPatch>(patches[patchi]))
206  {
207  schedule[patchEvali++].setInitEvaluate(patchi);
208  }
209  }
210 
211  // evaluate
212  forAll(patches, patchi)
213  {
214  if (isA<ProcPatch>(patches[patchi]))
215  {
216  schedule[patchEvali++].setEvaluate(patchi);
217  }
218  }
219  }
220 
221  return procTopo;
222 }
223 
224 
225 // * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * * //
226 
227 #endif
228 
229 // ************************************************************************* //
void resize(const label len)
Adjust allocated size of list.
Definition: ListI.H:132
static bool & parRun() noexcept
Test if this a parallel run.
Definition: UPstream.H:639
static int & msgType() noexcept
Message tag of standard messages.
Definition: UPstream.H:806
static int myProcNo(const label communicator=worldComm)
Number of this process (starting from masterNo() = 0)
Definition: UPstream.H:688
#define forAll(list, i)
Loop across all elements in list.
Definition: stdFoam.H:413
static void allGatherList(List< T > &values, const int tag=UPstream::msgType(), const label comm=UPstream::worldComm)
Gather data, but keep individual values separate. Uses linear/tree communication. ...
static label nProcs(const label communicator=worldComm)
Number of ranks in parallel run (for given communicator) is 1 for serial run.
Definition: UPstream.H:656
"scheduled" : (MPI_Send, MPI_Recv)
A 1D vector of objects of type <T> that resizes itself as necessary to accept the new objects...
Definition: DynamicList.H:51
void append(const T &val)
Copy append an element to the end of this list.
Definition: DynamicList.H:558
Pair< label > labelPair
A pair of labels.
Definition: Pair.H:50
Determines/represents processor-processor connection. After instantiation contains the processor-proc...
Determines the order in which a set of processors should communicate with one another.
Definition: commSchedule.H:63
label nProcs() const noexcept
The number of processors used by the topology.
static processorTopology New(const PatchListType &patches, const label comm)
Factory method to create topology, schedule and proc/patch maps.
static commsTypes defaultCommsType
Default commsType.
Definition: UPstream.H:337
A bitSet stores bits (elements with only two states) in packed internal format and supports a variety...
Definition: bitSet.H:59
const polyBoundaryMesh & patches