43 #ifndef Foam_processorTopologyNew_H 44 #define Foam_processorTopologyNew_H 54 template<
class ProcPatch,
class PatchListType>
66 auto& procToProcs = procTopo.procNeighbours_;
67 auto& procToPatch = procTopo.procPatchMap_;
68 auto& schedule = procTopo.patchSchedule_;
71 schedule.resize(2*
patches.size());
77 auto& procSlot = procToProcs[myProci];
79 bitSet isNeighbour(procToProcs.size());
83 const auto* cpp = isA<ProcPatch>(
patches[patchi]);
86 const label nbrProci = cpp->neighbProcNo();
88 isNeighbour.set(nbrProci);
93 procSlot = isNeighbour.sortedToc();
95 const label maxNbrProci = procSlot.empty() ? -1 : procSlot.last();
98 procToPatch.resize_nocopy(maxNbrProci + 1);
103 const auto* cpp = isA<ProcPatch>(
patches[patchi]);
106 const label nbrProci = cpp->neighbProcNo();
109 procToPatch[nbrProci] = patchi;
120 label patchEvali = 0;
127 if (!isA<ProcPatch>(
patches[patchi]))
129 schedule[patchEvali++].setInitEvaluate(patchi);
130 schedule[patchEvali++].setEvaluate(patchi);
149 for (
const labelList& nbrProcs : procToProcs)
151 nComms += nbrProcs.size();
155 forAll(procToProcs, proci)
157 for (
const label nbrProci : procToProcs[proci])
159 if (proci < nbrProci)
173 ).procSchedule()[myProci]
176 for (
const label scheduleIndex : mySchedule)
179 label nbrProci = comms[scheduleIndex][0];
180 if (nbrProci == myProci)
182 nbrProci = comms[scheduleIndex][1];
184 const label patchi = procToPatch[nbrProci];
186 if (myProci > nbrProci)
188 schedule[patchEvali++].setInitEvaluate(patchi);
189 schedule[patchEvali++].setEvaluate(patchi);
193 schedule[patchEvali++].setEvaluate(patchi);
194 schedule[patchEvali++].setInitEvaluate(patchi);
205 if (isA<ProcPatch>(
patches[patchi]))
207 schedule[patchEvali++].setInitEvaluate(patchi);
214 if (isA<ProcPatch>(
patches[patchi]))
216 schedule[patchEvali++].setEvaluate(patchi);
void resize(const label len)
Adjust allocated size of list.
static bool & parRun() noexcept
Test if this is a parallel run.
static int & msgType() noexcept
Message tag of standard messages.
static int myProcNo(const label communicator=worldComm)
Number of this process (starting from masterNo() = 0)
#define forAll(list, i)
Loop across all elements in list.
static void allGatherList(List< T > &values, const int tag=UPstream::msgType(), const label comm=UPstream::worldComm)
Gather data, but keep individual values separate. Uses linear/tree communication. ...
static label nProcs(const label communicator=worldComm)
Number of ranks in parallel run (for given communicator); it is 1 for a serial run.
"scheduled" : (MPI_Send, MPI_Recv)
A 1D vector of objects of type <T> that resizes itself as necessary to accept the new objects...
void append(const T &val)
Copy append an element to the end of this list.
Pair< label > labelPair
A pair of labels.
Determines/represents processor-processor connection. After instantiation contains the processor-proc...
Determines the order in which a set of processors should communicate with one another.
label nProcs() const noexcept
The number of processors used by the topology.
static processorTopology New(const PatchListType &patches, const label comm)
Factory method to create topology, schedule and proc/patch maps.
static commsTypes defaultCommsType
Default commsType.
A bitSet stores bits (elements with only two states) in packed internal format and supports a variety...
const polyBoundaryMesh & patches