fei_CommUtils.cpp
#include <fei_CommUtils.hpp>
#include <fei_TemplateUtils.hpp>

#undef fei_file
#define fei_file "fei_CommUtils.cpp"

#include <fei_ErrMacros.hpp>

namespace fei {

//------------------------------------------------------------------------
int localProc(MPI_Comm comm)
{
#ifdef FEI_SER
  return 0;
#else
  int local_proc = 0;
  MPI_Comm_rank(comm, &local_proc);
  return local_proc;
#endif
}

//------------------------------------------------------------------------
int numProcs(MPI_Comm comm)
{
#ifdef FEI_SER
  return 1;
#else
  int num_procs = 1;
  MPI_Comm_size(comm, &num_procs);
  return num_procs;
#endif
}

//------------------------------------------------------------------------
void Barrier(MPI_Comm comm)
{
#ifndef FEI_SER
  MPI_Barrier(comm);
#endif
}

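//------------------------------------------------------------------------
//Illustrative usage sketch, added for this listing (not part of the
//original FEI source; the helper name is hypothetical): query rank and
//size, then synchronize. In an FEI_SER build localProc always returns 0,
//numProcs returns 1, and Barrier is a no-op, so the same code works in
//serial and parallel builds.
inline bool example_is_lone_root(MPI_Comm comm)
{
  bool isRoot = (fei::localProc(comm) == 0);
  bool isSerialRun = (fei::numProcs(comm) == 1);
  fei::Barrier(comm); //all procs pause here until everyone arrives
  return isRoot && isSerialRun;
}
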
//------------------------------------------------------------------------
int mirrorProcs(MPI_Comm comm, std::vector<int>& toProcs, std::vector<int>& fromProcs)
{
  fromProcs.resize(0);
#ifdef FEI_SER
  fromProcs.push_back(0);
  return(0);
#else
  int num_procs = fei::numProcs(comm);
  std::vector<int> tmpIntData(num_procs*3, 0);

  int* buf = &tmpIntData[0];
  int* recvbuf = buf+num_procs;

  //flag each proc that this proc sends to
  for(unsigned i=0; i<toProcs.size(); ++i) {
    buf[toProcs[i]] = 1;
  }

  //the last num_procs entries are the recv-counts for MPI_Reduce_scatter:
  //each proc receives exactly one value of the summed flag array
  for(int ii=2*num_procs; ii<3*num_procs; ++ii) {
    buf[ii] = 1;
  }

  //after the reduce-scatter, buf[num_procs] holds the number of procs
  //that flagged this proc, i.e., the number of procs that send to us
  CHK_MPI( MPI_Reduce_scatter(buf, &(buf[num_procs]), &(buf[2*num_procs]),
                              MPI_INT, MPI_SUM, comm) );

  int numRecvProcs = buf[num_procs];

  int tag = 11116;
  std::vector<MPI_Request> mpiReqs(numRecvProcs);

  //we know how many procs will send to us, but not which ones, so recv
  //from MPI_ANY_SOURCE and read the sender out of status.MPI_SOURCE below
  int offset = 0;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    CHK_MPI( MPI_Irecv(&(recvbuf[ii]), 1, MPI_INT, MPI_ANY_SOURCE, tag,
                       comm, &(mpiReqs[offset++])) );
  }

  for(unsigned i=0; i<toProcs.size(); ++i) {
    CHK_MPI( MPI_Send(&(toProcs[i]), 1, MPI_INT, toProcs[i], tag, comm) );
  }

  MPI_Status status;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status);
    fromProcs.push_back(status.MPI_SOURCE);
  }

  std::sort(fromProcs.begin(), fromProcs.end());

  return(0);
#endif
}

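//------------------------------------------------------------------------
//Illustrative usage sketch, added for this listing (not part of the
//original FEI source; the helper name and ring pattern are hypothetical):
//each proc declares that it sends to the next-higher rank, and
//mirrorProcs discovers the corresponding senders.
inline void example_mirrorProcs_ring(MPI_Comm comm)
{
  int me = fei::localProc(comm);
  int np = fei::numProcs(comm);

  //this proc sends to one destination: the next rank, wrapping around
  std::vector<int> toProcs(1, (me+1)%np);

  //mirrorProcs fills fromProcs with the ranks that send to this proc;
  //for the ring pattern that is the previous rank, (me+np-1)%np
  std::vector<int> fromProcs;
  fei::mirrorProcs(comm, toProcs, fromProcs);
}
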
//------------------------------------------------------------------------
int mirrorCommPattern(MPI_Comm comm, comm_map* inPattern, comm_map*& outPattern)
{
#ifdef FEI_SER
  (void)inPattern;
  (void)outPattern;
#else
  int localP = localProc(comm);
  int numP = numProcs(comm);

  if (numP < 2) return(0);

  std::vector<int> buf(numP*2, 0);

  int numInProcs = inPattern->getMap().size();
  std::vector<int> inProcs(numInProcs);
  fei::copyKeysToVector(inPattern->getMap(), inProcs);

  std::vector<int> outProcs;

  int err = mirrorProcs(comm, inProcs, outProcs);
  if (err != 0) ERReturn(-1);

  std::vector<int> recvbuf(outProcs.size(), 0);

  outPattern = new comm_map(0,1);

  MPI_Datatype mpi_ttype = fei::mpiTraits<int>::mpi_type();

  //now recv a length (the contents of buf[i]) from each "out-proc", which
  //will be the length of the equation data that will also be recvd from that
  //proc.
  std::vector<MPI_Request> mpiReqs(outProcs.size());
  std::vector<MPI_Status> mpiStss(outProcs.size());
  MPI_Request* requests = &mpiReqs[0];
  MPI_Status* statuses = &mpiStss[0];

  int firsttag = 11117;
  int offset = 0;
  int* outProcsPtr = &outProcs[0];
  for(unsigned i=0; i<outProcs.size(); ++i) {
    if (MPI_Irecv(&(recvbuf[i]), 1, MPI_INT, outProcsPtr[i], firsttag,
                  comm, &requests[offset++]) != MPI_SUCCESS) ERReturn(-1);
  }

  comm_map::map_type& in_row_map = inPattern->getMap();
  comm_map::map_type::iterator
    in_iter = in_row_map.begin(),
    in_end = in_row_map.end();

  int* inProcsPtr = &inProcs[0];
  for(int ii=0; in_iter != in_end; ++in_iter, ++ii) {
    comm_map::row_type* in_row = in_iter->second;
    buf[ii] = in_row->size();
    if (MPI_Send(&(buf[ii]), 1, MPI_INT, inProcsPtr[ii], firsttag,
                 comm) != MPI_SUCCESS) ERReturn(-1);
  }

  int numOutProcs = outProcs.size();

  MPI_Waitall(numOutProcs, requests, statuses);
  std::vector<int> lengths(numOutProcs);
  int totalRecvLen = 0;
  offset = 0;
  for(int ii=0; ii<numOutProcs; ++ii) {
    if (recvbuf[ii] > 0) {
      lengths[offset++] = recvbuf[ii];
      totalRecvLen += recvbuf[ii];
    }
  }

  //now we need to create the space into which we'll receive the
  //lists that other procs send to us.
  std::vector<int> recvData(totalRecvLen, 999999);

  int tag2 = 11118;
  offset = 0;
  for(int ii=0; ii<numOutProcs; ++ii) {
    CHK_MPI(MPI_Irecv(&(recvData[offset]), lengths[ii], mpi_ttype,
                      outProcs[ii], tag2, comm, &requests[ii]) );
    offset += lengths[ii];
  }

  std::vector<int> sendList;

  in_iter = in_row_map.begin();

  for(int ii=0; in_iter != in_end; ++in_iter,++ii) {
    if (inProcs[ii] == localP) {
      continue;
    }
    sendList.resize(in_iter->second->size());
    fei::copySetToArray(*(in_iter->second), sendList.size(), &sendList[0]);

    CHK_MPI(MPI_Send(&sendList[0], sendList.size(), mpi_ttype,
                     inProcs[ii], tag2, comm) );
  }

  //our final communication operation is to catch the Irecvs we started above.
  for(int ii=0; ii<numOutProcs; ++ii) {
    MPI_Wait(&requests[ii], &statuses[ii]);
  }

  //now we've completed all the communication, so we're ready to put the data
  //we received into the outPattern object.
  offset = 0;
  for(int ii=0; ii<numOutProcs; ii++) {
    outPattern->addIndices(outProcs[ii], lengths[ii],
                           &(recvData[offset]));
    offset += lengths[ii];
  }

#endif
  return(0);
}

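//------------------------------------------------------------------------
//Illustrative usage sketch, added for this listing (not part of the
//original FEI source; the helper name and index values are hypothetical,
//and the comm_map constructor arguments (0,1) simply mirror how this file
//builds outPattern): describe the indices this proc sends to the next
//rank, then mirror the pattern to learn which indices will arrive.
inline void example_mirrorCommPattern(MPI_Comm comm)
{
  int me = fei::localProc(comm);
  int np = fei::numProcs(comm);
  if (np < 2) return;

  comm_map sendPattern(0,1);
  int indices[2] = { 2*me, 2*me+1 };
  sendPattern.addIndices((me+1)%np, 2, indices);

  //on return, recvPattern maps each sending proc to the indices it will
  //send to this proc (here: the previous rank's two indices)
  comm_map* recvPattern = NULL;
  fei::mirrorCommPattern(comm, &sendPattern, recvPattern);
  delete recvPattern;
}
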
//------------------------------------------------------------------------
int exchangeIntData(MPI_Comm comm,
                    const std::vector<int>& sendProcs,
                    std::vector<int>& sendData,
                    const std::vector<int>& recvProcs,
                    std::vector<int>& recvData)
{
  if (sendProcs.size() == 0 && recvProcs.size() == 0) return(0);
  if (sendProcs.size() != sendData.size()) return(-1);
#ifndef FEI_SER
  recvData.resize(recvProcs.size());
  std::vector<MPI_Request> mpiReqs;
  mpiReqs.resize(recvProcs.size());

  int tag = 11114;
  MPI_Datatype mpi_dtype = MPI_INT;

  //launch Irecv's for recvData:

  int localProc = fei::localProc(comm);
  int numRecvProcs = recvProcs.size();
  int req_offset = 0;
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    if (recvProcs[i] == localProc) {--numRecvProcs; continue; }

    CHK_MPI( MPI_Irecv(&(recvData[i]), 1, mpi_dtype, recvProcs[i], tag,
                       comm, &mpiReqs[req_offset++]) );
  }

  //send the sendData:

  for(unsigned i=0; i<sendProcs.size(); ++i) {
    if (sendProcs[i] == localProc) continue;

    CHK_MPI( MPI_Send(&(sendData[i]), 1, mpi_dtype,
                      sendProcs[i], tag, comm) );
  }

  //complete the Irecv's:

  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Status status;
    CHK_MPI( MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status) );
  }

#endif
  return(0);
}

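//------------------------------------------------------------------------
//Illustrative usage sketch, added for this listing (not part of the
//original FEI source; the helper name and the value 42 are hypothetical):
//each proc sends one int to the next rank and receives one from the
//previous rank. In practice sendProcs/recvProcs would come from
//mirrorProcs, and the exchanged ints are typically message lengths for
//a later, larger exchange.
inline void example_exchangeIntData(MPI_Comm comm)
{
  int me = fei::localProc(comm);
  int np = fei::numProcs(comm);
  if (np < 2) return;

  std::vector<int> sendProcs(1, (me+1)%np);    //one destination proc
  std::vector<int> sendData(1, 42);            //one int per destination
  std::vector<int> recvProcs(1, (me+np-1)%np); //one expected source proc
  std::vector<int> recvData;                   //sized and filled by the call

  fei::exchangeIntData(comm, sendProcs, sendData, recvProcs, recvData);
  //recvData[0] now holds the 42 sent by the previous rank
}
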
//------------------------------------------------------------------------
int Allreduce(MPI_Comm comm, bool localBool, bool& globalBool)
{
#ifndef FEI_SER
  int localInt = localBool ? 1 : 0;
  int globalInt = 0;

  CHK_MPI( MPI_Allreduce(&localInt, &globalInt, 1, MPI_INT, MPI_MAX, comm) );

  globalBool = globalInt==1 ? true : false;
#else
  globalBool = localBool;
#endif

  return(0);
}

}//namespace fei

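//------------------------------------------------------------------------
//Illustrative usage sketch, added for this listing (not part of the
//original FEI source; the helper name is hypothetical): because the
//reduction uses MPI_MAX over 0/1 values, the result is a logical OR
//across all procs, true if the local flag is true on any proc.
inline bool example_any_proc_failed(MPI_Comm comm, bool localError)
{
  bool globalError = false;
  fei::Allreduce(comm, localError, globalError);
  return globalError;
}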