fei_CommUtils.cpp
/*
// @HEADER
// ************************************************************************
//             FEI: Finite Element Interface to Linear Solvers
//                  Copyright (2005) Sandia Corporation.
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, the
// U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Alan Williams (william@sandia.gov)
//
// ************************************************************************
// @HEADER
*/


#include <fei_CommUtils.hpp>
#include <fei_TemplateUtils.hpp>

#include <algorithm> //for std::sort

#undef fei_file
#define fei_file "fei_CommUtils.cpp"

#include <fei_ErrMacros.hpp>

namespace fei {

//------------------------------------------------------------------------
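//Return the MPI rank of the local processor in 'comm'.
//In a serial (FEI_SER) build this is always 0.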
int localProc(MPI_Comm comm)
{
#ifdef FEI_SER
  return 0;
#else
  int local_proc = 0;
  MPI_Comm_rank(comm, &local_proc);
  return local_proc;
#endif
}

//------------------------------------------------------------------------
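//Return the number of processors in 'comm'.
//In a serial (FEI_SER) build this is always 1.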
int numProcs(MPI_Comm comm)
{
#ifdef FEI_SER
  return 1;
#else
  int num_procs = 1;
  MPI_Comm_size(comm, &num_procs);
  return num_procs;
#endif
}

//------------------------------------------------------------------------
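//Synchronization barrier across all processors in 'comm'.
//A no-op in a serial (FEI_SER) build.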
void Barrier(MPI_Comm comm)
{
#ifndef FEI_SER
  MPI_Barrier(comm);
#endif
}

//------------------------------------------------------------------------
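//Given the list of processors that this processor sends to (toProcs),
//determine the list of processors that send to this processor (fromProcs).
//Collective: every processor in 'comm' must call this function.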
int mirrorProcs(MPI_Comm comm, std::vector<int>& toProcs, std::vector<int>& fromProcs)
{
  fromProcs.resize(0);
#ifdef FEI_SER
  fromProcs.push_back(0);
  return(0);
#else
  int num_procs = fei::numProcs(comm);

  //tmpIntData holds three contiguous blocks of length num_procs:
  //[0 .. np-1]       send flags: 1 for each proc in toProcs
  //[np .. 2*np-1]    receive space
  //[2*np .. 3*np-1]  recvcounts for MPI_Reduce_scatter (all 1)
  std::vector<int> tmpIntData(num_procs*3, 0);

  int* buf = &tmpIntData[0];
  int* recvbuf = buf+num_procs;

  for(unsigned i=0; i<toProcs.size(); ++i) {
    buf[toProcs[i]] = 1;
  }

  for(int ii=2*num_procs; ii<3*num_procs; ++ii) {
    buf[ii] = 1;
  }

  //after this reduce-scatter, buf[num_procs] holds the number of procs
  //that will send to this proc.
  CHK_MPI( MPI_Reduce_scatter(buf, &(buf[num_procs]), &(buf[2*num_procs]),
                              MPI_INT, MPI_SUM, comm) );

  int numRecvProcs = buf[num_procs];

  int tag = 11116;
  std::vector<MPI_Request> mpiReqs(numRecvProcs);

  //post a receive for each proc that will send to us, then send a one-int
  //message to each proc in toProcs (the message content is unused; only the
  //source rank of each completed receive matters).
  int offset = 0;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    CHK_MPI( MPI_Irecv(&(recvbuf[ii]), 1, MPI_INT, MPI_ANY_SOURCE, tag,
                       comm, &(mpiReqs[offset++])) );
  }

  for(unsigned i=0; i<toProcs.size(); ++i) {
    CHK_MPI( MPI_Send(&(toProcs[i]), 1, MPI_INT, toProcs[i], tag, comm) );
  }

  MPI_Status status;
  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status);
    fromProcs.push_back(status.MPI_SOURCE);
  }

  std::sort(fromProcs.begin(), fromProcs.end());

  return(0);
#endif
}
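
//Illustrative usage sketch, assuming a simple ring pattern in which each
//processor sends one message to the next-higher rank (wrapping around):
//
//  std::vector<int> toProcs(1, (fei::localProc(comm)+1) % fei::numProcs(comm));
//  std::vector<int> fromProcs;
//  int err = fei::mirrorProcs(comm, toProcs, fromProcs);
//  //on success (err==0), fromProcs contains the single rank
//  //(localProc-1+numProcs) % numProcs that sends to this processor.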

//------------------------------------------------------------------------
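//Given a communication pattern describing the data this processor sends
//(inPattern), construct the mirror pattern describing the data this processor
//will receive (outPattern). When running on more than one processor,
//outPattern is allocated here and the caller is responsible for deleting it.
//Collective across 'comm'.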
int mirrorCommPattern(MPI_Comm comm, comm_map* inPattern, comm_map*& outPattern)
{
#ifdef FEI_SER
  (void)inPattern;
  (void)outPattern;
#else
  int localP = localProc(comm);
  int numP = numProcs(comm);

  if (numP < 2) return(0);

  std::vector<int> buf(numP*2, 0);

  int numInProcs = inPattern->getMap().size();
  std::vector<int> inProcs(numInProcs);
  fei::copyKeysToVector(inPattern->getMap(), inProcs);

  std::vector<int> outProcs;

  int err = mirrorProcs(comm, inProcs, outProcs);
  if (err != 0) ERReturn(-1);

  std::vector<int> recvbuf(outProcs.size(), 0);

  outPattern = new comm_map(0,1);

  MPI_Datatype mpi_ttype = fei::mpiTraits<int>::mpi_type();

  //now receive a length (the contents of buf[i]) from each "out-proc"; this
  //will be the length of the equation data that will also be received from
  //that proc.
  std::vector<MPI_Request> mpiReqs(outProcs.size());
  std::vector<MPI_Status> mpiStss(outProcs.size());
  MPI_Request* requests = &mpiReqs[0];
  MPI_Status* statuses = &mpiStss[0];

  int firsttag = 11117;
  int offset = 0;
  int* outProcsPtr = &outProcs[0];
  for(unsigned i=0; i<outProcs.size(); ++i) {
    if (MPI_Irecv(&(recvbuf[i]), 1, MPI_INT, outProcsPtr[i], firsttag,
                  comm, &requests[offset++]) != MPI_SUCCESS) ERReturn(-1);
  }

  comm_map::map_type& in_row_map = inPattern->getMap();
  comm_map::map_type::iterator
    in_iter = in_row_map.begin(),
    in_end  = in_row_map.end();

  int* inProcsPtr = &inProcs[0];
  for(int ii=0; in_iter != in_end; ++in_iter, ++ii) {
    comm_map::row_type* in_row = in_iter->second;
    buf[ii] = in_row->size();
    if (MPI_Send(&(buf[ii]), 1, MPI_INT, inProcsPtr[ii], firsttag,
                 comm) != MPI_SUCCESS) ERReturn(-1);
  }

  int numOutProcs = outProcs.size();

  MPI_Waitall(numOutProcs, requests, statuses);
  std::vector<int> lengths(numOutProcs);
  int totalRecvLen = 0;
  offset = 0;
  for(int ii=0; ii<numOutProcs; ++ii) {
    if (recvbuf[ii] > 0) {
      lengths[offset++] = recvbuf[ii];
      totalRecvLen += recvbuf[ii];
    }
  }

  //now create the space into which we'll receive the
  //lists that other procs send to us.
  std::vector<int> recvData(totalRecvLen, 999999);

  int tag2 = 11118;
  offset = 0;
  for(int ii=0; ii<numOutProcs; ++ii) {
    CHK_MPI(MPI_Irecv(&(recvData[offset]), lengths[ii], mpi_ttype,
                      outProcs[ii], tag2, comm, &requests[ii]) );
    offset += lengths[ii];
  }

  std::vector<int> sendList;

  in_iter = in_row_map.begin();

  for(int ii=0; in_iter != in_end; ++in_iter,++ii) {
    if (inProcs[ii] == localP) {
      continue;
    }
    sendList.resize(in_iter->second->size());
    fei::copySetToArray(*(in_iter->second), sendList.size(), &sendList[0]);

    CHK_MPI(MPI_Send(&sendList[0], sendList.size(), mpi_ttype,
                     inProcs[ii], tag2, comm) );
  }

  //our final communication operation is to complete the Irecvs we started above.
  for(int ii=0; ii<numOutProcs; ++ii) {
    MPI_Wait(&requests[ii], &statuses[ii]);
  }

  //now that all communication is complete, put the received data
  //into the outPattern object.
  offset = 0;
  for(int ii=0; ii<numOutProcs; ii++) {
    outPattern->addIndices(outProcs[ii], lengths[ii],
                           &(recvData[offset]));
    offset += lengths[ii];
  }

#endif
  return(0);
}


//------------------------------------------------------------------------
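//Exchange one int per processor: sendData[i] is sent to sendProcs[i], and one
//int is received from each remote processor listed in recvProcs into the
//corresponding entry of recvData (entries equal to the local rank are skipped).
//sendProcs and sendData must have the same length; returns -1 if they do not.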
int exchangeIntData(MPI_Comm comm,
                    const std::vector<int>& sendProcs,
                    std::vector<int>& sendData,
                    const std::vector<int>& recvProcs,
                    std::vector<int>& recvData)
{
  if (sendProcs.size() == 0 && recvProcs.size() == 0) return(0);
  if (sendProcs.size() != sendData.size()) return(-1);
#ifndef FEI_SER
  recvData.resize(recvProcs.size());
  std::vector<MPI_Request> mpiReqs;
  mpiReqs.resize(recvProcs.size());

  int tag = 11114;
  MPI_Datatype mpi_dtype = MPI_INT;

  //launch Irecv's for recvData:

  int localProc = fei::localProc(comm);
  int numRecvProcs = recvProcs.size();
  int req_offset = 0;
  for(unsigned i=0; i<recvProcs.size(); ++i) {
    if (recvProcs[i] == localProc) { --numRecvProcs; continue; }

    CHK_MPI( MPI_Irecv(&(recvData[i]), 1, mpi_dtype, recvProcs[i], tag,
                       comm, &mpiReqs[req_offset++]) );
  }

  //send the sendData:

  for(unsigned i=0; i<sendProcs.size(); ++i) {
    if (sendProcs[i] == localProc) continue;

    CHK_MPI( MPI_Send(&(sendData[i]), 1, mpi_dtype,
                      sendProcs[i], tag, comm) );
  }

  //complete the Irecv's:

  for(int ii=0; ii<numRecvProcs; ++ii) {
    int index;
    MPI_Status status;
    CHK_MPI( MPI_Waitany(numRecvProcs, &mpiReqs[0], &index, &status) );
  }

#endif
  return(0);
}

//------------------------------------------------------------------------
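//Global logical-OR reduction of a bool: on return, globalBool is true if
//localBool is true on any processor (implemented with MPI_MAX on ints).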
int Allreduce(MPI_Comm comm, bool localBool, bool& globalBool)
{
#ifndef FEI_SER
  int localInt = localBool ? 1 : 0;
  int globalInt = 0;

  CHK_MPI( MPI_Allreduce(&localInt, &globalInt, 1, MPI_INT, MPI_MAX, comm) );

  globalBool = (globalInt == 1);
#else
  globalBool = localBool;
#endif

  return(0);
}

}//namespace fei