EpetraExt_MultiMpiComm.cpp
//@HEADER
// ***********************************************************************
//
//     EpetraExt: Epetra Extended - Linear Algebra Services Package
//                 Copyright (2011) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Michael A. Heroux (maherou@sandia.gov)
//
// ***********************************************************************
//@HEADER

#include "EpetraExt_MultiMpiComm.h"
#include "Teuchos_Assert.hpp"
#include "Teuchos_VerbosityLevel.hpp"

namespace EpetraExt {

MultiMpiComm::MultiMpiComm(MPI_Comm globalMpiComm, int subDomainProcs, int numTimeSteps_) :
  Epetra_MpiComm(globalMpiComm),
  myComm(Teuchos::rcp(new Epetra_MpiComm(globalMpiComm))),
  subComm(0)
{
  Teuchos::RCP<Teuchos::FancyOStream> out = this->getOStream();
  Teuchos::EVerbosityLevel verbLevel = this->getVerbLevel();

  // The default output stream only outputs to proc 0, which is not what
  // we generally want.  Manually override this if necessary so we get
  // output on all processors.
  int outputRootRank = out->getOutputToRootOnly();
  if (outputRootRank >= 0) {
    out->setOutputToRootOnly(-1);
  }

  // Need to construct subComm for each sub domain, compute subDomainRank,
  // and check that all integer arithmetic works out correctly.

  int ierrmpi, size, rank;
  ierrmpi = MPI_Comm_size(globalMpiComm, &size);
  ierrmpi = MPI_Comm_rank(globalMpiComm, &rank);

  TEUCHOS_TEST_FOR_EXCEPTION(
    size % subDomainProcs != 0,
    std::logic_error,
    "ERROR: num subDomainProcs " << subDomainProcs <<
    " does not divide evenly into num total procs " << size << std::endl);

  numSubDomains = size / subDomainProcs;
  numTimeDomains = subDomainProcs;

  // Create split communicators
  MPI_Comm split_MPI_Comm;
  MPI_Comm time_split_MPI_Comm;
  subDomainRank = rank / subDomainProcs;
  timeDomainRank = rank % subDomainProcs;
  ierrmpi = MPI_Comm_split(globalMpiComm, subDomainRank, rank,
                           &split_MPI_Comm);
  ierrmpi = MPI_Comm_split(globalMpiComm, timeDomainRank, rank,
                           &time_split_MPI_Comm);
  (void) ierrmpi; // return codes intentionally unused
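  // Illustrative example (not part of the original source): with size = 8
  // and subDomainProcs = 2, ranks {0,1}, {2,3}, {4,5}, {6,7} form
  // subdomains 0-3 (the split on subDomainRank), while the even ranks
  // {0,2,4,6} and odd ranks {1,3,5,7} form the two time domains
  // (the split on timeDomainRank).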

  // Construct second epetra communicators
  subComm = new Epetra_MpiComm(split_MPI_Comm);
  timeComm = new Epetra_MpiComm(time_split_MPI_Comm);

  // Compute number of time steps on this sub domain
  ResetNumTimeSteps(numTimeSteps_);

  if (verbLevel != Teuchos::VERB_NONE) {
    if (numTimeSteps_ > 0)
      *out << "Processor " << rank << " is on subdomain " << subDomainRank
           << " and owns " << numTimeStepsOnDomain
           << " time steps, starting with "
           << firstTimeStepOnDomain << std::endl;
    else
      *out << "Processor " << rank << " is on subdomain " << subDomainRank
           << std::endl;
  }

  // Reset output flag if we changed it
  if (outputRootRank >= 0) {
    out->setOutputToRootOnly(outputRootRank);
  }
}


// This constructor is for just one subdomain, so it only adds the info
// for multiple time steps on the domain. There is no two-level parallelism.
MultiMpiComm::MultiMpiComm(const Epetra_MpiComm& EpetraMpiComm_, int numTimeSteps_) :
  Epetra_MpiComm(EpetraMpiComm_),
  myComm(Teuchos::rcp(new Epetra_MpiComm(EpetraMpiComm_))),
  subComm(0)
{
  numSubDomains = 1;
  subDomainRank = 0;
  numTimeSteps = numTimeSteps_;
  numTimeStepsOnDomain = numTimeSteps_;
  firstTimeStepOnDomain = 0;

  subComm = new Epetra_MpiComm(EpetraMpiComm_);

  // Create split communicators for time domain
  MPI_Comm time_split_MPI_Comm;
  int rank = EpetraMpiComm_.MyPID();
  int ierrmpi = MPI_Comm_split(EpetraMpiComm_.Comm(), rank, rank,
                               &time_split_MPI_Comm);
  (void) ierrmpi; // return code intentionally unused
  timeComm = new Epetra_MpiComm(time_split_MPI_Comm);
  numTimeDomains = EpetraMpiComm_.NumProc();
  timeDomainRank = rank;
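  // Illustrative note (not part of the original source): since the split
  // color equals the rank, each process lands in its own single-rank
  // timeComm, i.e. every processor is its own time domain.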
}

// Copy constructor
MultiMpiComm::MultiMpiComm(const MultiMpiComm &MMC) :
  Epetra_MpiComm(MMC),
  myComm(Teuchos::rcp(new Epetra_MpiComm(dynamic_cast<const Epetra_MpiComm&>(MMC)))),
  subComm(new Epetra_MpiComm(*MMC.subComm)),
  timeComm(new Epetra_MpiComm(*MMC.timeComm))
{
  numSubDomains = MMC.numSubDomains;
  numTimeDomains = MMC.numTimeDomains;
  subDomainRank = MMC.subDomainRank;
  timeDomainRank = MMC.timeDomainRank;

  numTimeSteps = MMC.numTimeSteps;
  numTimeStepsOnDomain = MMC.numTimeStepsOnDomain;
  firstTimeStepOnDomain = MMC.firstTimeStepOnDomain;
}

MultiMpiComm::~MultiMpiComm()
{
  delete subComm;
  delete timeComm;
}

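// Worked example (not part of the original source): with numTimeSteps = 10
// and numSubDomains = 4, the base count is 10/4 = 2 with remainder 2, so
// subdomains 0-3 own 3, 3, 2, and 2 steps, starting at global steps
// 0, 3, 6, and 8 respectively.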
void MultiMpiComm::ResetNumTimeSteps(int numTimeSteps_)
{
  numTimeSteps = numTimeSteps_;

  // Compute number of time steps on this sub domain
  if (numTimeSteps > 0) {
    // Compute part for number of domains dividing evenly into number of steps
    numTimeStepsOnDomain = numTimeSteps / numSubDomains;
    firstTimeStepOnDomain = numTimeStepsOnDomain * subDomainRank;

    // Dole out remainder
    int remainder = numTimeSteps % numSubDomains;
    if (subDomainRank < remainder) {
      numTimeStepsOnDomain++;
      firstTimeStepOnDomain += subDomainRank;
    }
    else firstTimeStepOnDomain += remainder;
  }
  else {
    numTimeStepsOnDomain = -1;
    firstTimeStepOnDomain = -1;
  }
}

} //namespace EpetraExt
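
A minimal usage sketch, not part of the original file: it assumes an MPI
build of Trilinos and a run whose total process count is divisible by the
chosen subDomainProcs, and that the accessors SubDomainComm() and
NumTimeStepsOnDomain() are available from the MultiComm/MultiMpiComm headers.

#include <mpi.h>
#include "EpetraExt_MultiMpiComm.h"

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  { // inner scope so multiComm is destroyed before MPI_Finalize
    // Two processors per spatial subdomain, 10 time steps in total.
    // Run with, e.g., 8 MPI ranks to obtain 4 subdomains.
    EpetraExt::MultiMpiComm multiComm(MPI_COMM_WORLD, 2, 10);

    // Each rank can query its place in the two-level decomposition.
    int spatialRank = multiComm.SubDomainComm().MyPID(); // rank within the subdomain
    int mySteps     = multiComm.NumTimeStepsOnDomain();  // steps owned by this subdomain
    (void) spatialRank; (void) mySteps;
  }
  MPI_Finalize();
  return 0;
}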