EpetraExt_MultiMpiComm.cpp
//@HEADER
// ***********************************************************************
//
//     EpetraExt: Epetra Extended - Linear Algebra Services Package
//                 Copyright (2011) Sandia Corporation
//
// Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation,
// the U.S. Government retains certain rights in this software.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Michael A. Heroux (maherou@sandia.gov)
//
// ***********************************************************************
//@HEADER

#include "EpetraExt_MultiMpiComm.h"
#include "Teuchos_Assert.hpp"
#include "Teuchos_VerbosityLevel.hpp"

namespace EpetraExt {

MultiMpiComm::MultiMpiComm(MPI_Comm globalMpiComm, int subDomainProcs, int numTimeSteps_,
                           const Teuchos::EVerbosityLevel verbLevel) :
  Epetra_MpiComm(globalMpiComm),
  Teuchos::VerboseObject<MultiMpiComm>(verbLevel),
  myComm(Teuchos::rcp(new Epetra_MpiComm(globalMpiComm))),
  subComm(0)
{
  Teuchos::RCP<Teuchos::FancyOStream> out = this->getOStream();

  // The default output stream only outputs to proc 0, which is not what
  // we generally want.  Manually override this if necessary so we get output
  // to all processors
  int outputRootRank = out->getOutputToRootOnly();
  if (outputRootRank >= 0) {
    out->setOutputToRootOnly(-1);
  }

  // Need to construct a subComm for each sub-domain, compute subDomainRank,
  // and check that all the integer arithmetic works out correctly.

  int ierrmpi, size, rank;
  ierrmpi = MPI_Comm_size(globalMpiComm, &size);
  ierrmpi = MPI_Comm_rank(globalMpiComm, &rank);

  TEUCHOS_TEST_FOR_EXCEPTION(
    size % subDomainProcs != 0,
    std::logic_error,
    "ERROR: num subDomainProcs " << subDomainProcs <<
    " does not divide into num total procs " << size << std::endl);

  numSubDomains = size / subDomainProcs;
  numTimeDomains = subDomainProcs;

  // Create split communicators
  MPI_Comm split_MPI_Comm;
  MPI_Comm time_split_MPI_Comm;
  subDomainRank = rank / subDomainProcs;
  timeDomainRank = rank % subDomainProcs;
  ierrmpi = MPI_Comm_split(globalMpiComm, subDomainRank, rank,
                           &split_MPI_Comm);
  ierrmpi = MPI_Comm_split(globalMpiComm, timeDomainRank, rank,
                           &time_split_MPI_Comm);
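
  // Example: with 8 global ranks and subDomainProcs == 2, numSubDomains == 4
  // and numTimeDomains == 2.  Rank 5 gets subDomainRank == 2 and
  // timeDomainRank == 1, so the first split places it in the sub-domain
  // communicator {4,5} and the second split places it in the time-domain
  // communicator of odd ranks {1,3,5,7}.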
00090 
00091   // Construct second epetra communicators
00092   subComm = new Epetra_MpiComm(split_MPI_Comm);
00093   timeComm = new Epetra_MpiComm(time_split_MPI_Comm);
00094 
00095   // Compute number of time steps on this sub domain
00096   ResetNumTimeSteps(numTimeSteps_);
00097 
00098   if (verbLevel != Teuchos::VERB_NONE) {
00099     if (numTimeSteps_ > 0)
00100       *out << "Processor " << rank << " is on subdomain " << subDomainRank 
00101      << " and owns " << numTimeStepsOnDomain 
00102      << " time steps, starting with " 
00103      <<  firstTimeStepOnDomain << std::endl;
00104     else
00105       *out << "Processor " << rank << " is on subdomain " << subDomainRank 
00106      << std::endl;
00107   }
00108 
00109   // Reset output flag if we changed it
00110   if (outputRootRank >= 0) {
00111     out->setOutputToRootOnly(outputRootRank);
00112   }
00113 }

// This constructor is for just one subdomain, so it only adds the info
// for multiple time steps on the domain.  No two-level parallelism.
MultiMpiComm::MultiMpiComm(const Epetra_MpiComm& EpetraMpiComm_, int numTimeSteps_,
                           const Teuchos::EVerbosityLevel verbLevel) :
  Epetra_MpiComm(EpetraMpiComm_),
  Teuchos::VerboseObject<MultiMpiComm>(verbLevel),
  myComm(Teuchos::rcp(new Epetra_MpiComm(EpetraMpiComm_))),
  subComm(0)
{
  numSubDomains = 1;
  subDomainRank = 0;
  numTimeSteps = numTimeSteps_;
  numTimeStepsOnDomain = numTimeSteps_;
  firstTimeStepOnDomain = 0;

  subComm = new Epetra_MpiComm(EpetraMpiComm_);

  // Create split communicators for time domain
  MPI_Comm time_split_MPI_Comm;
  int rank = EpetraMpiComm_.MyPID();
  int ierrmpi = MPI_Comm_split(EpetraMpiComm_.Comm(), rank, rank,
                               &time_split_MPI_Comm);
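  // Since the split color is this process's own rank, every process lands in
  // its own single-process time communicator.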
  timeComm = new Epetra_MpiComm(time_split_MPI_Comm);
  numTimeDomains = EpetraMpiComm_.NumProc();
  timeDomainRank = rank;
}

// Copy Constructor
MultiMpiComm::MultiMpiComm(const MultiMpiComm &MMC) :
  Epetra_MpiComm(MMC),
  myComm(Teuchos::rcp(new Epetra_MpiComm(dynamic_cast<const Epetra_MpiComm&>(MMC)))),
  subComm(new Epetra_MpiComm(*MMC.subComm)),
  timeComm(new Epetra_MpiComm(*MMC.timeComm))
{
  numSubDomains = MMC.numSubDomains;
  numTimeDomains = MMC.numTimeDomains;
  subDomainRank = MMC.subDomainRank;
  timeDomainRank = MMC.timeDomainRank;

  numTimeSteps = MMC.numTimeSteps;
  numTimeStepsOnDomain = MMC.numTimeStepsOnDomain;
  firstTimeStepOnDomain = MMC.firstTimeStepOnDomain;
}

MultiMpiComm::~MultiMpiComm()
{
  delete subComm;
  delete timeComm;
}

void MultiMpiComm::ResetNumTimeSteps(int numTimeSteps_)
{
  numTimeSteps = numTimeSteps_;

  // Compute number of time steps on this sub-domain
  if (numTimeSteps > 0) {
    // Evenly divisible share: every sub-domain gets at least this many steps
    numTimeStepsOnDomain = numTimeSteps / numSubDomains;
    firstTimeStepOnDomain = numTimeStepsOnDomain * subDomainRank;

    // Dole out the remainder: one extra step to each of the first
    // (numTimeSteps % numSubDomains) sub-domains
    int remainder = numTimeSteps % numSubDomains;
    if (subDomainRank < remainder) {
      numTimeStepsOnDomain++;
      firstTimeStepOnDomain += subDomainRank;
    }
    else firstTimeStepOnDomain += remainder;
  }
  else {
    numTimeStepsOnDomain = -1;
    firstTimeStepOnDomain = -1;
  }
}
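
// Example: with numTimeSteps == 10 and numSubDomains == 4 the base share is
// 2 steps and the remainder is 2, so sub-domains 0..3 own 3, 3, 2, and 2
// steps starting at time steps 0, 3, 6, and 8 respectively.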

} //namespace EpetraExt
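
A minimal usage sketch, not part of the file above. It assumes an MPI program linked against EpetraExt; the accessors SubDomainRank(), NumSubDomains(), NumTimeStepsOnDomain(), and FirstTimeStepOnDomain() are taken from the companion header EpetraExt_MultiMpiComm.h and should be verified against the installed version.

#include <mpi.h>
#include <iostream>
#include "EpetraExt_MultiMpiComm.h"

int main(int argc, char** argv)
{
  MPI_Init(&argc, &argv);
  {
    // Split MPI_COMM_WORLD into sub-domains of 2 processes each and
    // spread 10 time steps across the resulting sub-domains.
    EpetraExt::MultiMpiComm multiComm(MPI_COMM_WORLD, 2, 10, Teuchos::VERB_NONE);

    std::cout << "sub-domain " << multiComm.SubDomainRank()
              << " of " << multiComm.NumSubDomains()
              << " owns " << multiComm.NumTimeStepsOnDomain()
              << " steps starting at " << multiComm.FirstTimeStepOnDomain()
              << std::endl;
  }
  MPI_Finalize();
  return 0;
}

The inner scope ensures the communicator wrappers are destroyed before MPI_Finalize is called.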