Teuchos_DefaultMpiComm.hpp
// @HEADER
// ***********************************************************************
//
//                    Teuchos: Common Tools Package
//                 Copyright (2004) Sandia Corporation
//
// Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
// license for use of this work by or on behalf of the U.S. Government.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// 1. Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright
// notice, this list of conditions and the following disclaimer in the
// documentation and/or other materials provided with the distribution.
//
// 3. Neither the name of the Corporation nor the names of the
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY SANDIA CORPORATION "AS IS" AND ANY
// EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
// PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL SANDIA CORPORATION OR THE
// CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
// PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
// LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
// NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
// SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
//
// Questions? Contact Michael A. Heroux (maherou@sandia.gov)
//
// ***********************************************************************
// @HEADER

#ifndef TEUCHOS_MPI_COMM_HPP
#define TEUCHOS_MPI_COMM_HPP


#include "Teuchos_Comm.hpp"
#include "Teuchos_CommUtilities.hpp"
#include "Teuchos_OrdinalTraits.hpp"
#include "Teuchos_OpaqueWrapper.hpp"
#include "Teuchos_MpiReductionOpSetter.hpp"
#include "Teuchos_SerializationTraitsHelpers.hpp"
#include "Teuchos_Workspace.hpp"
#include "Teuchos_TypeNameTraits.hpp"
#include "Teuchos_as.hpp"
#include "Teuchos_Assert.hpp"
#include "mpi.h"
#include <iterator>

// This must be defined globally for the whole program!
//#define TEUCHOS_MPI_COMM_DUMP


#ifdef TEUCHOS_MPI_COMM_DUMP
#  include "Teuchos_VerboseObject.hpp"
#endif


namespace Teuchos {

// Return a human-readable string describing the given MPI error code.
std::string
mpiErrorCodeToString (const int err);

#ifdef TEUCHOS_MPI_COMM_DUMP
template<typename Ordinal, typename T>
void dumpBuffer(
  const std::string &funcName, const std::string &buffName
  ,const Ordinal bytes, const T buff[]
  )
{
  Teuchos::RCP<Teuchos::FancyOStream>
    out = Teuchos::VerboseObjectBase::getDefaultOStream();
  Teuchos::OSTab tab(out);
  *out
    << "\n" << funcName << "::" << buffName << ":\n";
  tab.incrTab();
  for( Ordinal i = 0; i < bytes; ++i ) {
    *out << buffName << "[" << i << "] = '" << buff[i] << "'\n";
  }
  *out << "\n";
}
#endif // TEUCHOS_MPI_COMM_DUMP


// Teuchos wrapper for a raw MPI_Request handle, which also remembers the
// number of bytes in the corresponding message.
class MpiCommRequest : public CommRequest {
public:
  // Construct from a raw MPI_Request and the message's length in bytes.
  MpiCommRequest (MPI_Request rawMpiRequest,
                  const ArrayView<char>::size_type numBytesInMessage) :
    rawMpiRequest_ (rawMpiRequest), numBytes_ (numBytesInMessage)
  {}

  // Return the raw MPI_Request and relinquish ownership of it, setting
  // the internal handle to MPI_REQUEST_NULL.
  MPI_Request releaseRawMpiRequest()
  {
    MPI_Request tmp_rawMpiRequest = rawMpiRequest_;
    rawMpiRequest_ = MPI_REQUEST_NULL;
    return tmp_rawMpiRequest;
  }

  // Whether the wrapped request is MPI_REQUEST_NULL.
  bool isNull() const {
    return rawMpiRequest_ == MPI_REQUEST_NULL;
  }

  // Number of bytes in the message corresponding to this request.
  ArrayView<char>::size_type numBytes () const {
    return numBytes_;
  }

private:
  MPI_Request rawMpiRequest_;
  ArrayView<char>::size_type numBytes_;

  MpiCommRequest(); // Not defined
};

// Nonmember factory function: wrap the given raw MPI_Request and message
// length in an RCP-managed MpiCommRequest.
inline RCP<MpiCommRequest>
mpiCommRequest (MPI_Request rawMpiRequest,
                const ArrayView<char>::size_type numBytes)
{
  return rcp (new MpiCommRequest (rawMpiRequest, numBytes));
}
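
// Example (a minimal sketch, not part of this header): wrapping a raw
// MPI_Request obtained from a nonblocking MPI call.  Here 'buf',
// 'numBytes', 'destRank', 'tag', and 'comm' are hypothetical caller
// variables.
//
//   MPI_Request rawReq = MPI_REQUEST_NULL;
//   MPI_Isend (buf, numBytes, MPI_CHAR, destRank, tag, comm, &rawReq);
//   RCP<MpiCommRequest> req = mpiCommRequest (rawReq, numBytes);
//   // ... later, wait on the request; releaseRawMpiRequest() hands the
//   // raw handle back and nulls out the wrapper's copy.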

// Teuchos wrapper for MPI_Status, implementing the CommStatus interface.
template<class OrdinalType>
class MpiCommStatus : public CommStatus<OrdinalType> {
public:
  MpiCommStatus (MPI_Status status) : status_ (status) {}

  // Virtual destructor, for memory safety of derived classes.
  virtual ~MpiCommStatus() {}

  // The source rank of the message.
  OrdinalType getSourceRank () { return status_.MPI_SOURCE; }

  // The tag of the message.
  OrdinalType getTag () { return status_.MPI_TAG; }

  // The error code of the message.
  OrdinalType getError () { return status_.MPI_ERROR; }

private:
  // Default construction is deliberately not implemented.
  MpiCommStatus ();

  // The raw MPI_Status struct that this class wraps.
  MPI_Status status_;
};

// Nonmember factory function for MpiCommStatus.
template<class OrdinalType>
inline RCP<MpiCommStatus<OrdinalType> >
mpiCommStatus (MPI_Status rawMpiStatus)
{
  return rcp (new MpiCommStatus<OrdinalType> (rawMpiStatus));
}

// Concrete implementation of Teuchos' Comm wrapper for MPI, templated on
// the Ordinal type used for counts and indices.
template<typename Ordinal>
class MpiComm : public Comm<Ordinal> {
public:

  // Construct from a wrapped raw MPI communicator.  The input must be
  // nonnull and must not wrap MPI_COMM_NULL.
  MpiComm(
    const RCP<const OpaqueWrapper<MPI_Comm> > &rawMpiComm
    );

  // Copy constructor; duplicates the underlying communicator via
  // MPI_Comm_dup.
  MpiComm(const MpiComm<Ordinal>& other);

  // Return the wrapped raw MPI communicator.
  RCP<const OpaqueWrapper<MPI_Comm> > getRawMpiComm() const
  {return rawMpiComm_;}

  // Set a custom error handler on the underlying MPI communicator.
  void setErrorHandler (const RCP<const OpaqueWrapper<MPI_Errhandler> >& errHandler);

  // Overridden from Comm.

  virtual int getRank() const;
  virtual int getSize() const;
  virtual void barrier() const;
  virtual void broadcast(
    const int rootRank, const Ordinal bytes, char buffer[]
    ) const;
  virtual void gatherAll(
    const Ordinal sendBytes, const char sendBuffer[]
    ,const Ordinal recvBytes, char recvBuffer[]
    ) const;
  virtual void reduceAll(
    const ValueTypeReductionOp<Ordinal,char> &reductOp
    ,const Ordinal bytes, const char sendBuffer[], char globalReducts[]
    ) const;
  virtual void reduceAllAndScatter(
    const ValueTypeReductionOp<Ordinal,char> &reductOp
    ,const Ordinal sendBytes, const char sendBuffer[]
    ,const Ordinal recvCounts[], char myGlobalReducts[]
    ) const;
  virtual void scan(
    const ValueTypeReductionOp<Ordinal,char> &reductOp
    ,const Ordinal bytes, const char sendBuffer[], char scanReducts[]
    ) const;
  virtual void send(
    const Ordinal bytes, const char sendBuffer[], const int destRank
    ) const;
  virtual void ssend(
    const Ordinal bytes, const char sendBuffer[], const int destRank
    ) const;
  virtual int receive(
    const int sourceRank, const Ordinal bytes, char recvBuffer[]
    ) const;
  virtual void readySend(
    const ArrayView<const char> &sendBuffer,
    const int destRank
    ) const;
  virtual RCP<CommRequest> isend(
    const ArrayView<const char> &sendBuffer,
    const int destRank
    ) const;
  virtual RCP<CommRequest> ireceive(
    const ArrayView<char> &recvBuffer,
    const int sourceRank
    ) const;
  virtual void waitAll(
    const ArrayView<RCP<CommRequest> > &requests
    ) const;
  virtual void
  waitAll (const ArrayView<RCP<CommRequest> >& requests,
           const ArrayView<RCP<CommStatus<Ordinal> > >& statuses) const;
  virtual RCP<CommStatus<Ordinal> >
  wait (const Ptr<RCP<CommRequest> >& request) const;
  virtual RCP< Comm<Ordinal> > duplicate() const;
  virtual RCP< Comm<Ordinal> > split(const int color, const int key) const;
  virtual RCP< Comm<Ordinal> > createSubcommunicator(
    const ArrayView<const int>& ranks) const;

  // Overridden from Describable.

  std::string description() const;

  // These should be private, but the PGI compiler requires them to be public.

  static int const minTag_ = 26000; // These came from Teuchos::MpiComm???
  static int const maxTag_ = 26099; // ""

private:

  // Set internal data members once the rawMpiComm_ data member is valid.
  void setupMembersFromComm();
  static int tagCounter_;

  RCP<const OpaqueWrapper<MPI_Comm> > rawMpiComm_;
  int rank_;
  int size_;
  int tag_;

  // A custom error handler, if one was set via setErrorHandler().
  RCP<const OpaqueWrapper<MPI_Errhandler> > customErrorHandler_;

  void assertRank(const int rank, const std::string &rankName) const;

  // Not defined and not to be called!
  MpiComm();

#ifdef TEUCHOS_MPI_COMM_DUMP
public:
  static bool show_dump;
#endif // TEUCHOS_MPI_COMM_DUMP

};


// Nonmember constructor: return an RCP-managed MpiComm wrapping the given
// raw communicator, or null if the input is null or wraps MPI_COMM_NULL.
template<typename Ordinal>
RCP<MpiComm<Ordinal> >
createMpiComm(
  const RCP<const OpaqueWrapper<MPI_Comm> > &rawMpiComm
  );
00419 
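// Example (a minimal sketch of typical usage; assumes MPI_Init has
// already been called, and uses int as the Ordinal type):
//
//   RCP<const OpaqueWrapper<MPI_Comm> > rawComm =
//     opaqueWrapper (MPI_COMM_WORLD);
//   RCP<MpiComm<int> > comm = createMpiComm<int> (rawComm);
//   const int myRank = comm->getRank ();
//   const int numProcs = comm->getSize ();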

// ////////////////////////
// Implementations


// Static members


template<typename Ordinal>
int MpiComm<Ordinal>::tagCounter_ = MpiComm<Ordinal>::minTag_;


// Constructors


template<typename Ordinal>
MpiComm<Ordinal>::MpiComm(
  const RCP<const OpaqueWrapper<MPI_Comm> > &rawMpiComm
  )
{
  TEUCHOS_TEST_FOR_EXCEPTION(rawMpiComm.get()==NULL, std::invalid_argument,
    "Teuchos::MpiComm constructor: The input RCP is null.");
  TEUCHOS_TEST_FOR_EXCEPTION(*rawMpiComm == MPI_COMM_NULL,
    std::invalid_argument, "Teuchos::MpiComm constructor: The given MPI_Comm "
    "is MPI_COMM_NULL.");
  rawMpiComm_ = rawMpiComm;

  // FIXME (mfh 26 Mar 2012) The following is a bit wicked in that it
  // changes the behavior of existing applications that use MpiComm,
  // without warning.  I've chosen to do it because I can't figure out
  // any other way to help me debug MPI_Waitall failures on some (but
  // not all) of the testing platforms.  The problem is that MPI's
  // default error handler is MPI_ERRORS_ARE_FATAL, which immediately
  // aborts on error without returning an error code from the MPI
  // function.  Also, the testing platforms' MPI implementations'
  // diagnostics are not giving me useful information.  Thus, I'm
  // setting the default error handler to MPI_ERRORS_RETURN, so that
  // MPI_Waitall will return an error code.
  //
  // Note that all MpiComm methods check error codes returned by MPI
  // functions, and throw an exception if the code is not MPI_SUCCESS.
  // Thus, this change in behavior will only affect your program in
  // the following case: You call a function f() in the try block of a
  // try-catch, and expect f() to throw an exception (generally
  // std::runtime_error) in a particular case not related to MpiComm,
  // but MpiComm throws the exception instead.  It's probably a bad
  // idea for you to do this, because MpiComm might very well throw
  // exceptions for things like invalid arguments.
  const bool makeMpiErrorsReturn = true;
  if (makeMpiErrorsReturn) {
    RCP<const OpaqueWrapper<MPI_Errhandler> > errHandler =
      rcp (new OpaqueWrapper<MPI_Errhandler> (MPI_ERRORS_RETURN));
    setErrorHandler (errHandler);
  }

  setupMembersFromComm();
}
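
// Example (a minimal sketch): callers who prefer MPI's default
// abort-on-error behavior can restore it after construction via
// setErrorHandler().  This mirrors the MPI_ERRORS_RETURN setup in the
// constructor above; 'comm' is a hypothetical MpiComm<int> instance.
//
//   RCP<const OpaqueWrapper<MPI_Errhandler> > fatalHandler =
//     rcp (new OpaqueWrapper<MPI_Errhandler> (MPI_ERRORS_ARE_FATAL));
//   comm.setErrorHandler (fatalHandler);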


template<typename Ordinal>
MpiComm<Ordinal>::MpiComm(const MpiComm<Ordinal>& other)
{
  TEUCHOS_TEST_FOR_EXCEPT(other.getRawMpiComm().get() == NULL);
  TEUCHOS_TEST_FOR_EXCEPT(*other.getRawMpiComm() == MPI_COMM_NULL);
  MPI_Comm newComm;
  const int err = MPI_Comm_dup (*other.getRawMpiComm(), &newComm);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm copy constructor: MPI_Comm_dup failed with error \""
    << mpiErrorCodeToString (err) << "\".");
  rawMpiComm_ = opaqueWrapper (newComm, MPI_Comm_free);
  setupMembersFromComm();
}


template<typename Ordinal>
void MpiComm<Ordinal>::setupMembersFromComm()
{
  MPI_Comm_size(*rawMpiComm_, &size_);
  MPI_Comm_rank(*rawMpiComm_, &rank_);
  // Cycle through the legal tag range, so that each MpiComm instance
  // gets its own tag for its point-to-point operations.
  if(tagCounter_ > maxTag_)
    tagCounter_ = minTag_;
  tag_ = tagCounter_++;
}


template<typename Ordinal>
void
MpiComm<Ordinal>::
setErrorHandler (const RCP<const OpaqueWrapper<MPI_Errhandler> >& errHandler)
{
  if (! is_null (errHandler)) {
    const int err = MPI_Comm_set_errhandler (*getRawMpiComm(), *errHandler);
    TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
      "Teuchos::MpiComm::setErrorHandler: MPI_Comm_set_errhandler() failed with "
      "error \"" << mpiErrorCodeToString (err) << "\".");
  }
  // Wait to set this until the end, in case MPI_Comm_set_errhandler()
  // doesn't succeed.
  customErrorHandler_ = errHandler;
}


// Overridden from Comm


template<typename Ordinal>
int MpiComm<Ordinal>::getRank() const
{
  return rank_;
}


template<typename Ordinal>
int MpiComm<Ordinal>::getSize() const
{
  return size_;
}


template<typename Ordinal>
void MpiComm<Ordinal>::barrier() const
{
  TEUCHOS_COMM_TIME_MONITOR(
    "Teuchos::MpiComm<"<<OrdinalTraits<Ordinal>::name()<<">::barrier()"
    );
  const int err = MPI_Barrier(*rawMpiComm_);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::barrier: MPI_Barrier failed with error \""
    << mpiErrorCodeToString (err) << "\".");
}


template<typename Ordinal>
void MpiComm<Ordinal>::broadcast(
  const int rootRank, const Ordinal bytes, char buffer[]
  ) const
{
  TEUCHOS_COMM_TIME_MONITOR(
    "Teuchos::MpiComm<"<<OrdinalTraits<Ordinal>::name()<<">::broadcast(...)"
    );
  const int err = MPI_Bcast(buffer,bytes,MPI_CHAR,rootRank,*rawMpiComm_);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::broadcast: MPI_Bcast failed with error \""
    << mpiErrorCodeToString (err) << "\".");
}


template<typename Ordinal>
void MpiComm<Ordinal>::gatherAll(
  const Ordinal sendBytes, const char sendBuffer[],
  const Ordinal recvBytes, char recvBuffer[]
  ) const
{
  TEUCHOS_COMM_TIME_MONITOR(
    "Teuchos::MpiComm<"<<OrdinalTraits<Ordinal>::name()<<">::gatherAll(...)"
    );
  TEUCHOS_ASSERT_EQUALITY((sendBytes*size_), recvBytes );
  // NOTE: We pass 'sendBytes' as MPI_Allgather's recvcount argument,
  // since that argument is the number of items received from *each*
  // process, not the total (which is confusing in the MPI documentation).
  const int err =
    MPI_Allgather (const_cast<char *>(sendBuffer), sendBytes, MPI_CHAR,
                   recvBuffer, sendBytes, MPI_CHAR, *rawMpiComm_);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::gatherAll: MPI_Allgather failed with error \""
    << mpiErrorCodeToString (err) << "\".");
}


template<typename Ordinal>
void
MpiComm<Ordinal>::
reduceAll (const ValueTypeReductionOp<Ordinal,char> &reductOp,
           const Ordinal bytes,
           const char sendBuffer[],
           char globalReducts[]) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::reduceAll(...)" );

  MpiReductionOpSetter op(mpiReductionOp(rcp(&reductOp,false)));
  MPI_Datatype char_block;

  // Wrap the whole buffer in a single contiguous derived datatype and
  // reduce a count of one element, so that the user-defined reduction
  // operation sees the entire buffer at once rather than byte by byte.
  //
  // TODO (mfh 26 Mar 2012) Check returned error codes of the MPI
  // custom datatype functions.
  MPI_Type_contiguous(bytes, MPI_CHAR, &char_block);
  MPI_Type_commit(&char_block);

  const int err =
    MPI_Allreduce (const_cast<char*>(sendBuffer), globalReducts, 1, char_block,
                   op.mpi_op(), *rawMpiComm_);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::reduceAll (custom op): MPI_Allreduce failed with error \""
    << mpiErrorCodeToString (err) << "\".");

  // TODO (mfh 26 Mar 2012) Check returned error codes of the MPI
  // custom datatype functions.
  MPI_Type_free(&char_block);
}


template<typename Ordinal>
void MpiComm<Ordinal>::reduceAllAndScatter(
  const ValueTypeReductionOp<Ordinal,char> &reductOp
  ,const Ordinal sendBytes, const char sendBuffer[]
  ,const Ordinal recvCounts[], char myGlobalReducts[]
  ) const
{

  (void)sendBytes; // Ignore if not in debug mode

  TEUCHOS_COMM_TIME_MONITOR(
    "Teuchos::MpiComm<"<<OrdinalTraits<Ordinal>::name()<<">::reduceAllAndScatter(...)"
    );

#ifdef TEUCHOS_DEBUG
  Ordinal sumRecvBytes = 0;
  for( Ordinal i = 0; i < size_; ++i ) {
    sumRecvBytes += recvCounts[i];
  }
  TEUCHOS_TEST_FOR_EXCEPT(!(sumRecvBytes==sendBytes));
#endif // TEUCHOS_DEBUG

#ifdef TEUCHOS_MPI_COMM_DUMP
  if(show_dump) {
    dumpBuffer<Ordinal,char>(
      "Teuchos::MpiComm<Ordinal>::reduceAllAndScatter(...)",
      "sendBuffer", sendBytes, sendBuffer );
    dumpBuffer<Ordinal,Ordinal>(
      "Teuchos::MpiComm<Ordinal>::reduceAllAndScatter(...)",
      "recvCounts", as<Ordinal>(size_), recvCounts );
    dumpBuffer<Ordinal,char>(
      "Teuchos::MpiComm<Ordinal>::reduceAllAndScatter(...)",
      "myGlobalReducts", as<char>(recvCounts[rank_]), myGlobalReducts );
  }
#endif // TEUCHOS_MPI_COMM_DUMP

  // MPI_Reduce_scatter takes an int array of receive counts.  If
  // Ordinal != int, make a temporary int copy of recvCounts[].
  WorkspaceStore* wss = get_default_workspace_store().get();
  const bool Ordinal_is_int = typeid(int)==typeid(Ordinal);
  Workspace<int> ws_int_recvCounts(wss,Ordinal_is_int?0:size_);
  const int *int_recvCounts = 0;
  if(Ordinal_is_int) {
    int_recvCounts = reinterpret_cast<const int*>(recvCounts);
    // Note: We must use a reinterpret_cast here, since this branch must
    // compile (though not execute) even when Ordinal != int.  Template
    // conditionals could avoid the cast, but are not worth the trouble.
  }
  else {
    std::copy(recvCounts, recvCounts+size_, &ws_int_recvCounts[0]);
    int_recvCounts = &ws_int_recvCounts[0];
  }

  // Perform the operation
  MpiReductionOpSetter op(mpiReductionOp(rcp(&reductOp, false)));

  const int err = MPI_Reduce_scatter(
    const_cast<char*>(sendBuffer), myGlobalReducts,
    const_cast<int*>(int_recvCounts),
    MPI_CHAR,
    op.mpi_op(),
    *rawMpiComm_
    );
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::reduceAllAndScatter: MPI_Reduce_scatter failed with "
    "error \"" << mpiErrorCodeToString (err) << "\".");
}


template<typename Ordinal>
void MpiComm<Ordinal>::scan(
  const ValueTypeReductionOp<Ordinal,char> &reductOp
  ,const Ordinal bytes, const char sendBuffer[], char scanReducts[]
  ) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::scan(...)" );

  MpiReductionOpSetter op(mpiReductionOp(rcp(&reductOp,false)));
  const int err =
    MPI_Scan (const_cast<char*>(sendBuffer), scanReducts, bytes, MPI_CHAR,
              op.mpi_op(), *rawMpiComm_);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::scan: MPI_Scan() failed with error \""
    << mpiErrorCodeToString (err) << "\".");
}


template<typename Ordinal>
void
MpiComm<Ordinal>::send (const Ordinal bytes,
                        const char sendBuffer[],
                        const int destRank) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::send(...)" );

#ifdef TEUCHOS_MPI_COMM_DUMP
  if(show_dump) {
    dumpBuffer<Ordinal,char>(
      "Teuchos::MpiComm<Ordinal>::send(...)"
      ,"sendBuffer", bytes, sendBuffer
      );
  }
#endif // TEUCHOS_MPI_COMM_DUMP

  const int err = MPI_Send (const_cast<char*>(sendBuffer), bytes, MPI_CHAR,
                            destRank, tag_, *rawMpiComm_);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::send: MPI_Send() failed with error \""
    << mpiErrorCodeToString (err) << "\".");
}


template<typename Ordinal>
void
MpiComm<Ordinal>::ssend (const Ordinal bytes,
                         const char sendBuffer[],
                         const int destRank) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::ssend(...)" );

#ifdef TEUCHOS_MPI_COMM_DUMP
  if(show_dump) {
    dumpBuffer<Ordinal,char>(
      "Teuchos::MpiComm<Ordinal>::ssend(...)"
      ,"sendBuffer", bytes, sendBuffer
      );
  }
#endif // TEUCHOS_MPI_COMM_DUMP

  const int err = MPI_Ssend (const_cast<char*>(sendBuffer), bytes, MPI_CHAR,
                             destRank, tag_, *rawMpiComm_);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::ssend: MPI_Ssend() failed with error \""
    << mpiErrorCodeToString (err) << "\".");
}


template<typename Ordinal>
void MpiComm<Ordinal>::readySend(
  const ArrayView<const char> &sendBuffer,
  const int destRank
  ) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::readySend" );

#ifdef TEUCHOS_MPI_COMM_DUMP
  if(show_dump) {
    dumpBuffer<Ordinal,char>(
      "Teuchos::MpiComm<Ordinal>::readySend(...)"
      ,"sendBuffer", sendBuffer.size(), sendBuffer.getRawPtr()
      );
  }
#endif // TEUCHOS_MPI_COMM_DUMP

  const int err =
    MPI_Rsend (const_cast<char*>(sendBuffer.getRawPtr()), sendBuffer.size(),
               MPI_CHAR, destRank, tag_, *rawMpiComm_);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::readySend: MPI_Rsend() failed with error \""
    << mpiErrorCodeToString (err) << "\".");
}


template<typename Ordinal>
int
MpiComm<Ordinal>::receive (const int sourceRank,
                           const Ordinal bytes,
                           char recvBuffer[]) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::receive(...)" );

  // A negative source rank indicates MPI_ANY_SOURCE, namely that we
  // will take an incoming message from any process, as long as the
  // tag matches.
  const int theSrcRank = (sourceRank < 0) ? MPI_ANY_SOURCE : sourceRank;

  MPI_Status status;
  const int err = MPI_Recv (recvBuffer, bytes, MPI_CHAR, theSrcRank, tag_,
                            *rawMpiComm_, &status);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::receive: MPI_Recv() failed with error \""
    << mpiErrorCodeToString (err) << "\".");

#ifdef TEUCHOS_MPI_COMM_DUMP
  if (show_dump) {
    dumpBuffer<Ordinal,char> ("Teuchos::MpiComm<Ordinal>::receive(...)",
                              "recvBuffer", bytes, recvBuffer);
  }
#endif // TEUCHOS_MPI_COMM_DUMP

  // Returning the source rank is useful in the MPI_ANY_SOURCE case.
  return status.MPI_SOURCE;
}


template<typename Ordinal>
RCP<CommRequest>
MpiComm<Ordinal>::isend (const ArrayView<const char> &sendBuffer,
                         const int destRank) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::isend(...)" );

  MPI_Request rawMpiRequest = MPI_REQUEST_NULL;
  const int err =
    MPI_Isend (const_cast<char*>(sendBuffer.getRawPtr()), sendBuffer.size(),
               MPI_CHAR, destRank, tag_, *rawMpiComm_, &rawMpiRequest);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::isend: MPI_Isend() failed with error \""
    << mpiErrorCodeToString (err) << "\".");

  return mpiCommRequest (rawMpiRequest, sendBuffer.size());
}


template<typename Ordinal>
RCP<CommRequest>
MpiComm<Ordinal>::ireceive (const ArrayView<char> &recvBuffer,
                            const int sourceRank) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::ireceive(...)" );

  // A negative source rank indicates MPI_ANY_SOURCE, namely that we
  // will take an incoming message from any process, as long as the
  // tag matches.
  const int theSrcRank = (sourceRank < 0) ? MPI_ANY_SOURCE : sourceRank;

  MPI_Request rawMpiRequest = MPI_REQUEST_NULL;
  const int err =
    MPI_Irecv (const_cast<char*>(recvBuffer.getRawPtr()), recvBuffer.size(),
               MPI_CHAR, theSrcRank, tag_, *rawMpiComm_, &rawMpiRequest);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::ireceive: MPI_Irecv() failed with error \""
    << mpiErrorCodeToString (err) << "\".");

  return mpiCommRequest (rawMpiRequest, recvBuffer.size());
}


namespace {
  // Called by both MpiComm::waitAll() implementations.
  template<typename Ordinal>
  void
  waitAllImpl (const ArrayView<RCP<CommRequest> >& requests,
               const ArrayView<MPI_Status>& rawMpiStatuses)
  {
    typedef ArrayView<RCP<CommRequest> >::size_type size_type;
    const size_type count = requests.size();
    // waitAllImpl() is not meant to be called by users, so it's a bug
    // for the two views to have different lengths.
    TEUCHOS_TEST_FOR_EXCEPTION(rawMpiStatuses.size() != count,
      std::logic_error, "Teuchos::MpiComm's waitAllImpl: rawMpiStatuses.size() = "
      << rawMpiStatuses.size() << " != requests.size() = " << requests.size()
      << ".  Please report this bug to the Tpetra developers.");
    if (count == 0) {
      return; // No requests on which to wait
    }

    // MpiComm wraps MPI and can't expose any MPI structs or opaque
    // objects.  Thus, we have to unpack requests into a separate array.
    // If that's too slow, then your code should just call into MPI
    // directly.
    //
    // Pull out the raw MPI requests from the wrapped requests.
    // MPI_Waitall should not fail if a request is MPI_REQUEST_NULL, but
    // we keep track just to inform the user.
    bool someNullRequests = false;
    Array<MPI_Request> rawMpiRequests (count, MPI_REQUEST_NULL);
    for (size_type i = 0; i < count; ++i) {
      RCP<CommRequest> request = requests[i];
      if (! is_null (request)) {
        RCP<MpiCommRequest> mpiRequest =
          rcp_dynamic_cast<MpiCommRequest> (request);
        // releaseRawMpiRequest() sets the MpiCommRequest's raw
        // MPI_Request to MPI_REQUEST_NULL.  This makes waitAll() not
        // satisfy the strong exception guarantee.  That's OK because
        // MPI_Waitall() doesn't promise that it satisfies the strong
        // exception guarantee, and we would rather conservatively
        // invalidate the handles than leave dangling requests around
        // and risk users trying to wait on the same request twice.
        rawMpiRequests[i] = mpiRequest->releaseRawMpiRequest();
      }
      else { // Null requests map to MPI_REQUEST_NULL
        rawMpiRequests[i] = MPI_REQUEST_NULL;
        someNullRequests = true;
      }
    }

    // This is the part where we've finally peeled off the wrapper and
    // we can now interact with MPI directly.
    //
    // One option in the one-argument version of waitAll() is to ignore
    // the statuses completely.  MPI lets you pass in the named constant
    // MPI_STATUSES_IGNORE for the MPI_Status array output argument in
    // MPI_Waitall(), which would tell MPI not to bother with the
    // statuses.  However, we want the statuses because we can use them
    // for detailed error diagnostics in case something goes wrong.
    const int err = MPI_Waitall (count, rawMpiRequests.getRawPtr(),
                                 rawMpiStatuses.getRawPtr());

    // In MPI_Waitall(), an error indicates that one or more requests
    // failed.  In that case, there could be requests that completed
    // (their MPI_Status' error field is MPI_SUCCESS), and other
    // requests that have not completed yet but have not necessarily
    // failed (MPI_PENDING).  We make no attempt here to wait on the
    // pending requests.  It doesn't make sense for us to do so, because
    // in general Teuchos::Comm doesn't attempt to provide robust
    // recovery from failed messages.
    if (err != MPI_SUCCESS) {
      if (err == MPI_ERR_IN_STATUS) {
        //
        // When MPI_Waitall returns MPI_ERR_IN_STATUS (a standard error
        // class), it's telling us to check the error codes in the
        // returned statuses.  In that case, we do so and generate a
        // detailed exception message.
        //
        // Figure out which of the requests failed.
        Array<std::pair<size_type, int> > errorLocationsAndCodes;
        for (size_type k = 0; k < rawMpiStatuses.size(); ++k) {
          const int curErr = rawMpiStatuses[k].MPI_ERROR;
          if (curErr != MPI_SUCCESS) {
            errorLocationsAndCodes.push_back (std::make_pair (k, curErr));
          }
        }
        const size_type numErrs = errorLocationsAndCodes.size();
        if (numErrs > 0) {
          // There was at least one error.  Assemble a detailed
          // exception message reporting which requests failed and
          // their error codes.
          std::ostringstream os;
          os << "Teuchos::MpiComm::waitAll: MPI_Waitall() failed with error \""
             << mpiErrorCodeToString (err) << "\".  Of the " << count
             << " total request" << (count != 1 ? "s" : "") << ", " << numErrs
             << " failed.  Here are the indices of the failed requests, and the "
            "error codes extracted from their returned MPI_Status objects:"
             << std::endl;
          for (size_type k = 0; k < numErrs; ++k) {
            const size_type errInd = errorLocationsAndCodes[k].first;
            os << "Request " << errInd << ": MPI_ERROR = "
               << mpiErrorCodeToString (rawMpiStatuses[errInd].MPI_ERROR)
               << std::endl;
          }
          if (someNullRequests) {
            os << "  On input to MPI_Waitall, there was at least one MPI_"
              "Request that was MPI_REQUEST_NULL.  MPI_Waitall should not "
              "normally fail in that case, but we thought we should let you know "
              "regardless.";
          }
          TEUCHOS_TEST_FOR_EXCEPTION(true, std::runtime_error, os.str());
        }
        // If there were no actual errors in the returned statuses,
        // well, then I guess everything is OK.  Just keep going.
      }
      else {
        std::ostringstream os;
        os << "Teuchos::MpiComm::waitAll: MPI_Waitall() failed with error \""
           << mpiErrorCodeToString (err) << "\".";
        if (someNullRequests) {
          os << "  On input to MPI_Waitall, there was at least one MPI_Request "
            "that was MPI_REQUEST_NULL.  MPI_Waitall should not normally fail in "
            "that case, but we thought we should let you know regardless.";
        }
        TEUCHOS_TEST_FOR_EXCEPTION(true, std::runtime_error, os.str());
      }
    }

#ifdef HAVE_TEUCHOS_DEBUG
    // mfh 02 Apr 2012: This test fails in some cases (e.g., Belos
    // BlockCG), with the MPI_Request reporting 8 bytes and the
    // MPI_Status reporting 0 bytes.  The tests pass otherwise, so I'm
    // disabling this check for now.
    if (false)
    {
      // In debug mode, test whether the requests' message lengths
      // matched the message lengths on completion.
      Array<size_type> nonmatchingIndices;
      Array<std::pair<size_type, size_type> > nonmatchingLengthPairs;
      for (size_type k = 0; k < count; ++k) {
        if (! is_null (requests[k])) {
          RCP<MpiCommRequest> mpiRequest =
            rcp_dynamic_cast<MpiCommRequest> (requests[k]);

          int statusCount = -1;
          (void) MPI_Get_count (&rawMpiStatuses[k], MPI_CHAR, &statusCount);
          if (mpiRequest->numBytes() != as<size_type> (statusCount)) {
            nonmatchingIndices.push_back (k);
            nonmatchingLengthPairs.push_back (
              std::make_pair (mpiRequest->numBytes(),
                              Teuchos::as<size_type> (statusCount)));
          }
        }
      }
      const size_type numNonmatching = nonmatchingIndices.size();
      if (numNonmatching > 0) {
        std::ostringstream os;
        os << "Teuchos::MpiComm::waitAll(): " << numNonmatching << " message "
          "request" << (numNonmatching != 1 ? "s" : "") << " have a number of "
          "bytes which does not match the number of bytes in "
           << (numNonmatching != 1 ? "their" : "its") << " corresponding status"
           << (numNonmatching != 1 ? "es" : "") << "." << std::endl;
        os << "Here are the lengths that don't match (from MPI_Request, "
          "MPI_Status resp.): " << std::endl;
        for (Array<std::pair<size_type, size_type> >::const_iterator
               it = nonmatchingLengthPairs.begin();
             it != nonmatchingLengthPairs.end(); ++it) {
          os << "(" << it->first << "," << it->second << ") ";
        }
        if (err == MPI_ERR_IN_STATUS) {
          os << std::endl << "This is the odd case where MPI_Waitall returned "
            "MPI_ERR_IN_STATUS, but all of the MPI_Statuses' error codes were "
            "MPI_SUCCESS.";
        }
        // This is a bug, so we throw std::logic_error.
        TEUCHOS_TEST_FOR_EXCEPTION(numNonmatching > 0, std::logic_error, os.str());
      }
    }
#endif // HAVE_TEUCHOS_DEBUG

    // Invalidate the input array of requests by setting all entries to
    // null.
    std::fill (requests.begin(), requests.end(), null);
  }
} // namespace (anonymous)


template<typename Ordinal>
void
MpiComm<Ordinal>::
waitAll (const ArrayView<RCP<CommRequest> >& requests) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::waitAll(requests)" );

  Array<MPI_Status> rawMpiStatuses (requests.size());
  waitAllImpl<Ordinal> (requests, rawMpiStatuses());
}
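
// Example (a minimal sketch of the intended nonblocking pattern;
// 'comm', 'msgSize', and 'partnerRank' are hypothetical caller data):
//
//   Array<char> sendBuf (msgSize), recvBuf (msgSize);
//   Array<RCP<CommRequest> > requests;
//   requests.push_back (comm.ireceive (recvBuf (), partnerRank));
//   requests.push_back (comm.isend (sendBuf (), partnerRank));
//   comm.waitAll (requests ());
//   // On return, waitAll has set every entry of 'requests' to null.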


template<typename Ordinal>
void
MpiComm<Ordinal>::
waitAll (const ArrayView<RCP<CommRequest> >& requests,
         const ArrayView<RCP<CommStatus<Ordinal> > >& statuses) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::waitAll(requests, statuses)" );

  typedef ArrayView<RCP<CommRequest> >::size_type size_type;
  const size_type count = requests.size();

  TEUCHOS_TEST_FOR_EXCEPTION(count != statuses.size(),
    std::invalid_argument, "Teuchos::MpiComm::waitAll: requests.size() = "
    << count << " != statuses.size() = " << statuses.size() << ".");

  Array<MPI_Status> rawMpiStatuses (count);
  waitAllImpl<Ordinal> (requests, rawMpiStatuses());

  // Repackage the raw MPI_Status structs into the wrappers.
  for (size_type i = 0; i < count; ++i) {
    statuses[i] = mpiCommStatus<Ordinal> (rawMpiStatuses[i]);
  }
}


template<typename Ordinal>
RCP<CommStatus<Ordinal> >
MpiComm<Ordinal>::wait (const Ptr<RCP<CommRequest> >& request) const
{
  TEUCHOS_COMM_TIME_MONITOR( "Teuchos::MpiComm::wait(...)" );

  if (is_null(*request)) {
    return null; // Nothing to wait on ...
  }
  const RCP<MpiCommRequest> mpiCommRequest =
    rcp_dynamic_cast<MpiCommRequest>(*request);
  // This function doesn't satisfy the strong exception guarantee,
  // because releaseRawMpiRequest() modifies the MpiCommRequest.
  MPI_Request rawMpiRequest = mpiCommRequest->releaseRawMpiRequest();
  MPI_Status status;
  const int err = MPI_Wait (&rawMpiRequest, &status);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::runtime_error,
    "Teuchos::MpiComm::wait: MPI_Wait() failed with error \""
    << mpiErrorCodeToString (err) << "\".");

  *request = null;
  return rcp (new MpiCommStatus<Ordinal> (status));
}


template<typename Ordinal>
RCP< Comm<Ordinal> >
MpiComm<Ordinal>::duplicate() const
{
  return rcp (new MpiComm<Ordinal> (*this));
}


template<typename Ordinal>
RCP< Comm<Ordinal> >
MpiComm<Ordinal>::split(const int color, const int key) const
{
  MPI_Comm newComm;
  const int splitReturn =
    MPI_Comm_split (*rawMpiComm_,
                    color < 0 ? MPI_UNDEFINED : color,
                    key,
                    &newComm);
  TEUCHOS_TEST_FOR_EXCEPTION(
    splitReturn != MPI_SUCCESS,
    std::logic_error,
    "Teuchos::MpiComm::split: Failed to create communicator with color "
    << color << " and key " << key << ".  MPI_Comm_split failed with error \""
    << mpiErrorCodeToString (splitReturn) << "\".");
  if (newComm == MPI_COMM_NULL) {
    return RCP< Comm<Ordinal> >();
  } else {
    return rcp(new MpiComm<Ordinal>(
                   rcp_implicit_cast<const OpaqueWrapper<MPI_Comm> >(
                                     opaqueWrapper(newComm,MPI_Comm_free))));
  }
}
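
// Example (a minimal sketch): split a communicator into even- and
// odd-ranked halves, ordering each half by the original rank.  'comm'
// is a hypothetical MpiComm<int> instance.
//
//   const int color = comm.getRank () % 2;
//   RCP<Comm<int> > half = comm.split (color, comm.getRank ());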


template<typename Ordinal>
RCP< Comm<Ordinal> >
MpiComm<Ordinal>::createSubcommunicator(const ArrayView<const int> &ranks) const
{
  int err = MPI_SUCCESS; // For error codes returned by MPI functions

  // Get the group that this communicator is in.
  MPI_Group thisGroup;
  err = MPI_Comm_group(*rawMpiComm_, &thisGroup);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::logic_error,
    "Failed to obtain the current communicator's group.  "
    "MPI_Comm_group failed with error \""
    << mpiErrorCodeToString (err) << "\".");

  // Create a new group with the specified members.
  MPI_Group newGroup;
  // It's rude to cast away const, but MPI functions demand it.
  //
  // NOTE (mfh 14 Aug 2012) Please don't ask for &ranks[0] unless you
  // know that ranks.size() > 0.  That's why I'm using getRawPtr().
  err = MPI_Group_incl (thisGroup, ranks.size(),
                        const_cast<int*> (ranks.getRawPtr ()), &newGroup);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::logic_error,
    "Failed to create subgroup.  MPI_Group_incl failed with error \""
    << mpiErrorCodeToString (err) << "\".");

  // Create a new communicator from the new group.
  MPI_Comm newComm;
  try {
    err = MPI_Comm_create(*rawMpiComm_, newGroup, &newComm);
    TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::logic_error,
      "Failed to create subcommunicator.  MPI_Comm_create failed with error \""
      << mpiErrorCodeToString (err) << "\".");
  } catch (...) {
    // Attempt to free the groups before rethrowing.  If successful,
    // this will prevent a memory leak due to the "lost" groups that
    // were allocated successfully above.  Since we're throwing
    // std::logic_error anyway, we can only promise best-effort
    // recovery; thus, we don't check the error codes.
    (void) MPI_Group_free (&newGroup);
    (void) MPI_Group_free (&thisGroup);
    throw;
  }

  // We don't need the groups any more, so free them.
  err = MPI_Group_free (&newGroup);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::logic_error,
    "Failed to free subgroup.  MPI_Group_free failed with error \""
    << mpiErrorCodeToString (err) << "\".");
  err = MPI_Group_free (&thisGroup);
  TEUCHOS_TEST_FOR_EXCEPTION(err != MPI_SUCCESS, std::logic_error,
    "Failed to free the current communicator's group.  "
    "MPI_Group_free failed with error \""
    << mpiErrorCodeToString (err) << "\".");

  if (newComm == MPI_COMM_NULL) {
    return RCP< Comm<Ordinal> >();
  } else {
    return rcp(new MpiComm<Ordinal>(
                   rcp_implicit_cast<const OpaqueWrapper<MPI_Comm> >(
                                     opaqueWrapper(newComm,MPI_Comm_free))));
  }
}
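
// Example (a minimal sketch): create a subcommunicator containing only
// the first two processes.  Processes not listed in 'ranks' get a null
// RCP back.  'comm' is a hypothetical MpiComm<int> instance.
//
//   Tuple<int, 2> ranks = tuple (0, 1);
//   RCP<Comm<int> > subComm = comm.createSubcommunicator (ranks ());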


// Overridden from Describable


template<typename Ordinal>
std::string MpiComm<Ordinal>::description() const
{
  std::ostringstream oss;
  oss
    << typeName(*this)
    << "{"
    << "size="<<size_
    << ",rank="<<rank_
    << ",rawMpiComm="<<static_cast<MPI_Comm>(*rawMpiComm_)
    <<"}";
  return oss.str();
}


#ifdef TEUCHOS_MPI_COMM_DUMP
template<typename Ordinal>
bool MpiComm<Ordinal>::show_dump = false;
#endif


// private


template<typename Ordinal>
void MpiComm<Ordinal>::assertRank(const int rank, const std::string &rankName) const
{
  TEUCHOS_TEST_FOR_EXCEPTION(
    ! ( 0 <= rank && rank < size_ ), std::logic_error
    ,"Error, "<<rankName<<" = " << rank << " is not in the valid range"
    " [0,"<<size_-1<<"]!"
    );
}


} // namespace Teuchos


template<typename Ordinal>
Teuchos::RCP<Teuchos::MpiComm<Ordinal> >
Teuchos::createMpiComm(
  const RCP<const OpaqueWrapper<MPI_Comm> > &rawMpiComm
  )
{
  if( rawMpiComm.get()!=NULL && *rawMpiComm != MPI_COMM_NULL )
    return rcp(new MpiComm<Ordinal>(rawMpiComm));
  return Teuchos::null;
}


#endif // TEUCHOS_MPI_COMM_HPP