Sierra Toolkit (version-of-the-day build) — generated documentation page for mpih.hpp.
The annotated source listing of this file follows.
00001 /*--------------------------------------------------------------------*/
00002 /*    Copyright 2002, 2009 Sandia Corporation.                              */
00003 /*    Under the terms of Contract DE-AC04-94AL85000, there is a       */
00004 /*    non-exclusive license for use of this work by or on behalf      */
00005 /*    of the U.S. Government.  Export of this program may require     */
00006 /*    a license from the United States Government.                    */
00007 /*--------------------------------------------------------------------*/
00018 #ifndef STK_UTIL_PARALLEL_mpih_h
00019 #define STK_UTIL_PARALLEL_mpih_h
00020 
00021 #include <vector>
00022 #include <mpi.h>
00023 
00024 #include <stk_util/parallel/Exception.hpp>
00025 
00026 #include <MPIH_Include.h>
00027 
00028 namespace sierra {
00029 namespace mpih {
00030 
/**
 * @brief Enable the mpih exception-handling layer.
 * NOTE(review): declaration only — presumably installs mpih's MPI wrappers
 * and handler state; confirm against the definition in mpih.C.
 */
00039 void Enable();
00040 
/**
 * @brief Delete mpih's cached attribute (keyval) from @p comm.
 * NOTE(review): presumably MPI attribute-caching cleanup — confirm in mpih.C.
 */
00041 void Keyval_delete(MPI_Comm comm);
00042 
/**
 * @brief Switch mpih from @p old_comm to @p new_comm.
 * NOTE(review): exact semantics (copy vs. move of cached state) are not
 * visible from this header — verify against the implementation.
 */
00057 void Sub_Communicator (MPI_Comm old_comm,
00058            MPI_Comm new_comm);
00059 
/** @brief Register this processor's exception handles with mpih. */
00066 void Register_Handles ();
00067 
/**
 * @brief Add the exception handle @p X to the registered set.
 * Registered handles are required to be consistent across all processors
 * (see the handle discussion later in this header).
 */
00078 void Add_Handle (const ExParallel &X);
00079 
/** @brief Remove all previously registered exception handles. */
00085 void Delete_Handles ();
00086 
/**
 * @brief Broadcast @p count elements of @p datatype from rank @p root.
 * Mirrors MPI_Bcast but takes no communicator argument — presumably the
 * communicator configured via Sub_Communicator is used (confirm in mpih.C).
 * A count of 0 still performs the collective (see ParallelExceptionCheck).
 */
00099 void Bcast(void        * buffer,
00100      int           count,
00101      MPI_Datatype  datatype,
00102      int           root);
00103 
/** @brief All-reduce @p in_buffer into @p out_buffer with @p op (cf. MPI_Allreduce). */
00104 void Allreduce( void        * in_buffer,
00105     void        * out_buffer,
00106     int           count,
00107     MPI_Datatype  datatype,
00108     MPI_Op        op );
00109 
/** @brief Gather fixed-size contributions from every rank to @p root (cf. MPI_Gather). */
00110 void Gather( void        * send_buf,
00111        int           send_size,
00112        MPI_Datatype  send_datatype,
00113        void        * recv_buf,
00114        int           recv_size,
00115        MPI_Datatype  recv_datatype,
00116        int           root);
00117 
/** @brief Reduce @p in_buffer to @p out_buffer on @p root with @p op (cf. MPI_Reduce). */
00118 void Reduce( void        * in_buffer,
00119        void        * out_buffer,
00120        int           count,
00121        MPI_Datatype  datatype,
00122        MPI_Op        op,
00123        int           root);
00124 
/** @brief Reduce, then scatter @p recv_count elements to each rank (cf. MPI_Reduce_scatter). */
00125 void Reduce_Scatter( void        * in_buffer,
00126          void        * out_buffer,
00127          int           recv_count,
00128          MPI_Datatype  datatype,
00129          MPI_Op        op);
00130 
/** @brief Scatter fixed-size pieces from @p root to every rank (cf. MPI_Scatter). */
00131 void Scatter( void        * send_buf,
00132         int           send_size,
00133         MPI_Datatype  send_datatype,
00134         void        * recv_buf,
00135         int           recv_size,
00136         MPI_Datatype  recv_datatype,
00137         int           root);
00138 
/**
 * @brief Release a map built by Sparse_Map / Sparse_Symmetric_Map.
 * NOTE(review): whether *map is nulled after the free is not visible here.
 */
00139 void Map_Free(MPIH_Map * map);
00140 
/**
 * @brief Query the properties of a sparse-communication @p map.
 * All pointer arguments are out-parameters (per the inline annotations below).
 */
00141 void Map_Query
00142 (MPIH_Map          map         /* in:  Map for the sparse operation */,
00143  int              *symmetric   /* out:                         */,
00144  size_t           *nsend       /* out: Number of sends         */,
00145  std::vector<int> *sendlist    /* out: List of destinations    */,
00146  std::vector<int> *sendlength  /* out:                         */,
00147  std::vector<int> *sendbuflen  /* out:                         */,
00148  size_t           *nrecv       /* out: Number of receives      */,
00149  std::vector<int> *recvlist    /* out: List of destinations    */,
00150  std::vector<int> *recvlength  /* out:                      */,
00151  std::vector<int> *recvbuflen  /* out:                      */ );
00152 
/**
 * @brief Blocking sparse (point-to-point pattern) exchange described by @p map.
 * Equivalent to Initialize_Sparse followed by Wait_Sparse — see the comment
 * block below for the buffer-reuse and ordering constraints.
 */
00153 void Sparse
00154 (void       * sendbuf   /* in:  address of send buffer        */ ,
00155  MPI_Datatype sendtype  /* in:  datatype of the send messages */ ,
00156  void       * recvbuf   /* in:  address of receive buffer     */ ,
00157  MPI_Datatype recvtype  /* in:  datatype of the recv messages */ ,
00158  int          transpose /* in:  whether to reverse communic.  */ ,
00159  MPIH_Map     map       /* in:  communication map             */ );
00160 
00161 /* The Sparse is a combination of Initialize_Sparse and Wait_Sparse.
00162  * This allows computation to happen while the communication is
00163  * begin processed.  After the call to Initialize_Sparse the
00164  * buffers should NOT be reused until the completion of the
00165  * corresponding Wait_Sparse call.  The map passed to Wait_Sparse
00166  * must be the same map that was used in Initialize_Sparse.
00167  * Also, it is not possible to call Initialize_Sparse twice followed
00168  * by two calls to Wait_Sparse.  Because MPI does not guarentee
00169  * the order that messages are received, the first Wait_Sparse
00170  * could get the messages from the second Initialize_Sparse.
00171  */
/** @brief Start a sparse exchange; complete it with Wait_Sparse (see note above). */
00172 void Initialize_Sparse
00173 (void       * sendbuf   /* in:  address of send buffer        */ ,
00174  MPI_Datatype sendtype  /* in:  datatype of the send messages */ ,
00175  void       * recvbuf   /* in:  address of receive buffer     */ ,
00176  MPI_Datatype recvtype  /* in:  datatype of the recv messages */ ,
00177  int          transpose /* in:  whether to reverse communic.  */ ,
00178  MPIH_Map     map       /* in:  communication map             */ );
00179 
/** @brief Complete the exchange started by Initialize_Sparse on the same @p map. */
00180 void Wait_Sparse
00181 (MPIH_Map     map       /* in:  communication map             */ );
00182 
/** @brief Build a (possibly asymmetric) sparse-communication map. */
00183 void Sparse_Map
00184 (const std::vector<int> &lengths   /* in:  Byte length of each message  */ ,
00185  const std::vector<int> &buflens   /* in:  Byte length of each message buffer */ ,
00186  const std::vector<int> &sendlist  /* in:  Destination processors       */ ,
00187  MPIH_Map * map              /* out: Map for the sparse operation */ );
00188 
/** @brief Build a sparse map for a symmetric communication pattern. */
00189 void Sparse_Symmetric_Map
00190 (const std::vector<int> &lengths   /* in:  Length of each message       */ ,
00191  const std::vector<int> &buflens   /* in:  Length of each message buffe */ ,
00192  const std::vector<int> &sendlist  /* in:  Destination processors       */ ,
00193  MPIH_Map * map       /* out: Map for the sparse operation */ );
00194 
00195 inline void ParallelExceptionCheck()
00196 {
00197   int dummy = 0;
00198   Bcast(&dummy, 0, MPI_INT, 0);
00199 }
00200 
00201 
00202 /*
00203  * The rest of the functions are unlikely to be used
00204  * outside of mpih.C.
00205  */
00206 
00207 /* Local handle, Global handles, and just plain handles:
00208  *
00209  * Just plain handles are the handles that are registered
00210  *    on this processor.  The number of them is unknown
00211  *    in advance.  These are required to be consistant
00212  *    across all processors.
00213  *
00214  * local_handle is singular, it refers to the exception
00215  *    handle that has been set on the local processor.
00216  *    This is the handle that will be propagated on
00217  *    the next collective communication.
00218  *
00219  * Global handles is the collection of local handles
00220  *    from all of the processors after the collective
00221  *    communication.  There will be one for each
00222  *    processor, but some will be NULL if there was not
00223  *    a local handle set for that processor.
00224  */
/* Activation, count, and enumeration of the registered ("plain") handles. */
00225 void Activate_Handles   ();
00226 void Deactivate_Handles ();
00227 int  Get_Nhandles       ();
/* Fills a caller-provided array with the registered handles —
 * presumably sized via Get_Nhandles(); confirm the contract in mpih.C. */
00228 void Get_Handles        (ExParallel **handles);
00229 
/* This processor's single local exception handle (see note above). */
00230 ExParallel *Get_Local_Handle ();
00231 void Set_Local_Handle   (ExParallel &handle);
00232 void Reset_Local_Handle ();
00233 
/* Per-processor handles collected by the last collective; entries may be
 * NULL for ranks with no local handle set (see note above). */
00234 void Get_Global_Handles (ExParallel ** handles);
00235 int  Get_Global_Status  ();
00236 
/* Set/clear/query the status-check flag — semantics defined in mpih.C. */
00237 void Set_Status_Check   ();
00238 void Reset_Status_Check ();
00239 int  Get_Status_Check   ();
00240 
/* Out-parameters receive mpih's internal MPI message tags. */
00241 void Get_Tags (int *active,
00242          int *tag_sparse,
00243          int *tag_normal,
00244          int *tag_message);
00245 
/* Out-parameters receive the installed handler callbacks. */
00246 void Get_Functions (MPIH_Handler_compete *handler_compete_fn ,
00247         MPIH_Handler_execute *handler_execute_fn );
00248 
00249 int  Get_Control_Message();
00250 
00251 /* interface for the product versioning. */
00252 const char *get_product_name();
00253 const char *get_product_version();
00254 const char *get_product_qualifier();
00255 void register_product();
00256 
00257 } // namespace mpih
00258 } // namespace sierra
00259 
00260 #endif //  STK_UTIL_PARALLEL_mpih_h
Index: All Classes | Namespaces | Files | Functions | Variables | Typedefs | Enumerations | Enumerator | Friends | Defines