Epetra_MpiDistributor.cpp

00001 //@HEADER
00002 // ************************************************************************
00003 // 
00004 //               Epetra: Linear Algebra Services Package 
00005 //                 Copyright (2001) Sandia Corporation
00006 // 
00007 // Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive
00008 // license for use of this work by or on behalf of the U.S. Government.
00009 // 
00010 // This library is free software; you can redistribute it and/or modify
00011 // it under the terms of the GNU Lesser General Public License as
00012 // published by the Free Software Foundation; either version 2.1 of the
00013 // License, or (at your option) any later version.
00014 //  
00015 // This library is distributed in the hope that it will be useful, but
00016 // WITHOUT ANY WARRANTY; without even the implied warranty of
00017 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
00018 // Lesser General Public License for more details.
00019 //  
00020 // You should have received a copy of the GNU Lesser General Public
00021 // License along with this library; if not, write to the Free Software
00022 // Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307
00023 // USA
00024 // Questions? Contact Michael A. Heroux (maherou@sandia.gov) 
00025 // 
00026 // ************************************************************************
00027 //@HEADER
00028 
00029 #include "Epetra_MpiDistributor.h"
00030 #include "Epetra_MpiComm.h"
00031 
00032 
00033 //==============================================================================
00034 // Epetra_MpiDistributor constructor
00035 Epetra_MpiDistributor::Epetra_MpiDistributor(const Epetra_MpiComm & Comm): 
00036   Epetra_Object("Epetra::MpiDistributor"),
00037   lengths_to_(0),
00038   procs_to_(0),
00039   indices_to_(0),
00040   size_indices_to_(0),
00041   lengths_from_(0),
00042   procs_from_(0),
00043   indices_from_(0),
00044   size_indices_from_(0),
00045   resized_(false),
00046   sizes_(0),
00047   sizes_to_(0),
00048   starts_to_(0),
00049   starts_to_ptr_(0),
00050   indices_to_ptr_(0),
00051   sizes_from_(0),
00052   starts_from_(0),
00053   starts_from_ptr_(0),
00054   indices_from_ptr_(0),
00055   nrecvs_(0),
00056   nsends_(0),
00057   nexports_(0),
00058   self_msg_(0),
00059   max_send_length_(0),
00060   total_recv_length_(0),
00061   tag_(Comm.GetMpiTag()),
00062   epComm_(&Comm),
00063   comm_(Comm.GetMpiComm()),
00064   request_(0),
00065   status_(0),
00066   no_delete_(false),
00067   send_array_(0),
00068   send_array_size_(0),
00069   comm_plan_reverse_(0)
00070 {
00071 }
00072 
00073 //==============================================================================
00074 Epetra_MpiDistributor::Epetra_MpiDistributor(const Epetra_MpiDistributor & Distributor):
00075   Epetra_Object("Epetra::MpiDistributor"),
00076   lengths_to_(0),
00077   procs_to_(0),
00078   indices_to_(0),
00079   size_indices_to_(Distributor.size_indices_to_),
00080   lengths_from_(0),
00081   procs_from_(0),
00082   indices_from_(0),
00083   size_indices_from_(Distributor.size_indices_from_),
00084   resized_(false),
00085   sizes_(0),
00086   sizes_to_(0),
00087   starts_to_(0),
00088   starts_to_ptr_(0),
00089   indices_to_ptr_(0),
00090   sizes_from_(0),
00091   starts_from_(0),
00092   starts_from_ptr_(0),
00093   indices_from_ptr_(0),
00094   nrecvs_(Distributor.nrecvs_),
00095   nsends_(Distributor.nsends_),
00096   nexports_(Distributor.nexports_),
00097   self_msg_(Distributor.self_msg_),
00098   max_send_length_(Distributor.max_send_length_),
00099   total_recv_length_(Distributor.total_recv_length_),
00100   tag_(Distributor.tag_),
00101   epComm_(Distributor.epComm_),
00102   comm_(Distributor.comm_),
00103   request_(0),
00104   status_(0),
00105   no_delete_(Distributor.no_delete_),
00106   send_array_(0),
00107   send_array_size_(0),
00108   comm_plan_reverse_(0)
00109 {
00110   int i;
00111   if (nsends_>0) {
00112     lengths_to_ = new int[nsends_];
00113     procs_to_ = new int[nsends_];
00114     for (i=0; i<nsends_; i++) {
00115       lengths_to_[i] = Distributor.lengths_to_[i];
00116       procs_to_[i] = Distributor.procs_to_[i];
00117     }
00118   }
00119   if (size_indices_to_>0) {
00120     indices_to_ = new int[size_indices_to_];
00121     for (i=0; i<size_indices_to_; i++) {
00122       indices_to_[i] = Distributor.indices_to_[i];
00123     }
00124   }
00125 
00126   if (nrecvs_>0) {
00127     lengths_from_ = new int[nrecvs_];
00128     procs_from_ = new int[nrecvs_];
00129     request_ = new MPI_Request[ nrecvs_ ];
00130     status_ = new MPI_Status[ nrecvs_ ];
00131     for (i=0; i<nrecvs_; i++) {
00132       lengths_from_[i] = Distributor.lengths_from_[i];
00133       procs_from_[i] = Distributor.procs_from_[i];
00134     }
00135   }
00136   if (size_indices_from_>0) {
00137     indices_from_ = new int[size_indices_from_];
00138     for (i=0; i<size_indices_from_; i++) {
00139       indices_from_[i] = Distributor.indices_from_[i];
00140     }
00141   }
00142 }
00143 
00144 //==============================================================================
00145 // Epetra_MpiDistributor destructor
00146 Epetra_MpiDistributor::~Epetra_MpiDistributor()
00147 {
00148   if( !no_delete_ )
00149   {
00150     if( lengths_to_ != 0 ) delete [] lengths_to_;
00151     if( procs_to_ != 0 ) delete [] procs_to_;
00152     if( indices_to_ != 0 ) delete [] indices_to_;
00153     if( lengths_from_ != 0 ) delete [] lengths_from_;
00154     if( procs_from_ != 0 ) delete [] procs_from_;
00155     if( indices_from_ != 0 ) delete [] indices_from_;
00156     if( starts_to_ != 0 ) delete [] starts_to_;
00157     if( starts_from_ != 0 ) delete [] starts_from_;
00158   }
00159 
00160   if( sizes_ != 0 ) delete [] sizes_;
00161   if( sizes_to_ != 0 ) delete [] sizes_to_;
00162   if( sizes_from_ != 0 ) delete [] sizes_from_;
00163   if( starts_to_ptr_ != 0 ) delete [] starts_to_ptr_;
00164   if( starts_from_ptr_ != 0 ) delete [] starts_from_ptr_;
00165   if( indices_to_ptr_ != 0 ) delete [] indices_to_ptr_;
00166   if( indices_from_ptr_ != 0 ) delete [] indices_from_ptr_;
00167 
00168   if( request_ != 0 ) delete [] request_;
00169   if( status_ != 0 ) delete [] status_;
00170 
00171   if( send_array_size_ != 0 ) { delete [] send_array_; send_array_size_ = 0; }
00172 
00173   if( comm_plan_reverse_ != 0 ) delete comm_plan_reverse_;
00174 }
00175 
00176 
00177 //==============================================================================
00178 //---------------------------------------------------------------------------
00179 //CreateFromSends Method
00180 // - create communication plan given a known list of procs to send to
00181 //---------------------------------------------------------------------------
00182 int Epetra_MpiDistributor::CreateFromSends( const int & NumExportIDs,
00183                                             const int * ExportPIDs,
00184                                             bool Deterministic,
00185                                             int & NumRemoteIDs )
00186 {
00187   (void)Deterministic;
00188   nexports_ = NumExportIDs;
00189 
00190   int i;
00191 
00192   int my_proc;
00193   MPI_Comm_rank( comm_, &my_proc );
00194 
00195   int nprocs;
00196   MPI_Comm_size( comm_, &nprocs );
00197 
00198   // Check to see if items are grouped by processor w/o gaps
00199   // If so, indices_to -> 0
00200 
00201   // Setup data structures for quick traversal of arrays
00202   int * starts = new int[ nprocs + 1 ];
00203   for( i = 0; i < nprocs; i++ )
00204     starts[i] = 0;
00205 
00206   int nactive = 0;
00207   bool no_send_buff = true;
00208   int numDeadIndices = 0; // In some cases the GIDs will not be owned by any processor and the PID will be -1
00209 
00210   for( i = 0; i < NumExportIDs; i++ )
00211   {
00212     if( no_send_buff && i && (ExportPIDs[i] < ExportPIDs[i-1]) )
00213       no_send_buff = false;
00214     if( ExportPIDs[i] >= 0 )
00215     {
00216       ++starts[ ExportPIDs[i] ];
00217       ++nactive;
00218     }
00219     else numDeadIndices++; // Increase the number of dead indices.  Used below to leave these out of the analysis
00220   }
00221 
00222   self_msg_ = ( starts[my_proc] != 0 ) ? 1 : 0;
00223 
00224   nsends_ = 0;
00225 
00226   if( no_send_buff ) //grouped by processor, no send buffer or indices_to_ needed
00227   {
00228     for( i = 0; i < nprocs; ++i )
00229       if( starts[i] ) ++nsends_;
00230 
00231     if( nsends_ )
00232     {
00233       procs_to_ = new int[nsends_];
00234       starts_to_ = new int[nsends_];
00235       lengths_to_ = new int[nsends_];
00236     }
00237 
00238     int index = numDeadIndices;  // Leave off the dead indices (PID = -1)
00239     int proc;
00240     for( i = 0; i < nsends_; ++i )
00241     {
00242       starts_to_[i] = index;
00243       proc = ExportPIDs[index];
00244       procs_to_[i] = proc;
00245       index += starts[proc];
00246     }
00247 
00248     if( nsends_ )
00249       Sort_ints_( procs_to_, starts_to_, nsends_ );
00250 
00251     max_send_length_ = 0;
00252 
00253     for( i = 0; i < nsends_; ++i )
00254     {
00255       proc = procs_to_[i];
00256       lengths_to_[i] = starts[proc];
00257       if( (proc != my_proc) && (lengths_to_[i] > max_send_length_) )
00258         max_send_length_ = lengths_to_[i];
00259     }
00260   }
00261   else //not grouped by processor, need send buffer and indices_to_
00262   {
00263     if( starts[0] != 0 ) nsends_ = 1;
00264 
00265     for( i = 1; i < nprocs; i++ )
00266     {
00267       if( starts[i] != 0 ) ++nsends_;
00268       starts[i] += starts[i-1];
00269     }
00270 
00271     for( i = nprocs-1; i != 0; i-- )
00272       starts[i] = starts[i-1];
00273 
00274     starts[0] = 0;
00275 
00276     if (nactive>0) {
00277       indices_to_ = new int[ nactive ];
00278       size_indices_to_ = nactive;
00279     }
00280 
00281     for( i = 0; i < NumExportIDs; i++ )
00282     if( ExportPIDs[i] >= 0 )
00283     {
00284       indices_to_[ starts[ ExportPIDs[i] ] ] = i;
00285       ++starts[ ExportPIDs[i] ];
00286     }
00287 
00288     // Reconstruct the starts array to index into indices_to_.
00289 
00290     for( i = nprocs-1; i != 0; i-- )
00291       starts[i] = starts[i-1];
00292     starts[0] = 0;
00293     starts[nprocs] = nactive;
00294 
00295     if (nsends_>0) {
00296       lengths_to_ = new int[ nsends_ ];
00297       procs_to_ = new int[ nsends_ ];
00298       starts_to_ = new int[ nsends_ ];
00299     }
00300 
00301     int j = 0;
00302     max_send_length_ = 0;
00303 
00304     for( i = 0; i < nprocs; i++ )
00305       if( starts[i+1] != starts[i] )
00306       {
00307         lengths_to_[j] = starts[i+1] - starts[i];
00308         starts_to_[j] = starts[i];
00309         if( ( i != my_proc ) && ( lengths_to_[j] > max_send_length_ ) )
00310           max_send_length_ = lengths_to_[j];
00311         procs_to_[j] = i;
00312         j++;
00313       }
00314   }
00315     
00316   delete [] starts;
00317 
00318   nsends_ -= self_msg_;
00319 
00320   //Invert map to see what msgs are received and what length
00321   EPETRA_CHK_ERR( ComputeRecvs_( my_proc, nprocs ) );
00322 
00323   if (nrecvs_>0) {
00324     if( !request_ ) {
00325       request_ = new MPI_Request[ nrecvs_ ];
00326       status_ = new MPI_Status[ nrecvs_ ];
00327     }
00328   }
00329 
00330   NumRemoteIDs = total_recv_length_;
00331 
00332   return 0;
00333 }
00334 
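// Usage sketch for CreateFromSends: a minimal calling sequence, assuming an
// initialized Epetra_MpiComm 'mpiComm' and a caller-built array 'exportPIDs'
// of length 'numExports' holding the destination rank of each exported ID
// (these names are illustrative, not defined in this file):
//
//   Epetra_MpiDistributor plan(mpiComm);
//   int numRemoteIDs = 0;
//   int err = plan.CreateFromSends(numExports, exportPIDs,
//                                  true /*Deterministic*/, numRemoteIDs);
//   // On return, numRemoteIDs holds the number of items this process will
//   // receive when the plan is executed with Do().
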
00335 //==============================================================================
00336 //---------------------------------------------------------------------------
00337 //CreateFromRecvs Method
00338 // - create communication plan given a known list of procs to recv from
00339 //---------------------------------------------------------------------------
00340 int Epetra_MpiDistributor::CreateFromRecvs( const int & NumRemoteIDs,
00341                                             const int * RemoteGIDs,
00342                                             const int * RemotePIDs,
00343                                             bool Deterministic,
00344                                             int & NumExportIDs,
00345                                             int *& ExportGIDs,
00346                                             int *& ExportPIDs )
00347 {
00348   int my_proc;
00349   MPI_Comm_rank( comm_, &my_proc );
00350 
00351   int nprocs;
00352   MPI_Comm_size( comm_, &nprocs );
00353 
00354   EPETRA_CHK_ERR( ComputeSends_( NumRemoteIDs, RemoteGIDs, RemotePIDs, NumExportIDs,
00355          ExportGIDs, ExportPIDs, my_proc) );
00356 
00357   int testNumRemoteIDs;
00358   EPETRA_CHK_ERR( CreateFromSends( NumExportIDs, ExportPIDs,
00359            Deterministic, testNumRemoteIDs ) );
00360 
00361   return(0);
00362 }
00363 
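// Usage sketch for CreateFromRecvs, assuming 'remoteGIDs' and 'remotePIDs' are
// caller-built arrays of length 'numRemote' naming the wanted IDs and their
// owning ranks (illustrative names). The distributor allocates the export
// arrays with new[]; the caller is responsible for deleting them:
//
//   int numExportIDs = 0;
//   int * exportGIDs = 0;
//   int * exportPIDs = 0;
//   plan.CreateFromRecvs(numRemote, remoteGIDs, remotePIDs,
//                        true /*Deterministic*/, numExportIDs,
//                        exportGIDs, exportPIDs);
//   // exportGIDs[i] must now be sent to rank exportPIDs[i].
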
00364 //==============================================================================
00365 //---------------------------------------------------------------------------
00366 //ComputeRecvs Method
00367 //---------------------------------------------------------------------------
00368 int Epetra_MpiDistributor::ComputeRecvs_( int my_proc, 
00369               int nprocs )
00370 {
00371   int * msg_count = new int[ nprocs ];
00372   int * counts = new int[ nprocs ];
00373 
00374   int i;
00375   MPI_Status status;
00376 
00377   for( i = 0; i < nprocs; i++ )
00378   {
00379     msg_count[i] = 0;
00380     counts[i] = 1;
00381   }
00382 
00383   for( i = 0; i < nsends_+self_msg_; i++ )
00384     msg_count[ procs_to_[i] ] = 1;
00385 
00386 #if defined(REDUCE_SCATTER_BUG)
00387 // works around an MPI_Reduce_scatter bug found in MPICH on Linux platforms
00388   MPI_Reduce(msg_count, counts, nprocs, MPI_INT, MPI_SUM, 0, comm_);
00389   MPI_Scatter(counts, 1, MPI_INT, &nrecvs_, 1, MPI_INT, 0, comm_);
00390 #else
00391   MPI_Reduce_scatter( msg_count, &nrecvs_, counts, MPI_INT, MPI_SUM, comm_ );
00392 #endif
00393 
00394   delete [] msg_count;
00395   delete [] counts;
00396 
00397   if (nrecvs_>0) {
00398     lengths_from_ = new int[nrecvs_];
00399     procs_from_ = new int[nrecvs_];
00400     for(i=0; i<nrecvs_; ++i) {
00401       lengths_from_[i] = 0;
00402       procs_from_[i] = 0;
00403     }
00404   }
00405 
00406 #ifndef NEW_COMM_PATTERN
00407   for( i = 0; i < (nsends_+self_msg_); i++ )
00408     if( procs_to_[i] != my_proc ) {
00409       MPI_Send( &(lengths_to_[i]), 1, MPI_INT, procs_to_[i], tag_, comm_ );
00410     }
00411     else
00412     {
00413       //set self_msg_ to end block of recv arrays
00414       lengths_from_[nrecvs_-1] = lengths_to_[i];
00415       procs_from_[nrecvs_-1] = my_proc;
00416     }
00417 
00418   for( i = 0; i < (nrecvs_-self_msg_); i++ )
00419   {
00420     MPI_Recv( &(lengths_from_[i]), 1, MPI_INT, MPI_ANY_SOURCE, tag_, comm_, &status );
00421     procs_from_[i] = status.MPI_SOURCE;
00422   }
00423 
00424   MPI_Barrier( comm_ );
00425 #else
00426   if (nrecvs_>0) {
00427     if( !request_ ) {
00428       request_ = new MPI_Request[nrecvs_-self_msg_];
00429       status_ = new MPI_Status[nrecvs_-self_msg_];
00430     }
00431   }
00432 
00433   for( i = 0; i < (nrecvs_-self_msg_); i++ )
00434     MPI_Irecv( &(lengths_from_[i]), 1, MPI_INT, MPI_ANY_SOURCE, tag_, comm_, &(request_[i]) );
00435 
00436   MPI_Barrier( comm_ );
00437 
00438   for( i = 0; i < (nsends_+self_msg_); i++ )
00439     if( procs_to_[i] != my_proc ) {
00440       MPI_Rsend( &(lengths_to_[i]), 1, MPI_INT, procs_to_[i], tag_, comm_ );
00441     }
00442     else
00443     {
00444       //set self_msg_ to end block of recv arrays
00445       lengths_from_[nrecvs_-1] = lengths_to_[i];
00446       procs_from_[nrecvs_-1] = my_proc;
00447     }
00448 
00449   if( (nrecvs_-self_msg_) > 0 ) MPI_Waitall( (nrecvs_-self_msg_), request_, status_ );
00450 
00451   for( i = 0; i < (nrecvs_-self_msg_); i++ )
00452     procs_from_[i] = status_[i].MPI_SOURCE;
00453 #endif
00454 
00455   Sort_ints_( procs_from_, lengths_from_, nrecvs_ );
00456 
00457   // Compute indices_from_
00458   // Seems to break some reverse communication
00459 /* Not necessary since reverse communication is always blocked
00460   size_indices_from_ = 0;
00461   if( nrecvs_ > 0 )
00462   {
00463     for( i = 0; i < nrecvs_; i++ )  size_indices_from_ += lengths_from_[i];
00464     indices_from_ = new int[ size_indices_from_ ];
00465 
00466     for (i=0; i<size_indices_from_; i++) indices_from_[i] = i;
00467   }
00468 */
00469 
00470   if (nrecvs_>0) starts_from_ = new int[nrecvs_];
00471   int j = 0;
00472   for( i=0; i<nrecvs_; ++i )
00473   {
00474     starts_from_[i] = j;
00475     j += lengths_from_[i];
00476   }
00477 
00478   total_recv_length_ = 0;
00479   for( i = 0; i < nrecvs_; i++ )
00480     total_recv_length_ += lengths_from_[i];
00481 
00482   nrecvs_ -= self_msg_;
00483 
00484   MPI_Barrier( comm_ );
00485   
00486   return 0;
00487 }
00488 
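// The message-count inversion above can be summarized with a minimal sketch
// (assumes <vector>, an initialized communicator 'comm', and illustrative
// names 'destRank'/'nsends'): each rank marks its destinations with a 1, and
// MPI_Reduce_scatter with a receive count of 1 per rank sums those marks, so
// every rank learns how many messages it will receive without knowing the
// senders in advance.
//
//   std::vector<int> marks(nprocs, 0), ones(nprocs, 1);
//   for (int i = 0; i < nsends; ++i) marks[destRank[i]] = 1;
//   int numIncoming = 0;
//   MPI_Reduce_scatter(marks.data(), &numIncoming, ones.data(),
//                      MPI_INT, MPI_SUM, comm);
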
00489 //==============================================================================
00490 //---------------------------------------------------------------------------
00491 //ComputeSends Method
00492 //---------------------------------------------------------------------------
00493 int Epetra_MpiDistributor::ComputeSends_( int num_imports,
00494         const int *& import_ids,
00495         const int *& import_procs,
00496         int & num_exports,
00497         int *& export_ids,
00498         int *& export_procs,
00499         int my_proc ) {
00500  
00501   Epetra_MpiDistributor tmp_plan(*epComm_);
00502   int i;
00503 
00504   int * proc_list = 0;
00505   int * import_objs = 0;
00506   char * c_export_objs = 0;
00507 
00508   if( num_imports > 0 )
00509   {
00510     proc_list = new int[ num_imports ];
00511     import_objs = new int[ 2 * num_imports ];
00512 
00513     for( i = 0; i < num_imports; i++ )
00514     {
00515       proc_list[i] = import_procs[i];
00516 
00517       import_objs[2*i] = import_ids[i];
00518       import_objs[2*i+1] = my_proc;
00519     }
00520   }
00521 
00522   EPETRA_CHK_ERR(tmp_plan.CreateFromSends( num_imports, proc_list,
00523              true, num_exports) );
00524   if( num_exports > 0 )
00525   {
00526     //export_objs = new int[ 2 * num_exports ];
00527     export_ids = new int[ num_exports ];
00528     export_procs = new int[ num_exports ];
00529   }
00530   else
00531   {
00532     export_ids = 0;
00533     export_procs = 0;
00534   }
00535 
00536   int len_c_export_objs = 0;
00537   EPETRA_CHK_ERR( tmp_plan.Do(reinterpret_cast<char *> (import_objs),
00538             2 * (int)sizeof( int ), 
00539             len_c_export_objs,
00540             c_export_objs) );
00541   int * export_objs = reinterpret_cast<int *>(c_export_objs);
00542 
00543   for( i = 0; i < num_exports; i++ ) {
00544     export_ids[i] = export_objs[2*i];
00545     export_procs[i] = export_objs[2*i+1];
00546   }
00547 
00548   if( proc_list != 0 ) delete [] proc_list;
00549   if( import_objs != 0 ) delete [] import_objs;
00550   if( len_c_export_objs != 0 ) delete [] c_export_objs;
00551 
00552   return(0);
00553 
00554 }
00555 
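// Packing layout used by the bounce-back above: each remote ID travels to its
// owning rank as a pair of ints, so the owner can recover both the requested
// GID and the requesting rank.
//
//   // import_objs, two ints per remote ID:
//   //   [ GID_0, myRank, GID_1, myRank, ... ]
//   // After tmp_plan.Do(...), the owner unpacks export_objs the same way:
//   //   export_ids[i]   = export_objs[2*i];     // which GID was requested
//   //   export_procs[i] = export_objs[2*i+1];   // which rank requested it
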
00556 //==============================================================================
00557 // Do method
00558 int Epetra_MpiDistributor::Do( char * export_objs,
00559                                int obj_size,
00560                                int & len_import_objs,
00561                                char *& import_objs )
00562 {
00563   EPETRA_CHK_ERR( DoPosts(export_objs, obj_size, len_import_objs, import_objs) );
00564   EPETRA_CHK_ERR( DoWaits() );
00565   return(0);
00566 }
00567 
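// Usage sketch for the fixed-size Do, assuming 'plan' was built with
// CreateFromSends and 'exportVals' holds one double per exported ID, ordered
// to match the ExportPIDs list (illustrative names):
//
//   char * importBuf = 0;
//   int importBufLen = 0;
//   plan.Do(reinterpret_cast<char *>(exportVals), (int) sizeof(double),
//           importBufLen, importBuf);
//   double * importVals = reinterpret_cast<double *>(importBuf);
//   // DoPosts grows importBuf as needed; importBufLen is its length in bytes.
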
00568 //==============================================================================
00569 // DoReverse method
00570 int Epetra_MpiDistributor::DoReverse( char * export_objs,
00571                                       int obj_size,
00572                                       int & len_import_objs,
00573                                       char *& import_objs )
00574 {
00575   EPETRA_CHK_ERR( DoReversePosts(export_objs, obj_size,
00576          len_import_objs, import_objs) );
00577   EPETRA_CHK_ERR( DoReverseWaits() );
00578   return(0);
00579 }
00580 //==============================================================================
00581 //---------------------------------------------------------------------------
00582 //Do_Posts Method
00583 //---------------------------------------------------------------------------
00584 int Epetra_MpiDistributor::DoPosts( char * export_objs,
00585                                     int obj_size,
00586                                     int & len_import_objs,
00587                                     char *& import_objs )
00588 {
00589   int i, j, k;
00590 
00591   int my_proc = 0;
00592   int self_recv_address = 0;
00593 
00594   MPI_Comm_rank( comm_, &my_proc );
00595 
00596   if( len_import_objs < (total_recv_length_*obj_size) )
00597   {
00598     if( import_objs!=0 ) {delete [] import_objs; import_objs = 0;}
00599     len_import_objs = total_recv_length_*obj_size;
00600     if (len_import_objs>0) import_objs = new char[len_import_objs];
00601     for( i=0; i<len_import_objs; ++i ) import_objs[i]=0;
00602   }
00603 
00604   k = 0;
00605 
00606   j = 0;
00607   for( i = 0; i < (nrecvs_+self_msg_); i++ )
00608   {
00609     if( procs_from_[i] != my_proc )
00610     {
00611       MPI_Irecv( &(import_objs[j]),
00612                  lengths_from_[i] * obj_size,
00613                  MPI_CHAR, procs_from_[i],
00614                  tag_, comm_,
00615                  &(request_[k]) );
00616       k++;
00617     }
00618     else
00619       self_recv_address = j;
00620 
00621     j += lengths_from_[i] * obj_size;
00622   }
00623 
00624   MPI_Barrier( comm_ );
00625 
00626   // Set up scan through the procs_to list starting with higher-numbered procs;
00627   // should help balance message traffic.
00628   int nblocks = nsends_ + self_msg_; 
00629   int proc_index = 0; 
00630   while( proc_index < nblocks && procs_to_[proc_index] < my_proc )
00631     ++proc_index;                    
00632   if( proc_index == nblocks ) proc_index = 0;
00633    
00634   int self_num = 0, self_index = 0;
00635   int p;
00636 
00637   if( !indices_to_ ) //data already blocked by processor
00638   {
00639     for( i = 0; i < nblocks; ++i )
00640     {
00641       p = i + proc_index;
00642       if( p > (nblocks-1) ) p -= nblocks;
00643 
00644       if( procs_to_[p] != my_proc )
00645         MPI_Rsend( &export_objs[starts_to_[p]*obj_size],
00646                    lengths_to_[p]*obj_size,
00647                    MPI_CHAR,
00648                    procs_to_[p],
00649                    tag_,
00650                    comm_ );
00651       else
00652        self_num = p;
00653     }
00654 
00655     if( self_msg_ )
00656       memcpy( &import_objs[self_recv_address],
00657               &export_objs[starts_to_[self_num]*obj_size],
00658               lengths_to_[self_num]*obj_size );
00659   }
00660   else //data not blocked by proc, use send buffer
00661   {
00662     if( send_array_size_ < (max_send_length_*obj_size) )
00663     {
00664       if( send_array_!=0 ) {delete [] send_array_; send_array_ = 0;}
00665       send_array_size_ = max_send_length_*obj_size;
00666       if (send_array_size_>0) send_array_ = new char[send_array_size_];
00667     }
00668 
00669     j = 0;
00670     for( i = 0; i < nblocks; i++ )
00671     {
00672       p = i + proc_index;
00673       if( p > (nblocks-1) ) p -= nblocks;
00674       if( procs_to_[p] != my_proc )
00675       {
00676         int offset = 0;
00677         j = starts_to_[p];
00678         for( k = 0; k < lengths_to_[p]; k++ )
00679         {
00680           memcpy( &(send_array_[offset]), 
00681                   &(export_objs[indices_to_[j]*obj_size]),
00682                   obj_size );
00683           ++j;
00684           offset += obj_size;
00685         }
00686         MPI_Rsend( send_array_,
00687                    lengths_to_[p] * obj_size,
00688                    MPI_CHAR,
00689                    procs_to_[p],
00690                    tag_, comm_ );
00691       }
00692       else
00693       {
00694         self_num = p;
00695         self_index = starts_to_[p];
00696       }
00697     }
00698 
00699     if( self_msg_ )
00700       for( k = 0; k < lengths_to_[self_num]; k++ )
00701       {
00702         memcpy( &(import_objs[self_recv_address]),
00703                 &(export_objs[indices_to_[self_index]*obj_size]),
00704                 obj_size );
00705         self_index++;
00706         self_recv_address += obj_size;
00707       }
00708   }
00709 
00710   return(0);
00711 }
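// The send loop above staggers its starting point so that not every rank
// targets rank 0 first. A condensed sketch of that wrap-around indexing
// (illustrative names):
//
//   int start = 0;
//   while (start < nblocks && procs_to[start] < my_proc) ++start;
//   if (start == nblocks) start = 0;
//   for (int i = 0; i < nblocks; ++i) {
//     int p = i + start;
//     if (p > nblocks - 1) p -= nblocks;   // wrap back to the low-ranked procs
//     // ... send block p ...
//   }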
00712 //==============================================================================
00713 //---------------------------------------------------------------------------
00714 //Do_Waits Method
00715 //---------------------------------------------------------------------------
00716 int Epetra_MpiDistributor::DoWaits()
00717 {
00718   if( nrecvs_ > 0 ) MPI_Waitall( nrecvs_, request_, status_ );
00719 
00720   return(0);
00721 }
00722 
00723 //==============================================================================
00724 //---------------------------------------------------------------------------
00725 //DoReverse_Posts Method
00726 //---------------------------------------------------------------------------
00727 int Epetra_MpiDistributor::DoReversePosts( char * export_objs,
00728                                            int obj_size,
00729                                            int & len_import_objs,
00730                                            char *& import_objs )
00731 {
00732   assert(indices_to_==0); //Can only do reverse comm when original data
00733                           // is blocked by processor
00734 
00735   int i;
00736   int my_proc = 0;
00737 
00738   MPI_Comm_rank( comm_, &my_proc );
00739 
00740   if( comm_plan_reverse_ == 0 )
00741   {
00742     int total_send_length = 0;
00743     for( i = 0; i < nsends_+self_msg_; i++ )
00744       total_send_length += lengths_to_[i];
00745 
00746     int max_recv_length = 0;
00747     for( i = 0; i < nrecvs_; i++ )
00748       if( procs_from_[i] != my_proc )
00749         if( lengths_from_[i] > max_recv_length )
00750           max_recv_length = lengths_from_[i];
00751 
00752     comm_plan_reverse_ = new Epetra_MpiDistributor(*epComm_);
00753 
00754     comm_plan_reverse_->lengths_to_ = lengths_from_;
00755     comm_plan_reverse_->procs_to_ = procs_from_;
00756     comm_plan_reverse_->indices_to_ = indices_from_;
00757     comm_plan_reverse_->starts_to_ = starts_from_;
00758 
00759     comm_plan_reverse_->lengths_from_ = lengths_to_;
00760     comm_plan_reverse_->procs_from_ = procs_to_;
00761     comm_plan_reverse_->indices_from_ = indices_to_;
00762     comm_plan_reverse_->starts_from_ = starts_to_;
00763 
00764     comm_plan_reverse_->nsends_ = nrecvs_;
00765     comm_plan_reverse_->nrecvs_ = nsends_;
00766     comm_plan_reverse_->self_msg_ = self_msg_;
00767 
00768     comm_plan_reverse_->max_send_length_ = max_recv_length;
00769     comm_plan_reverse_->total_recv_length_ = total_send_length;
00770 
00771     comm_plan_reverse_->request_ = new MPI_Request[ comm_plan_reverse_->nrecvs_ ];
00772     comm_plan_reverse_->status_= new MPI_Status[ comm_plan_reverse_->nrecvs_ ];
00773 
00774     comm_plan_reverse_->no_delete_ = true;
00775   }
00776 
00777   int comm_flag = comm_plan_reverse_->DoPosts(export_objs, obj_size, len_import_objs, import_objs);
00778 
00779   return(comm_flag);
00780 }
00781 
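// Usage sketch for reverse communication, assuming 'plan' was built with
// CreateFromSends, its exports are grouped by processor (indices_to_ == 0, as
// asserted above), and 'recvVals' holds one double per previously received ID
// (illustrative names):
//
//   char * reverseBuf = 0;
//   int reverseBufLen = 0;
//   plan.DoReverse(reinterpret_cast<char *>(recvVals), (int) sizeof(double),
//                  reverseBufLen, reverseBuf);
//   // reverseBuf now holds one double per originally exported ID.
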
00782 //==============================================================================
00783 //---------------------------------------------------------------------------
00784 //DoReverse_Waits Method
00785 //---------------------------------------------------------------------------
00786 int Epetra_MpiDistributor::DoReverseWaits()
00787 {
00788   if( comm_plan_reverse_ == 0 ) return (-1);
00789 
00790   int comm_flag = comm_plan_reverse_->DoWaits();
00791 
00792   return(comm_flag);
00793 }
00794 
00795 //==============================================================================
00796 //---------------------------------------------------------------------------
00797 //Resize Method                      (Heaphy) 
00798 //---------------------------------------------------------------------------
00799 int Epetra_MpiDistributor::Resize_( int * sizes )
00800 { 
00801   int  i, j, k;         // loop counters
00802   int sum; 
00803  
00804   //if (sizes == 0) return 0; 
00805      
00806   int my_proc; 
00807   MPI_Comm_rank (comm_, &my_proc);  
00808   int nprocs;
00809   MPI_Comm_size( comm_, &nprocs );
00810      
00811   if( resized_ )
00812   {  
00813     //test and see if we are already setup for these sizes
00814     bool match = true; 
00815     for( i = 0; i < nexports_; ++i )
00816       match = match && (sizes_[i]==sizes[i]);
00817     int matched = match?1:0;
00818     int match_count = 0;
00819     MPI_Allreduce( &matched, &match_count, 1, MPI_INT, MPI_SUM, comm_ );
00820     if( match_count == nprocs )
00821       return 0;
00822     else //reset existing sizing arrays
00823       max_send_length_ = 0;
00824   } 
00825  
00826   if( !sizes_ && nexports_ ) sizes_ = new int[nexports_]; 
00827   for (i = 0; i < nexports_; i++)
00828     sizes_[i] = sizes[i]; 
00829  
00830   if( !sizes_to_ && (nsends_+self_msg_) ) sizes_to_ = new int[nsends_+self_msg_];
00831   for (i = 0; i < (nsends_+self_msg_); ++i) 
00832     sizes_to_[i] = 0;                       
00833  
00834   if( !starts_to_ptr_ && (nsends_+self_msg_) ) starts_to_ptr_ = new int[nsends_+self_msg_];
00835 
00836   if( !indices_to_ ) //blocked sends
00837   {
00838     
00839     int * index = 0;
00840     int * sort_val = 0;
00841     if (nsends_+self_msg_>0) {
00842       index = new int[nsends_+self_msg_];
00843       sort_val = new int[nsends_+self_msg_];
00844     }
00845     for( i = 0; i < (nsends_+self_msg_); ++i )
00846     {
00847       j = starts_to_[i];
00848       for( k = 0; k < lengths_to_[i]; ++k )
00849         sizes_to_[i] += sizes[j++];
00850       if( (sizes_to_[i] > max_send_length_) && (procs_to_[i] != my_proc) )
00851         max_send_length_ = sizes_to_[i];
00852     }
00853 
00854     for( i = 0; i < (nsends_+self_msg_); ++i )
00855     {
00856       sort_val[i] = starts_to_[i];
00857       index[i] = i;
00858     }
00859 
00860     if( nsends_+self_msg_ )
00861       Sort_ints_( sort_val, index, (nsends_+self_msg_) );
00862 
00863     sum = 0;
00864     for( i = 0; i < (nsends_+self_msg_); ++i )
00865     {
00866       starts_to_ptr_[ index[i] ] = sum;
00867       sum += sizes_to_[ index[i] ];
00868     }
00869 
00870     if (index!=0) {delete [] index; index = 0;}
00871     if (sort_val!=0) {delete [] sort_val; sort_val = 0;}
00872   }
00873   else //Sends not blocked, so have to do more work
00874   {
00875     if( !indices_to_ptr_ && nexports_ ) indices_to_ptr_ = new int[nexports_]; 
00876     int * offset = 0;
00877     if( nexports_ ) offset = new int[nexports_];
00878    
00879     //Compute address for every item in send array
00880     sum = 0; 
00881     for( i = 0; i < nexports_; ++i )
00882     {  
00883       offset[i] = sum; 
00884       sum += sizes_[i];
00885     }
00886    
00887     sum = 0;
00888     max_send_length_ = 0;
00889     for( i = 0; i < (nsends_+self_msg_); ++i )
00890     {
00891       starts_to_ptr_[i] = sum;
00892       for( j = starts_to_[i]; j < (starts_to_[i]+lengths_to_[i]); ++j )
00893       {
00894         indices_to_ptr_[j] = offset[ indices_to_[j] ];
00895         sizes_to_[i] += sizes_[ indices_to_[j] ];
00896       }
00897       if( sizes_to_[i] > max_send_length_ && procs_to_[i] != my_proc )
00898         max_send_length_ = sizes_to_[i];
00899       sum += sizes_to_[i];
00900     }
00901  
00902     if (offset!=0) {delete [] offset; offset = 0;}
00903   }
00904  
00905   //  Exchange sizes routine inserted here:
00906   int self_index_to = -1;
00907   total_recv_length_ = 0;
00908   if( !sizes_from_ && (nrecvs_+self_msg_) ) sizes_from_ = new int [nrecvs_+self_msg_];
00909 
00910 #ifndef EPETRA_NEW_COMM_PATTERN
00911   for (i = 0; i < (nsends_+self_msg_); i++)
00912   {
00913     if(procs_to_[i] != my_proc)
00914       MPI_Send ((void *) &(sizes_to_[i]), 1, MPI_INT, procs_to_[i], tag_, comm_);
00915     else
00916       self_index_to = i;
00917   }
00918 
00919   MPI_Status status;
00920   for (i = 0; i < (nrecvs_+self_msg_); ++i)
00921   {
00922     sizes_from_[i] = 0;
00923     if (procs_from_[i] != my_proc)
00924       MPI_Recv((void *) &(sizes_from_[i]), 1, MPI_INT, procs_from_[i], tag_, comm_, &status);
00925     else
00926       sizes_from_[i] = sizes_to_[self_index_to];
00927     total_recv_length_ += sizes_from_[i];
00928   }
00929 #else
00930   if (nrecvs_>0 && !request_) {
00931     request_ = new MPI_Request[ nrecvs_-self_msg_ ];
00932     status_ = new MPI_Status[ nrecvs_-self_msg_ ];
00933   }
00934 
00935   for (i = 0; i < (nsends_+self_msg_); i++)
00936   {
00937     if(procs_to_[i] == my_proc)
00938       self_index_to = i;
00939   }
00940 
00941   for (i = 0; i < (nrecvs_+self_msg_); ++i)
00942   {
00943     sizes_from_[i] = 0;
00944     if (procs_from_[i] != my_proc)
00945       MPI_Irecv((void *) &(sizes_from_[i]), 1, MPI_INT, procs_from_[i], tag_, comm_, &(request_[i]));
00946     else
00947     {
00948       sizes_from_[i] = sizes_to_[self_index_to];
00949       total_recv_length_ += sizes_from_[i];
00950     }
00951   }
00952 
00953   MPI_Barrier( comm_ );
00954 
00955   for (i = 0; i < (nsends_+self_msg_); i++)
00956   {
00957     if(procs_to_[i] != my_proc)
00958       MPI_Rsend ((void *) &(sizes_to_[i]), 1, MPI_INT, procs_to_[i], tag_, comm_);
00959   }
00960 
00961   if( nrecvs_ > 0 ) MPI_Waitall( nrecvs_, request_, status_ );
00962 
00963   for (i = 0; i < (nrecvs_+self_msg_); ++i)
00964   {
00965     if (procs_from_[i] != my_proc)
00966       total_recv_length_ += sizes_from_[i];
00967   }
00968 #endif
00969   // end of exchange sizes routine
00970  
00971   sum = 0;
00972   if( !starts_from_ptr_ ) starts_from_ptr_  = new int[nrecvs_+self_msg_]; 
00973   for (i = 0; i < (nrecvs_+self_msg_); ++i)
00974   {
00975      starts_from_ptr_[i] = sum;
00976      sum += sizes_from_[i];
00977   }
00978 
00979   resized_ = true;
00980 
00981   return 0;
00982 }
00983 
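// The offset computation in Resize_ is an exclusive prefix sum over per-item
// sizes, applied to both the send side (starts_to_ptr_) and, after the sizes
// are exchanged, the receive side (starts_from_ptr_). In isolation:
//
//   int sum = 0;
//   for (int i = 0; i < n; ++i) {
//     offset[i] = sum;    // offset where block i starts
//     sum += sizes[i];    // running total
//   }
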
00984 //==============================================================================
00985 // GSComm_Comm Do method
00986 int Epetra_MpiDistributor::Do( char * export_objs,
00987                                int obj_size,
00988                                int *& sizes,
00989                                int & len_import_objs,
00990                                char *& import_objs )
00991 {
00992   EPETRA_CHK_ERR( DoPosts(export_objs, obj_size, sizes,
00993         len_import_objs, import_objs) );
00994   EPETRA_CHK_ERR( DoWaits() );
00995 
00996   return(0);
00997 }
00998 
00999 //==============================================================================
01000 // GSComm_Comm DoReverse method
01001 int Epetra_MpiDistributor::DoReverse( char * export_objs,
01002                                       int obj_size,
01003                                       int *& sizes,
01004                                       int & len_import_objs,
01005                                       char *& import_objs )
01006 {
01007   EPETRA_CHK_ERR( DoReversePosts(export_objs, obj_size, sizes,
01008          len_import_objs, import_objs) );
01009   EPETRA_CHK_ERR( DoReverseWaits() );
01010 
01011   return(0);
01012 }
01013 //==============================================================================
01014 //---------------------------------------------------------------------------
01015 //Do_Posts Method (Variable Block Size)
01016 //---------------------------------------------------------------------------
01017 int Epetra_MpiDistributor::DoPosts( char * export_objs,
01018                                     int obj_size,
01019                                     int *& sizes,
01020                                     int & len_import_objs,
01021                                     char *& import_objs )
01022 {
01023   int ierr = Resize_(sizes);
01024   if (ierr != 0) {
01025     return(ierr);
01026   }
01027 
01028   MPI_Barrier( comm_ );
01029 
01030   int i, j, k;
01031 
01032   int my_proc = 0;
01033   int self_recv_address = 0;
01034 
01035   MPI_Comm_rank( comm_, &my_proc );
01036 
01037   if( len_import_objs < (total_recv_length_*obj_size) )
01038   {
01039     if( import_objs!=0 ) {delete [] import_objs; import_objs = 0;}
01040     len_import_objs = total_recv_length_*obj_size;
01041     if (len_import_objs>0) import_objs = new char[len_import_objs];
01042   }
01043 
01044   k = 0;
01045 
01046   for( i = 0; i < (nrecvs_+self_msg_); ++i )
01047   {
01048     if( procs_from_[i] != my_proc )
01049     {
01050       MPI_Irecv( &(import_objs[starts_from_ptr_[i] * obj_size]),
01051                  sizes_from_[i] * obj_size,
01052                  MPI_CHAR, procs_from_[i], tag_, comm_, &(request_[k]) );
01053       k++;
01054     }
01055     else
01056       self_recv_address = starts_from_ptr_[i] * obj_size;
01057   }
01058 
01059   MPI_Barrier( comm_ );
01060 
01061   // Set up scan through the procs_to list starting with higher-numbered procs;
01062   // should help balance message traffic.
01063   int nblocks = nsends_ + self_msg_; 
01064   int proc_index = 0; 
01065   while( proc_index < nblocks && procs_to_[proc_index] < my_proc )
01066     ++proc_index;                    
01067   if( proc_index == nblocks ) proc_index = 0;
01068    
01069   int self_num = 0;
01070   int p;
01071 
01072   if( !indices_to_ ) //data already blocked by processor
01073   {
01074     for( i = 0; i < nblocks; ++i )
01075     {
01076       p = i + proc_index;
01077       if( p > (nblocks-1) ) p -= nblocks;
01078 
01079       if( procs_to_[p] != my_proc )
01080         MPI_Rsend( &export_objs[starts_to_ptr_[p]*obj_size],
01081                    sizes_to_[p]*obj_size,
01082                    MPI_CHAR,
01083                    procs_to_[p],
01084                    tag_,
01085                    comm_ );
01086       else
01087         self_num = p;
01088     }
01089 
01090     if( self_msg_ )
01091       memcpy( &import_objs[self_recv_address],
01092               &export_objs[starts_to_ptr_[self_num]*obj_size],
01093               sizes_to_[self_num]*obj_size );
01094   }
01095   else //data not blocked by proc, need to copy to buffer
01096   {
01097     if( send_array_size_ && send_array_size_ < (max_send_length_*obj_size) )
01098     {
01099       if (send_array_!=0) {delete [] send_array_; send_array_ = 0;}
01100       send_array_ = 0;
01101       send_array_size_ = 0;
01102     }
01103     if( !send_array_size_ )
01104     {
01105       send_array_size_ = max_send_length_*obj_size;
01106       if (send_array_size_>0) send_array_ = new char[send_array_size_];
01107     }
01108 
01109     for( i=0; i<nblocks; ++i )
01110     {
01111       p = i + proc_index;
01112       if( p > (nblocks-1) ) p -= nblocks;
01113 
01114       if( procs_to_[p] != my_proc )
01115       {
01116         int offset = 0;
01117         j = starts_to_[p];
01118         for( k=0; k<lengths_to_[p]; ++k )
01119         {
01120           memcpy( &send_array_[offset],
01121                   &export_objs[indices_to_ptr_[j]*obj_size],
01122                   sizes_[indices_to_[j]]*obj_size );
01123           offset += sizes_[indices_to_[j]]*obj_size;
01124           ++j;
01125         }
01126         MPI_Rsend( send_array_, sizes_to_[p]*obj_size,
01127                    MPI_CHAR, procs_to_[p], tag_, comm_ );
01128       }
01129       else
01130         self_num = p;
01131     }
01132 
01133     if( self_msg_ )
01134     {
01135       int jj;
01136       j = starts_to_[self_num];
01137       for( k=0; k<lengths_to_[self_num]; ++k )
01138       {
01139         jj = indices_to_ptr_[j];
01140         memcpy( &import_objs[self_recv_address],
01141                 &export_objs[jj*obj_size],
01142                 sizes_[indices_to_[j]]*obj_size );
01143         self_recv_address += (obj_size*sizes_[indices_to_[j]]);
01144         ++j;
01145       }
01146     }
01147   }
01148 
01149   return(0);
01150 }
01151 
01152 //==============================================================================
01153 //---------------------------------------------------------------------------
01154 //DoReverse_Posts Method
01155 //---------------------------------------------------------------------------
01156 int Epetra_MpiDistributor::DoReversePosts( char * export_objs,
01157                                            int obj_size,
01158                                            int *& sizes,
01159                                            int & len_import_objs,
01160                                            char *& import_objs )
01161 {
01162   assert(indices_to_==0); //Can only do reverse comm when original data
01163                           // is blocked by processor
01164 
01165   int i;
01166   int my_proc = 0;
01167 
01168   MPI_Comm_rank( comm_, &my_proc );
01169 
01170   if( comm_plan_reverse_ == 0 )
01171   {
01172     int total_send_length = 0;
01173     for( i = 0; i < nsends_+self_msg_; i++ )
01174       total_send_length += lengths_to_[i];
01175 
01176     int max_recv_length = 0;
01177     for( i = 0; i < nrecvs_; i++ )
01178       if( procs_from_[i] != my_proc )
01179         if( lengths_from_[i] > max_recv_length )
01180           max_recv_length = lengths_from_[i];
01181 
01182     comm_plan_reverse_ = new Epetra_MpiDistributor(*epComm_);
01183 
01184     comm_plan_reverse_->lengths_to_ = lengths_from_;
01185     comm_plan_reverse_->procs_to_ = procs_from_;
01186     comm_plan_reverse_->indices_to_ = indices_from_;
01187     comm_plan_reverse_->starts_to_ = starts_from_;
01188 
01189     comm_plan_reverse_->lengths_from_ = lengths_to_;
01190     comm_plan_reverse_->procs_from_ = procs_to_;
01191     comm_plan_reverse_->indices_from_ = indices_to_;
01192     comm_plan_reverse_->starts_from_ = starts_to_;
01193 
01194     comm_plan_reverse_->nsends_ = nrecvs_;
01195     comm_plan_reverse_->nrecvs_ = nsends_;
01196     comm_plan_reverse_->self_msg_ = self_msg_;
01197 
01198     comm_plan_reverse_->max_send_length_ = max_recv_length;
01199     comm_plan_reverse_->total_recv_length_ = total_send_length;
01200 
01201     comm_plan_reverse_->request_ = new MPI_Request[ comm_plan_reverse_->nrecvs_ ];
01202     comm_plan_reverse_->status_= new MPI_Status[ comm_plan_reverse_->nrecvs_ ];
01203 
01204     comm_plan_reverse_->no_delete_ = true;
01205   }
01206 
01207   int comm_flag = comm_plan_reverse_->DoPosts(export_objs, obj_size, sizes, len_import_objs, import_objs);
01208 
01209   return(comm_flag);
01210 }
01211 
01212 //==============================================================================
01213 void Epetra_MpiDistributor::Print( ostream & os) const
01214 {
01215   int i, j;
01216   os << "nsends: " << nsends_ << endl;
01217   os << "procs_to: ";
01218   for( i = 0; i < nsends_; i++ )
01219     os << " " << procs_to_[i];
01220   os << endl;
01221   os<< "lengths_to: ";
01222   for( i = 0; i < nsends_; i++ )
01223     os << " " << lengths_to_[i];
01224   os << endl;
01225   os << "indices_to: ";
01226   int k = 0;
01227   if( indices_to_ )
01228   {
01229     for( i = 0; i < nsends_; i++ )
01230     {
01231       for( j = 0; j < lengths_to_[i]; j++ )
01232         os << " " << indices_to_[j+k];
01233       k += lengths_to_[i];
01234     }
01235   }
01236   os << endl;
01237   os << "nrecvs: " << nrecvs_ << endl;
01238   os << "procs_from: ";
01239   for( i = 0; i < nrecvs_; i++ )
01240     os << " " << procs_from_[i];
01241   os << endl;
01242   os << "lengths_from: ";
01243   for( i = 0; i < nrecvs_; i++ )
01244     os << " " << lengths_from_[i];
01245   os << endl;
01246 /*
01247   os << "indices_from: ";
01248   k = 0;
01249   for( i = 0; i < nrecvs_; i++ )
01250   {
01251     for( j = 0; j < lengths_from_[i]; j++ )
01252       os << " " << indices_from_[j+k];
01253     k += lengths_from_[i];
01254   }
01255 */
01256   os << "self_msg: " << self_msg_ << endl;
01257   os << "max_send_length: " << max_send_length_ << endl;
01258   os << "total_recv_length: " << total_recv_length_ << endl;
01259   os << endl;
01260 
01261   return;
01262 }
01263 
01264 //---------------------------------------------------------------------------
01265 int Epetra_MpiDistributor::Sort_ints_(
01266  int *vals_sort,     //  values to be sorted  
01267  int *vals_other,    // other array to be reordered with sort
01268  int  nvals)         // length of these two arrays
01269 {
01270 // This routine is primarily used to sort messages to improve communication flow.
01271 // It also ensures that the ordering produced by the invert_map routines is
01272 // deterministic, which should make bugs more reproducible.  This is accomplished
01273 // by sorting the message lists by processor ID.
01274 // This is a distribution counting sort algorithm (see Knuth).
01275 // This version assumes non-negative integers.
01276    
01277     if (nvals <= 1) return 0;
01278         
01279     int i;                        // loop counter        
01280      
01281     // find largest int, n, to size sorting array, then allocate and clear it
01282     int n = 0;  
01283     for (i = 0; i < nvals; i++)  
01284        if (n < vals_sort[i]) n = vals_sort[i]; 
01285     int *pos = new int [n+2];  
01286     for (i = 0; i < n+2; i++) pos[i] = 0;
01287  
01288     // copy input arrays into temporary copies to allow sorting original arrays
01289     int *copy_sort  = new int [nvals]; 
01290     int *copy_other = new int [nvals]; 
01291     for (i = 0; i < nvals; i++)
01292     { 
01293       copy_sort[i]  = vals_sort[i]; 
01294       copy_other[i] = vals_other[i];  
01295     }                           
01296  
01297     // count the occurrences of integers ("distribution count")
01298     int *p = pos+1;
01299     for (i = 0; i < nvals; i++) p[copy_sort[i]]++;
01300  
01301     // create the partial sum of distribution counts 
01302     for (i = 1; i < n; i++) p[i] += p[i-1]; 
01303  
01304     // the shifted partial sum is the index at which to store the data in sorted order
01305     p = pos; 
01306     for (i = 0; i < nvals; i++)         
01307     {                                   
01308       vals_sort  [p[copy_sort [i]]]   = copy_sort[i];
01309       vals_other [p[copy_sort [i]]++] = copy_other[i]; 
01310     } 
01311  
01312     delete [] copy_sort;
01313     delete [] copy_other; 
01314     delete [] pos; 
01315  
01316     return 0;
01317 }
01318 
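// A self-contained sketch of the same distribution counting sort (assumes
// <vector>; non-negative keys only): count each key, prefix-sum the counts
// into starting positions, then scatter both arrays to their slots.
//
//   void counting_sort(int * keys, int * other, int n) {
//     int maxKey = 0;
//     for (int i = 0; i < n; ++i) if (keys[i] > maxKey) maxKey = keys[i];
//     std::vector<int> pos(maxKey + 2, 0);
//     for (int i = 0; i < n; ++i) ++pos[keys[i] + 1];             // counts, shifted by one
//     for (int k = 1; k <= maxKey + 1; ++k) pos[k] += pos[k - 1]; // exclusive prefix sum
//     std::vector<int> k2(keys, keys + n), o2(other, other + n);
//     for (int i = 0; i < n; ++i) {
//       int slot = pos[k2[i]]++;
//       keys[slot] = k2[i];
//       other[slot] = o2[i];
//     }
//   }
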
01319 //-------------------------------------------------------------------------
01320 Epetra_MpiDistributor& Epetra_MpiDistributor::operator=(const Epetra_MpiDistributor& src)
01321 {
01322   (void)src;
01323   //not currently supported
01324   bool throw_error = true;
01325   if (throw_error) {
01326     throw ReportError("Epetra_MpiDistributor::operator= not supported.",-1);
01327   }
01328   return( *this );
01329 }
