// Sierra Toolkit "version of the day" snapshot
// BulkDataGhosting.cpp
00001 /*------------------------------------------------------------------------*/
00002 /*                 Copyright 2010 Sandia Corporation.                     */
00003 /*  Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive   */
00004 /*  license for use of this work by or on behalf of the U.S. Government.  */
00005 /*  Export of this program may require a license from the                 */
00006 /*  United States Government.                                             */
00007 /*------------------------------------------------------------------------*/
00008 
00013 #include <cstring>
00014 #include <set>
00015 #include <stdexcept>
00016 #include <iostream>
00017 #include <sstream>
00018 #include <algorithm>
00019 
00020 #include <stk_util/parallel/ParallelComm.hpp>
00021 #include <stk_util/parallel/ParallelReduce.hpp>
00022 
00023 #include <stk_mesh/base/Ghosting.hpp>
00024 #include <stk_mesh/base/BulkData.hpp>
00025 #include <stk_mesh/base/MetaData.hpp>
00026 #include <stk_mesh/base/FieldData.hpp>
00027 #include <stk_mesh/base/EntityComm.hpp>
00028 #include <stk_mesh/base/Comm.hpp>
00029 #include <stk_mesh/base/Trace.hpp>
00030 
00031 using std::strcmp;
00032 
00033 namespace stk {
00034 namespace mesh {
00035 
00036 //----------------------------------------------------------------------
00037 
00038 Ghosting & BulkData::create_ghosting( const std::string & name )
00039 {
00040   Trace_("stk::mesh::BulkData::create_ghosting");
00041 
00042   require_ok_to_modify();
00043 
00044   // Verify name is the same on all processors,
00045   // if not then throw an exception on all processors.
00046   if (parallel_size() > 1) {
00047     CommBroadcast bc( parallel() , 0 );
00048 
00049     if ( bc.parallel_rank() == 0 ) {
00050       bc.send_buffer().skip<char>( name.size() + 1 );
00051     }
00052 
00053     bc.allocate_buffer();
00054 
00055     if ( bc.parallel_rank() == 0 ) {
00056       bc.send_buffer().pack<char>( name.c_str() , name.size() + 1 );
00057     }
00058 
00059     bc.communicate();
00060 
00061     const char * const bc_name =
00062       reinterpret_cast<const char *>( bc.recv_buffer().buffer() );
00063 
00064     int error = 0 != strcmp( bc_name , name.c_str() );
00065 
00066     all_reduce( parallel() , ReduceMax<1>( & error ) );
00067 
00068     ThrowErrorMsgIf( error, "Parallel name inconsistency");
00069   }
00070 
00071   Ghosting * const g =
00072     new Ghosting( *this , name , m_ghosting.size() , m_sync_count );
00073 
00074   m_ghosting.push_back( g );
00075 
00076   return *g ;
00077 }
00078 
00079 //----------------------------------------------------------------------
00080 //----------------------------------------------------------------------
00081 
namespace {

// Recursively add 'entry' and the lower-ranked entities in its relation
// closure to the new ghost-send set (defined below).
void insert_transitive_closure( std::set<EntityProc,EntityLess> & new_send ,
                                const EntityProc & entry );

// Inform the owner of each received ghost that it must be sent:
// populates 'new_send' from 'new_recv' via communication (defined below).
void comm_recv_to_send(
  BulkData & mesh ,
  const std::set< Entity * , EntityLess > & new_recv ,
        std::set< EntityProc , EntityLess > & new_send );

// Make the send and receive ghost lists parallel-consistent, forwarding
// ghosting requests for not-owned entities to their owners (defined below).
void comm_sync_send_recv(
  BulkData & mesh ,
  std::set< EntityProc , EntityLess > & new_send ,
  std::set< Entity * , EntityLess > & new_recv );

} // namespace <>
00098 
00099 //----------------------------------------------------------------------
00100 //----------------------------------------------------------------------
00101 
// Remove all ghosting from this mesh: receive-ghost entities are
// destroyed locally and all ghosting communication info is discarded.
// Sharing communication info is preserved.
void BulkData::destroy_all_ghosting()
{
  Trace_("stk::mesh::BulkData::destroy_all_ghosting");

  require_ok_to_modify();

  // Clear Ghosting data: stamp every ghosting with the current
  // synchronization count to mark it modified in this cycle.

  for ( std::vector<Ghosting*>::iterator
        ig = m_ghosting.begin() ; ig != m_ghosting.end() ; ++ig ) {
    Ghosting & gh = **ig ;
    gh.m_sync_count = m_sync_count ;
  }

  // Iterate backwards so as not to invalidate a closure.
  // (Presumably the comm list is ordered so that walking it in reverse
  // destroys an entity before the entities it relates to — confirm
  // against the comm-list ordering maintained elsewhere.)

  std::vector<Entity*>::iterator ie = m_entity_comm.end();

  while ( ie != m_entity_comm.begin() ) {

    Entity * entity = *--ie ;

    if ( in_receive_ghost( *entity ) ) {
      // A ghost received from another processor: drop all of its
      // communication info and destroy the local copy.
      m_entity_comm_map.comm_clear(entity->key());
      destroy_entity( entity );
      *ie = NULL ; // mark slot for compaction below
    }
    else {
      // Not a receive ghost: strip only the ghosting entries, keeping
      // any sharing communication info.
      m_entity_comm_map.comm_clear_ghosting(entity->key());
      if ( m_entity_comm_map.comm(entity->key()).empty() ) {
        *ie = NULL ; // nothing left to communicate for this entity
      }
    }
  }

  // Compact the comm list by erasing the NULL-ed slots.
  ie = std::remove( m_entity_comm.begin() ,
                    m_entity_comm.end() , (Entity*) NULL );

  m_entity_comm.erase( ie , m_entity_comm.end() );
}
00142 
00143 //----------------------------------------------------------------------
00144 
00145 void BulkData::change_ghosting(
00146   Ghosting & ghosts ,
00147   const std::vector<EntityProc> & add_send ,
00148   const std::vector<Entity*> & remove_receive )
00149 {
00150   Trace_("stk::mesh::BulkData::change_ghosting");
00151 
00152   //----------------------------------------
00153   // Verify inputs:
00154 
00155   require_ok_to_modify();
00156 
00157   const bool ok_mesh  = & BulkData::get(ghosts) == this ;
00158   const bool ok_ghost = 1 < ghosts.ordinal();
00159   bool ok_add    = true ;
00160   bool ok_remove = true ;
00161 
00162   // Verify all 'add' are locally owned.
00163 
00164   for ( std::vector<EntityProc>::const_iterator
00165         i = add_send.begin() ; ok_add && i != add_send.end() ; ++i ) {
00166     ok_add = i->first->owner_rank() == parallel_rank();
00167   }
00168 
00169   // Verify all 'remove' are members of the ghosting.
00170 
00171   for ( std::vector<Entity*>::const_iterator
00172         i = remove_receive.begin() ;
00173         ok_remove && i != remove_receive.end() ; ++i ) {
00174     ok_remove = in_receive_ghost( ghosts , **i );
00175   }
00176 
00177   int ok = ok_mesh && ok_ghost && ok_add && ok_remove ;
00178 
00179   all_reduce( parallel() , ReduceMin<1>( & ok ) );
00180 
00181   if ( 0 == ok ) {
00182     std::ostringstream msg ;
00183     msg << "For ghosts " << ghosts.name() << ", " ;
00184     if ( ! ok_mesh )  { msg << " : Mesh does not own this ghosting" ; }
00185     if ( ! ok_ghost ) { msg << " : Cannot modify this ghosting" ; }
00186     if ( ! ok_add ) {
00187       msg << " : Not owned add {" ;
00188       for ( std::vector<EntityProc>::const_iterator
00189             i = add_send.begin() ; i != add_send.end() ; ++i ) {
00190         if ( i->first->owner_rank() != parallel_rank() ) {
00191           msg << " " << print_entity_key( i->first );
00192         }
00193       }
00194       msg << " }" ;
00195     }
00196     if ( ! ok_remove ) {
00197       msg << " : Not in ghost receive {" ;
00198       for ( std::vector<Entity*>::const_iterator
00199             i = remove_receive.begin() ; i != remove_receive.end() ; ++i ) {
00200         if ( ! in_receive_ghost( ghosts , **i ) ) {
00201           msg << " " << print_entity_key( *i );
00202         }
00203       }
00204     }
00205 
00206     ThrowErrorMsg( msg.str() );
00207   }
00208   //----------------------------------------
00209   // Change the ghosting:
00210 
00211   internal_change_ghosting( ghosts , add_send , remove_receive );
00212 }
00213 
00214 //----------------------------------------------------------------------
00215 
// Workhorse for all ghosting changes.  Rebuilds the given ghosting so the
// send and receive sides are parallel-consistent: computes the surviving
// receive set and required send set, erases comm info for (and possibly
// destroys) ghosts that do not remain, then packs/communicates/unpacks
// the newly ghosted entities in entity-rank order.
void BulkData::internal_change_ghosting(
  Ghosting & ghosts ,
  const std::vector<EntityProc> & add_send ,
  const std::vector<Entity*> & remove_receive )
{
  Trace_("stk::mesh::BulkData::internal_change_ghosting");

  const MetaData & meta = m_mesh_meta_data ;
  const unsigned rank_count = meta.entity_rank_count();
  const unsigned p_size = m_parallel_size ;

  //------------------------------------
  // Copy ghosting lists into more efficiently editted container.
  // The send and receive lists must be in entity rank-order.

  std::set< EntityProc , EntityLess > new_send ;
  std::set< Entity * ,   EntityLess > new_recv ;

  //------------------------------------
  // Insert the current ghost receives and then remove from that list.

  // This if-check is an optimization; if remove_receive is m_entity_comm,
  // then we are removing all ghosting information and new_recv should
  // be left empty.
  if ( & entity_comm() != & remove_receive ) {

    // Iterate over all entities with communication information, adding
    // the entity if it's a ghost on this process. new_recv will contain
    // all ghosts on this process by the end of the loop.
    for ( std::vector<Entity*>::const_iterator
          i = entity_comm().begin() ; i != entity_comm().end() ; ++i ) {
      Entity * const entity = *i ;
      if ( in_receive_ghost( ghosts , *entity ) ) {
        new_recv.insert( entity );
      }
    }

    // Remove any entities that are in the remove list.

    for ( std::vector< Entity * >::const_iterator
          i = remove_receive.begin() ; i != remove_receive.end() ; ++i ) {
      new_recv.erase( *i );
    }

    // Keep the closure of the remaining received ghosts.
    // Working from highest-to-lowest key (rank entity type)
    // results in insertion of the closure because
    // inserted entities will get looped over after they are inserted.

    // Insertion will not invalidate the associative container's iterator.

    for ( std::set< Entity * , EntityLess >::reverse_iterator
          i = new_recv.rbegin() ; i != new_recv.rend() ; ++i) {
      const unsigned erank = (*i)->entity_rank();

      // Keep any lower-ranked related entity that is itself a receive
      // ghost of this ghosting.
      for ( PairIterRelation
            irel = (*i)->relations(); ! irel.empty() ; ++irel ) {
        if ( irel->entity_rank() < erank &&
             in_receive_ghost( ghosts , * irel->entity() ) ) {
          new_recv.insert( irel->entity() );
        }
      }
    }
  }

  //  Initialize the new_send from the new_recv
  comm_recv_to_send( *this , new_recv , new_send );

  //------------------------------------
  // Add the specified entities and their closure to the send ghosting

  for ( std::vector< EntityProc >::const_iterator
        i = add_send.begin() ; i != add_send.end() ; ++i ) {
    insert_transitive_closure( new_send , *i );
  }

  // Synchronize the send and receive list.
  // If the send list contains a not-owned entity
  // inform the owner and receiver to add that entity
  // to their ghost send and receive lists.

  comm_sync_send_recv( *this , new_send , new_recv );

  // The new_send list is now parallel complete and parallel accurate
  // The new_recv has those ghost entities that are to be kept.
  //------------------------------------
  // Remove the ghost entities that will not remain.
  // If the last reference to the receive ghost entity then delete it.

  bool removed = false ;

  for ( std::vector<Entity*>::reverse_iterator
        i = m_entity_comm.rbegin() ; i != m_entity_comm.rend() ; ++i) {
    Entity * entity = *i ;

    const bool is_owner = entity->owner_rank() == m_parallel_rank ;
    const bool remove_recv = ( ! is_owner ) &&
                             0 == new_recv.count( entity );

    if ( is_owner ) {
      // Is owner, potentially removing ghost-sends
      // Have to make a copy
      // (erasing from the comm map while iterating its range would
      // invalidate the iteration)

      std::vector<EntityCommInfo> comm_ghost ;
      const PairIterEntityComm ec = m_entity_comm_map.comm(entity->key(),ghosts);
      comm_ghost.assign( ec.first , ec.second );

      for ( ; ! comm_ghost.empty() ; comm_ghost.pop_back() ) {
        const EntityCommInfo tmp = comm_ghost.back();

        // Drop any send entry not present in the new send set.
        if ( 0 == new_send.count( EntityProc( entity , tmp.proc ) ) ) {
          m_entity_comm_map.erase(entity->key(),tmp);
        }
      }
    }
    else if ( remove_recv ) {
      // Not owner and not kept as a receive ghost: drop this ghosting's
      // entries for the entity.
      m_entity_comm_map.erase(entity->key(),ghosts);
    }

    if ( m_entity_comm_map.comm(entity->key()).empty() ) {
      removed = true ;
      *i = NULL ; // No longer communicated
      if ( remove_recv ) {
        // The local copy exists only as a receive ghost; destroy it.
        ThrowRequireMsg( destroy_entity( entity ),
                         " FAILED attempt to destroy entity: " << print_entity_key(entity) );
      }
    }
  }

  // Compact the comm list by erasing the NULL-ed slots.
  if ( removed ) {
    std::vector<Entity*>::iterator i =
      std::remove( m_entity_comm.begin() ,
                   m_entity_comm.end() , (Entity*) NULL );
    m_entity_comm.erase( i , m_entity_comm.end() );
  }

  //------------------------------------
  // Push newly ghosted entities to the receivers and update the comm list.
  // Unpacking must proceed in entity-rank order so that higher ranking
  // entities that have relations to lower ranking entities will have
  // the lower ranking entities unpacked first.  The higher and lower
  // ranking entities may be owned by different processes,
  // as such unpacking must be performed in rank order.

  {
    const size_t entity_comm_size = m_entity_comm.size();

    CommAll comm( m_parallel_machine );

    // First pass: size the send buffers (nothing is transmitted yet).
    for ( std::set< EntityProc , EntityLess >::iterator
          j = new_send.begin(); j != new_send.end() ; ++j ) {

      Entity & entity = * j->first ;
      const unsigned int proc =  j->second ;

      if ( ! in_ghost( ghosts , entity , proc ) ) {
        // Not already being sent , must send it.
        CommBuffer & buf = comm.send_buffer( proc );
        buf.pack<unsigned>( entity.entity_rank() );
        pack_entity_info(  buf , entity );
        pack_field_values( buf , entity );
      }
    }

    comm.allocate_buffers( p_size / 4 );

    // Second pass: pack for real and record the new send-side comm info.
    for ( std::set< EntityProc , EntityLess >::iterator
          j = new_send.begin(); j != new_send.end() ; ++j ) {

      Entity & entity = * j->first ;
      const unsigned int proc =  j->second ;

      if ( ! in_ghost( ghosts , entity , proc ) ) {
        // Not already being sent , must send it.
        CommBuffer & buf = comm.send_buffer( proc );
        buf.pack<unsigned>( entity.entity_rank() );
        pack_entity_info(  buf , entity );
        pack_field_values( buf , entity );

        m_entity_comm_map.insert(entity.key(), EntityCommInfo(ghosts.ordinal(), proc));

        m_entity_comm.push_back( & entity );
      }
    }

    comm.communicate();

    std::ostringstream error_msg ;
    int error_count = 0 ;

    // Unpack in rank order: sweep all buffers once per rank, consuming
    // only the messages whose leading rank tag matches the current rank.
    for ( unsigned rank = 0 ; rank < rank_count ; ++rank ) {
      for ( unsigned p = 0 ; p < p_size ; ++p ) {
        CommBuffer & buf = comm.recv_buffer(p);
        while ( buf.remaining() ) {
          // Only unpack if of the current entity rank.
          // If not the current entity rank, break the iteration
          // until a subsequent entity rank iteration.
          {
            unsigned this_rank = ~0u ;
            buf.peek<unsigned>( this_rank );

            if ( this_rank != rank ) break ;

            buf.unpack<unsigned>( this_rank );
          }

          PartVector parts ;
          std::vector<Relation> relations ;
          EntityKey key ;
          unsigned  owner = ~0u ;

          unpack_entity_info( buf, *this, key, owner, parts, relations );

          // Must not have the locally_owned_part or globally_shared_part

          remove( parts , meta.locally_owned_part() );
          remove( parts , meta.globally_shared_part() );

          // Create (or find) the local ghost copy of the entity.
          std::pair<Entity*,bool> result =
            m_entity_repo.internal_create_entity( key );

          Entity* entity = result.first;
          const bool created   = result.second ;
          const bool recreated = EntityLogDeleted == entity->log_query();

          if ( created || recreated ) {
            m_entity_repo.log_created_parallel_copy( *(entity) );
            m_entity_repo.set_entity_owner_rank( *(entity), owner);
          }

          require_entity_owner( * entity , owner );

          internal_change_entity_parts( * entity , parts , PartVector() );

          declare_relation( * entity , relations );

          // Collect field-value unpacking errors; reported collectively below.
          if ( ! unpack_field_values( buf , * entity , error_msg ) ) {
            ++error_count ;
          }

          const EntityCommInfo tmp( ghosts.ordinal() , owner );

          // Insert returns true only for a newly added comm entry.
          if ( m_entity_comm_map.insert(entity->key(),tmp) ) {
            m_entity_comm.push_back( entity );
          }
        }
      }
    }

    if (parallel_size() > 1) {
      all_reduce( m_parallel_machine , ReduceSum<1>( & error_count ) );
    }

    ThrowErrorMsgIf( error_count, error_msg.str() );

    if ( entity_comm_size < m_entity_comm.size() ) {
      // Added new ghosting entities to the list,
      // must now sort and merge.

      std::vector<Entity*>::iterator i = m_entity_comm.begin();
      i += entity_comm_size ;
      std::sort( i , m_entity_comm.end() , EntityLess() );
      std::inplace_merge( m_entity_comm.begin() , i ,
                          m_entity_comm.end() , EntityLess() );
      m_entity_comm.erase( std::unique( m_entity_comm.begin() , m_entity_comm.end() ) ,
                           m_entity_comm.end() );
    }
  }

  // Mark this ghosting as modified in the current synchronization cycle.
  ghosts.m_sync_count = m_sync_count ;
}
00487 
00488 //----------------------------------------------------------------------
00489 
00490 namespace {
00491 
00492 void insert_transitive_closure( std::set<EntityProc,EntityLess> & new_send ,
00493                                 const EntityProc & entry )
00494 {
00495   // Do not insert if I can determine that this entity is already
00496   // owned or shared by the receiving processor.
00497 
00498   if ( entry.second != entry.first->owner_rank() &&
00499        ! in_shared( * entry.first , entry.second ) ) {
00500 
00501     std::pair< std::set<EntityProc,EntityLess>::iterator , bool >
00502       result = new_send.insert( entry );
00503 
00504     if ( result.second ) {
00505       // A new insertion, must also insert the closure
00506 
00507       const unsigned etype = entry.first->entity_rank();
00508       PairIterRelation irel  = entry.first->relations();
00509 
00510       for ( ; ! irel.empty() ; ++irel ) {
00511         if ( irel->entity_rank() < etype ) {
00512           EntityProc tmp( irel->entity() , entry.second );
00513           insert_transitive_closure( new_send , tmp );
00514         }
00515       }
00516     }
00517   }
00518 }
00519 
00520 // Fill a new send list from the receive list.
00521 
00522 void comm_recv_to_send(
00523   BulkData & mesh ,
00524   const std::set< Entity * , EntityLess > & new_recv ,
00525         std::set< EntityProc , EntityLess > & new_send )
00526 {
00527   const unsigned parallel_size = mesh.parallel_size();
00528 
00529   CommAll all( mesh.parallel() );
00530 
00531   for ( int phase = 0; phase < 2; ++phase) {
00532     for ( std::set< Entity * , EntityLess >::const_iterator
00533             i = new_recv.begin() ; i != new_recv.end() ; ++i ) {
00534       const unsigned owner = (*i)->owner_rank();
00535       const EntityKey key = (*i)->key();
00536       all.send_buffer( owner ).pack<EntityKey>( key );
00537     }
00538     if (phase == 0) { //allocation phase
00539       all.allocate_buffers( parallel_size / 4 , false /* Not symmetric */ );
00540     }
00541     else { //communication phase
00542       all.communicate();
00543     }
00544   }
00545 
00546   for ( unsigned proc_rank = 0 ; proc_rank < parallel_size ; ++proc_rank ) {
00547     CommBuffer & buf = all.recv_buffer(proc_rank);
00548     while ( buf.remaining() ) {
00549       EntityKey key ;
00550       buf.unpack<EntityKey>( key );
00551       EntityProc tmp( mesh.get_entity( key ) , proc_rank );
00552       new_send.insert( tmp );
00553     }
00554   }
00555 }
00556 
00557 // Synchronize the send list to the receive list.
00558 
// Make new_send and new_recv parallel-consistent.  Every send entry is
// announced to its target processor; entries for entities this processor
// does not own are forwarded to the owner (which becomes responsible for
// sending) and erased from the local send set.
void comm_sync_send_recv(
  BulkData & mesh ,
  std::set< EntityProc , EntityLess > & new_send ,
  std::set< Entity * , EntityLess > & new_recv )
{
  const unsigned parallel_rank = mesh.parallel_rank();
  const unsigned parallel_size = mesh.parallel_size();

  CommAll all( mesh.parallel() );

  // Communication sizing:
  // each message is a (key, proc) pair, hence two EntityKey-sized slots.

  for ( std::set< EntityProc , EntityLess >::iterator
        i = new_send.begin() ; i != new_send.end() ; ++i ) {
    const unsigned owner = i->first->owner_rank();
    all.send_buffer( i->second ).skip<EntityKey>(2);
    if ( owner != parallel_rank ) {
      all.send_buffer( owner ).skip<EntityKey>(2);
    }
  }

  all.allocate_buffers( parallel_size / 4 , false /* Not symmetric */ );

  // Communication packing (with message content comments):
  for ( std::set< EntityProc , EntityLess >::iterator
        i = new_send.begin() ; i != new_send.end() ; ) {
    const unsigned owner = i->first->owner_rank();

    // Inform receiver of ghosting, the receiver does not own
    // and does not share this entity.
    // The ghost either already exists or is a to-be-done new ghost.
    // This status will be resolved on the final communication pass
    // when new ghosts are packed and sent.

    const EntityKey &entity_key = i->first->key();
    const uint64_t &proc = i->second;

    all.send_buffer( i->second ).pack(entity_key).pack(proc);

    if ( owner != parallel_rank ) {
      // I am not the owner of this entity.
      // Inform the owner of this ghosting need.
      all.send_buffer( owner ).pack(entity_key).pack(proc);

      // Erase it from my processor's ghosting responsibility:
      // The iterator passed to the erase method will be invalidated.
      std::set< EntityProc , EntityLess >::iterator jrem = i ; ++i ;
      new_send.erase( jrem );
    }
    else {
      ++i ;
    }
  }

  all.communicate();

  // Communication unpacking:
  // Each message carries (entity_key, proc) where proc is the intended
  // receiver's rank; compare against my rank to decide my role.
  for ( unsigned p = 0 ; p < parallel_size ; ++p ) {
    CommBuffer & buf = all.recv_buffer(p);
    while ( buf.remaining() ) {

      EntityKey entity_key;
      uint64_t proc(0);

      buf.unpack(entity_key).unpack(proc);

      Entity * const e = mesh.get_entity( entity_key );

      if ( parallel_rank != proc ) {
        //  Receiving a ghosting need for an entity I own.
        //  Add it to my send list.
        ThrowRequireMsg( e != NULL,
            "Unknown entity key: " <<
            MetaData::get(mesh).entity_rank_name(entity_key.rank()) <<
            "[" << entity_key.id() << "]");
        EntityProc tmp( e , proc );
        new_send.insert( tmp );
      }
      else if ( e != NULL ) {
        //  I am the receiver for this ghost.
        //  If I already have it add it to the receive list,
        //  otherwise don't worry about it - I will receive
        //  it in the final new-ghosting communication.
        new_recv.insert( e );
      }
    }
  }
}
00647 
00648 void insert_upward_relations(Entity& rel_entity,
00649                              const EntityRank rank_of_orig_entity,
00650                              const unsigned my_rank,
00651                              const unsigned share_proc,
00652                              std::vector<EntityProc>& send)
00653 {
00654   // If related entity is higher rank, I own it, and it is not
00655   // already shared by proc, ghost it to the sharing processor.
00656   if ( rank_of_orig_entity < rel_entity.entity_rank() &&
00657        rel_entity.owner_rank() == my_rank &&
00658        ! in_shared( rel_entity , share_proc ) ) {
00659 
00660     EntityProc entry( &rel_entity , share_proc );
00661     send.push_back( entry );
00662 
00663     // There may be even higher-ranking entities that need to be ghosted, so we must recurse
00664     for ( PairIterRelation rel = rel_entity.relations() ; ! rel.empty() ; ++rel ) {
00665       Entity * const rel_of_rel_entity = rel->entity();
00666       insert_upward_relations(*rel_of_rel_entity, rel_entity.entity_rank(), my_rank, share_proc, send);
00667     }
00668   }
00669 }
00670 
00671 } // namespace <>
00672 
00673 //----------------------------------------------------------------------
00674 //----------------------------------------------------------------------
00675 
00676 void BulkData::internal_regenerate_shared_aura()
00677 {
00678   Trace_("stk::mesh::BulkData::internal_regenerate_shared_aura");
00679 
00680   require_ok_to_modify();
00681 
00682   std::vector<EntityProc> send ;
00683 
00684   // Iterate over all entities with communication info, get the sharing
00685   // comm info for each entity, and ensure that upwardly related
00686   // entities to the shared entity are ghosted on the sharing proc.
00687   for ( std::vector<Entity*>::const_iterator
00688         i = entity_comm().begin() ; i != entity_comm().end() ; ++i ) {
00689 
00690     Entity & entity = **i ;
00691 
00692     const unsigned erank = entity.entity_rank();
00693 
00694     const PairIterEntityComm sharing = entity.sharing();
00695 
00696     for ( size_t j = 0 ; j < sharing.size() ; ++j ) {
00697 
00698       const unsigned share_proc = sharing[j].proc ;
00699 
00700       for ( PairIterRelation rel = entity.relations() ; ! rel.empty() ; ++rel ) {
00701 
00702         Entity * const rel_entity = rel->entity();
00703 
00704         insert_upward_relations(*rel_entity, erank, m_parallel_rank, share_proc, send);
00705       }
00706     }
00707   }
00708 
00709   // Add new aura, remove all of the old aura.
00710   // The change_ghosting figures out what to actually delete and add.
00711   internal_change_ghosting( shared_aura() , send , m_entity_comm );
00712 }
00713 
00714 //----------------------------------------------------------------------
00715 //----------------------------------------------------------------------
00716 
00717 } // namespace mesh
00718 } // namespace stk
// (Doxygen page-navigation footer removed: "All Classes Namespaces Files ...")