// Sierra Toolkit "Version of the Day" source listing
// BulkDataEndSync.cpp
00001 /*------------------------------------------------------------------------*/
00002 /*                 Copyright 2010 Sandia Corporation.                     */
00003 /*  Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive   */
00004 /*  license for use of this work by or on behalf of the U.S. Government.  */
00005 /*  Export of this program may require a license from the                 */
00006 /*  United States Government.                                             */
00007 /*------------------------------------------------------------------------*/
00008 
00013 //----------------------------------------------------------------------
00014 
00015 #include <stdexcept>
00016 #include <iostream>
00017 #include <sstream>
00018 #include <set>
00019 #include <vector>
00020 #include <algorithm>
00021 
00022 #include <stk_util/environment/ReportHandler.hpp>
00023 
00024 #include <stk_util/parallel/ParallelComm.hpp>
00025 #include <stk_util/parallel/ParallelReduce.hpp>
00026 
00027 #include <stk_mesh/base/BulkData.hpp>
00028 #include <stk_mesh/base/MetaData.hpp>
00029 #include <stk_mesh/base/Entity.hpp>
00030 #include <stk_mesh/base/EntityComm.hpp>
00031 #include <stk_mesh/base/Trace.hpp>
00032 
00033 //----------------------------------------------------------------------
00034 
00035 namespace stk {
00036 namespace mesh {
00037 
00038 bool comm_mesh_verify_parallel_consistency(
00039   BulkData & M , std::ostream & error_log );
00040 
00041 //----------------------------------------------------------------------
00042 
00043 unsigned BulkData::determine_new_owner( Entity & entity ) const
00044 {
00045   // We will decide the new owner by looking at all the processes sharing
00046   // this entity. The new owner will be the sharing process with lowest rank.
00047 
00048   // The local process is a candidate only if the entity is not destroyed.
00049   unsigned new_owner =
00050     EntityLogDeleted == entity.log_query() ? ~0u : m_parallel_rank ;
00051 
00052   for ( PairIterEntityComm
00053         share = entity.sharing(); ! share.empty() ; ++share ) {
00054     if ( share->proc < m_parallel_size &&
00055          ( new_owner < share->proc || m_parallel_size <= new_owner ) ) {
00056       new_owner = share->proc ;
00057     }
00058   }
00059 
00060   return new_owner ;
00061 }
00062 
00063 //----------------------------------------------------------------------
00064 
00065 namespace {
00066 
00067 // A method for quickly finding an entity
00068 Entity* find_entity(const EntityVector& entities, const EntityKey& key,
00069                     bool expect_success = false)
00070 {
00071   EntityVector::const_iterator itr =
00072     std::lower_bound(entities.begin(),
00073                      entities.end(),
00074                      key,
00075                      EntityLess());
00076   if (itr == entities.end() || (*itr)->key() != key) {
00077     ThrowRequireMsg(!expect_success,
00078                     "Expected to be able to find entity of type: " <<
00079                     key.type() << " and rank: " << key.rank());
00080     return NULL;
00081   }
00082   return *itr;
00083 }
00084 
00085 struct EntityProcState {
00086   EntityProc entity_proc;
00087   EntityModificationLog state;
00088 
00089   bool operator<(const EntityProcState& rhs) const
00090   {
00091     EntityLess el;
00092     return el(entity_proc, rhs.entity_proc);
00093   }
00094 };
00095 
00096 bool pack_entity_modification( const BulkData & mesh ,
00097                                const bool pack_shared ,
00098                                CommAll & comm )
00099 {
00100   bool flag = false ;
00101 
00102   const std::vector<Entity*> & entity_comm = mesh.entity_comm();
00103 
00104   for ( std::vector<Entity*>::const_iterator
00105         i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
00106 
00107     Entity & entity = **i ;
00108 
00109     if ( entity.log_query() == EntityLogModified ||
00110          entity.log_query() == EntityLogDeleted ) {
00111 
00112       for ( PairIterEntityComm ec = entity.comm(); ! ec.empty() ; ++ec ) {
00113         const bool shared = 0 == ec->ghost_id ;
00114         if ( pack_shared == shared ) {
00115           comm.send_buffer( ec->proc )
00116               .pack<EntityKey>( entity.key() )
00117               .pack<EntityModificationLog>( entity.log_query() );
00118 
00119           flag = true ;
00120         }
00121       }
00122     }
00123   }
00124 
00125   return flag ;
00126 }
00127 
00128 void communicate_entity_modification( const BulkData & mesh ,
00129                                       const bool shared ,
00130                                       std::vector<EntityProcState > & data )
00131 {
00132   CommAll comm( mesh.parallel() );
00133 
00134   // Sizing send buffers:
00135   const bool local_mod = pack_entity_modification( mesh , shared , comm );
00136 
00137   // Allocation of send and receive buffers:
00138   const bool global_mod =
00139     comm.allocate_buffers( comm.parallel_size() / 4 , false , local_mod );
00140 
00141   if ( global_mod ) {
00142     const std::vector<Entity*> & entity_comm = mesh.entity_comm();
00143 
00144     // Packing send buffers:
00145     pack_entity_modification( mesh , shared , comm );
00146 
00147     comm.communicate();
00148 
00149     for ( unsigned p = 0 ; p < comm.parallel_size() ; ++p ) {
00150       CommBuffer & buf = comm.recv_buffer( p );
00151       EntityKey key ;
00152       EntityProcState tmp ;
00153 
00154       while ( buf.remaining() ) {
00155 
00156         buf.unpack<EntityKey>( key )
00157            .unpack<EntityModificationLog>( tmp.state );
00158 
00159         // search through entity_comm, should only receive info on entities
00160         // that are communicated.
00161         tmp.entity_proc.first  = find_entity(entity_comm, key, true);
00162         tmp.entity_proc.second = p ;
00163 
00164         data.push_back( tmp );
00165       }
00166     }
00167   }
00168 
00169   std::sort( data.begin() , data.end() );
00170 }
00171 
00172 }
00173 
00174 //----------------------------------------------------------------------
00175 //----------------------------------------------------------------------
00176 
00177 // Postconditions:
00178 //  * DistributedIndex is updated based on entity creation/deletions in the
00179 //    last modification cycle.
00180 //  * Comm lists for shared entities are up-to-date.
00181 //  * shared_new contains all entities that were modified/created on a
00182 //    different process
void BulkData::internal_update_distributed_index(
  std::vector<Entity*> & shared_new )
{
  Trace_("stk::mesh::BulkData::internal_update_distributed_index");

  std::vector< parallel::DistributedIndex::KeyType >
    local_created_or_modified , // only store locally owned/shared entities
    del_entities_keys ;

  // Iterate over all entities known to this process, putting
  // locally deleted entities in del_entities_keys, and putting
  // modified shared/owned entities in local_created_or_modified.
  for ( impl::EntityRepository::iterator
        i = m_entity_repo.begin() ; i != m_entity_repo.end() ; ++i ) {

    Entity & entity = * i->second ;

    if ( EntityLogDeleted == entity.log_query() ) {
      // Has been destroyed
      del_entities_keys.push_back( entity.key().raw_key() );
    }
    else if ( entity.log_query() != EntityLogNoChange &&
              in_owned_closure( entity , m_parallel_rank ) ) {
      // Has been changed and is in owned closure, may be shared
      local_created_or_modified.push_back( entity.key().raw_key() );
    }
  }

  // Update distributed index. Note that the DistributedIndex only
  // tracks ownership and sharing information.
  m_entities_index.update_keys( local_created_or_modified , del_entities_keys );

  if (parallel_size() > 1) {
    // Retrieve data regarding which processes use the local_created_or_modified
    // including this process.
    std::vector< parallel::DistributedIndex::KeyProc >
      global_created_or_modified ;
    m_entities_index.query_to_usage( local_created_or_modified ,
                                     global_created_or_modified );

    //------------------------------
    // Take the usage data and update the sharing comm lists
    {
      // Cache of the last entity looked up by key; valid only within
      // the run of adjacent records for the same key (vector is sorted).
      Entity * entity = NULL ;

      // Iterate over all global modifications to this entity, this vector is
      // sorted, so we're guaranteed that all modifications to a particular
      // entities will be adjacent in this vector.
      for ( std::vector< parallel::DistributedIndex::KeyProc >::iterator
              i =  global_created_or_modified.begin() ;
            i != global_created_or_modified.end() ; ++i ) {

        EntityKey key( & i->first );
        unsigned modifying_proc = i->second;

        // key should not be in del_entities_keys
        ThrowAssertMsg( !std::binary_search(del_entities_keys.begin(),
                                            del_entities_keys.end(),
                                            i->first),
                        "Key: " << print_entity_key(mesh_meta_data(), key) <<
                        " was locally deleted, but somehow was included in global_created_or_modified; " <<
                        " this probably means there's problem in DistributedIndex." );

        // Records for this process itself are skipped: only remote
        // usage implies sharing.
        if ( m_parallel_rank != modifying_proc ) {
          // Another process also created or updated this entity.

          // Only want to look up entities at most once
          if ( entity == NULL || entity->key() != key ) {
            // Have not looked this entity up by key
            entity = get_entity( key );

            shared_new.push_back( entity );
          }

          // Add the other_process to the entity's sharing info.
          // ghost_id 0 denotes the sharing (non-ghost) comm list.
          m_entity_repo.insert_comm_info( *entity, EntityCommInfo( 0, // sharing
                                                                   modifying_proc ) );
        }
      }
    }
  }
}
00265 
00266 //----------------------------------------------------------------------
00267 //----------------------------------------------------------------------
00268 
00269 namespace {
00270 
00271 // Enforce that shared entities must be in the owned closure:
00272 
00273 void destroy_dependent_ghosts( BulkData & mesh , Entity * entity )
00274 {
00275   for ( ; ; ) {
00276     PairIterRelation rel = entity->relations();
00277 
00278     if ( rel.empty() ) { break ; }
00279 
00280     Entity * e = rel.back().entity();
00281 
00282     if ( e->entity_rank() < entity->entity_rank() ) { break ; }
00283 
00284     ThrowRequireMsg( !in_owned_closure( *e , mesh.parallel_rank()),
00285         "Entity " << print_entity_key(e) << " should not be in closure." );
00286 
00287     destroy_dependent_ghosts( mesh , e );
00288   }
00289 
00290   mesh.destroy_entity( entity );
00291 }
00292 
00293 // Entities with sharing information that are not in the owned closure
00294 // have been modified such that they are no longer shared.
00295 // These may no longer be needed or may become ghost entities.
00296 // There is not enough information so assume they are to be deleted
00297 // and let these entities be re-ghosted if they are needed.
00298 
00299 // Open question: Should an owned and shared entity that does not
00300 // have an upward relation to an owned entity be destroyed so that
00301 // ownership transfers to another process?
00302 
00303 void resolve_shared_removed_from_owned_closure( BulkData & mesh )
00304 {
00305   for ( std::vector<Entity*>::const_reverse_iterator
00306         i =  mesh.entity_comm().rbegin() ;
00307         i != mesh.entity_comm().rend() ; ++i) {
00308 
00309     Entity * entity = *i ;
00310 
00311     if ( ! entity->sharing().empty() &&
00312          ! in_owned_closure( *entity , mesh.parallel_rank() ) ) {
00313 
00314       destroy_dependent_ghosts( mesh , entity );
00315     }
00316   }
00317 }
00318 
00319 }
00320 
00321 // Resolve modifications for shared entities:
00322 // If not locally destroyed and remotely modified
00323 // then set to locally modified.
00324 // If remotely destroyed then determine the new owner.
00325 //
00326 // Post condition:
00327 //  Shared entities are in-sync with respect to modification state.
00328 //  Shared communication lists are updated to reflect all deletions.
00329 //  Ownership has been re-assigned as necessary for deletion
00330 //  of shared entities.
00331 
void BulkData::internal_resolve_shared_modify_delete()
{
  Trace_("stk::mesh::BulkData::internal_resolve_shared_modify_delete");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  // First destroy shared entities that fell out of the owned closure
  // (and their dependent ghosts) so they can be re-ghosted if needed.
  resolve_shared_removed_from_owned_closure( *this );

  std::vector< EntityProcState > remote_mod ;

  // Communicate entity modification state for shared entities
  // the resulting vector is sorted by entity and process.
  const bool communicate_shared = true ;
  communicate_entity_modification( *this , communicate_shared , remote_mod );

  // We iterate backwards over remote_mod to ensure that we hit the
  // higher-ranking entities first.
  // NOTE: no increment here on purpose -- the inner loop advances 'i'
  // past every record belonging to the current entity.
  for ( std::vector<EntityProcState>::reverse_iterator
        i = remote_mod.rbegin(); i != remote_mod.rend() ; ) {

    Entity * const entity        = i->entity_proc.first ;
    const bool locally_destroyed = EntityLogDeleted == entity->log_query();
    bool remote_owner_destroyed  = false;

    // Iterate over all of this entity's remote changes
    for ( ; i != remote_mod.rend() && i->entity_proc.first == entity ; ++i ) {

      const unsigned remote_proc    = i->entity_proc.second ;
      const bool remotely_destroyed = EntityLogDeleted == i->state ;

      // When a shared entity is remotely modified or destroyed
      // then the local copy is also modified.  This modification
      // status is applied to all related higher ranking entities.

      if ( ! locally_destroyed ) {
        m_entity_repo.log_modified( *entity );
      }

      // A shared entity is being deleted on the remote process.
      // Remove it from the sharing communication list.
      // Ownership changes are processed later, but we'll need
      // to know if the remote owner destroyed the entity in order
      // to correctly resolve ownership (it is not sufficient to just
      // look at the comm list of the entity since there is no
      // guarantee that the comm list is correct or up-to-date).

      if ( remotely_destroyed ) {
        m_entity_repo.erase_comm_info( *entity, EntityCommInfo(0,remote_proc) );

        // check if owner is destroying
        if ( entity->owner_rank() == remote_proc ) {
          remote_owner_destroyed = true ;
        }
      }
    }

    // Have now processed all remote changes knowledge for this entity.

    PairIterEntityComm new_sharing = entity->sharing();
    const bool   exists_somewhere = ! ( remote_owner_destroyed &&
                                        locally_destroyed &&
                                        new_sharing.empty() );

    // If the entity has been deleted everywhere, nothing left to do
    if ( exists_somewhere ) {

      const bool old_local_owner = m_parallel_rank == entity->owner_rank();

      // Giving away ownership to another process in the sharing list:
      const bool give_ownership = locally_destroyed && old_local_owner ;

      // If we are giving away ownership or the remote owner destroyed
      // the entity, then we need to establish a new owner
      if ( give_ownership || remote_owner_destroyed ) {

        const unsigned new_owner = determine_new_owner( *entity );

        m_entity_repo.set_entity_owner_rank( *entity, new_owner );
        m_entity_repo.set_entity_sync_count( *entity, m_sync_count );
      }

      // Part membership is only touched for entities that still exist
      // locally; destroyed entities have no local state to fix.
      if ( ! locally_destroyed ) {

        PartVector add_part , remove_part ;

        if ( new_sharing.empty() ) {
          // Is no longer shared, remove the shared part.
          remove_part.push_back(& m_mesh_meta_data.globally_shared_part());
        }

        const bool new_local_owner = m_parallel_rank == entity->owner_rank();

        const bool local_claimed_ownership =
          ( ! old_local_owner && new_local_owner );

        if ( local_claimed_ownership ) {
          // Changing remotely owned to locally owned
          add_part.push_back( & m_mesh_meta_data.locally_owned_part() );
        }

        if ( ! add_part.empty() || ! remove_part.empty() ) {
          internal_change_entity_parts( *entity , add_part , remove_part );
        }
      } // if ( ! locally_destroyed )
    } // if ( exists_somewhere )
  } // remote mod loop

  // Erase all sharing communication lists for Destroyed entities:
  for ( std::vector<Entity*>::const_reverse_iterator
        i = entity_comm().rbegin() ; i != entity_comm().rend() ; ++i) {
    Entity * entity = *i ;

    if ( EntityLogDeleted == entity->log_query() ) {
      // m_ghosting[0] is the SHARED communication
      m_entity_repo.erase_ghosting( *entity , *m_ghosting[0] );
    }
  }
}
00450 
00451 //----------------------------------------------------------------------
00452 // Resolve modifications for ghosted entities:
00453 // If a ghosted entity is modified or destroyed on the owning
00454 // process then the ghosted entity must be destroyed.
00455 //
00456 // Post condition:
00457 //  Ghosted entities of modified or deleted entities are destroyed.
00458 //  Ghosted communication lists are cleared to reflect all deletions.
00459 
void BulkData::internal_resolve_ghosted_modify_delete()
{
  Trace_("stk::mesh::BulkData::internal_resolve_ghosted_modify_delete");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");
  // Resolve modifications for ghosted entities:

  std::vector<EntityProcState > remote_mod ;

  // Communicate entity modification state for ghost entities
  const bool communicate_shared = false ;
  communicate_entity_modification( *this , communicate_shared , remote_mod );

  const size_t ghosting_count = m_ghosting.size();

  // One flag per ghosting: set when that ghosting's membership changed,
  // so its sync count can be bumped at the end.
  std::vector< int > ghosting_change_flags( ghosting_count , 0 );

  // We iterate backwards over remote_mod to ensure that we hit the
  // higher-ranking entities first. This is important because higher-ranking
  // entities like element must be deleted before the nodes they have are
  // deleted.
  for ( std::vector<EntityProcState>::reverse_iterator
        i = remote_mod.rbegin(); i != remote_mod.rend() ; ++i ) {
    Entity *       entity       = i->entity_proc.first ;
    const unsigned remote_proc  = i->entity_proc.second ;
    const bool     local_owner  = entity->owner_rank() == m_parallel_rank ;
    const bool remotely_destroyed = EntityLogDeleted == i->state ;
    const bool locally_destroyed  = EntityLogDeleted == entity->log_query();

    if ( local_owner ) { // Sending to 'remote_proc' for ghosting

      if ( remotely_destroyed ) {

        // remove from ghost-send list

        for ( size_t j = ghosting_count ; j-- ; ) {
          if ( m_entity_repo.erase_comm_info( *entity, EntityCommInfo( j , remote_proc ) ) ) {
            ghosting_change_flags[ j ] = true ;
          }
        }
      }

      // Remotely modified ghosts are ignored

    }
    else { // Receiving from 'remote_proc' for ghosting

      // Owner modified or destroyed, must locally destroy.

      // Record every ghosting this receive-ghost participates in as
      // changed before wiping its communication information.
      for ( PairIterEntityComm ec = entity->comm() ; ! ec.empty() ; ++ec ) {
        ghosting_change_flags[ ec->ghost_id ] = true ;
      }

      // This is a receive ghost so the only communication information
      // is the ghosting information, can clear it all out.
      m_entity_repo.comm_clear( *entity );

      if ( ! locally_destroyed ) {

        // If mesh modification causes a ghost entity to become
        // a member of an owned-closure then do not automatically
        // destroy it.  The new sharing status will be resolved
        // in 'internal_resolve_parallel_create'.

        if ( ! in_owned_closure( *entity , m_parallel_rank ) ) {

          const bool destroy_entity_successful = destroy_entity(entity);
          ThrowRequireMsg(destroy_entity_successful,
              "Could not destroy ghost entity " << print_entity_key(entity));
        }
      }
    }
  } // end loop on remote mod

  // Erase all ghosting communication lists for:
  // 1) Destroyed entities.
  // 2) Owned and modified entities.

  for ( std::vector<Entity*>::const_reverse_iterator
        i = entity_comm().rbegin() ; i != entity_comm().rend() ; ++i) {

    Entity & entity = **i ;

    const bool locally_destroyed = EntityLogDeleted == entity.log_query();
    const bool locally_owned_and_modified =
      EntityLogModified == entity.log_query() &&
      m_parallel_rank   == entity.owner_rank() ;

    if ( locally_destroyed || locally_owned_and_modified ) {

      // Erase from EVERY ghosting; index 0 is the shared list.

      for ( size_t j = ghosting_count ; j-- ; ) {
        if ( m_entity_repo.erase_ghosting( entity, *m_ghosting[j] ) ) {
          ghosting_change_flags[ j ] = true ;
        }
      }
    }
  }

  std::vector< int > ghosting_change_flags_global( ghosting_count , 0 );

  // Combine flags across all processes so every process bumps the same
  // ghostings' sync counts.
  all_reduce_sum( m_parallel_machine ,
                  & ghosting_change_flags[0] ,
                  & ghosting_change_flags_global[0] ,
                  ghosting_change_flags.size() );

  for ( unsigned ic = 0 ; ic < ghosting_change_flags_global.size() ; ++ic ) {
    if ( ghosting_change_flags_global[ic] ) {
      m_ghosting[ic]->m_sync_count = m_sync_count ;
    }
  }
}
00573 
00574 //----------------------------------------------------------------------
00575 
00576 // Postconditions:
00577 //  * All shared entities have parallel-consistent owner
00578 //  * Part membership of shared entities is up-to-date
00579 //  * m_entity_comm is up-to-date
void BulkData::internal_resolve_parallel_create()
{
  Trace_("stk::mesh::BulkData::internal_resolve_parallel_create");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  std::vector<Entity*> shared_modified ;

  // Update the parallel index and
  // output shared and modified entities.
  internal_update_distributed_index( shared_modified );

  // ------------------------------------------------------------
  // Claim ownership on all shared_modified entities that I own
  // and which were not created in this modification cycle. All
  // sharing procs will need to be informed of this claim.
  CommAll comm_all( m_parallel_machine );

  // Two-phase communication: phase 0 sizes the buffers, phase 1 packs
  // the identical data and performs the exchange.
  for ( int phase = 0; phase < 2; ++phase ) {
    for ( std::vector<Entity*>::iterator
            i = shared_modified.begin() ; i != shared_modified.end() ; ++i ) {
      Entity & entity = **i ;
      if ( entity.owner_rank() == m_parallel_rank &&
           entity.log_query()  != EntityLogCreated ) {

        for ( PairIterEntityComm
                jc = entity.sharing() ; ! jc.empty() ; ++jc ) {
          comm_all.send_buffer( jc->proc ) .pack<EntityKey>( entity.key() );
        }
      }
    }

    if (phase == 0) { //allocation phase
      comm_all.allocate_buffers( m_parallel_size / 4 );
    }
    else { // communication phase
      comm_all.communicate();
    }
  }

  // Each key received from process 'p' is an ownership claim by 'p'.
  for ( unsigned p = 0 ; p < m_parallel_size ; ++p ) {
    CommBuffer & buf = comm_all.recv_buffer( p );
    EntityKey key ;
    while ( buf.remaining() ) {
      buf.unpack<EntityKey>( key );

      Entity & entity = * get_entity( key );

      // Set owner, will correct part membership later
      m_entity_repo.set_entity_owner_rank( entity, p);
    }
  }

  // ------------------------------------------------------------
  // Update shared created entities.
  // - Revise ownership to selected processor
  // - Update sharing.
  // - Work backward so the 'in_owned_closure' function
  //   can evaluate related higher ranking entities.

  std::ostringstream error_msg ;
  int error_flag = 0 ;

  PartVector shared_part , owned_part ;
  shared_part.push_back( & m_mesh_meta_data.globally_shared_part() );
  owned_part.push_back(  & m_mesh_meta_data.locally_owned_part() );

  std::vector<Entity*>::const_reverse_iterator iend = shared_modified.rend();
  for ( std::vector<Entity*>::const_reverse_iterator
        i = shared_modified.rbegin() ; i != iend ; ++i) {

    Entity * entity = *i ;

    if ( entity->owner_rank() == m_parallel_rank &&
         entity->log_query() == EntityLogCreated ) {

      // Created and not claimed by an existing owner

      const unsigned new_owner = determine_new_owner( *entity );

      m_entity_repo.set_entity_owner_rank( *entity, new_owner);
    }

    if ( entity->owner_rank() != m_parallel_rank ) {
      // Do not own it and still have it.
      // Remove the locally owned, add the globally_shared
      m_entity_repo.set_entity_sync_count( *entity, m_sync_count);
      internal_change_entity_parts( *entity , shared_part /*add*/, owned_part /*remove*/);
    }
    else if ( ! entity->sharing().empty() ) {
      // Own it and has sharing information.
      // Add the globally_shared
      internal_change_entity_parts( *entity , shared_part /*add*/, PartVector() /*remove*/ );
    }
    else {
      // Own it and does not have sharing information.
      // Remove the globally_shared
      internal_change_entity_parts( *entity , PartVector() /*add*/, shared_part /*remove*/);
    }

    // Newly created shared entity had better be in the owned closure
    if ( ! in_owned_closure( *entity , m_parallel_rank ) ) {
      if ( 0 == error_flag ) {
        error_flag = 1 ;
        error_msg
          << "\nP" << m_parallel_rank << ": " << " FAILED\n"
          << "  The following entities were declared on multiple processors,\n"
          << "  cannot be parallel-shared, and were declared with"
          << "  parallel-ghosting information. {\n";
      }
      error_msg << "    " << print_entity_key(entity);
      error_msg << " also declared on" ;
      for ( PairIterEntityComm ec = entity->sharing(); ! ec.empty() ; ++ec ) {
        error_msg << " P" << ec->proc ;
      }
      error_msg << "\n" ;
    }
  }

  // Parallel-consistent error checking of above loop
  if ( error_flag ) { error_msg << "}\n" ; }
  all_reduce( m_parallel_machine , ReduceMax<1>( & error_flag ) );
  ThrowErrorMsgIf( error_flag, error_msg.str() );

  // ------------------------------------------------------------
  // Update m_entity_comm based on shared_modified

  const size_t n_old = m_entity_comm.size();

  m_entity_comm.insert( m_entity_comm.end() ,
                        shared_modified.begin() , shared_modified.end() );

  // Merge the appended (sorted) entries into the existing (sorted)
  // list, then drop duplicates so each entity appears exactly once.
  std::inplace_merge( m_entity_comm.begin() ,
                      m_entity_comm.begin() + n_old ,
                      m_entity_comm.end() ,
                      EntityLess() );

  {
    std::vector<Entity*>::iterator i =
      std::unique( m_entity_comm.begin() , m_entity_comm.end() );

    m_entity_comm.erase( i , m_entity_comm.end() );
  }
}
00724 
00725 //----------------------------------------------------------------------
00726 
00727 bool BulkData::modification_end()
00728 {
00729   Trace_("stk::mesh::BulkData::modification_end");
00730 
00731   return internal_modification_end( true );
00732 }
00733 
00734 #if 0
00735 
00736 namespace {
00737 
00738 // Very, very handy for debugging parallel resolution...
00739 
// Debug aid (compiled out under #if 0): dump every communicated
// entity's key, owner rank, modification state ("mod"/"del"), and its
// (ghost_id, proc) pairs to stdout when 'doit' is true.
void print_comm_list( const BulkData & mesh , bool doit )
{
  if ( doit ) {
    std::ostringstream msg ;

    msg << std::endl ;

    for ( std::vector<Entity*>::const_iterator
          i =  mesh.entity_comm().begin() ;
          i != mesh.entity_comm().end() ; ++i ) {

      Entity & entity = **i ;
      msg << "P" << mesh.parallel_rank() << ": " ;

      print_entity_key( msg , MetaData::get(mesh) , entity.key() );

      msg << " owner(" << entity.owner_rank() << ")" ;

      // Modification state tag, padded so columns line up.
      if ( EntityLogModified == entity.log_query() ) { msg << " mod" ; }
      else if ( EntityLogDeleted == entity.log_query() ) { msg << " del" ; }
      else { msg << "    " ; }

      for ( PairIterEntityComm ec = entity.comm(); ! ec.empty() ; ++ec ) {
        msg << " (" << ec->ghost_id << "," << ec->proc << ")" ;
      }
      msg << std::endl ;
    }

    std::cout << msg.str();
  }
}
00771 
00772 }
00773 
00774 #endif
00775 
// Finish a modification cycle: resolve all parallel consequences of
// local mesh changes, verify consistency, sort buckets, and mark the
// mesh SYNCHRONIZED.
//
// The order of the resolution steps below is significant and must not
// be changed: shared-entity resolution can delete ghosts, ghost
// resolution can empty comm lists, and creation resolution must run
// before shared-membership resolution.
//
// \param regenerate_aura  if true, rebuild the ghosting aura around
//                         shared entities after resolution.
// \return false if the mesh was already synchronized (no-op),
//         true after a successful transition to SYNCHRONIZED.
bool BulkData::internal_modification_end( bool regenerate_aura )
{
  Trace_("stk::mesh::BulkData::internal_modification_end");

  // Already synchronized: nothing to do.
  if ( m_sync_state == SYNCHRONIZED ) { return false ; }

  if (parallel_size() > 1) {
    // Resolve modification or deletion of shared entities
    // which can cause deletion of ghost entities.
    internal_resolve_shared_modify_delete();

    // Resolve modification or deletion of ghost entities
    // by destroying ghost entities that have been touched.
    internal_resolve_ghosted_modify_delete();

    // Resolution of shared and ghost modifications can empty
    // the communication information for entities.
    // If there is no communication information then the
    // entity must be removed from the communication list.
    {
      std::vector<Entity*>::iterator i = m_entity_comm.begin();
      bool changed = false ;
      for ( ; i != m_entity_comm.end() ; ++i ) {
        // Null out entries whose comm info is now empty ...
        if ( (*i)->comm().empty() ) { *i = NULL ; changed = true ; }
      }
      if ( changed ) {
        // ... then compact the list via the erase-remove idiom.
        i = std::remove( m_entity_comm.begin() ,
                         m_entity_comm.end() , (Entity *) NULL );
        m_entity_comm.erase( i , m_entity_comm.end() );
      }
    }

    // Resolve creation of entities: discover sharing and set unique ownership.
    internal_resolve_parallel_create();

    // Resolve part membership for shared entities.
    // This occurs after resolving creation so created and shared
    // entities are resolved along with previously existing shared entities.
    internal_resolve_shared_membership();

    // Regenerate the ghosting aura around all shared mesh entities.
    if ( regenerate_aura ) { internal_regenerate_shared_aura(); }

    // ------------------------------
    // Verify parallel consistency of mesh entities.
    // Unique ownership, communication lists, sharing part membership,
    // application part membership consistency.
    std::ostringstream msg ;
    bool is_consistent = true;
    is_consistent = comm_mesh_verify_parallel_consistency( *this , msg );
    ThrowErrorMsgIf( !is_consistent, msg.str() );
  }
  else {
    // Serial run: only the distributed index needs updating.
    std::vector<Entity*> shared_modified ;
    internal_update_distributed_index( shared_modified );
  }

  // ------------------------------
  // The very last operation performed is to sort the bucket entities.
  // This does not change the entities, relations, or field data.
  // However, it insures that the ordering of entities and buckets
  // is independent of the order in which a set of changes were
  // performed.
  m_bucket_repository.internal_sort_bucket_entities();

  // ------------------------------

  m_sync_state = SYNCHRONIZED ;

  return true ;
}
00847 
00848 //----------------------------------------------------------------------
00849 //----------------------------------------------------------------------
00850 
// Hard-coded part ordinals relied upon by the pack/unpack helpers
// below: universal_part, locally_owned_part, and globally_shared_part
// are assumed to occupy ordinals 0, 1, and 2 respectively.  These
// assumptions are checked at runtime (ThrowRequireMsg) in
// internal_resolve_shared_membership().
enum { PART_ORD_UNIVERSAL = 0 };
enum { PART_ORD_OWNED     = 1 };
enum { PART_ORD_SHARED    = 2 };
00854 
00855 namespace {
00856 
00857 void pack_induced_memberships( CommAll & comm ,
00858                                const std::vector<Entity*> & entity_comm )
00859 {
00860   for ( std::vector<Entity*>::const_iterator
00861         i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
00862 
00863     Entity & entity = **i ;
00864 
00865     if ( in_shared( entity , entity.owner_rank() ) ) {
00866       // Is shared with owner, send to owner.
00867 
00868       PartVector empty , induced ;
00869 
00870       induced_part_membership( entity , empty , induced );
00871 
00872       CommBuffer & buf = comm.send_buffer( entity.owner_rank() );
00873 
00874       unsigned tmp = induced.size();
00875 
00876       buf.pack<unsigned>( tmp );
00877 
00878       for ( PartVector::iterator
00879             j = induced.begin() ; j != induced.end() ; ++j ) {
00880         tmp = (*j)->mesh_meta_data_ordinal();
00881         buf.pack<unsigned>( tmp );
00882       }
00883     }
00884   }
00885 }
00886 
00887 void generate_send_list( const size_t sync_count ,
00888                          const unsigned p_rank ,
00889                          const std::vector<Entity*>    & entity_comm ,
00890                                std::vector<EntityProc> & send_list )
00891 {
00892   for ( std::vector<Entity*>::const_iterator
00893         i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
00894 
00895     Entity & entity = **i ;
00896 
00897     if ( entity.owner_rank() == p_rank &&
00898          entity.synchronized_count() == sync_count ) {
00899 
00900       for ( PairIterEntityComm ec = entity.comm() ; ! ec.empty() ; ++ec ) {
00901         EntityProc tmp( & entity , ec->proc );
00902         send_list.push_back( tmp );
00903       }
00904     }
00905   }
00906 
00907   {
00908     std::sort( send_list.begin() , send_list.end() , EntityLess() );
00909     std::vector<EntityProc>::iterator i =
00910       std::unique( send_list.begin() , send_list.end() );
00911     send_list.erase( i , send_list.end() );
00912   }
00913 }
00914 
00915 void pack_part_memberships( CommAll & comm ,
00916                             const std::vector<EntityProc> & send_list )
00917 {
00918   for ( std::vector<EntityProc>::const_iterator
00919         i = send_list.begin() ; i != send_list.end() ; ++i ) {
00920 
00921     Entity & entity = * i->first ;
00922 
00923     std::pair<const unsigned *, const unsigned *>
00924       part_ord = entity.bucket().superset_part_ordinals();
00925 
00926     // I am the owner; therefore, the first three members are
00927     // universal, uses, and owns.  Don't send them.
00928 
00929     // I am the owner.  The first two memberships are
00930     // universal_part and locally_owned_part.  The third
00931     // membership may be globally_shared_part ;
00932 
00933     const unsigned count_all  = part_ord.second - part_ord.first ;
00934     const unsigned count_skip =
00935       ( 2 < count_all && part_ord.first[2] == PART_ORD_SHARED ) ? 3 : 2 ;
00936 
00937     const unsigned count_send = count_all - count_skip ;
00938 
00939     const unsigned * const start_send = part_ord.first + count_skip ;
00940 
00941     comm.send_buffer( i->second ).pack<EntityKey>( entity.key() )
00942                                  .pack<unsigned>( count_send )
00943                                  .pack<unsigned>( start_send , count_send );
00944   }
00945 }
00946 
00947 }
00948 
00949 //  Mesh entity membership changes must be synchronized among
00950 //  processes that share mesh entities and propagated to
00951 //  processes that ghost copies of the mesh entities.
00952 //
00953 //  Precondition: correct shared and ghosting lists.
00954 //
00955 //  Part memberships may have been added or removed
00956 //  either explicitly or indirectly via entity relationships
00957 //  being added or removed.
00958 
// Synchronize part memberships across all processes holding copies of
// shared or ghosted entities.  Two communication rounds:
//
//   1. Sharing (non-owning) processes send their induced part
//      memberships to the owner; the owner merges them with its own
//      induced memberships and updates its copy, removing induced
//      parts no longer justified by any process.
//   2. Owners broadcast the complete membership of every entity
//      modified this cycle to all processes holding a copy, which
//      add/remove parts to match (protected ordinals 0-2 excepted).
//
// Precondition: correct shared and ghosting communication lists.
void BulkData::internal_resolve_shared_membership()
{
  Trace_("stk::mesh::BulkData::internal_resolve_shared_membership");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  const MetaData & meta  = m_mesh_meta_data ;
  ParallelMachine p_comm = m_parallel_machine ;
  const unsigned  p_rank = m_parallel_rank ;
  const unsigned  p_size = m_parallel_size ;
  const PartVector & all_parts = meta.get_parts();

  const Part & part_universal = meta.universal_part();
  const Part & part_owned  = meta.locally_owned_part();
  const Part & part_shared = meta.globally_shared_part();

  // Quick verification of part ordinal assumptions
  // (the PART_ORD_* constants hard-coded by the pack/unpack logic).

  ThrowRequireMsg(PART_ORD_UNIVERSAL == part_universal.mesh_meta_data_ordinal(),
                  "Universal part ordinal is wrong, expected "
                  << PART_ORD_UNIVERSAL << ", got: "
                  << part_universal.mesh_meta_data_ordinal());

  ThrowRequireMsg(PART_ORD_OWNED == part_owned.mesh_meta_data_ordinal(),
                  "Owned part ordinal is wrong, expected "
                  << PART_ORD_OWNED << ", got: "
                  << part_owned.mesh_meta_data_ordinal());

  ThrowRequireMsg(PART_ORD_SHARED == part_shared.mesh_meta_data_ordinal(),
                  "Shared part ordinal is wrong, expected "
                  << PART_ORD_SHARED << ", got: "
                  << part_shared.mesh_meta_data_ordinal());

  //  Shared entities may have been modified due to relationship changes.
  //  Send just the current induced memberships from the sharing to
  //  the owning processes.
  {
    CommAll comm( p_comm );

    // Two-pass pack: first pass sizes the send buffers ...
    pack_induced_memberships( comm , m_entity_comm );

    comm.allocate_buffers( p_size / 4 );

    // ... second pass fills them after allocation.
    pack_induced_memberships( comm , m_entity_comm );

    comm.communicate();

    for ( std::vector<Entity*>::iterator
          i = m_entity_comm.begin() ; i != m_entity_comm.end() ; ++i ) {

      Entity & entity = **i ;

      if ( entity.owner_rank() == p_rank ) {
        // Receiving from all sharing processes

        PartVector empty , induced_parts , current_parts , remove_parts ;

        // Start with the locally induced memberships ...
        induced_part_membership( entity , empty , induced_parts );

        // ... then merge in the memberships reported by each sharer.
        for ( PairIterEntityComm
              ec = entity.sharing() ; ! ec.empty() ; ++ec ) {

          CommBuffer & buf = comm.recv_buffer( ec->proc );

          unsigned count = 0 ; buf.unpack<unsigned>( count );
          for ( unsigned j = 0 ; j < count ; ++j ) {
            unsigned part_ord = 0 ; buf.unpack<unsigned>( part_ord );
            insert( induced_parts , * all_parts[ part_ord ] );
          }
        }

        // Remove any part that is an induced part but is not
        // in the induced parts list.

        entity.bucket().supersets( current_parts );

        for ( PartVector::iterator
              p = current_parts.begin() ; p != current_parts.end() ; ++p ) {
          if ( membership_is_induced( **p , entity.entity_rank() ) &&
               ! contain( induced_parts , **p ) ) {
            remove_parts.push_back( *p );
          }
        }

        internal_change_entity_parts( entity, induced_parts, remove_parts );
      }
    }
  }

  //------------------------------
  // The owners have complete knowledge of memberships.
  // Send membership information to sync the shared and ghosted copies.
  // Only need to do this for entities that have actually changed.

  {
    std::vector<EntityProc> send_list ;

    generate_send_list( m_sync_count, p_rank, m_entity_comm, send_list);

    CommAll comm( p_comm );

    // Two-pass pack again: size, allocate, fill.
    pack_part_memberships( comm , send_list );

    comm.allocate_buffers( p_size / 4 );

    pack_part_memberships( comm , send_list );

    comm.communicate();

    for ( unsigned p = 0 ; p < p_size ; ++p ) {
      CommBuffer & buf = comm.recv_buffer( p );
      while ( buf.remaining() ) {

        PartVector owner_parts , current_parts , remove_parts ;

        EntityKey key ; buf.unpack<EntityKey>( key );
        unsigned count = 0 ; buf.unpack<unsigned>( count );
        for ( unsigned j = 0 ; j < count ; ++j ) {
          unsigned part_ord = 0 ; buf.unpack<unsigned>( part_ord );
          insert( owner_parts , * all_parts[ part_ord ] );
        }

        // Any current part that is not a member of owners_parts
        // must be removed.

        // NOTE(review): the third argument 'true' presumably makes
        // find_entity throw/assert when the key is absent -- confirm,
        // since 'entity' is dereferenced unconditionally below.
        Entity * const entity = find_entity(m_entity_comm, key, true);

        entity->bucket().supersets( current_parts );

        for ( PartVector::iterator
              ip = current_parts.begin() ; ip != current_parts.end() ; ++ip ) {
          Part * const part = *ip ;
          const unsigned part_ord = part->mesh_meta_data_ordinal();
          // Never remove the protected universal/owned/shared parts.
          if ( PART_ORD_UNIVERSAL != part_ord &&
               PART_ORD_OWNED     != part_ord &&
               PART_ORD_SHARED    != part_ord &&
               ! contain( owner_parts , *part ) ) {
            remove_parts.push_back( part );
          }
        }

        internal_change_entity_parts( *entity , owner_parts , remove_parts );
      }
    }
  }
}
01105 
01106 } // namespace mesh
01107 } // namespace stk
01108 
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends