// Sierra Toolkit "Version of the Day" snapshot
// BulkDataEndSync.cpp
00001 /*------------------------------------------------------------------------*/
00002 /*                 Copyright 2010 Sandia Corporation.                     */
00003 /*  Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive   */
00004 /*  license for use of this work by or on behalf of the U.S. Government.  */
00005 /*  Export of this program may require a license from the                 */
00006 /*  United States Government.                                             */
00007 /*------------------------------------------------------------------------*/
00008 
00013 //----------------------------------------------------------------------
00014 
00015 #include <stdexcept>
00016 #include <iostream>
00017 #include <sstream>
00018 #include <set>
00019 #include <vector>
00020 #include <algorithm>
00021 
00022 #include <stk_util/environment/ReportHandler.hpp>
00023 
00024 #include <stk_util/parallel/ParallelComm.hpp>
00025 #include <stk_util/parallel/ParallelReduce.hpp>
00026 
00027 #include <stk_mesh/base/BulkData.hpp>
00028 #include <stk_mesh/base/MetaData.hpp>
00029 #include <stk_mesh/base/Entity.hpp>
00030 #include <stk_mesh/base/EntityComm.hpp>
00031 #include <stk_mesh/base/Trace.hpp>
00032 
00033 //----------------------------------------------------------------------
00034 
00035 namespace stk {
00036 namespace mesh {
00037 
00038 bool comm_mesh_verify_parallel_consistency(
00039   BulkData & M , std::ostream & error_log );
00040 
00041 //----------------------------------------------------------------------
00042 
00043 unsigned BulkData::determine_new_owner( Entity & entity ) const
00044 {
00045   // We will decide the new owner by looking at all the processes sharing
00046   // this entity. The new owner will be the sharing process with lowest rank.
00047 
00048   // The local process is a candidate only if the entity is not destroyed.
00049   unsigned new_owner =
00050     EntityLogDeleted == entity.log_query() ? ~0u : m_parallel_rank ;
00051 
00052   for ( PairIterEntityComm
00053         share = entity.sharing(); ! share.empty() ; ++share ) {
00054     if ( share->proc < m_parallel_size &&
00055          ( new_owner < share->proc || m_parallel_size <= new_owner ) ) {
00056       new_owner = share->proc ;
00057     }
00058   }
00059 
00060   return new_owner ;
00061 }
00062 
00063 //----------------------------------------------------------------------
00064 
00065 namespace {
00066 
00067 // A method for quickly finding an entity
00068 Entity* find_entity(const EntityVector& entities, const EntityKey& key,
00069                     bool expect_success = false)
00070 {
00071   EntityVector::const_iterator itr =
00072     std::lower_bound(entities.begin(),
00073                      entities.end(),
00074                      key,
00075                      EntityLess());
00076   if (itr == entities.end() || (*itr)->key() != key) {
00077     ThrowRequireMsg(!expect_success,
00078                     "Expected to be able to find entity of type: " <<
00079                     key.type() << " and rank: " << key.rank());
00080     return NULL;
00081   }
00082   return *itr;
00083 }
00084 
00085 struct EntityProcState {
00086   EntityProc entity_proc;
00087   EntityModificationLog state;
00088 
00089   bool operator<(const EntityProcState& rhs) const
00090   {
00091     EntityLess el;
00092     return el(entity_proc, rhs.entity_proc);
00093   }
00094 };
00095 
00096 bool pack_entity_modification( const BulkData & mesh ,
00097                                const bool pack_shared ,
00098                                CommAll & comm )
00099 {
00100   bool flag = false ;
00101 
00102   const std::vector<Entity*> & entity_comm = mesh.entity_comm();
00103 
00104   for ( std::vector<Entity*>::const_iterator
00105         i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
00106 
00107     Entity & entity = **i ;
00108 
00109     if ( entity.log_query() == EntityLogModified ||
00110          entity.log_query() == EntityLogDeleted ) {
00111 
00112       for ( PairIterEntityComm ec = entity.comm(); ! ec.empty() ; ++ec ) {
00113         const bool shared = 0 == ec->ghost_id ;
00114         if ( pack_shared == shared ) {
00115           comm.send_buffer( ec->proc )
00116               .pack<EntityKey>( entity.key() )
00117               .pack<EntityModificationLog>( entity.log_query() );
00118 
00119           flag = true ;
00120         }
00121       }
00122     }
00123   }
00124 
00125   return flag ;
00126 }
00127 
00128 void communicate_entity_modification( const BulkData & mesh ,
00129                                       const bool shared ,
00130                                       std::vector<EntityProcState > & data )
00131 {
00132   CommAll comm( mesh.parallel() );
00133 
00134   // Sizing send buffers:
00135   const bool local_mod = pack_entity_modification( mesh , shared , comm );
00136 
00137   // Allocation of send and receive buffers:
00138   const bool global_mod =
00139     comm.allocate_buffers( comm.parallel_size() / 4 , false , local_mod );
00140 
00141   if ( global_mod ) {
00142     const std::vector<Entity*> & entity_comm = mesh.entity_comm();
00143 
00144     // Packing send buffers:
00145     pack_entity_modification( mesh , shared , comm );
00146 
00147     comm.communicate();
00148 
00149     for ( unsigned p = 0 ; p < comm.parallel_size() ; ++p ) {
00150       CommBuffer & buf = comm.recv_buffer( p );
00151       EntityKey key ;
00152       EntityProcState tmp ;
00153 
00154       while ( buf.remaining() ) {
00155 
00156         buf.unpack<EntityKey>( key )
00157            .unpack<EntityModificationLog>( tmp.state );
00158 
00159         // search through entity_comm, should only receive info on entities
00160         // that are communicated.
00161         tmp.entity_proc.first  = find_entity(entity_comm, key, true);
00162         tmp.entity_proc.second = p ;
00163 
00164         data.push_back( tmp );
00165       }
00166     }
00167   }
00168 
00169   std::sort( data.begin() , data.end() );
00170 }
00171 
00172 }
00173 
00174 //----------------------------------------------------------------------
00175 //----------------------------------------------------------------------
00176 
00177 // Postconditions:
00178 //  * DistributedIndex is updated based on entity creation/deletions in the
00179 //    last modification cycle.
00180 //  * Comm lists for shared entities are up-to-date.
00181 //  * shared_new contains all entities that were modified/created on a
00182 //    different process
void BulkData::internal_update_distributed_index(
  std::vector<Entity*> & shared_new )
{
  Trace_("stk::mesh::BulkData::internal_update_distributed_index");

  // Raw keys of entities changed this cycle, split into survivors
  // (created/modified, in the owned closure) and deletions.
  std::vector< parallel::DistributedIndex::KeyType >
    local_created_or_modified , // only store locally owned/shared entities
    del_entities_keys ;

  // Iterate over all entities known to this process, putting
  // locally deleted entities in del_entities_keys, and putting
  // modified shared/owned entities in local_created_or_modified.
  for ( impl::EntityRepository::iterator
        i = m_entity_repo.begin() ; i != m_entity_repo.end() ; ++i ) {

    Entity & entity = * i->second ;

    if ( EntityLogDeleted == entity.log_query() ) {
      // Has been destroyed
      del_entities_keys.push_back( entity.key().raw_key() );
    }
    else if ( entity.log_query() != EntityLogNoChange &&
              in_owned_closure( entity , m_parallel_rank ) ) {
      // Has been changed and is in owned closure, may be shared
      local_created_or_modified.push_back( entity.key().raw_key() );
    }
  }

  // Update distributed index. Note that the DistributedIndex only
  // tracks ownership and sharing information.
  m_entities_index.update_keys( local_created_or_modified , del_entities_keys );

  if (parallel_size() > 1) {
    // Retrieve data regarding which processes use the local_created_or_modified
    // including this process.
    std::vector< parallel::DistributedIndex::KeyProc >
      global_created_or_modified ;
    m_entities_index.query_to_usage( local_created_or_modified ,
                                     global_created_or_modified );

    //------------------------------
    // Take the usage data and update the sharing comm lists
    {
      // Cache of the most recently looked-up entity so each key is
      // resolved (and pushed onto shared_new) at most once.
      Entity * entity = NULL ;

      // Iterate over all global modifications to this entity, this vector is
      // sorted, so we're guaranteed that all modifications to a particular
      // entities will be adjacent in this vector.
      for ( std::vector< parallel::DistributedIndex::KeyProc >::iterator
              i =  global_created_or_modified.begin() ;
            i != global_created_or_modified.end() ; ++i ) {

        EntityKey key( & i->first );
        unsigned modifying_proc = i->second;

        // key should not be in del_entities_keys
        // NOTE(review): binary_search assumes del_entities_keys is sorted —
        // this holds if the entity repository iterates in key order; confirm.
        ThrowAssertMsg( !std::binary_search(del_entities_keys.begin(),
                                            del_entities_keys.end(),
                                            i->first),
                        "Key: " << print_entity_key(mesh_meta_data(), key) <<
                        " was locally deleted, but somehow was included in global_created_or_modified; " <<
                        " this probably means there's problem in DistributedIndex." );

        if ( m_parallel_rank != modifying_proc ) {
          // Another process also created or updated this entity.

          // Only want to look up entities at most once
          if ( entity == NULL || entity->key() != key ) {
            // Have not looked this entity up by key
            entity = get_entity( key );

            shared_new.push_back( entity );
          }

          // Add the other_process to the entity's sharing info.
          m_entity_repo.insert_comm_info( *entity, EntityCommInfo( 0, // sharing
                                                                   modifying_proc ) );
        }
      }
    }
  }
}
00265 
00266 //----------------------------------------------------------------------
00267 //----------------------------------------------------------------------
00268 
00269 namespace {
00270 
00271 // Enforce that shared entities must be in the owned closure:
00272 
00273 void destroy_dependent_ghosts( BulkData & mesh , Entity * entity )
00274 {
00275   for ( ; ; ) {
00276     PairIterRelation rel = entity->relations();
00277 
00278     if ( rel.empty() ) { break ; }
00279 
00280     Entity * e = rel.back().entity();
00281 
00282     if ( e->entity_rank() < entity->entity_rank() ) { break ; }
00283 
00284     ThrowRequireMsg( !in_owned_closure( *e , mesh.parallel_rank()),
00285         "Entity " << print_entity_key(e) << " should not be in closure." );
00286 
00287     destroy_dependent_ghosts( mesh , e );
00288   }
00289 
00290   mesh.destroy_entity( entity );
00291 }
00292 
00293 // Entities with sharing information that are not in the owned closure
00294 // have been modified such that they are no longer shared.
00295 // These may no longer be needed or may become ghost entities.
00296 // There is not enough information so assume they are to be deleted
00297 // and let these entities be re-ghosted if they are needed.
00298 
00299 // Open question: Should an owned and shared entity that does not
00300 // have an upward relation to an owned entity be destroyed so that
00301 // ownership transfers to another process?
00302 
00303 void resolve_shared_removed_from_owned_closure( BulkData & mesh )
00304 {
00305   for ( std::vector<Entity*>::const_reverse_iterator
00306         i =  mesh.entity_comm().rbegin() ;
00307         i != mesh.entity_comm().rend() ; ++i) {
00308 
00309     Entity * entity = *i ;
00310 
00311     if ( ! entity->sharing().empty() &&
00312          ! in_owned_closure( *entity , mesh.parallel_rank() ) ) {
00313 
00314       destroy_dependent_ghosts( mesh , entity );
00315     }
00316   }
00317 }
00318 
00319 }
00320 
00321 // Resolve modifications for shared entities:
00322 // If not locally destroyed and remotely modified
00323 // then set to locally modified.
00324 // If remotely destroyed then determine the new owner.
00325 //
00326 // Post condition:
00327 //  Shared entities are in-sync with respect to modification state.
00328 //  Shared communication lists are updated to reflect all deletions.
00329 //  Ownership has been re-assigned as necessary for deletion
00330 //  of shared entities.
00331 
void BulkData::internal_resolve_shared_modify_delete()
{
  Trace_("stk::mesh::BulkData::internal_resolve_shared_modify_delete");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  // First destroy shared entities that fell out of the owned closure;
  // they can be re-ghosted later if still needed.
  resolve_shared_removed_from_owned_closure( *this );

  std::vector< EntityProcState > remote_mod ;

  // Communicate entity modification state for shared entities
  // the resulting vector is sorted by entity and process.
  const bool communicate_shared = true ;
  communicate_entity_modification( *this , communicate_shared , remote_mod );

  // We iterate backwards over remote_mod to ensure that we hit the
  // higher-ranking entities first.
  // NOTE: the outer loop does NOT advance 'i'; the inner loop consumes
  // all adjacent entries for the current entity (remote_mod is sorted).
  for ( std::vector<EntityProcState>::reverse_iterator
        i = remote_mod.rbegin(); i != remote_mod.rend() ; ) {

    Entity * const entity        = i->entity_proc.first ;
    const bool locally_destroyed = EntityLogDeleted == entity->log_query();
    bool remote_owner_destroyed  = false;

    // Iterate over all of this entity's remote changes
    for ( ; i != remote_mod.rend() && i->entity_proc.first == entity ; ++i ) {

      const unsigned remote_proc    = i->entity_proc.second ;
      const bool remotely_destroyed = EntityLogDeleted == i->state ;

      // When a shared entity is remotely modified or destroyed
      // then the local copy is also modified.  This modification
      // status is applied to all related higher ranking entities.

      if ( ! locally_destroyed ) {
        m_entity_repo.log_modified( *entity );
      }

      // A shared entity is being deleted on the remote process.
      // Remove it from the sharing communication list.
      // Ownership changes are processed later, but we'll need
      // to know if the remote owner destroyed the entity in order
      // to correctly resolve ownership (it is not sufficient to just
      // look at the comm list of the entity since there is no
      // guarantee that the comm list is correct or up-to-date).

      if ( remotely_destroyed ) {
        // ghost_id 0 is the sharing comm entry.
        m_entity_repo.erase_comm_info( *entity, EntityCommInfo(0,remote_proc) );

        // check if owner is destroying
        if ( entity->owner_rank() == remote_proc ) {
          remote_owner_destroyed = true ;
        }
      }
    }

    // Have now processed all remote changes knowledge for this entity.

    // Sharing list after the erasures above.
    PairIterEntityComm new_sharing = entity->sharing();
    const bool   exists_somewhere = ! ( remote_owner_destroyed &&
                                        locally_destroyed &&
                                        new_sharing.empty() );

    // If the entity has been deleted everywhere, nothing left to do
    if ( exists_somewhere ) {

      const bool old_local_owner = m_parallel_rank == entity->owner_rank();

      // Giving away ownership to another process in the sharing list:
      const bool give_ownership = locally_destroyed && old_local_owner ;

      // If we are giving away ownership or the remote owner destroyed
      // the entity, then we need to establish a new owner
      if ( give_ownership || remote_owner_destroyed ) {

        const unsigned new_owner = determine_new_owner( *entity );

        m_entity_repo.set_entity_owner_rank( *entity, new_owner );
        m_entity_repo.set_entity_sync_count( *entity, m_sync_count );
      }

      if ( ! locally_destroyed ) {

        PartVector add_part , remove_part ;

        if ( new_sharing.empty() ) {
          // Is no longer shared, remove the shared part.
          remove_part.push_back(& m_mesh_meta_data.globally_shared_part());
        }

        const bool new_local_owner = m_parallel_rank == entity->owner_rank();

        const bool local_claimed_ownership =
          ( ! old_local_owner && new_local_owner );

        if ( local_claimed_ownership ) {
          // Changing remotely owned to locally owned
          add_part.push_back( & m_mesh_meta_data.locally_owned_part() );
        }

        if ( ! add_part.empty() || ! remove_part.empty() ) {
          internal_change_entity_parts( *entity , add_part , remove_part );
        }
      } // if ( ! locally_destroyed )
    } // if ( exists_somewhere )
  } // remote mod loop

  // Erase all sharing communication lists for Destroyed entities:
  for ( std::vector<Entity*>::const_reverse_iterator
        i = entity_comm().rbegin() ; i != entity_comm().rend() ; ++i) {
    Entity * entity = *i ;

    if ( EntityLogDeleted == entity->log_query() ) {
      // m_ghosting[0] is the SHARED communication
      m_entity_repo.erase_ghosting( *entity , *m_ghosting[0] );
    }
  }
}
00450 
00451 
00452 
00453 //----------------------------------------------------------------------
00454 // Resolve modifications for ghosted entities:
00455 // If a ghosted entity is modified or destroyed on the owning
00456 // process then the ghosted entity must be destroyed.
00457 //
00458 // Post condition:
00459 //  Ghosted entities of modified or deleted entities are destroyed.
00460 //  Ghosted communication lists are cleared to reflect all deletions.
00461 
void BulkData::internal_resolve_ghosted_modify_delete()
{
  Trace_("stk::mesh::BulkData::internal_resolve_ghosted_modify_delete");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");
  // Resolve modifications for ghosted entities:

  std::vector<EntityProcState > remote_mod ;

  // Communicate entity modification state for ghost entities
  const bool communicate_shared = false ;
  communicate_entity_modification( *this , communicate_shared , remote_mod );

  const size_t ghosting_count = m_ghosting.size();

  // Per-ghosting flag, set when that ghosting's comm info changed; used
  // at the end to bump sync counts (int, not bool, for all_reduce_sum).
  std::vector< int > ghosting_change_flags( ghosting_count , 0 );

  // We iterate backwards over remote_mod to ensure that we hit the
  // higher-ranking entities first. This is important because higher-ranking
  // entities like element must be deleted before the nodes they have are
  // deleted.
  for ( std::vector<EntityProcState>::reverse_iterator
        i = remote_mod.rbegin(); i != remote_mod.rend() ; ++i ) {
    Entity *       entity       = i->entity_proc.first ;
    const unsigned remote_proc  = i->entity_proc.second ;
    const bool     local_owner  = entity->owner_rank() == m_parallel_rank ;
    const bool remotely_destroyed = EntityLogDeleted == i->state ;
    const bool locally_destroyed  = EntityLogDeleted == entity->log_query();

    if ( local_owner ) { // Sending to 'remote_proc' for ghosting

      if ( remotely_destroyed ) {

        // remove from ghost-send list

        for ( size_t j = ghosting_count ; j-- ; ) {
          if ( m_entity_repo.erase_comm_info( *entity, EntityCommInfo( j , remote_proc ) ) ) {
            ghosting_change_flags[ j ] = true ;
          }
        }
      }

      // Remotely modified ghosts are ignored

    }
    else { // Receiving from 'remote_proc' for ghosting

      // Owner modified or destroyed, must locally destroy.

      // Flag every ghosting this entity participates in as changed.
      for ( PairIterEntityComm ec = entity->comm() ; ! ec.empty() ; ++ec ) {
        ghosting_change_flags[ ec->ghost_id ] = true ;
      }

      // This is a receive ghost so the only communication information
      // is the ghosting information, can clear it all out.
      m_entity_repo.comm_clear( *entity );

      if ( ! locally_destroyed ) {

        // If mesh modification causes a ghost entity to become
        // a member of an owned-closure then do not automatically
        // destroy it.  The new sharing status will be resolved
        // in 'internal_resolve_parallel_create'.

        if ( ! in_owned_closure( *entity , m_parallel_rank ) ) {

          const bool destroy_entity_successful = destroy_entity(entity);
          ThrowRequireMsg(destroy_entity_successful,
              "Could not destroy ghost entity " << print_entity_key(entity));
        }
      }
    }
  } // end loop on remote mod

  // Erase all ghosting communication lists for:
  // 1) Destroyed entities.
  // 2) Owned and modified entities.

  for ( std::vector<Entity*>::const_reverse_iterator
        i = entity_comm().rbegin() ; i != entity_comm().rend() ; ++i) {

    Entity & entity = **i ;

    const bool locally_destroyed = EntityLogDeleted == entity.log_query();
    const bool locally_owned_and_modified =
      EntityLogModified == entity.log_query() &&
      m_parallel_rank   == entity.owner_rank() ;

    if ( locally_destroyed || locally_owned_and_modified ) {

      // NOTE(review): this loop includes j == 0, which elsewhere in this
      // file is described as the SHARED communication list — confirm that
      // erasing it here for owned-and-modified entities is intended.

      for ( size_t j = ghosting_count ; j-- ; ) {
        if ( m_entity_repo.erase_ghosting( entity, *m_ghosting[j] ) ) {
          ghosting_change_flags[ j ] = true ;
        }
      }
    }
  }

  // Globally combine the change flags so every process bumps the sync
  // count of a ghosting that changed anywhere.
  std::vector< int > ghosting_change_flags_global( ghosting_count , 0 );

  all_reduce_sum( m_parallel_machine ,
                  & ghosting_change_flags[0] ,
                  & ghosting_change_flags_global[0] ,
                  ghosting_change_flags.size() );

  for ( unsigned ic = 0 ; ic < ghosting_change_flags_global.size() ; ++ic ) {
    if ( ghosting_change_flags_global[ic] ) {
      m_ghosting[ic]->m_sync_count = m_sync_count ;
    }
  }
}
00575 
00576 //----------------------------------------------------------------------
00577 
00578 // Postconditions:
00579 //  * All shared entities have parallel-consistent owner
00580 //  * Part membership of shared entities is up-to-date
00581 //  * m_entity_comm is up-to-date
void BulkData::internal_resolve_parallel_create()
{
  Trace_("stk::mesh::BulkData::internal_resolve_parallel_create");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  std::vector<Entity*> shared_modified ;

  // Update the parallel index and
  // output shared and modified entities.
  internal_update_distributed_index( shared_modified );

  // ------------------------------------------------------------
  // Claim ownership on all shared_modified entities that I own
  // and which were not created in this modification cycle. All
  // sharing procs will need to be informed of this claim.
  CommAll comm_all( m_parallel_machine );

  // Two-phase CommAll protocol: phase 0 packs to size the buffers and
  // then allocates; phase 1 packs the identical data and communicates.
  for ( int phase = 0; phase < 2; ++phase ) {
    for ( std::vector<Entity*>::iterator
            i = shared_modified.begin() ; i != shared_modified.end() ; ++i ) {
      Entity & entity = **i ;
      if ( entity.owner_rank() == m_parallel_rank &&
           entity.log_query()  != EntityLogCreated ) {

        // Tell every sharing process that this process claims ownership.
        for ( PairIterEntityComm
                jc = entity.sharing() ; ! jc.empty() ; ++jc ) {
          comm_all.send_buffer( jc->proc ) .pack<EntityKey>( entity.key() );
        }
      }
    }

    if (phase == 0) { //allocation phase
      comm_all.allocate_buffers( m_parallel_size / 4 );
    }
    else { // communication phase
      comm_all.communicate();
    }
  }

  // Record the claimed owner for every key received; the sender 'p' is
  // the claiming process.
  for ( unsigned p = 0 ; p < m_parallel_size ; ++p ) {
    CommBuffer & buf = comm_all.recv_buffer( p );
    EntityKey key ;
    while ( buf.remaining() ) {
      buf.unpack<EntityKey>( key );

      Entity & entity = * get_entity( key );

      // Set owner, will correct part membership later
      m_entity_repo.set_entity_owner_rank( entity, p);
    }
  }

  // ------------------------------------------------------------
  // Update shared created entities.
  // - Revise ownership to selected processor
  // - Update sharing.
  // - Work backward so the 'in_owned_closure' function
  //   can evaluate related higher ranking entities.

  std::ostringstream error_msg ;
  int error_flag = 0 ;

  PartVector shared_part , owned_part ;
  shared_part.push_back( & m_mesh_meta_data.globally_shared_part() );
  owned_part.push_back(  & m_mesh_meta_data.locally_owned_part() );

  std::vector<Entity*>::const_reverse_iterator iend = shared_modified.rend();
  for ( std::vector<Entity*>::const_reverse_iterator
        i = shared_modified.rbegin() ; i != iend ; ++i) {

    Entity * entity = *i ;

    if ( entity->owner_rank() == m_parallel_rank &&
         entity->log_query() == EntityLogCreated ) {

      // Created and not claimed by an existing owner

      const unsigned new_owner = determine_new_owner( *entity );

      m_entity_repo.set_entity_owner_rank( *entity, new_owner);
    }

    if ( entity->owner_rank() != m_parallel_rank ) {
      // Do not own it and still have it.
      // Remove the locally owned, add the globally_shared
      m_entity_repo.set_entity_sync_count( *entity, m_sync_count);
      internal_change_entity_parts( *entity , shared_part /*add*/, owned_part /*remove*/);
    }
    else if ( ! entity->sharing().empty() ) {
      // Own it and has sharing information.
      // Add the globally_shared
      internal_change_entity_parts( *entity , shared_part /*add*/, PartVector() /*remove*/ );
    }
    else {
      // Own it and does not have sharing information.
      // Remove the globally_shared
      internal_change_entity_parts( *entity , PartVector() /*add*/, shared_part /*remove*/);
    }

    // Newly created shared entity had better be in the owned closure
    if ( ! in_owned_closure( *entity , m_parallel_rank ) ) {
      if ( 0 == error_flag ) {
        error_flag = 1 ;
        error_msg
          << "\nP" << m_parallel_rank << ": " << " FAILED\n"
          << "  The following entities were declared on multiple processors,\n"
          << "  cannot be parallel-shared, and were declared with"
          << "  parallel-ghosting information. {\n";
      }
      error_msg << "    " << print_entity_key(entity);
      error_msg << " also declared on" ;
      for ( PairIterEntityComm ec = entity->sharing(); ! ec.empty() ; ++ec ) {
        error_msg << " P" << ec->proc ;
      }
      error_msg << "\n" ;
    }
  }

  // Parallel-consistent error checking of above loop
  if ( error_flag ) { error_msg << "}\n" ; }
  all_reduce( m_parallel_machine , ReduceMax<1>( & error_flag ) );
  ThrowErrorMsgIf( error_flag, error_msg.str() );

  // ------------------------------------------------------------
  // Update m_entity_comm based on shared_modified

  const size_t n_old = m_entity_comm.size();

  // Append the new entries, then merge the two sorted runs in place and
  // drop duplicates, keeping m_entity_comm sorted by EntityLess.
  m_entity_comm.insert( m_entity_comm.end() ,
                        shared_modified.begin() , shared_modified.end() );

  std::inplace_merge( m_entity_comm.begin() ,
                      m_entity_comm.begin() + n_old ,
                      m_entity_comm.end() ,
                      EntityLess() );

  {
    std::vector<Entity*>::iterator i =
      std::unique( m_entity_comm.begin() , m_entity_comm.end() );

    m_entity_comm.erase( i , m_entity_comm.end() );
  }
}
00726 
00727 //----------------------------------------------------------------------
00728 
00729 bool BulkData::modification_end()
00730 {
00731   Trace_("stk::mesh::BulkData::modification_end");
00732 
00733   return internal_modification_end( true );
00734 }
00735 
00736 #if 0
00737 
00738 namespace {
00739 
00740 // Very, very handy for debugging parallel resolution...
00741 
// Debug-only helper (compiled out via the surrounding #if 0): dump each
// communicated entity's key, owner, modification state, and its
// (ghost_id, proc) comm entries to stdout when 'doit' is set.
void print_comm_list( const BulkData & mesh , bool doit )
{
  if ( doit ) {
    // Build the whole report in a stringstream first so the output is
    // emitted as one block per process.
    std::ostringstream msg ;

    msg << std::endl ;

    for ( std::vector<Entity*>::const_iterator
          i =  mesh.entity_comm().begin() ;
          i != mesh.entity_comm().end() ; ++i ) {

      Entity & entity = **i ;
      msg << "P" << mesh.parallel_rank() << ": " ;

      print_entity_key( msg , MetaData::get(mesh) , entity.key() );

      msg << " owner(" << entity.owner_rank() << ")" ;

      // Modification state: mod / del / unchanged (padded for alignment).
      if ( EntityLogModified == entity.log_query() ) { msg << " mod" ; }
      else if ( EntityLogDeleted == entity.log_query() ) { msg << " del" ; }
      else { msg << "    " ; }

      for ( PairIterEntityComm ec = entity.comm(); ! ec.empty() ; ++ec ) {
        msg << " (" << ec->ghost_id << "," << ec->proc << ")" ;
      }
      msg << std::endl ;
    }

    std::cout << msg.str();
  }
}
00773 
00774 }
00775 
00776 #endif
00777 
// Finish a modification cycle: resolve all parallel side effects of the
// accumulated local mesh changes and return the mesh to the SYNCHRONIZED
// state.  The sequence of resolution steps below is order-dependent.
//
// \param regenerate_aura  if true (and running in parallel), rebuild the
//                         ghosting aura around shared entities after the
//                         shared/ghost/create resolutions.
// \return false if the mesh was already synchronized (nothing to do),
//         true otherwise.
bool BulkData::internal_modification_end( bool regenerate_aura )
{
  Trace_("stk::mesh::BulkData::internal_modification_end");

  // No open modification cycle: nothing to resolve.
  if ( m_sync_state == SYNCHRONIZED ) { return false ; }

  if (parallel_size() > 1) {
    // Resolve modification or deletion of shared entities
    // which can cause deletion of ghost entities.
    internal_resolve_shared_modify_delete();

    // Resolve modification or deletion of ghost entities
    // by destroying ghost entities that have been touched.
    internal_resolve_ghosted_modify_delete();

    // Resolution of shared and ghost modifications can empty
    // the communication information for entities.
    // If there is no communication information then the
    // entity must be removed from the communication list.
    {
      // Null-out entries whose comm info is now empty, then compact the
      // vector with the remove/erase idiom (erase only if anything changed).
      std::vector<Entity*>::iterator i = m_entity_comm.begin();
      bool changed = false ;
      for ( ; i != m_entity_comm.end() ; ++i ) {
        if ( (*i)->comm().empty() ) { *i = NULL ; changed = true ; }
      }
      if ( changed ) {
        i = std::remove( m_entity_comm.begin() ,
                         m_entity_comm.end() , (Entity *) NULL );
        m_entity_comm.erase( i , m_entity_comm.end() );
      }
    }

    // Resolve creation of entities: discover sharing and set unique ownership.
    internal_resolve_parallel_create();

    // Resolve part membership for shared entities.
    // This occurs after resolving creation so created and shared
    // entities are resolved along with previously existing shared entities.
    internal_resolve_shared_membership();

    // Regenerate the ghosting aura around all shared mesh entities.
    if ( regenerate_aura ) { internal_regenerate_shared_aura(); }

    // ------------------------------
    // Verify parallel consistency of mesh entities.
    // Unique ownership, communication lists, sharing part membership,
    // application part membership consistency.
    std::ostringstream msg ;
    bool is_consistent = true;
    is_consistent = comm_mesh_verify_parallel_consistency( *this , msg );
    ThrowErrorMsgIf( !is_consistent, msg.str() );
  }
  else {
    // Serial (single-process) run: no shared/ghost resolution required,
    // only the distributed index is updated.
    std::vector<Entity*> shared_modified ;
    internal_update_distributed_index( shared_modified );
  }

  // ------------------------------
  // The very last operation performed is to sort the bucket entities.
  // This does not change the entities, relations, or field data.
  // However, it insures that the ordering of entities and buckets
  // is independent of the order in which a set of changes were
  // performed.
  m_bucket_repository.internal_sort_bucket_entities();

  // ------------------------------

  m_sync_state = SYNCHRONIZED ;

  return true ;
}
00849 
00850 //----------------------------------------------------------------------
00851 //----------------------------------------------------------------------
00852 
// Assumed mesh_meta_data_ordinal() values for the three predefined parts
// (universal, locally-owned, globally-shared).  These assumptions are
// checked at runtime in internal_resolve_shared_membership().
enum { PART_ORD_UNIVERSAL = 0 };
enum { PART_ORD_OWNED     = 1 };
enum { PART_ORD_SHARED    = 2 };
00856 
00857 namespace {
00858 
00859 void pack_induced_memberships( CommAll & comm ,
00860                                const std::vector<Entity*> & entity_comm )
00861 {
00862   for ( std::vector<Entity*>::const_iterator
00863         i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
00864 
00865     Entity & entity = **i ;
00866 
00867     if ( in_shared( entity , entity.owner_rank() ) ) {
00868       // Is shared with owner, send to owner.
00869 
00870       PartVector empty , induced ;
00871 
00872       induced_part_membership( entity , empty , induced );
00873 
00874       CommBuffer & buf = comm.send_buffer( entity.owner_rank() );
00875 
00876       unsigned tmp = induced.size();
00877 
00878       buf.pack<unsigned>( tmp );
00879 
00880       for ( PartVector::iterator
00881             j = induced.begin() ; j != induced.end() ; ++j ) {
00882         tmp = (*j)->mesh_meta_data_ordinal();
00883         buf.pack<unsigned>( tmp );
00884       }
00885     }
00886   }
00887 }
00888 
00889 void generate_send_list( const size_t sync_count ,
00890                          const unsigned p_rank ,
00891                          const std::vector<Entity*>    & entity_comm ,
00892                                std::vector<EntityProc> & send_list )
00893 {
00894   for ( std::vector<Entity*>::const_iterator
00895         i = entity_comm.begin() ; i != entity_comm.end() ; ++i ) {
00896 
00897     Entity & entity = **i ;
00898 
00899     if ( entity.owner_rank() == p_rank &&
00900          entity.synchronized_count() == sync_count ) {
00901 
00902       for ( PairIterEntityComm ec = entity.comm() ; ! ec.empty() ; ++ec ) {
00903         EntityProc tmp( & entity , ec->proc );
00904         send_list.push_back( tmp );
00905       }
00906     }
00907   }
00908 
00909   {
00910     std::sort( send_list.begin() , send_list.end() , EntityLess() );
00911     std::vector<EntityProc>::iterator i =
00912       std::unique( send_list.begin() , send_list.end() );
00913     send_list.erase( i , send_list.end() );
00914   }
00915 }
00916 
00917 void pack_part_memberships( CommAll & comm ,
00918                             const std::vector<EntityProc> & send_list )
00919 {
00920   for ( std::vector<EntityProc>::const_iterator
00921         i = send_list.begin() ; i != send_list.end() ; ++i ) {
00922 
00923     Entity & entity = * i->first ;
00924 
00925     std::pair<const unsigned *, const unsigned *>
00926       part_ord = entity.bucket().superset_part_ordinals();
00927 
00928     // I am the owner; therefore, the first three members are
00929     // universal, uses, and owns.  Don't send them.
00930 
00931     // I am the owner.  The first two memberships are
00932     // universal_part and locally_owned_part.  The third
00933     // membership may be globally_shared_part ;
00934 
00935     const unsigned count_all  = part_ord.second - part_ord.first ;
00936     const unsigned count_skip =
00937       ( 2 < count_all && part_ord.first[2] == PART_ORD_SHARED ) ? 3 : 2 ;
00938 
00939     const unsigned count_send = count_all - count_skip ;
00940 
00941     const unsigned * const start_send = part_ord.first + count_skip ;
00942 
00943     comm.send_buffer( i->second ).pack<EntityKey>( entity.key() )
00944                                  .pack<unsigned>( count_send )
00945                                  .pack<unsigned>( start_send , count_send );
00946   }
00947 }
00948 
00949 }
00950 
00951 //  Mesh entity membership changes must be synchronized among
00952 //  processes that share mesh entities and propagated to
00953 //  processes that ghost copies of the mesh entities.
00954 //
00955 //  Precondition: correct shared and ghosting lists.
00956 //
00957 //  Part memberships may have been added or removed
00958 //  either explicitly or indirectly via entity relationships
00959 //  being added or removed.
00960 
// Synchronize part memberships across processes in two phases:
//   Phase 1: every sharing process sends its induced memberships to the
//            entity's owner; the owner merges them and removes induced
//            parts that no sharing process still induces.
//   Phase 2: owners send the authoritative membership of each entity
//            that changed this cycle to every process holding a copy
//            (shared or ghosted), which then mirrors it.
// Precondition (per the comment block above): correct shared and
// ghosting lists.  Must only be called when parallel_size() > 1.
void BulkData::internal_resolve_shared_membership()
{
  Trace_("stk::mesh::BulkData::internal_resolve_shared_membership");

  ThrowRequireMsg(parallel_size() > 1, "Do not call this in serial");

  const MetaData & meta  = m_mesh_meta_data ;
  ParallelMachine p_comm = m_parallel_machine ;
  const unsigned  p_rank = m_parallel_rank ;
  const unsigned  p_size = m_parallel_size ;
  const PartVector & all_parts = meta.get_parts();

  const Part & part_universal = meta.universal_part();
  const Part & part_owned  = meta.locally_owned_part();
  const Part & part_shared = meta.globally_shared_part();

  // Quick verification of part ordinal assumptions
  // (the PART_ORD_* enum values declared above).

  ThrowRequireMsg(PART_ORD_UNIVERSAL == part_universal.mesh_meta_data_ordinal(),
                  "Universal part ordinal is wrong, expected "
                  << PART_ORD_UNIVERSAL << ", got: "
                  << part_universal.mesh_meta_data_ordinal());

  ThrowRequireMsg(PART_ORD_OWNED == part_owned.mesh_meta_data_ordinal(),
                  "Owned part ordinal is wrong, expected "
                  << PART_ORD_OWNED << ", got: "
                  << part_owned.mesh_meta_data_ordinal());

  ThrowRequireMsg(PART_ORD_SHARED == part_shared.mesh_meta_data_ordinal(),
                  "Shared part ordinal is wrong, expected "
                  << PART_ORD_SHARED << ", got: "
                  << part_shared.mesh_meta_data_ordinal());

  //  Shared entities may have been modified due to relationship changes.
  //  Send just the current induced memberships from the sharing to
  //  the owning processes.
  {
    CommAll comm( p_comm );

    // Pack twice: the first pass lets CommAll size the buffers (before
    // allocate_buffers), the second pass fills them.
    pack_induced_memberships( comm , m_entity_comm );

    comm.allocate_buffers( p_size / 4 );

    pack_induced_memberships( comm , m_entity_comm );

    comm.communicate();

    for ( std::vector<Entity*>::iterator
          i = m_entity_comm.begin() ; i != m_entity_comm.end() ; ++i ) {

      Entity & entity = **i ;

      if ( entity.owner_rank() == p_rank ) {
        // Receiving from all sharing processes

        PartVector empty , induced_parts , current_parts , remove_parts ;

        // Start from the locally-induced parts, then merge in the
        // induced parts reported by each sharing process.
        induced_part_membership( entity , empty , induced_parts );

        for ( PairIterEntityComm
              ec = entity.sharing() ; ! ec.empty() ; ++ec ) {

          CommBuffer & buf = comm.recv_buffer( ec->proc );

          // Message layout: <count> followed by <count> part ordinals.
          unsigned count = 0 ; buf.unpack<unsigned>( count );
          for ( unsigned j = 0 ; j < count ; ++j ) {
            unsigned part_ord = 0 ; buf.unpack<unsigned>( part_ord );
            insert( induced_parts , * all_parts[ part_ord ] );
          }
        }

        // Remove any part that is an induced part but is not
        // in the induced parts list.

        entity.bucket().supersets( current_parts );

        for ( PartVector::iterator
              p = current_parts.begin() ; p != current_parts.end() ; ++p ) {
          if ( membership_is_induced( **p , entity.entity_rank() ) &&
               ! contain( induced_parts , **p ) ) {
            remove_parts.push_back( *p );
          }
        }

        internal_change_entity_parts( entity, induced_parts, remove_parts );
      }
    }
  }

  //------------------------------
  // The owners have complete knowledge of memberships.
  // Send membership information to sync the shared and ghosted copies.
  // Only need to do this for entities that have actually changed.

  {
    std::vector<EntityProc> send_list ;

    generate_send_list( m_sync_count, p_rank, m_entity_comm, send_list);

    CommAll comm( p_comm );

    // Same two-pass pack idiom: size the buffers, then fill them.
    pack_part_memberships( comm , send_list );

    comm.allocate_buffers( p_size / 4 );

    pack_part_memberships( comm , send_list );

    comm.communicate();

    for ( unsigned p = 0 ; p < p_size ; ++p ) {
      CommBuffer & buf = comm.recv_buffer( p );
      while ( buf.remaining() ) {

        PartVector owner_parts , current_parts , remove_parts ;

        // Message layout: <key> <count> <count part ordinals>.
        EntityKey key ; buf.unpack<EntityKey>( key );
        unsigned count = 0 ; buf.unpack<unsigned>( count );
        for ( unsigned j = 0 ; j < count ; ++j ) {
          unsigned part_ord = 0 ; buf.unpack<unsigned>( part_ord );
          insert( owner_parts , * all_parts[ part_ord ] );
        }

        // Any current part that is not a member of owners_parts
        // must be removed.

        // NOTE(review): the 'true' flag presumably means the entity is
        // required to be found; 'entity' is dereferenced unchecked below
        // - confirm against find_entity's contract.
        Entity * const entity = find_entity(m_entity_comm, key, true);

        entity->bucket().supersets( current_parts );

        // The implied universal/owned/shared memberships are never
        // removed here; everything else must match the owner's list.
        for ( PartVector::iterator
              ip = current_parts.begin() ; ip != current_parts.end() ; ++ip ) {
          Part * const part = *ip ;
          const unsigned part_ord = part->mesh_meta_data_ordinal();
          if ( PART_ORD_UNIVERSAL != part_ord &&
               PART_ORD_OWNED     != part_ord &&
               PART_ORD_SHARED    != part_ord &&
               ! contain( owner_parts , *part ) ) {
            remove_parts.push_back( part );
          }
        }

        internal_change_entity_parts( *entity , owner_parts , remove_parts );
      }
    }
  }
}
01107 
01108 } // namespace mesh
01109 } // namespace stk
01110 
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends