// Sierra Toolkit "Version of the Day" source listing:
//   BucketRepository.cpp
00001 /*------------------------------------------------------------------------*/
00002 /*                 Copyright 2010 Sandia Corporation.                     */
00003 /*  Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive   */
00004 /*  license for use of this work by or on behalf of the U.S. Government.  */
00005 /*  Export of this program may require a license from the                 */
00006 /*  United States Government.                                             */
00007 /*------------------------------------------------------------------------*/
00008 
00009 #include <sstream>
00010 #include <cstdlib>
00011 #include <stdexcept>
00012 
00013 #include <stk_mesh/baseImpl/BucketRepository.hpp>
00014 #include <stk_mesh/baseImpl/EntityRepository.hpp>
00015 #include <stk_mesh/base/BulkData.hpp>
00016 #include <stk_mesh/base/Bucket.hpp>
00017 #include <stk_mesh/base/Trace.hpp>
00018 
00019 namespace stk {
00020 namespace mesh {
00021 namespace impl {
00022 
00023 //----------------------------------------------------------------------
00024 namespace {
00025 
// Allocate 'n' bytes with std::malloc; throws (via ThrowErrorMsgIf)
// instead of returning NULL on failure, so callers never need to check
// the result.  Paired with std::free by whoever owns the returned block.
void * local_malloc( size_t n )
{
  void * const ptr = std::malloc( n );

  ThrowErrorMsgIf( NULL == ptr, "malloc of size " << n << " failed" );

  return ptr ;
}
00034 
00035 
00036 } // namespace
00037 
00038 //----------------------------------------------------------------------
00039 
00040 namespace {
00041 
// Round 'nb' up to the next multiple of the 16-byte alignment boundary.
// Note: the result is returned as 'unsigned' (callers use it for small
// intra-allocation offsets).
inline unsigned align( size_t nb )
{
  enum { BYTE_ALIGN = 16 };
  const size_t remainder = nb % BYTE_ALIGN ;
  return remainder ? nb + ( BYTE_ALIGN - remainder ) : nb ;
}
00049 
00050 const FieldBase::Restriction & empty_field_restriction()
00051 {
00052   static const FieldBase::Restriction empty ;
00053   return empty ;
00054 }
00055 
// Look up the field restriction (sizing information) that applies to
// 'field' for an entity of rank 'erank' that is a member of the parts
// listed in part_ord[0..num_part_ord).
//
// The restriction vector is sorted, so each (erank, part ordinal) pair
// is located with lower_bound; the search resumes from the previous
// position because part_ord[] is also ordered.  The first matching
// restriction is kept as 'dim'.  If a later part supplies a restriction
// with a different stride the field is inconsistently dimensioned for
// this part combination and an error naming both parts is thrown
// ('method' prefixes the message).
//
// Returns the shared empty restriction when no part restricts the field.
const FieldBase::Restriction & dimension( const FieldBase & field ,
                                          EntityRank erank ,
                                          const unsigned num_part_ord ,
                                          const unsigned part_ord[] ,
                                          const char * const method )
{
  const FieldBase::Restriction & empty = empty_field_restriction();
  const FieldBase::Restriction * dim = & empty ;

  const std::vector<FieldBase::Restriction> & dim_map = field.restrictions();
  const std::vector<FieldBase::Restriction>::const_iterator iend = dim_map.end();
        std::vector<FieldBase::Restriction>::const_iterator ibeg = dim_map.begin();

  for ( PartOrdinal i = 0 ; i < num_part_ord && iend != ibeg ; ++i ) {

    const FieldRestriction restr(erank,part_ord[i]);

    // Resume the binary search from the previous hit; earlier entries
    // cannot match later (larger) part ordinals.
    ibeg = std::lower_bound( ibeg , iend , restr );

    if ( (iend != ibeg) && (*ibeg == restr) ) {
      if ( dim == & empty ) { dim = & *ibeg ; }

      if ( ibeg->not_equal_stride(*dim) ) {
        // Two parts impose different strides on the same field: report
        // both conflicting parts and fail.
        Part & p_old = MetaData::get(field).get_part( ibeg->part_ordinal() );
        Part & p_new = MetaData::get(field).get_part( dim->part_ordinal() );

        std::ostringstream msg ;
        msg << method ;
        msg << " FAILED WITH INCOMPATIBLE DIMENSIONS FOR " ;
        msg << field ;
        msg << " Part[" << p_old.name() ;
        msg << "] and Part[" << p_new.name() ;
        msg << "]" ;

        ThrowErrorMsg( msg.str() );
      }
    }
  }

  return *dim ;
}
00098 
00099 } // namespace
00100 
00101 //----------------------------------------------------------------------
00102 
00103 
// Construct the repository with one (initially empty) bucket vector per
// entity rank.  The nil bucket is created lazily by declare_nil_bucket().
BucketRepository::BucketRepository(
    BulkData & mesh,
    unsigned bucket_capacity,
    unsigned entity_rank_count,
    EntityRepository & entity_repo
    )
  :m_mesh(mesh),
   m_bucket_capacity(bucket_capacity),
   m_buckets(entity_rank_count),
   m_nil_bucket(NULL),
   m_entity_repo(entity_repo)
{
}
00117 
00118 
00119 BucketRepository::~BucketRepository()
00120 {
00121   // Destroy buckets, which were *not* allocated by the set.
00122 
00123   try {
00124     for ( std::vector< std::vector<Bucket*> >::iterator
00125           i = m_buckets.end() ; i != m_buckets.begin() ; ) {
00126       try {
00127         std::vector<Bucket*> & kset = *--i ;
00128 
00129         while ( ! kset.empty() ) {
00130           try { destroy_bucket( kset.back() ); } catch(...) {}
00131           kset.pop_back();
00132         }
00133         kset.clear();
00134       } catch(...) {}
00135     }
00136     m_buckets.clear();
00137   } catch(...) {}
00138 
00139   try { if ( m_nil_bucket ) destroy_bucket( m_nil_bucket ); } catch(...) {}
00140 }
00141 
00142 
00143 //----------------------------------------------------------------------
00144 // The current 'last' bucket in a family is to be deleted.
00145 // The previous 'last' bucket becomes the new 'last' bucket in the family.
00146 
// Remove 'bucket_to_be_deleted' from this repository and release it.
// Preconditions (enforced below): the bucket is empty and is the current
// 'last' bucket of its family.  The previous bucket in the sorted bucket
// set becomes the family's new 'last' bucket.
void BucketRepository::destroy_bucket( const unsigned & entity_rank , Bucket * bucket_to_be_deleted )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::destroy_bucket", LOG_BUCKET, bucket_to_be_deleted);

  ThrowRequireMsg(MetaData::get(m_mesh).check_rank(entity_rank),
                  "Entity rank " << entity_rank << " is invalid");

  std::vector<Bucket *> & bucket_set = m_buckets[entity_rank];

  // Get the first bucket in the same family as the bucket being deleted
  Bucket * const first = bucket_to_be_deleted->m_bucketImpl.first_bucket_in_family();

  ThrowRequireMsg( bucket_to_be_deleted->size() == 0,
      "Destroying non-empty bucket " << *(bucket_to_be_deleted->key()) );

  // The first bucket's family pointer records the family's last bucket;
  // only the last bucket of a family may be destroyed here.
  ThrowRequireMsg( bucket_to_be_deleted == first->m_bucketImpl.get_bucket_family_pointer(),
                   "Destroying bucket family") ;

  std::vector<Bucket*>::iterator ik = lower_bound(bucket_set, bucket_to_be_deleted->key());
  ThrowRequireMsg( ik != bucket_set.end() && bucket_to_be_deleted == *ik,
      "Bucket not found in bucket set for entity rank " << entity_rank );

  ik = bucket_set.erase( ik );

  if ( first != bucket_to_be_deleted ) {

    ThrowRequireMsg( ik != bucket_set.begin(),
                     "Where did first bucket go?" );

    // The bucket set is sorted by key, so the element preceding the
    // erased position is the family's new last bucket.
    first->m_bucketImpl.set_last_bucket_in_family( *--ik );

    ThrowRequireMsg ( first->m_bucketImpl.get_bucket_family_pointer()->size() != 0,
                      "TODO: Explain" );
  }

  destroy_bucket( bucket_to_be_deleted );
}
00184 
00185 //----------------------------------------------------------------------
// Release a bucket created by declare_bucket()/declare_nil_bucket():
// the Bucket was placement-new'd at the head of a new[]'d unsigned char
// array, so run the destructor explicitly and delete[] the raw storage.
void BucketRepository::destroy_bucket( Bucket * bucket )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::destroy_bucket", LOG_BUCKET, bucket);

  bucket->~Bucket();
  delete [] reinterpret_cast<unsigned char*>( bucket );
}
00193 
00194 //
00195 //----------------------------------------------------------------------
00196 // The input part ordinals are complete and contain all supersets.
// Create (once) the singleton "nil" bucket: invalid rank, zero capacity,
// no parts, and a field map whose entries are all zero-sized, so every
// field data query against it yields nothing.  Idempotent.
void
BucketRepository::declare_nil_bucket()
{
  TraceIf("stk::mesh::impl::BucketRepository::declare_nil_bucket", LOG_BUCKET);

  if (m_nil_bucket == NULL) {
    unsigned field_count = MetaData::get(m_mesh).get_fields().size();

    //----------------------------------
    // Field map gives NULL for all field data.

    impl::BucketImpl::DataMap * field_map =
      reinterpret_cast<impl::BucketImpl::DataMap*>(
        local_malloc( sizeof(impl::BucketImpl::DataMap) * ( field_count + 1 )));

    // NOTE(review): empty_stride is a local (stack) array whose address
    // is stored into the long-lived field_map entries below — presumably
    // the nil bucket's m_stride is never dereferenced after this function
    // returns; verify before relying on it.
    FieldBase::Restriction::size_type empty_stride[ MaximumFieldDimension ];
    Copy<MaximumFieldDimension>( empty_stride , FieldBase::Restriction::size_type(0) );

    for ( unsigned i = 0 ; i < field_count ; ++i ) {
      field_map[ i ].m_base = 0 ;
      field_map[ i ].m_size = 0 ;
      field_map[ i ].m_stride = empty_stride;
    }
    // Sentinel terminator entry.
    field_map[ field_count ].m_base   = 0 ;
    field_map[ field_count ].m_size   = 0 ;
    field_map[ field_count ].m_stride = NULL ;

    //----------------------------------
    // Allocation size:  sizeof(Bucket) + key_size * sizeof(unsigned);

    const unsigned alloc_size = align( sizeof(Bucket) ) +
                                align( sizeof(unsigned) * 2 );

    // All fields checked and sized, Ready to allocate

    unsigned char * const alloc_ptr = new unsigned char[ alloc_size ];

    unsigned char * ptr = alloc_ptr;

    ptr += align( sizeof( Bucket ) );

    unsigned * const new_key = reinterpret_cast<unsigned *>( ptr );

    // Key layout:
    // { part_count + 1 , { part_ordinals } , family_count }

    new_key[0] = 1 ; // part_count + 1
    new_key[1] = 0 ; // family_count

    // Placement-construct the bucket at the head of the raw allocation;
    // destroy_bucket(Bucket*) undoes this pairing.
    Bucket * bucket =
      new( alloc_ptr ) Bucket( m_mesh , InvalidEntityRank , new_key ,
                              alloc_size , 0 , field_map , NULL );

    // The nil bucket is its own (only) family member.
    bucket->m_bucketImpl.set_bucket_family_pointer( bucket );

    //----------------------------------

    m_nil_bucket = bucket;
  }
}
00257 
00258 
00281 //----------------------------------------------------------------------
00282 // The input part ordinals are complete and contain all supersets.
// Find-or-create a bucket for entities of rank 'arg_entity_rank' that
// belong exactly to the parts in part_ord[0..part_count) (sorted,
// complete with all supersets).  Returns a bucket of that family with
// room for at least one more entity, allocating a new bucket — and, for
// a brand-new family, a new field data map — when necessary.
Bucket *
BucketRepository::declare_bucket(
                        const unsigned arg_entity_rank ,
                        const unsigned part_count ,
                        const unsigned part_ord[] ,
                        const std::vector< FieldBase * > & field_set
                              )
{
  enum { KEY_TMP_BUFFER_SIZE = 64 };

  static const char method[] = "stk::mesh::impl::BucketRepository::declare_bucket" ;
  TraceIf("stk::mesh::impl::BucketRepository::declare_bucket", LOG_BUCKET);

  const unsigned max = ~(0u);
  const size_t   num_fields = field_set.size();

  ThrowRequireMsg(MetaData::get(m_mesh).check_rank(arg_entity_rank),
                  "Entity rank " << arg_entity_rank << " is invalid");

  ThrowRequireMsg( !m_buckets.empty(),
    "m_buckets is empty! Did you forget to initialize MetaData before creating BulkData?");
  std::vector<Bucket *> & bucket_set = m_buckets[ arg_entity_rank ];

  //----------------------------------
  // For performance try not to allocate a temporary.

  unsigned key_tmp_buffer[ KEY_TMP_BUFFER_SIZE ];

  std::vector<unsigned> key_tmp_vector ;

  const unsigned key_size = 2 + part_count ;

  // Use the stack buffer when the key fits; otherwise fall back to the
  // heap-backed vector.
  unsigned * const key =
    ( key_size <= KEY_TMP_BUFFER_SIZE )
    ? key_tmp_buffer
    : ( key_tmp_vector.resize( key_size ) , & key_tmp_vector[0] );

  //----------------------------------
  // Key layout:
  // { part_count + 1 , { part_ordinals } , family_count }
  // Thus family_count = key[ key[0] ]
  //
  // for upper bound search use the maximum key.

  key[ key[0] = part_count + 1 ] = max ;

  {
    unsigned * const k = key + 1 ;
    for ( unsigned i = 0 ; i < part_count ; ++i ) { k[i] = part_ord[i] ; }
  }

  //----------------------------------
  // Bucket family has all of the same parts.
  // Look for the last bucket in this family:

  const std::vector<Bucket*>::iterator ik = lower_bound( bucket_set , key );

  //----------------------------------
  // If a member of the bucket family has space, it is the last one
  // since buckets are kept packed.
  const bool bucket_family_exists =
    ik != bucket_set.begin() && bucket_part_equal( ik[-1]->key() , key );

  Bucket * const last_bucket = bucket_family_exists ? ik[-1] : NULL ;

  Bucket          * bucket    = NULL ;
  impl::BucketImpl::DataMap * field_map = NULL ;

  if ( last_bucket == NULL ) { // First bucket in this family
    key[ key[0] ] = 0 ; // Set the key's family count to zero
  }
  else { // Last bucket present, can it hold one more entity?

    ThrowRequireMsg( last_bucket->size() != 0,
                     "Last bucket should not be empty.");

    // Reuse the existing family's field map.
    field_map = last_bucket->m_bucketImpl.get_field_map();

    const unsigned last_count = last_bucket->key()[ key[0] ];

    const unsigned cap = last_bucket->capacity();

    if ( last_bucket->size() < cap ) {
      bucket = last_bucket ;
    }
    else if ( last_count < max ) {
      key[ key[0] ] = 1 + last_count ; // Increment the key's family count.
    }
    else {
      // ERROR insane number of buckets!
      ThrowRequireMsg( false, "Insanely large number of buckets" );
    }
  }

  //----------------------------------
  // Family's field map does not exist, create it:

  if ( NULL == field_map ) {

    field_map = reinterpret_cast<impl::BucketImpl::DataMap*>(
                local_malloc( sizeof(impl::BucketImpl::DataMap) * ( num_fields + 1 )));

    // Start field data memory after the array of member entity pointers:
    unsigned value_offset = align( sizeof(Entity*) * m_bucket_capacity );

    for ( unsigned i = 0 ; i < num_fields ; ++i ) {
      const FieldBase  & field = * field_set[i] ;

      unsigned num_bytes_per_entity = 0 ;

      // Restriction for this field on this part combination (throws on
      // incompatible strides).
      const FieldBase::Restriction & dim =
        dimension( field, arg_entity_rank, part_count, part_ord, method);

      if ( dim.dimension() ) { // Exists

        const unsigned type_stride = field.data_traits().stride_of ;
        const unsigned field_rank  = field.rank();

        num_bytes_per_entity = type_stride *
          ( field_rank ? dim.stride( field_rank - 1 ) : 1 );
      }

      field_map[i].m_base = value_offset ;
      field_map[i].m_size = num_bytes_per_entity ;
      field_map[i].m_stride = &dim.stride(0);

      value_offset += align( num_bytes_per_entity * m_bucket_capacity );
    }
    // Sentinel entry: m_base records the total field data footprint.
    field_map[ num_fields ].m_base  = value_offset ;
    field_map[ num_fields ].m_size = 0 ;
    field_map[ num_fields ].m_stride = NULL ;
  }

  //----------------------------------

  if ( NULL == bucket ) {

    // Required bucket does not exist, must allocate and insert
    //
    // Allocation size:
    //   sizeof(Bucket) +
    //   key_size * sizeof(unsigned) +
    //   sizeof(Entity*) * capacity() +
    //   sum[number_of_fields]( fieldsize * capacity )
    //
    // The field_map[ num_fields ].m_base spans
    //   sizeof(Entity*) * capacity() +
    //   sum[number_of_fields]( fieldsize * capacity )

    const unsigned alloc_size = align( sizeof(Bucket) ) +
                                align( sizeof(unsigned) * key_size ) +
                                field_map[ num_fields ].m_base ;

    // All fields checked and sized, Ready to allocate

    unsigned char * const alloc_ptr = new unsigned char[ alloc_size ];

    unsigned char * ptr = alloc_ptr;

    ptr += align( sizeof( Bucket ) );

    unsigned * const new_key = reinterpret_cast<unsigned *>( ptr );

    ptr += align( sizeof(unsigned) * key_size );

    Entity ** const entity_array = reinterpret_cast<Entity**>( ptr );

    for ( unsigned i = 0 ; i < key_size ; ++i ) { new_key[i] = key[i] ; }

    // Placement-construct the bucket at the head of the allocation;
    // destroy_bucket(Bucket*) undoes this pairing.
    bucket = new( alloc_ptr ) Bucket( m_mesh, arg_entity_rank , new_key,
                                      alloc_size, m_bucket_capacity ,
                                      field_map , entity_array );

    Bucket * first_bucket = last_bucket ? last_bucket->m_bucketImpl.first_bucket_in_family() : bucket ;

    bucket->m_bucketImpl.set_first_bucket_in_family(first_bucket); // Family members point to first bucket

    first_bucket->m_bucketImpl.set_last_bucket_in_family(bucket); // First bucket points to new last bucket

    bucket_set.insert( ik , bucket );
  }

  //----------------------------------

  return bucket ;
}
00469 
00470 //----------------------------------------------------------------------
00471 
// Zero all field data of the entity at ordinal 'i_dst' in bucket 'k_dst'.
void BucketRepository::zero_fields( Bucket & k_dst , unsigned i_dst )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::zero_fields", LOG_BUCKET, &k_dst);
  k_dst.m_bucketImpl.zero_fields(i_dst);
}
00477 
// Copy all field data of entity (k_src, i_src) into slot (k_dst, i_dst).
void BucketRepository::copy_fields( Bucket & k_dst , unsigned i_dst ,
                                    Bucket & k_src , unsigned i_src )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::copy_fields", LOG_BUCKET, &k_dst);
  k_dst.m_bucketImpl.replace_fields(i_dst,k_src,i_src);
}
00484 
00485 //----------------------------------------------------------------------
00486 
00487 void BucketRepository::update_field_data_states() const
00488 {
00489   TraceIf("stk::mesh::impl::BucketRepository::update_field_data_states", LOG_BUCKET);
00490 
00491   for ( std::vector< std::vector<Bucket*> >::const_iterator
00492         i = m_buckets.begin() ; i != m_buckets.end() ; ++i ) {
00493 
00494     const std::vector<Bucket*> & kset = *i ;
00495 
00496     for ( std::vector<Bucket*>::const_iterator
00497           ik = kset.begin() ; ik != kset.end() ; ++ik ) {
00498       (*ik)->m_bucketImpl.update_state();
00499     }
00500   }
00501 }
00502 
00503 
00504 //----------------------------------------------------------------------
00505 
// Read-only access to all buckets of the given rank; throws when the
// rank is not valid for this mesh's MetaData.
const std::vector<Bucket*> & BucketRepository::buckets( EntityRank rank ) const
{
  ThrowRequireMsg( MetaData::get(m_mesh).check_rank(rank),
                   "Invalid entity rank " << rank );

  return m_buckets[ rank ];
}
00513 
00514 //----------------------------------------------------------------------
00515 
00516 
// Sort the entities of each bucket family (per rank) into EntityLess
// order.  Entities are relocated with a cycle-following swap through a
// single "vacant" slot; when the family's last bucket is already full a
// temporary scratch bucket supplies the vacant slot and is destroyed at
// the end.
void BucketRepository::internal_sort_bucket_entities()
{
  TraceIf("stk::mesh::impl::BucketRepository::internal_sort_bucket_entities", LOG_BUCKET);

  for ( EntityRank entity_rank = 0 ;
        entity_rank < m_buckets.size() ; ++entity_rank ) {

    std::vector<Bucket*> & buckets = m_buckets[ entity_rank ];

    size_t bk = 0 ; // Offset to first bucket of the family
    size_t ek = 0 ; // Offset to end   bucket of the family

    for ( ; bk < buckets.size() ; bk = ek ) {
      Bucket * b_scratch = NULL ;
      Bucket * ik_vacant = buckets[bk]->m_bucketImpl.last_bucket_in_family();
      unsigned ie_vacant = ik_vacant->size();

      if ( ik_vacant->capacity() <= ie_vacant ) {
        // Have to create a bucket just for the scratch space...
        const unsigned * const bucket_key = buckets[bk]->key() ;
        const unsigned         part_count = bucket_key[0] - 1 ;
        const unsigned * const part_ord   = bucket_key + 1 ;

        b_scratch = declare_bucket( entity_rank ,
            part_count , part_ord ,
            MetaData::get(m_mesh).get_fields() );

        ik_vacant = b_scratch ;
        ie_vacant = 0 ;
      }

      // Mark the vacant slot as unoccupied.
      ik_vacant->m_bucketImpl.replace_entity( ie_vacant , NULL ) ;

      // Determine offset to the end bucket in this family:
      while ( ek < buckets.size() && ik_vacant != buckets[ek] ) { ++ek ; }
      ++ek ;

      // Gather pointers to every entity in the family...
      unsigned count = 0 ;
      for ( size_t ik = bk ; ik != ek ; ++ik ) {
        count += buckets[ik]->size();
      }

      std::vector<Entity*> entities( count );

      std::vector<Entity*>::iterator j = entities.begin();

      for ( size_t ik = bk ; ik != ek ; ++ik ) {
        Bucket & b = * buckets[ik];
        const unsigned n = b.size();
        for ( unsigned i = 0 ; i < n ; ++i , ++j ) {
          *j = & b[i] ;
        }
      }

      // ...and compute the desired sorted order.
      std::sort( entities.begin() , entities.end() , EntityLess() );

      j = entities.begin();

      bool change_this_family = false ;

      // Walk the family slot by slot; *j is the entity that belongs in
      // the current slot (b, i).  Out-of-place entities are moved via
      // the vacant slot, which then follows the displaced entity.
      for ( size_t ik = bk ; ik != ek ; ++ik ) {
        Bucket & b = * buckets[ik];
        const unsigned n = b.size();
        for ( unsigned i = 0 ; i < n ; ++i , ++j ) {
          Entity * const current = & b[i] ;

          if ( current != *j ) {

            if ( current ) {
              // Move current entity to the vacant spot
              copy_fields( *ik_vacant , ie_vacant , b, i );
              m_entity_repo.change_entity_bucket(*ik_vacant, *current, ie_vacant);
              ik_vacant->m_bucketImpl.replace_entity( ie_vacant , current ) ;
            }

            // Set the vacant spot to where the required entity is now.
            ik_vacant = & ((*j)->bucket()) ;
            ie_vacant = (*j)->bucket_ordinal() ;
            ik_vacant->m_bucketImpl.replace_entity( ie_vacant , NULL ) ;

            // Move required entity to the required spot
            copy_fields( b, i, *ik_vacant , ie_vacant );
            m_entity_repo.change_entity_bucket( b, **j, i);
            b.m_bucketImpl.replace_entity( i, *j );

            change_this_family = true ;
          }

          // Once a change has occured then need to propagate the
          // relocation for the remainder of the family.
          // This allows the propagation to be performed once per
          // entity as opposed to both times the entity is moved.

          if ( change_this_family ) { internal_propagate_relocation( **j ); }
        }
      }

      if ( b_scratch ) {
        // Created a last bucket, now have to destroy it.
        destroy_bucket( entity_rank , b_scratch );
        --ek ;
      }
    }
  }
}
00622 
00623 //----------------------------------------------------------------------
00624 
// Remove the entity at ordinal 'i' of bucket 'k'.  To keep the family's
// buckets packed, the resulting gap is filled with the last entity of
// the family's last bucket; that bucket then shrinks by one and is
// destroyed once it becomes empty.
void BucketRepository::remove_entity( Bucket * k , unsigned i )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::remove_entity", LOG_BUCKET, k);

  ThrowRequireMsg( k != m_nil_bucket, "Cannot remove entity from nil_bucket" );

  const EntityRank entity_rank = k->entity_rank();

  // Last bucket in the family of buckets with the same parts.
  // The last bucket is the only non-full bucket in the family.

  Bucket * const last = k->m_bucketImpl.last_bucket_in_family();

  // Fill in the gap if it is not the last entity being removed

  if ( last != k || k->size() != i + 1 ) {

    // Copy last entity in last bucket to bucket *k slot i

    Entity & entity = (*last)[ last->size() - 1 ];

    copy_fields( *k , i , *last , last->size() - 1 );

    k->m_bucketImpl.replace_entity(i, & entity ) ;
    m_entity_repo.change_entity_bucket( *k, entity, i);

    // Entity field data has relocated

    internal_propagate_relocation( entity );
  }

  last->m_bucketImpl.decrement_size();

  // Clear the now-unused trailing slot of the last bucket.
  last->m_bucketImpl.replace_entity( last->size() , NULL ) ;

  if ( 0 == last->size() ) {
    destroy_bucket( entity_rank , last );
  }
}
00664 
00665 //----------------------------------------------------------------------
00666 
00667 void BucketRepository::internal_propagate_relocation( Entity & entity )
00668 {
00669   TraceIf("stk::mesh::impl::BucketRepository::internal_propagate_relocation", LOG_BUCKET);
00670 
00671   const EntityRank erank = entity.entity_rank();
00672   PairIterRelation rel = entity.relations();
00673 
00674   for ( ; ! rel.empty() ; ++rel ) {
00675     const EntityRank rel_rank = rel->entity_rank();
00676     if ( rel_rank < erank ) {
00677       Entity & e_to = * rel->entity();
00678 
00679       set_field_relations( entity, e_to, rel->identifier() );
00680     }
00681     else if ( erank < rel_rank ) {
00682       Entity & e_from = * rel->entity();
00683 
00684       set_field_relations( e_from, entity, rel->identifier() );
00685     }
00686   }
00687 }
00688 
00689 
00690 } // namespace impl
00691 } // namespace mesh
00692 } // namespace stk
00693 
00694 
// All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends