// Sierra Toolkit "Version of the Day" snapshot
// BucketRepository.cpp
00001 /*------------------------------------------------------------------------*/
00002 /*                 Copyright 2010 Sandia Corporation.                     */
00003 /*  Under terms of Contract DE-AC04-94AL85000, there is a non-exclusive   */
00004 /*  license for use of this work by or on behalf of the U.S. Government.  */
00005 /*  Export of this program may require a license from the                 */
00006 /*  United States Government.                                             */
00007 /*------------------------------------------------------------------------*/
00008 
00009 #include <sstream>
00010 #include <cstdlib>
00011 #include <stdexcept>
00012 
00013 #include <stk_mesh/baseImpl/BucketRepository.hpp>
00014 #include <stk_mesh/baseImpl/EntityRepository.hpp>
00015 #include <stk_mesh/base/BulkData.hpp>
00016 #include <stk_mesh/base/Bucket.hpp>
00017 #include <stk_mesh/base/Trace.hpp>
00018 
00019 namespace stk {
00020 namespace mesh {
00021 namespace impl {
00022 
00023 //----------------------------------------------------------------------
00024 namespace {
00025 
00026 void * local_malloc( size_t n )
00027 {
00028   void * const ptr = std::malloc( n );
00029 
00030   ThrowErrorMsgIf( NULL == ptr, "malloc of size " << n << " failed" );
00031 
00032   return ptr ;
00033 }
00034 
00035 
00036 } // namespace
00037 
00038 //----------------------------------------------------------------------
00039 
00040 namespace {
00041 
// Round a byte count up to the next multiple of BYTE_ALIGN (16).
//
// @param nb  requested number of bytes
// @return    nb rounded up to a 16-byte boundary
//
// Fix: the original declared 'gap' as unsigned and returned the size_t
// 'nb' through an unsigned return type, silently narrowing.  The
// intermediate now stays in size_t and the final narrowing (callers
// accumulate allocation sizes in 'unsigned') is an explicit cast.
inline unsigned align( size_t nb )
{
  enum { BYTE_ALIGN = 16 };
  const size_t gap = nb % BYTE_ALIGN ;
  if ( gap ) { nb += BYTE_ALIGN - gap ; }
  return static_cast<unsigned>( nb );
}
00049 
00050 const FieldBase::Restriction & empty_field_restriction()
00051 {
00052   static const FieldBase::Restriction empty ;
00053   return empty ;
00054 }
00055 
// Locate the field restriction that applies to this field for entity
// rank 'erank' on any of the given part ordinals.  The first matching
// restriction becomes the result; if a later part carries a restriction
// with a different stride, the field is inconsistently dimensioned on
// this combination of parts and an error is thrown.  Returns the shared
// empty restriction when no part restricts the field.
//
// Assumes part_ord[] is sorted ascending -- required for the shrinking
// lower_bound window below; TODO confirm at callers.
const FieldBase::Restriction & dimension( const FieldBase & field ,
                                          EntityRank erank ,
                                          const unsigned num_part_ord ,
                                          const unsigned part_ord[] ,
                                          const char * const method )
{
  const FieldBase::Restriction & empty = empty_field_restriction();
  const FieldBase::Restriction * dim = & empty ;

  const std::vector<FieldBase::Restriction> & dim_map = field.restrictions();
  const std::vector<FieldBase::Restriction>::const_iterator iend = dim_map.end();
        std::vector<FieldBase::Restriction>::const_iterator ibeg = dim_map.begin();

  // The search window [ibeg,iend) only ever narrows from the left as
  // successive (sorted) part ordinals are looked up.
  for ( PartOrdinal i = 0 ; i < num_part_ord && iend != ibeg ; ++i ) {

    const FieldRestriction restr(erank,part_ord[i]);

    ibeg = std::lower_bound( ibeg , iend , restr );

    if ( (iend != ibeg) && (*ibeg == restr) ) {
      // First match establishes the dimension for this part set.
      if ( dim == & empty ) { dim = & *ibeg ; }

      if ( ibeg->not_equal_stride(*dim) ) {

        // NOTE(review): p_old comes from the newly found restriction and
        // p_new from the previously established one -- the names look
        // swapped, but only the error message wording is affected.
        Part & p_old = MetaData::get(field).get_part( ibeg->part_ordinal() );
        Part & p_new = MetaData::get(field).get_part( dim->part_ordinal() );

        std::ostringstream msg ;
        msg << method ;
        msg << " FAILED WITH INCOMPATIBLE DIMENSIONS FOR " ;
        msg << field ;
        msg << " Part[" << p_old.name() ;
        msg << "] and Part[" << p_new.name() ;
        msg << "]" ;

        ThrowErrorMsg( msg.str() );
      }
    }
  }

  return *dim ;
}
00098 
00099 } // namespace
00100 
00101 //----------------------------------------------------------------------
00102 
00103 
// Construct the repository with one (initially empty) vector of buckets
// per entity rank.  The nil bucket is not created here; it is created
// on demand by declare_nil_bucket().
BucketRepository::BucketRepository(
    BulkData & mesh,
    unsigned bucket_capacity,
    unsigned entity_rank_count,
    EntityRepository & entity_repo
    )
  :m_mesh(mesh),
   m_bucket_capacity(bucket_capacity),
   m_buckets(entity_rank_count),
   m_nil_bucket(NULL),
   m_entity_repo(entity_repo)
{
}
00117 
00118 
00119 BucketRepository::~BucketRepository()
00120 {
00121   // Destroy buckets, which were *not* allocated by the set.
00122 
00123   try {
00124     for ( std::vector< std::vector<Bucket*> >::iterator
00125           i = m_buckets.end() ; i != m_buckets.begin() ; ) {
00126       try {
00127         std::vector<Bucket*> & kset = *--i ;
00128 
00129         while ( ! kset.empty() ) {
00130           try { destroy_bucket( kset.back() ); } catch(...) {}
00131           kset.pop_back();
00132         }
00133         kset.clear();
00134       } catch(...) {}
00135     }
00136     m_buckets.clear();
00137   } catch(...) {}
00138 
00139   try { if ( m_nil_bucket ) destroy_bucket( m_nil_bucket ); } catch(...) {}
00140 }
00141 
00142 
00143 //----------------------------------------------------------------------
00144 // The current 'last' bucket in a family is to be deleted.
00145 // The previous 'last' bucket becomes the new 'last' bucket in the family.
00146 
// Remove bucket_to_be_deleted -- which must be empty and must be the
// last bucket of its family -- from the bucket set of entity_rank,
// repoint the family's last-bucket link to its predecessor, and release
// the bucket's memory.
void BucketRepository::destroy_bucket( const unsigned & entity_rank , Bucket * bucket_to_be_deleted )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::destroy_bucket", LOG_BUCKET, bucket_to_be_deleted);

  ThrowRequireMsg(MetaData::get(m_mesh).check_rank(entity_rank),
                  "Entity rank " << entity_rank << " is invalid");

  std::vector<Bucket *> & bucket_set = m_buckets[entity_rank];

  // Get the first bucket in the same family as the bucket being deleted
  Bucket * const first = bucket_to_be_deleted->m_bucketImpl.first_bucket_in_family();

  ThrowRequireMsg( bucket_to_be_deleted->equivalent(*first), "Logic error - bucket_to_be_deleted is not in same family as first_bucket_in_family");
  ThrowRequireMsg( first->equivalent(*bucket_to_be_deleted), "Logic error - first_bucket_in_family is not in same family as bucket_to_be_deleted");

  ThrowRequireMsg( bucket_to_be_deleted->size() == 0,
      "Destroying non-empty bucket " << *(bucket_to_be_deleted->key()) );

  // Only the bucket the first bucket designates as the family's last
  // may be destroyed.
  ThrowRequireMsg( bucket_to_be_deleted == first->m_bucketImpl.get_bucket_family_pointer(),
                   "Destroying bucket family") ;

  // Locate the bucket in the sorted bucket set and unlink it.
  std::vector<Bucket*>::iterator ik = lower_bound(bucket_set, bucket_to_be_deleted->key());
  ThrowRequireMsg( ik != bucket_set.end() && bucket_to_be_deleted == *ik,
      "Bucket not found in bucket set for entity rank " << entity_rank );

  ik = bucket_set.erase( ik );

  if ( first != bucket_to_be_deleted ) {

    ThrowRequireMsg( ik != bucket_set.begin(),
                     "Where did first bucket go?" );

    // Family members sort adjacently, so the predecessor of the erased
    // position is the family's new last bucket.
    first->m_bucketImpl.set_last_bucket_in_family( *--ik );

    ThrowRequireMsg ( first->m_bucketImpl.get_bucket_family_pointer()->size() != 0,
                      "TODO: Explain" );
  }

  destroy_bucket( bucket_to_be_deleted );
}
00187 
00188 //----------------------------------------------------------------------
00189 void BucketRepository::destroy_bucket( Bucket * bucket )
00190 {
00191   TraceIfWatching("stk::mesh::impl::BucketRepository::destroy_bucket", LOG_BUCKET, bucket);
00192 
00193   bucket->~Bucket();
00194   delete [] reinterpret_cast<unsigned char*>( bucket );
00195 }
00196 
00197 //
00198 //----------------------------------------------------------------------
00199 // The input part ordinals are complete and contain all supersets.
00200 void
00201 BucketRepository::declare_nil_bucket()
00202 {
00203   TraceIf("stk::mesh::impl::BucketRepository::declare_nil_bucket", LOG_BUCKET);
00204 
00205   if (m_nil_bucket == NULL) {
00206     unsigned field_count = MetaData::get(m_mesh).get_fields().size();
00207 
00208     //----------------------------------
00209     // Field map gives NULL for all field data.
00210 
00211     impl::BucketImpl::DataMap * field_map =
00212       reinterpret_cast<impl::BucketImpl::DataMap*>(
00213         local_malloc( sizeof(impl::BucketImpl::DataMap) * ( field_count + 1 )));
00214 
00215     FieldBase::Restriction::size_type empty_stride[ MaximumFieldDimension ];
00216     Copy<MaximumFieldDimension>( empty_stride , FieldBase::Restriction::size_type(0) );
00217 
00218     for ( unsigned i = 0 ; i < field_count ; ++i ) {
00219       field_map[ i ].m_base = 0 ;
00220       field_map[ i ].m_size = 0 ;
00221       field_map[ i ].m_stride = empty_stride;
00222     }
00223     field_map[ field_count ].m_base   = 0 ;
00224     field_map[ field_count ].m_size   = 0 ;
00225     field_map[ field_count ].m_stride = NULL ;
00226 
00227     //----------------------------------
00228     // Allocation size:  sizeof(Bucket) + key_size * sizeof(unsigned);
00229 
00230     const unsigned alloc_size = align( sizeof(Bucket) ) +
00231                                 align( sizeof(unsigned) * 2 );
00232 
00233     // All fields checked and sized, Ready to allocate
00234 
00235     unsigned char * const alloc_ptr = new unsigned char[ alloc_size ];
00236 
00237     unsigned char * ptr = alloc_ptr;
00238 
00239     ptr += align( sizeof( Bucket ) );
00240 
00241     unsigned * const new_key = reinterpret_cast<unsigned *>( ptr );
00242 
00243     // Key layout:
00244     // { part_count + 1 , { part_ordinals } , family_count }
00245 
00246     new_key[0] = 1 ; // part_count + 1
00247     new_key[1] = 0 ; // family_count
00248 
00249     Bucket * bucket =
00250       new( alloc_ptr ) Bucket( m_mesh , InvalidEntityRank , new_key ,
00251                               alloc_size , 0 , field_map , NULL );
00252 
00253     bucket->m_bucketImpl.set_bucket_family_pointer( bucket );
00254 
00255     //----------------------------------
00256 
00257     m_nil_bucket = bucket;
00258   }
00259 }
00260 
00261 
00284 //----------------------------------------------------------------------
00285 // The input part ordinals are complete and contain all supersets.
// Find or create the bucket that can accept one more entity having
// exactly the given set of parts.
//
// Buckets with identical part ordinals form a "family" that is kept
// packed: only the family's last bucket may have spare capacity.  The
// bucket key is { part_count + 1 , part_ordinals... , family_count },
// so family members sort adjacently in the bucket set and differ only
// in the trailing family count.
//
// @param arg_entity_rank  rank of entities the bucket will hold
// @param part_count       number of entries in part_ord[]
// @param part_ord         part ordinals, complete with all supersets
//                         (assumed sorted ascending -- required by the
//                         lower_bound searches; confirm at callers)
// @param field_set        fields used to size per-entity field data
// @return a bucket with capacity for at least one more entity
Bucket *
BucketRepository::declare_bucket(
                        const unsigned arg_entity_rank ,
                        const unsigned part_count ,
                        const unsigned part_ord[] ,
                        const std::vector< FieldBase * > & field_set
                              )
{
  enum { KEY_TMP_BUFFER_SIZE = 64 };

  static const char method[] = "stk::mesh::impl::BucketRepository::declare_bucket" ;
  TraceIf("stk::mesh::impl::BucketRepository::declare_bucket", LOG_BUCKET);

  const unsigned max = ~(0u);
  const size_t   num_fields = field_set.size();

  ThrowRequireMsg(MetaData::get(m_mesh).check_rank(arg_entity_rank),
                  "Entity rank " << arg_entity_rank << " is invalid");

  ThrowRequireMsg( !m_buckets.empty(),
    "m_buckets is empty! Did you forget to initialize MetaData before creating BulkData?");
  std::vector<Bucket *> & bucket_set = m_buckets[ arg_entity_rank ];

  //----------------------------------
  // For performance try not to allocate a temporary.

  unsigned key_tmp_buffer[ KEY_TMP_BUFFER_SIZE ];

  std::vector<unsigned> key_tmp_vector ;

  const unsigned key_size = 2 + part_count ;

  // Use the stack buffer when the key fits, else fall back to a heap
  // allocated vector.
  unsigned * const key =
    ( key_size <= KEY_TMP_BUFFER_SIZE )
    ? key_tmp_buffer
    : ( key_tmp_vector.resize( key_size ) , & key_tmp_vector[0] );

  //----------------------------------
  // Key layout:
  // { part_count + 1 , { part_ordinals } , family_count }
  // Thus family_count = key[ key[0] ]
  //
  // for upper bound search use the maximum key.

  key[ key[0] = part_count + 1 ] = max ;

  {
    unsigned * const k = key + 1 ;
    for ( unsigned i = 0 ; i < part_count ; ++i ) { k[i] = part_ord[i] ; }
  }

  //----------------------------------
  // Bucket family has all of the same parts.
  // Look for the last bucket in this family:

  const std::vector<Bucket*>::iterator ik = lower_bound( bucket_set , key );

  //----------------------------------
  // If a member of the bucket family has space, it is the last one
  // since buckets are kept packed.
  const bool bucket_family_exists =
    ik != bucket_set.begin() && bucket_part_equal( ik[-1]->key() , key );

  Bucket * const last_bucket = bucket_family_exists ? ik[-1] : NULL ;

  Bucket          * bucket    = NULL ;
  impl::BucketImpl::DataMap * field_map = NULL ;

  if ( last_bucket == NULL ) { // First bucket in this family
    key[ key[0] ] = 0 ; // Set the key's family count to zero
  }
  else { // Last bucket present, can it hold one more entity?

    ThrowRequireMsg( last_bucket->size() != 0,
                     "Last bucket should not be empty.");

    // Reuse the family's existing field map.
    field_map = last_bucket->m_bucketImpl.get_field_map();

    const unsigned last_count = last_bucket->key()[ key[0] ];

    const unsigned cap = last_bucket->capacity();

    if ( last_bucket->size() < cap ) {
      bucket = last_bucket ;
    }
    else if ( last_count < max ) {
      key[ key[0] ] = 1 + last_count ; // Increment the key's family count.
    }
    else {
      // ERROR insane number of buckets!
      ThrowRequireMsg( false, "Insanely large number of buckets" );
    }
  }

  //----------------------------------
  // Family's field map does not exist, create it:

  if ( NULL == field_map ) {

    field_map = reinterpret_cast<impl::BucketImpl::DataMap*>(
                local_malloc( sizeof(impl::BucketImpl::DataMap) * ( num_fields + 1 )));

    // Start field data memory after the array of member entity pointers:
    unsigned value_offset = align( sizeof(Entity*) * m_bucket_capacity );

    for ( unsigned i = 0 ; i < num_fields ; ++i ) {
      const FieldBase  & field = * field_set[i] ;

      unsigned num_bytes_per_entity = 0 ;

      // Restriction for this field on this part set; throws if two
      // parts impose incompatible strides (see dimension() above).
      const FieldBase::Restriction & dim =
        dimension( field, arg_entity_rank, part_count, part_ord, method);

      if ( dim.dimension() ) { // Exists

        const unsigned type_stride = field.data_traits().stride_of ;
        const unsigned field_rank  = field.rank();

        num_bytes_per_entity = type_stride *
          ( field_rank ? dim.stride( field_rank - 1 ) : 1 );
      }

      field_map[i].m_base = value_offset ;
      field_map[i].m_size = num_bytes_per_entity ;
      field_map[i].m_stride = &dim.stride(0);

      // Each field's data block is padded to the 16-byte boundary
      // enforced by align().
      value_offset += align( num_bytes_per_entity * m_bucket_capacity );
    }
    // Sentinel entry: m_base records the total data footprint.
    field_map[ num_fields ].m_base  = value_offset ;
    field_map[ num_fields ].m_size = 0 ;
    field_map[ num_fields ].m_stride = NULL ;
  }

  //----------------------------------

  if ( NULL == bucket ) {

    // Required bucket does not exist, must allocate and insert
    //
    // Allocation size:
    //   sizeof(Bucket) +
    //   key_size * sizeof(unsigned) +
    //   sizeof(Entity*) * capacity() +
    //   sum[number_of_fields]( fieldsize * capacity )
    //
    // The field_map[ num_fields ].m_base spans
    //   sizeof(Entity*) * capacity() +
    //   sum[number_of_fields]( fieldsize * capacity )

    const unsigned alloc_size = align( sizeof(Bucket) ) +
                                align( sizeof(unsigned) * key_size ) +
                                field_map[ num_fields ].m_base ;

    // All fields checked and sized, Ready to allocate

    unsigned char * const alloc_ptr = new unsigned char[ alloc_size ];

    unsigned char * ptr = alloc_ptr;

    ptr += align( sizeof( Bucket ) );

    unsigned * const new_key = reinterpret_cast<unsigned *>( ptr );

    ptr += align( sizeof(unsigned) * key_size );

    Entity ** const entity_array = reinterpret_cast<Entity**>( ptr );

    for ( unsigned i = 0 ; i < key_size ; ++i ) { new_key[i] = key[i] ; }

    // Placement-construct the bucket into the raw storage.
    bucket = new( alloc_ptr ) Bucket( m_mesh, arg_entity_rank , new_key,
                                      alloc_size, m_bucket_capacity ,
                                      field_map , entity_array );

    Bucket * first_bucket = last_bucket ? last_bucket->m_bucketImpl.first_bucket_in_family() : bucket ;

    bucket->m_bucketImpl.set_first_bucket_in_family(first_bucket); // Family members point to first bucket

    first_bucket->m_bucketImpl.set_last_bucket_in_family(bucket); // First bucket points to new last bucket

    bucket_set.insert( ik , bucket );
  }

  //----------------------------------

  ThrowRequireMsg( bucket->equivalent(*bucket->m_bucketImpl.first_bucket_in_family()), "Logic error - new bucket is not in same family as first_bucket_in_family");
  ThrowRequireMsg( bucket->m_bucketImpl.first_bucket_in_family()->equivalent(*bucket), "Logic error - first_bucket_in_family is not in same family as new bucket");

  return bucket ;
}
00475 
00476 //----------------------------------------------------------------------
00477 
// Initialize all field data for slot i_dst of bucket k_dst.
// Delegates to BucketImpl::initialize_fields.
void BucketRepository::initialize_fields( Bucket & k_dst , unsigned i_dst )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::initialize_fields", LOG_BUCKET, &k_dst);
  k_dst.m_bucketImpl.initialize_fields(i_dst);
}
00483 
// Copy all field data from slot i_src of bucket k_src into slot i_dst
// of bucket k_dst.  Delegates to BucketImpl::replace_fields.
void BucketRepository::copy_fields( Bucket & k_dst , unsigned i_dst ,
                                    Bucket & k_src , unsigned i_src )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::copy_fields", LOG_BUCKET, &k_dst);
  k_dst.m_bucketImpl.replace_fields(i_dst,k_src,i_src);
}
00490 
00491 //----------------------------------------------------------------------
00492 
00493 void BucketRepository::update_field_data_states() const
00494 {
00495   TraceIf("stk::mesh::impl::BucketRepository::update_field_data_states", LOG_BUCKET);
00496 
00497   for ( std::vector< std::vector<Bucket*> >::const_iterator
00498         i = m_buckets.begin() ; i != m_buckets.end() ; ++i ) {
00499 
00500     const std::vector<Bucket*> & kset = *i ;
00501 
00502     for ( std::vector<Bucket*>::const_iterator
00503           ik = kset.begin() ; ik != kset.end() ; ++ik ) {
00504       (*ik)->m_bucketImpl.update_state();
00505     }
00506   }
00507 }
00508 
00509 
00510 //----------------------------------------------------------------------
00511 
// Return the (key-sorted) buckets of the given entity rank.
// Throws if the rank is not valid for this mesh's meta data.
const std::vector<Bucket*> & BucketRepository::buckets( EntityRank rank ) const
{
  ThrowRequireMsg( MetaData::get(m_mesh).check_rank(rank),
                   "Invalid entity rank " << rank );

  return m_buckets[ rank ];
}
00519 
00520 //----------------------------------------------------------------------
00521 
00522 
// Sort the entities within every bucket family into EntityLess order,
// moving field data along with each entity.
//
// A cycle-sort is performed per family using a single vacant slot: the
// slot after the last entity of the family's last bucket, or slot 0 of
// a temporary scratch bucket when the last bucket is full.
void BucketRepository::internal_sort_bucket_entities()
{
  TraceIf("stk::mesh::impl::BucketRepository::internal_sort_bucket_entities", LOG_BUCKET);

  for ( EntityRank entity_rank = 0 ;
        entity_rank < m_buckets.size() ; ++entity_rank ) {

    std::vector<Bucket*> & buckets = m_buckets[ entity_rank ];

    size_t bk = 0 ; // Offset to first bucket of the family
    size_t ek = 0 ; // Offset to end   bucket of the family

    for ( ; bk < buckets.size() ; bk = ek ) {
      Bucket * b_scratch = NULL ;
      Bucket * ik_vacant = buckets[bk]->m_bucketImpl.last_bucket_in_family();
      unsigned ie_vacant = ik_vacant->size();

      if ( ik_vacant->capacity() <= ie_vacant ) {
        // Have to create a bucket just for the scratch space...
        const unsigned * const bucket_key = buckets[bk]->key() ;
        const unsigned         part_count = bucket_key[0] - 1 ;
        const unsigned * const part_ord   = bucket_key + 1 ;

        b_scratch = declare_bucket( entity_rank ,
            part_count , part_ord ,
            MetaData::get(m_mesh).get_fields() );

        ik_vacant = b_scratch ;
        ie_vacant = 0 ;
      }

      // Mark the vacant slot as holding no entity.
      ik_vacant->m_bucketImpl.replace_entity( ie_vacant , NULL ) ;

      // Determine offset to the end bucket in this family:
      while ( ek < buckets.size() && ik_vacant != buckets[ek] ) { ++ek ; }
      ++ek ;

      // Count the family's entities...
      unsigned count = 0 ;
      for ( size_t ik = bk ; ik != ek ; ++ik ) {
        count += buckets[ik]->size();
      }

      // ...and gather pointers to all of them in current bucket order.
      std::vector<Entity*> entities( count );

      std::vector<Entity*>::iterator j = entities.begin();

      for ( size_t ik = bk ; ik != ek ; ++ik ) {
        Bucket & b = * buckets[ik];
        const unsigned n = b.size();
        for ( unsigned i = 0 ; i < n ; ++i , ++j ) {
          *j = & b[i] ;
        }
      }

      std::sort( entities.begin() , entities.end() , EntityLess() );

      // Walk the family again; whenever slot i does not already hold
      // the sorted entity *j, cycle entities through the vacant slot.
      j = entities.begin();

      bool change_this_family = false ;

      for ( size_t ik = bk ; ik != ek ; ++ik ) {
        Bucket & b = * buckets[ik];
        const unsigned n = b.size();
        for ( unsigned i = 0 ; i < n ; ++i , ++j ) {
          Entity * const current = & b[i] ;

          if ( current != *j ) {

            if ( current ) {
              // Move current entity to the vacant spot
              copy_fields( *ik_vacant , ie_vacant , b, i );
              m_entity_repo.change_entity_bucket(*ik_vacant, *current, ie_vacant);
              ik_vacant->m_bucketImpl.replace_entity( ie_vacant , current ) ;
            }

            // Set the vacant spot to where the required entity is now.
            ik_vacant = & ((*j)->bucket()) ;
            ie_vacant = (*j)->bucket_ordinal() ;
            ik_vacant->m_bucketImpl.replace_entity( ie_vacant , NULL ) ;

            // Move required entity to the required spot
            copy_fields( b, i, *ik_vacant , ie_vacant );
            m_entity_repo.change_entity_bucket( b, **j, i);
            b.m_bucketImpl.replace_entity( i, *j );

            change_this_family = true ;
          }

          // Once a change has occured then need to propagate the
          // relocation for the remainder of the family.
          // This allows the propagation to be performed once per
          // entity as opposed to both times the entity is moved.

          if ( change_this_family ) { internal_propagate_relocation( **j ); }
        }
      }

      if ( b_scratch ) {
        // Created a last bucket, now have to destroy it.
        destroy_bucket( entity_rank , b_scratch );
        --ek ;
      }
    }
  }
}
00628 
00629 //----------------------------------------------------------------------
00630 
// Remove the entity occupying slot i of bucket k.
//
// Families are kept packed, so the gap is filled by relocating the
// family's very last entity (from the family's last bucket) into slot
// i.  The last bucket shrinks by one and is destroyed when it becomes
// empty.
void BucketRepository::remove_entity( Bucket * k , unsigned i )
{
  TraceIfWatching("stk::mesh::impl::BucketRepository::remove_entity", LOG_BUCKET, k);

  ThrowRequireMsg( k != m_nil_bucket, "Cannot remove entity from nil_bucket" );

  const EntityRank entity_rank = k->entity_rank();

  // Last bucket in the family of buckets with the same parts.
  // The last bucket is the only non-full bucket in the family.

  Bucket * const last = k->m_bucketImpl.last_bucket_in_family();

  ThrowRequireMsg( last->equivalent(*k), "Logic error - last bucket in family not equivalent to bucket");
  ThrowRequireMsg( k->equivalent(*last), "Logic error - bucket not equivalent to last bucket in family");

  // Fill in the gap if it is not the last entity being removed

  if ( last != k || k->size() != i + 1 ) {

    // Copy last entity in last bucket to bucket *k slot i

    Entity & entity = (*last)[ last->size() - 1 ];

    copy_fields( *k , i , *last , last->size() - 1 );

    k->m_bucketImpl.replace_entity(i, & entity ) ;
    m_entity_repo.change_entity_bucket( *k, entity, i);

    // Entity field data has relocated

    internal_propagate_relocation( entity );
  }

  last->m_bucketImpl.decrement_size();

  last->m_bucketImpl.replace_entity( last->size() , NULL ) ;

  // An emptied last bucket is destroyed to keep the family packed.
  if ( 0 == last->size() ) {
    destroy_bucket( entity_rank , last );
  }
}
00673 
00674 //----------------------------------------------------------------------
00675 
00676 void BucketRepository::internal_propagate_relocation( Entity & entity )
00677 {
00678   TraceIf("stk::mesh::impl::BucketRepository::internal_propagate_relocation", LOG_BUCKET);
00679 
00680   const EntityRank erank = entity.entity_rank();
00681   PairIterRelation rel = entity.relations();
00682 
00683   for ( ; ! rel.empty() ; ++rel ) {
00684     const EntityRank rel_rank = rel->entity_rank();
00685     if ( rel_rank < erank ) {
00686       Entity & e_to = * rel->entity();
00687 
00688       set_field_relations( entity, e_to, rel->identifier() );
00689     }
00690     else if ( erank < rel_rank ) {
00691       Entity & e_from = * rel->entity();
00692 
00693       set_field_relations( e_from, entity, rel->identifier() );
00694     }
00695   }
00696 }
00697 
00698 
00699 } // namespace impl
00700 } // namespace mesh
00701 } // namespace stk
00702 
00703 
// (doxygen navigation footer from the generated listing: All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends)