Sierra Toolkit Version of the Day
fixed_pool_eastl.h
00001 /*
00002 Copyright (C) 2005,2009-2010 Electronic Arts, Inc.  All rights reserved.
00003 
00004 Redistribution and use in source and binary forms, with or without
00005 modification, are permitted provided that the following conditions
00006 are met:
00007 
00008 1.  Redistributions of source code must retain the above copyright
00009     notice, this list of conditions and the following disclaimer.
00010 2.  Redistributions in binary form must reproduce the above copyright
00011     notice, this list of conditions and the following disclaimer in the
00012     documentation and/or other materials provided with the distribution.
00013 3.  Neither the name of Electronic Arts, Inc. ("EA") nor the names of
00014     its contributors may be used to endorse or promote products derived
00015     from this software without specific prior written permission.
00016 
00017 THIS SOFTWARE IS PROVIDED BY ELECTRONIC ARTS AND ITS CONTRIBUTORS "AS IS" AND ANY
00018 EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
00019 WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
00020 DISCLAIMED. IN NO EVENT SHALL ELECTRONIC ARTS OR ITS CONTRIBUTORS BE LIABLE FOR ANY
00021 DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
00022 (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
00023 LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
00024 ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
00025 (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
00026 THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
00027 */
00028 
00030 // EASTL/internal/fixed_pool.h
00031 // Written and maintained by Paul Pedriana - 2005.
00033 
00035 // This file implements the following
00036 //     aligned_buffer
00037 //     fixed_pool_base
00038 //     fixed_pool
00039 //     fixed_pool_with_overflow
00040 //     fixed_hashtable_allocator
00041 //     fixed_vector_allocator
00042 //     fixed_swap
00043 //
00045 
00046 
00047 #ifndef EASTL_INTERNAL_FIXED_POOL_H
00048 #define EASTL_INTERNAL_FIXED_POOL_H
00049 
00050 
00051 #include <stk_util/util/config_eastl.h>
00052 #include <stk_util/util/functional_eastl.h>
00053 #include <stk_util/util/memory_eastl.h>
00054 #include <stk_util/util/allocator_eastl.h>
00055 #include <stk_util/util/type_traits_eastl.h>
00056 
00057 #ifdef _MSC_VER
00058     #pragma warning(push, 0)
00059     #include <new>
00060     #pragma warning(pop)
00061 #else
00062     #include <new>
00063 #endif
00064 
00065 
00066 
00067 namespace eastl
00068 {
00069 
00074     #ifndef EASTL_FIXED_POOL_DEFAULT_NAME
00075         #define EASTL_FIXED_POOL_DEFAULT_NAME EASTL_DEFAULT_NAME_PREFIX " fixed_pool" // Unless the user overrides something, this is "EASTL fixed_pool".
00076     #endif
00077 
00078 
00079 
00081     // aligned_buffer
00083 
    // aligned_buffer provides raw, uninitialized storage of 'size' bytes whose
    // address is aligned to 'alignment'. Fixed-size containers embed it to hold
    // node memory without running any constructors. Only power-of-two alignments
    // with explicit specializations below (2..4096) gain real alignment; the
    // primary template is a plain char array with no alignment guarantee beyond
    // that of char.
    //
    // The element type is marked EASTL_MAY_ALIAS so compilers permit the buffer
    // bytes to be reinterpreted as other object types without strict-aliasing
    // warnings/misoptimization.
00104     typedef char EASTL_MAY_ALIAS aligned_buffer_char;
00105 
    // Primary template: unaligned fallback (used when no specialization matches).
00106     template <size_t size, size_t alignment>
00107     struct aligned_buffer { aligned_buffer_char buffer[size]; };
00108 
    // Explicit specializations for each supported alignment. EA_PREFIX_ALIGN /
    // EA_POSTFIX_ALIGN expand to the compiler-specific alignment attribute
    // (some compilers want it before the declaration, others after).
00109     template<size_t size>
00110     struct aligned_buffer<size, 2>    { EA_PREFIX_ALIGN(2) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2); };
00111 
00112     template<size_t size>
00113     struct aligned_buffer<size, 4>    { EA_PREFIX_ALIGN(4) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4); };
00114 
00115     template<size_t size>
00116     struct aligned_buffer<size, 8>    { EA_PREFIX_ALIGN(8) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(8); };
00117 
00118     template<size_t size>
00119     struct aligned_buffer<size, 16>   { EA_PREFIX_ALIGN(16) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(16); };
00120 
00121     template<size_t size>
00122     struct aligned_buffer<size, 32>   { EA_PREFIX_ALIGN(32) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(32); };
00123 
00124     template<size_t size>
00125     struct aligned_buffer<size, 64>   { EA_PREFIX_ALIGN(64) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(64); };
00126 
00127     template<size_t size>
00128     struct aligned_buffer<size, 128>  { EA_PREFIX_ALIGN(128) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(128); };
00129 
00130     #if !defined(EA_PLATFORM_PSP) // This compiler fails to compile alignment >= 256 and gives an error.
00131 
00132     template<size_t size>
00133     struct aligned_buffer<size, 256>  { EA_PREFIX_ALIGN(256) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(256); };
00134 
00135     template<size_t size>
00136     struct aligned_buffer<size, 512>  { EA_PREFIX_ALIGN(512) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(512); };
00137 
00138     template<size_t size>
00139     struct aligned_buffer<size, 1024> { EA_PREFIX_ALIGN(1024) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(1024); };
00140 
00141     template<size_t size>
00142     struct aligned_buffer<size, 2048> { EA_PREFIX_ALIGN(2048) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(2048); };
00143 
00144     template<size_t size>
00145     struct aligned_buffer<size, 4096> { EA_PREFIX_ALIGN(4096) aligned_buffer_char buffer[size] EA_POSTFIX_ALIGN(4096); };
00146 
00147     #endif // EA_PLATFORM_PSP
00148 
00149 
00150 
00152     // fixed_pool_base
00154 
    // fixed_pool_base holds the state shared by all fixed-pool variants:
    //   * a singly-linked free list of previously freed nodes (mpHead), and
    //   * a "bump" region of never-yet-used memory [mpNext, mpCapacity).
    // It performs no allocation itself; derived classes implement allocate/
    // deallocate against this state. init() (defined out of line) lays the
    // pool out over caller-provided memory.
00161     struct EASTL_API fixed_pool_base
00162     {
00163     public:
        // Constructs an empty pool over pMemory. With the default NULL the
        // pool is unusable until init() is called: head/next/capacity are all
        // equal, so can_allocate() is false.
        // Note: mnNodeSize is only zero-initialized under EASTL_DEBUG; in all
        // builds its real value is established by init().
00166         fixed_pool_base(void* pMemory = NULL)
00167             : mpHead((Link*)pMemory)
00168             , mpNext((Link*)pMemory)
00169             , mpCapacity((Link*)pMemory)
00170             #if EASTL_DEBUG
00171             , mnNodeSize(0) // This is normally set in the init function.
00172             #endif
00173         {
00174             #if EASTL_FIXED_SIZE_TRACKING_ENABLED
00175                 mnCurrentSize = 0;
00176                 mnPeakSize    = 0;
00177             #endif
00178         }
00179 
00180 
        // Assignment is intentionally a no-op: each pool owns its own fixed
        // memory block, which cannot meaningfully be copied.
00183         fixed_pool_base& operator=(const fixed_pool_base&)
00184         {
00185             // By design we do nothing. We don't attempt to deep-copy member data.
00186             return *this;
00187         }
00188 
00189 
        // Lays the pool out over [pMemory, pMemory + memorySize), honoring the
        // requested node size/alignment/alignmentOffset. Defined out of line.
00197         void init(void* pMemory, size_t memorySize, size_t nodeSize,
00198                   size_t alignment, size_t alignmentOffset = 0);
00199 
00200 
        // Returns the high-water mark of simultaneously allocated nodes, or 0
        // when size tracking is compiled out.
00206         size_t peak_size() const
00207         {
00208             #if EASTL_FIXED_SIZE_TRACKING_ENABLED
00209                 return mnPeakSize;
00210             #else
00211                 return 0;
00212             #endif
00213         }
00214 
00215 
        // True if at least one more node can be served, either from the free
        // list or from the unconsumed tail of the reserved region.
00220         bool can_allocate() const
00221         {
00222             return (mpHead != NULL) || (mpNext != mpCapacity);
00223         }
00224 
00225     public:
        // Free-list node: freed node memory is reinterpreted as a Link so the
        // pool needs no side storage for bookkeeping.
00228         struct Link
00229         {
00230             Link* mpNext;
00231         };
00232 
00233         Link*   mpHead;     // Head of the free list of returned nodes.
00234         Link*   mpNext;     // Next never-used node in the reserved region.
00235         Link*   mpCapacity; // One past the end of the reserved region.
00236         size_t  mnNodeSize; // Size in bytes of each node (set by init()).
00237 
00238         #if EASTL_FIXED_SIZE_TRACKING_ENABLED
00239             uint32_t mnCurrentSize; 
00240             uint32_t mnPeakSize;    
00241         #endif
00242 
00243     }; // fixed_pool_base
00244 
00245 
00246 
00247 
00248 
00250     // fixed_pool
00252 
00260     class EASTL_API fixed_pool : public fixed_pool_base
00261     {
00262     public:
00271         fixed_pool(void* pMemory = NULL)
00272             : fixed_pool_base(pMemory)
00273         {
00274         }
00275 
00276 
00281         fixed_pool(void* pMemory, size_t memorySize, size_t nodeSize,
00282                     size_t alignment, size_t alignmentOffset = 0)
00283         {
00284             init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
00285         }
00286 
00287 
00290         fixed_pool& operator=(const fixed_pool&)
00291         {
00292             // By design we do nothing. We don't attempt to deep-copy member data.
00293             return *this;
00294         }
00295 
00296 
00302         void* allocate()
00303         {
00304             Link* pLink = mpHead;
00305 
00306             if(pLink) // If we have space...
00307             {
00308                 #if EASTL_FIXED_SIZE_TRACKING_ENABLED
00309                     if(++mnCurrentSize > mnPeakSize)
00310                         mnPeakSize = mnCurrentSize;
00311                 #endif
00312 
00313                 mpHead = pLink->mpNext;
00314                 return pLink;
00315             }
00316             else
00317             {
00318                 // If there's no free node in the free list, just
00319                 // allocate another from the reserved memory area
00320 
00321                 if(mpNext != mpCapacity)
00322                 {
00323                     pLink = mpNext;
00324 
00325                     mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);
00326 
00327                     #if EASTL_FIXED_SIZE_TRACKING_ENABLED
00328                         if(++mnCurrentSize > mnPeakSize)
00329                             mnPeakSize = mnCurrentSize;
00330                     #endif
00331 
00332                     return pLink;
00333                 }
00334 
00335                 // EASTL_ASSERT(false); To consider: enable this assert. However, we intentionally disable it because this isn't necessarily an assertable error.
00336                 return NULL;
00337             }
00338         }
00339 
00340 
00347         void deallocate(void* p)
00348         {
00349             #if EASTL_FIXED_SIZE_TRACKING_ENABLED
00350                 --mnCurrentSize;
00351             #endif
00352 
00353             ((Link*)p)->mpNext = mpHead;
00354             mpHead = ((Link*)p);
00355         }
00356 
00357 
00358         using fixed_pool_base::can_allocate;
00359 
00360 
00361         const char* get_name() const
00362         {
00363             return EASTL_FIXED_POOL_DEFAULT_NAME;
00364         }
00365 
00366 
00367         void set_name(const char*)
00368         {
00369             // Nothing to do. We don't allocate memory.
00370         }
00371 
00372     }; // fixed_pool
00373 
00374 
00375 
00376 
00377 
00379     // fixed_pool_with_overflow
00381 
00384     template <typename Allocator = EASTLAllocatorType>
00385     class fixed_pool_with_overflow : public fixed_pool_base
00386     {
00387     public:
00388         fixed_pool_with_overflow(void* pMemory = NULL)
00389             : fixed_pool_base(pMemory),
00390               mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
00391         {
00392             // Leave mpPoolBegin, mpPoolEnd uninitialized.
00393         }
00394 
00395 
00396         fixed_pool_with_overflow(void* pMemory, size_t memorySize, size_t nodeSize,
00397                                  size_t alignment, size_t alignmentOffset = 0)
00398             : mOverflowAllocator(EASTL_FIXED_POOL_DEFAULT_NAME)
00399         {
00400             fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
00401 
00402             mpPoolBegin = pMemory;
00403         }
00404 
00405 
00408         fixed_pool_with_overflow& operator=(const fixed_pool_with_overflow& x)
00409         {
00410             #if EASTL_ALLOCATOR_COPY_ENABLED
00411                 mOverflowAllocator = x.mOverflowAllocator;
00412             #else
00413                 (void)x;
00414             #endif
00415 
00416             return *this;
00417         }
00418 
00419 
00420         void init(void* pMemory, size_t memorySize, size_t nodeSize,
00421                     size_t alignment, size_t alignmentOffset = 0)
00422         {
00423             fixed_pool_base::init(pMemory, memorySize, nodeSize, alignment, alignmentOffset);
00424 
00425             mpPoolBegin = pMemory;
00426         }
00427 
00428 
00429         void* allocate()
00430         {
00431             void* p     = NULL;
00432             Link* pLink = mpHead;
00433 
00434             if(pLink)
00435             {
00436                 // Unlink from chain
00437                 p      = pLink;
00438                 mpHead = pLink->mpNext;
00439             }
00440             else
00441             {
00442                 // If there's no free node in the free list, just
00443                 // allocate another from the reserved memory area
00444 
00445                 if(mpNext != mpCapacity)
00446                 {
00447                     p      = pLink = mpNext;
00448                     mpNext = reinterpret_cast<Link*>(reinterpret_cast<char8_t*>(mpNext) + mnNodeSize);
00449                 }
00450                 else
00451                     p = mOverflowAllocator.allocate(mnNodeSize);
00452             }
00453 
00454             #if EASTL_FIXED_SIZE_TRACKING_ENABLED
00455                 if(p && (++mnCurrentSize > mnPeakSize))
00456                     mnPeakSize = mnCurrentSize;
00457             #endif
00458 
00459             return p;
00460         }
00461 
00462 
00463         void deallocate(void* p)
00464         {
00465             #if EASTL_FIXED_SIZE_TRACKING_ENABLED
00466                 --mnCurrentSize;
00467             #endif
00468 
00469             if((p >= mpPoolBegin) && (p < mpCapacity))
00470             {
00471                 ((Link*)p)->mpNext = mpHead;
00472                 mpHead = ((Link*)p);
00473             }
00474             else
00475                 mOverflowAllocator.deallocate(p, (size_t)mnNodeSize);
00476         }
00477 
00478 
00479         using fixed_pool_base::can_allocate;
00480 
00481 
00482         const char* get_name() const
00483         {
00484             return mOverflowAllocator.get_name();
00485         }
00486 
00487 
00488         void set_name(const char* pName)
00489         {
00490             mOverflowAllocator.set_name(pName);
00491         }
00492 
00493     public:
00494         Allocator  mOverflowAllocator;
00495         void*      mpPoolBegin;         // Ideally we wouldn't need this member variable. he problem is that the information
00496                                   // about the pool buffer and object size is stored in the owning container and we
00497           //can't have access to it without increasing the amount of code we need and by templating more code.
00498           //It may turn out that simply storing data here is smaller in the end.
00499 
00500     }; // fixed_pool_with_overflow
00501 
00502 
00503 
00504 
00505 
00507     // fixed_node_allocator
00509 
    // fixed_node_allocator
    //
    // Node allocator used by fixed-size node-based containers (fixed_list,
    // fixed_set, etc.). It serves same-size nodes out of an embedded pool;
    // when bEnableOverflow is true, the pool falls back to 'Allocator' once
    // the fixed buffer is exhausted.
    //
    // Template parameters:
    //     nodeSize            - size in bytes of each node.
    //     nodeCount           - number of nodes the fixed buffer holds.
    //     nodeAlignment       - required alignment of each node.
    //     nodeAlignmentOffset - offset within the node that must be aligned.
    //     bEnableOverflow     - whether to overflow to 'Allocator' when full.
    //     Allocator           - general-purpose allocator used for overflow.
00532     template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
00533     class fixed_node_allocator
00534     {
00535     public:
        // Select the pool implementation at compile time from bEnableOverflow.
00536         typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<Allocator>, fixed_pool>::type  pool_type;
00537         typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>   this_type;
00538         typedef Allocator overflow_allocator_type;
00539 
00540         enum
00541         {
00542             kNodeSize            = nodeSize,
00543             kNodeCount           = nodeCount,
00544             kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
            // NOTE(review): kBufferSize pads with nodeSize-1 (not nodeAlignment-1)
            // as alignment slack; this matches upstream EASTL — confirm intended.
00545             kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
00546             kNodeAlignment       = nodeAlignment,
00547             kNodeAlignmentOffset = nodeAlignmentOffset
00548         };
00549 
00550     public:
        // The pool supplying all node storage (with or without overflow).
00551         pool_type mPool;
00552 
00553     public:
00554         //fixed_node_allocator(const char* pName)
00555         //{
00556         //    mPool.set_name(pName);
00557         //}
00558 
00559 
        // Constructs the allocator over the caller-provided node buffer.
        // NOTE(review): passes kNodesSize while reset() passes kBufferSize —
        // mirrors upstream EASTL; confirm intended.
00560         fixed_node_allocator(void* pNodeBuffer)
00561             : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
00562         {
00563         }
00564 
00565 
        // Copy constructor. The new allocator does NOT share x's buffer: the
        // pool is initialized over x.mPool.mpNext, then pool assignment (a
        // designed no-op for pool memory) runs to copy any overflow-allocator
        // state. The statement order here is deliberate.
00580         fixed_node_allocator(const this_type& x)
00581             : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
00582         {
00583             // Problem: how do we copy mPool.mOverflowAllocator if mPool is fixed_pool_with_overflow?
00584             // Probably we should use mPool = x.mPool, though it seems a little odd to do so after
00585             // doing the copying above.
00586             mPool = x.mPool;
00587         }
00588 
00589 
        // Assignment delegates to pool assignment, which by design does not
        // deep-copy pool memory (only overflow-allocator state, if any).
00590         this_type& operator=(const this_type& x)
00591         {
00592             mPool = x.mPool;
00593             return *this;
00594         }
00595 
00596 
        // Allocates one node; n must equal kNodeSize (asserted in debug).
00597         void* allocate(size_t n, int /*flags*/ = 0)
00598         {
00599             (void)n;
00600             EASTL_ASSERT(n == kNodeSize);
00601             return mPool.allocate();
00602         }
00603 
00604 
        // Aligned allocate; the pool's alignment is fixed at init time, so the
        // alignment/offset arguments are ignored. n must equal kNodeSize.
00605         void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
00606         {
00607             (void)n;
00608             EASTL_ASSERT(n == kNodeSize);
00609             return mPool.allocate();
00610         }
00611 
00612 
        // Returns one node to the pool; the size argument is ignored.
00613         void deallocate(void* p, size_t)
00614         {
00615             mPool.deallocate(p);
00616         }
00617 
00618 
        // True if at least one more node can be allocated without overflow.
00623         bool can_allocate() const
00624         {
00625             return mPool.can_allocate();
00626         }
00627 
00628 
        // Re-initializes the pool over a new buffer. Outstanding allocations
        // are abandoned, not freed.
00634         void reset(void* pNodeBuffer)
00635         {
00636             mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
00637         }
00638 
00639 
00640         const char* get_name() const
00641         {
00642             return mPool.get_name();
00643         }
00644 
00645 
00646         void set_name(const char* pName)
00647         {
00648             mPool.set_name(pName);
00649         }
00650 
00651 
        // Accessors for the overflow allocator; only meaningful when
        // bEnableOverflow is true (mPool is then fixed_pool_with_overflow).
00652         overflow_allocator_type& get_overflow_allocator()
00653         {
00654             return mPool.mOverflowAllocator;
00655         }
00656 
00657 
00658         void set_overflow_allocator(const overflow_allocator_type& allocator)
00659         {
00660             mPool.mOverflowAllocator = allocator;
00661         }
00662 
00663     }; // fixed_node_allocator
00664 
00665 
00666     // This is a near copy of the code above, with the only difference being
00667     // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
00668     // and the get_overflow_allocator / set_overflow_allocator functions.
00669     template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
00670     class fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
00671     {
00672     public:
00673         typedef fixed_pool pool_type;
00674         typedef fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>   this_type;
00675         typedef Allocator overflow_allocator_type;
00676 
00677         enum
00678         {
00679             kNodeSize            = nodeSize,
00680             kNodeCount           = nodeCount,
00681             kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets
00682                                                    //sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
00683             kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
00684             kNodeAlignment       = nodeAlignment,
00685             kNodeAlignmentOffset = nodeAlignmentOffset
00686         };
00687 
00688     public:
00689         pool_type mPool;
00690 
00691     public:
00692         fixed_node_allocator(void* pNodeBuffer)
00693             : mPool(pNodeBuffer, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
00694         {
00695         }
00696 
00697 
00698         fixed_node_allocator(const this_type& x)
00699             : mPool(x.mPool.mpNext, kNodesSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset)
00700         {
00701         }
00702 
00703 
00704         this_type& operator=(const this_type& x)
00705         {
00706             mPool = x.mPool;
00707             return *this;
00708         }
00709 
00710 
00711         void* allocate(size_t n, int /*flags*/ = 0)
00712         {
00713             (void)n;
00714             EASTL_ASSERT(n == kNodeSize);
00715             return mPool.allocate();
00716         }
00717 
00718 
00719         void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
00720         {
00721             (void)n;
00722             EASTL_ASSERT(n == kNodeSize);
00723             return mPool.allocate();
00724         }
00725 
00726 
00727         void deallocate(void* p, size_t)
00728         {
00729             mPool.deallocate(p);
00730         }
00731 
00732 
00733         bool can_allocate() const
00734         {
00735             return mPool.can_allocate();
00736         }
00737 
00738 
00739         void reset(void* pNodeBuffer)
00740         {
00741             mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
00742         }
00743 
00744 
00745         const char* get_name() const
00746         {
00747             return mPool.get_name();
00748         }
00749 
00750 
00751         void set_name(const char* pName)
00752         {
00753             mPool.set_name(pName);
00754         }
00755 
00756 
00757         overflow_allocator_type& get_overflow_allocator()
00758         {
00759             EASTL_ASSERT(false);
00760             return *(overflow_allocator_type*)NULL; // This is not pretty.
00761         }
00762 
00763 
00764         void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
00765         {
00766             // We don't have an overflow allocator.
00767             EASTL_ASSERT(false);
00768         }
00769 
00770     }; // fixed_node_allocator
00771 
00772 
00773 
00775     // global operators
00777 
00778     template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
00779     inline bool operator==(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
00780                            const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
00781     {
00782         return (&a == &b); // They are only equal if they are the same object.
00783     }
00784 
00785 
00786     template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
00787     inline bool operator!=(const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
00788                            const fixed_node_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
00789     {
00790         return (&a != &b); // They are only equal if they are the same object.
00791     }
00792 
00793 
00794 
00795 
00796 
00797 
00799     // fixed_hashtable_allocator
00801 
    // fixed_hashtable_allocator
    //
    // Allocator used by fixed hashtable containers (fixed_hash_map, etc.).
    // It serves two kinds of requests, distinguished by the kAllocFlagBuckets
    // flag in the allocate() 'flags' argument:
    //   * node allocations come from the embedded fixed pool (with optional
    //     overflow to 'Allocator'), and
    //   * the single bucket-array allocation is satisfied by returning the
    //     caller-provided bucket buffer (mpBucketBuffer).
00815     template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
00816     class fixed_hashtable_allocator
00817     {
00818     public:
        // Select the pool implementation at compile time from bEnableOverflow.
00819         typedef typename type_select<bEnableOverflow, fixed_pool_with_overflow<Allocator>, fixed_pool>::type                                 pool_type;
00820         typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>  this_type;
00821         typedef Allocator overflow_allocator_type;
00822 
00823         enum
00824         {
00825             kBucketCount         = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
            // NOTE(review): kBucketsSize uses bucketCount, not kBucketCount; the
            // caller-supplied bucketCount is presumably already sized to include
            // the terminating bucket — confirm against the fixed_hash containers.
00826             kBucketsSize         = bucketCount * sizeof(void*),
00827             kNodeSize            = nodeSize,
00828             kNodeCount           = nodeCount,
00829             kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the
00830                                                    //compiler sets sizeof(T) to be a multiple of alignof(T), and so sizeof(T) is always >= alignof(T).
00831             kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
00832                                                    // Don't need to include kBucketsSize in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
00833             kNodeAlignment       = nodeAlignment,
00834             kNodeAlignmentOffset = nodeAlignmentOffset,
00835             kAllocFlagBuckets    = 0x00400000               // Flag to allocator which indicates that we are allocating buckets and not nodes.
00836         };
00837 
00838     protected:
00839         pool_type mPool;           // Fixed pool supplying node storage.
00840         void*     mpBucketBuffer;  // Caller-provided storage for the bucket array.
00841 
00842     public:
00843         //fixed_hashtable_allocator(const char* pName)
00844         //{
00845         //    mPool.set_name(pName);
00846         //}
00847 
        // Node-buffer-only constructor; leaves the bucket buffer NULL.
00848         fixed_hashtable_allocator(void* pNodeBuffer)
00849             : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
00850               mpBucketBuffer(NULL)
00851         {
00852             // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
00853         }
00854 
00855 
        // Constructs the allocator over separate node and bucket buffers.
00856         fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
00857             : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
00858               mpBucketBuffer(pBucketBuffer)
00859         {
00860         }
00861 
00862 
        // Copy constructor. The new allocator does NOT share x's buffers: the
        // pool is initialized over x.mPool.mpHead, then pool assignment (a
        // designed no-op for pool memory) runs to copy any overflow-allocator
        // state. The statement order here is deliberate.
00868         fixed_hashtable_allocator(const this_type& x)
00869             : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
00870               mpBucketBuffer(x.mpBucketBuffer)
00871         {
00872             // Problem: how do we copy mPool.mOverflowAllocator if mPool is fixed_pool_with_overflow?
00873             // Probably we should use mPool = x.mPool, though it seems a little odd to do so after
00874             // doing the copying above.
00875             mPool = x.mPool;
00876         }
00877 
00878 
        // Assignment delegates to pool assignment, which by design does not
        // deep-copy pool memory (only overflow-allocator state, if any).
00879         fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
00880         {
00881             mPool = x.mPool;
00882             return *this; // Do nothing. Ignore the source type.
00883         }
00884 
00885 
        // Allocates either a node (from the pool) or the bucket array (the
        // fixed bucket buffer), selected by kAllocFlagBuckets in 'flags'.
00886         void* allocate(size_t n, int flags = 0)
00887         {
00888             // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
00889             EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
00890             if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
00891             {
00892                 EASTL_ASSERT(n == kNodeSize);  (void)n; // Make unused var warning go away.
00893                 return mPool.allocate();
00894             }
00895 
00896             EASTL_ASSERT(n <= kBucketsSize);
00897             return mpBucketBuffer;
00898         }
00899 
00900 
        // Aligned variant of allocate(); alignment/offset are ignored because
        // both the pool and the bucket buffer have fixed placement.
00901         void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
00902         {
00903             // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
00904             if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
00905             {
00906                 EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
00907                 return mPool.allocate();
00908             }
00909 
00910             // To consider: allow for bucket allocations to overflow.
00911             EASTL_ASSERT(n <= kBucketsSize);
00912             return mpBucketBuffer;
00913         }
00914 
00915 
        // Frees a node back to the pool; freeing the bucket buffer is a no-op
        // since that memory is owned by the container.
00916         void deallocate(void* p, size_t)
00917         {
00918             if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
00919                 mPool.deallocate(p);
00920         }
00921 
00922 
        // True if at least one more node can be allocated without overflow.
00923         bool can_allocate() const
00924         {
00925             return mPool.can_allocate();
00926         }
00927 
00928 
        // Re-initializes the node pool over a new buffer. Outstanding node
        // allocations are abandoned, not freed.
00929         void reset(void* pNodeBuffer)
00930         {
00931             // No need to modify mpBucketBuffer, as that is constant.
00932             mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
00933         }
00934 
00935 
00936         const char* get_name() const
00937         {
00938             return mPool.get_name();
00939         }
00940 
00941 
00942         void set_name(const char* pName)
00943         {
00944             mPool.set_name(pName);
00945         }
00946 
00947 
        // Accessors for the overflow allocator; only meaningful when
        // bEnableOverflow is true (mPool is then fixed_pool_with_overflow).
00948         overflow_allocator_type& get_overflow_allocator()
00949         {
00950             return mPool.mOverflowAllocator;
00951         }
00952 
00953 
00954         void set_overflow_allocator(const overflow_allocator_type& allocator)
00955         {
00956             mPool.mOverflowAllocator = allocator;
00957         }
00958 
00959     }; // fixed_hashtable_allocator
00960 
00961 
00962     // This is a near copy of the code above, with the only difference being
00963     // the 'false' bEnableOverflow template parameter, the pool_type and this_type typedefs,
00964     // and the get_overflow_allocator / set_overflow_allocator functions.
00965     template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
00966     class fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
00967     {
00968     public:
00969         typedef fixed_pool pool_type;
00970         typedef fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>  this_type;
00971         typedef Allocator overflow_allocator_type;
00972 
00973         enum
00974         {
00975             kBucketCount         = bucketCount + 1, // '+1' because the hash table needs a null terminating bucket.
00976             kBucketsSize         = bucketCount * sizeof(void*),
00977             kNodeSize            = nodeSize,
00978             kNodeCount           = nodeCount,
00979             kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler
00980                                                    //sets sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
00981             kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset, // Don't need to include kBucketsSize
00982                                                                  //in this calculation, as fixed_hash_xxx containers have a separate buffer for buckets.
00983             kNodeAlignment       = nodeAlignment,
00984             kNodeAlignmentOffset = nodeAlignmentOffset,
00985             kAllocFlagBuckets    = 0x00400000               // Flag to allocator which indicates that we are allocating buckets and not nodes.
00986         };
00987 
00988     protected:
00989         pool_type mPool;
00990         void*     mpBucketBuffer;
00991 
00992     public:
00993         //fixed_hashtable_allocator(const char* pName)
00994         //{
00995         //    mPool.set_name(pName);
00996         //}
00997 
00998         fixed_hashtable_allocator(void* pNodeBuffer)
00999             : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
01000               mpBucketBuffer(NULL)
01001         {
01002             // EASTL_ASSERT(false); // As it stands now, this is not supposed to be called.
01003         }
01004 
01005 
01006         fixed_hashtable_allocator(void* pNodeBuffer, void* pBucketBuffer)
01007             : mPool(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
01008               mpBucketBuffer(pBucketBuffer)
01009         {
01010         }
01011 
01012 
01018         fixed_hashtable_allocator(const this_type& x)
01019             : mPool(x.mPool.mpHead, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset),
01020               mpBucketBuffer(x.mpBucketBuffer)
01021         {
01022         }
01023 
01024 
01025         fixed_hashtable_allocator& operator=(const fixed_hashtable_allocator& x)
01026         {
01027             mPool = x.mPool;
01028             return *this; // Do nothing. Ignore the source type.
01029         }
01030 
01031 
01032         void* allocate(size_t n, int flags = 0)
01033         {
01034             // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
01035             EASTL_CT_ASSERT(kAllocFlagBuckets == 0x00400000); // Currently we expect this to be so, because the hashtable has a copy of this enum.
01036             if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
01037             {
01038                 EASTL_ASSERT(n == kNodeSize);  (void)n; // Make unused var warning go away.
01039                 return mPool.allocate();
01040             }
01041 
01042             EASTL_ASSERT(n <= kBucketsSize);
01043             return mpBucketBuffer;
01044         }
01045 
01046 
01047         void* allocate(size_t n, size_t /*alignment*/, size_t /*offset*/, int flags = 0)
01048         {
01049             // We expect that the caller uses kAllocFlagBuckets when it wants us to allocate buckets instead of nodes.
01050             if((flags & kAllocFlagBuckets) == 0) // If we are allocating nodes and (probably) not buckets...
01051             {
01052                 EASTL_ASSERT(n == kNodeSize); (void)n; // Make unused var warning go away.
01053                 return mPool.allocate();
01054             }
01055 
01056             // To consider: allow for bucket allocations to overflow.
01057             EASTL_ASSERT(n <= kBucketsSize);
01058             return mpBucketBuffer;
01059         }
01060 
01061 
01062         void deallocate(void* p, size_t)
01063         {
01064             if(p != mpBucketBuffer) // If we are freeing a node and not buckets...
01065                 mPool.deallocate(p);
01066         }
01067 
01068 
01069         bool can_allocate() const
01070         {
01071             return mPool.can_allocate();
01072         }
01073 
01074 
01075         void reset(void* pNodeBuffer)
01076         {
01077             // No need to modify mpBucketBuffer, as that is constant.
01078             mPool.init(pNodeBuffer, kBufferSize, kNodeSize, kNodeAlignment, kNodeAlignmentOffset);
01079         }
01080 
01081 
01082         const char* get_name() const
01083         {
01084             return mPool.get_name();
01085         }
01086 
01087 
01088         void set_name(const char* pName)
01089         {
01090             mPool.set_name(pName);
01091         }
01092 
01093 
01094         overflow_allocator_type& get_overflow_allocator()
01095         {
01096             EASTL_ASSERT(false);
01097             return *(overflow_allocator_type*)NULL; // This is not pretty.
01098         }
01099 
01100         void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
01101         {
01102             // We don't have an overflow allocator.
01103             EASTL_ASSERT(false);
01104         }
01105 
01106     }; // fixed_hashtable_allocator
01107 
01108 
01110     // global operators
01112 
01113     template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
01114     inline bool operator==(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
01115                            const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
01116     {
01117         return (&a == &b); // They are only equal if they are the same object.
01118     }
01119 
01120 
01121     template <size_t bucketCount, size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
01122     inline bool operator!=(const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
01123                            const fixed_hashtable_allocator<bucketCount, nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
01124     {
01125         return (&a != &b); // They are only equal if they are the same object.
01126     }
01127 
01128 
01129 
01130 
01131 
01132 
01134     // fixed_vector_allocator
01136 
01147     template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator = EASTLAllocatorType>
01148     class fixed_vector_allocator
01149     {
01150     public:
01151         typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>  this_type;
01152         typedef Allocator overflow_allocator_type;
01153 
01154         enum
01155         {
01156             kNodeSize            = nodeSize,
01157             kNodeCount           = nodeCount,
01158             kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets sizeof(T)
01159                                                    //to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
01160             kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
01161             kNodeAlignment       = nodeAlignment,
01162             kNodeAlignmentOffset = nodeAlignmentOffset
01163         };
01164 
01165     public:
01166         overflow_allocator_type mOverflowAllocator;
01167         void*                   mpPoolBegin;         // To consider: Find some way to make this data unnecessary, without increasing template proliferation.
01168 
01169     public:
01170         //fixed_vector_allocator(const char* pName = NULL)
01171         //{
01172         //    mOverflowAllocator.set_name(pName);
01173         //}
01174 
01175         fixed_vector_allocator(void* pNodeBuffer)
01176             : mpPoolBegin(pNodeBuffer)
01177         {
01178         }
01179 
01180         fixed_vector_allocator& operator=(const fixed_vector_allocator& x)
01181         {
01182             #if EASTL_ALLOCATOR_COPY_ENABLED
01183                 mOverflowAllocator = x.mOverflowAllocator;
01184             #else
01185                 (void)x;
01186             #endif
01187 
01188             return *this; // Do nothing. Ignore the source type.
01189         }
01190 
01191         void* allocate(size_t n, int flags = 0)
01192         {
01193             return mOverflowAllocator.allocate(n, flags);
01194         }
01195 
01196         void* allocate(size_t n, size_t alignment, size_t offset, int flags = 0)
01197         {
01198             return mOverflowAllocator.allocate(n, alignment, offset, flags);
01199         }
01200 
01201         void deallocate(void* p, size_t n)
01202         {
01203             if(p != mpPoolBegin)
01204                 mOverflowAllocator.deallocate(p, n); // Can't do this to our own allocation.
01205         }
01206 
01207         const char* get_name() const
01208         {
01209             return mOverflowAllocator.get_name();
01210         }
01211 
01212         void set_name(const char* pName)
01213         {
01214             mOverflowAllocator.set_name(pName);
01215         }
01216 
01217         overflow_allocator_type& get_overflow_allocator()
01218         {
01219             return mOverflowAllocator;
01220         }
01221 
01222         void set_overflow_allocator(const overflow_allocator_type& allocator)
01223         {
01224             mOverflowAllocator = allocator;
01225         }
01226 
01227     }; // fixed_vector_allocator
01228 
01229 
01230     template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, typename Allocator>
01231     class fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>
01232     {
01233     public:
01234         typedef fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, false, Allocator>  this_type;
01235         typedef Allocator overflow_allocator_type;
01236 
01237         enum
01238         {
01239             kNodeSize            = nodeSize,
01240             kNodeCount           = nodeCount,
01241             kNodesSize           = nodeCount * nodeSize, // Note that the kBufferSize calculation assumes that the compiler sets
01242                                                    //sizeof(T) to be a multiple alignof(T), and so sizeof(T) is always >= alignof(T).
01243             kBufferSize          = kNodesSize + ((nodeAlignment > 1) ? nodeSize-1 : 0) + nodeAlignmentOffset,
01244             kNodeAlignment       = nodeAlignment,
01245             kNodeAlignmentOffset = nodeAlignmentOffset
01246         };
01247 
01248         //fixed_vector_allocator(const char* = NULL) // This char* parameter is present so that this class can be like the other version.
01249         //{
01250         //}
01251 
01252         fixed_vector_allocator(void* /*pNodeBuffer*/)
01253         {
01254         }
01255 
01256         void* allocate(size_t /*n*/, int /*flags*/ = 0)
01257         {
01258             EASTL_ASSERT(false); // A fixed_vector should not reallocate, else the user has exhausted its space.
01259             return NULL;
01260         }
01261 
01262         void* allocate(size_t /*n*/, size_t /*alignment*/, size_t /*offset*/, int /*flags*/ = 0)
01263         {
01264             EASTL_ASSERT(false);
01265             return NULL;
01266         }
01267 
01268         void deallocate(void* /*p*/, size_t /*n*/)
01269         {
01270         }
01271 
01272         const char* get_name() const
01273         {
01274             return EASTL_FIXED_POOL_DEFAULT_NAME;
01275         }
01276 
01277         void set_name(const char* /*pName*/)
01278         {
01279         }
01280 
01281         overflow_allocator_type& get_overflow_allocator()
01282         {
01283             EASTL_ASSERT(false);
01284             overflow_allocator_type* pNULL = NULL;
01285             return *pNULL; // This is not pretty, but it should never execute.
01286         }
01287 
01288         void set_overflow_allocator(const overflow_allocator_type& /*allocator*/)
01289         {
01290             // We don't have an overflow allocator.
01291             EASTL_ASSERT(false);
01292         }
01293 
01294     }; // fixed_vector_allocator
01295 
01296 
01298     // global operators
01300 
01301     template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
01302     inline bool operator==(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
01303                            const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
01304     {
01305         return (&a == &b); // They are only equal if they are the same object.
01306     }
01307 
01308 
01309     template <size_t nodeSize, size_t nodeCount, size_t nodeAlignment, size_t nodeAlignmentOffset, bool bEnableOverflow, typename Allocator>
01310     inline bool operator!=(const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& a,
01311                            const fixed_vector_allocator<nodeSize, nodeCount, nodeAlignment, nodeAlignmentOffset, bEnableOverflow, Allocator>& b)
01312     {
01313         return (&a != &b); // They are only equal if they are the same object.
01314     }
01315 
01316 
01317 
01318 
01319 
01321     // fixed_swap
01323 
01332     template <typename Container>
01333     void fixed_swap(Container& a, Container& b)
01334     {
01335         // We must do a brute-force swap, because fixed containers cannot share memory allocations.
01336         eastl::less<size_t> compare;
01337 
01338         if(compare(sizeof(a), EASTL_MAX_STACK_USAGE)) // Using compare instead of just '<' avoids a stubborn compiler warning.
01339         {
01340             // Note: The C++ language does not define what happens when you declare
01341             // an object in too small of stack space but the object is never created.
01342             // This may result in a stack overflow exception on some systems, depending
01343             // on how they work and possibly depending on enabled debug functionality.
01344 
01345             const Container temp(a); // Can't use global swap because that could
01346             a = b;                   // itself call this swap function in return.
01347             b = temp;
01348         }
01349         else
01350         {
01351             EASTLAllocatorType allocator(*EASTLAllocatorDefault(), EASTL_TEMP_DEFAULT_NAME);
01352             void* const pMemory = allocator.allocate(sizeof(a));
01353 
01354             if(pMemory)
01355             {
01356                 Container* const pTemp = ::new(pMemory) Container(a);
01357                 a = b;
01358                 b = *pTemp;
01359 
01360                 pTemp->~Container();
01361                 allocator.deallocate(pMemory, sizeof(a));
01362             }
01363         }
01364     }
01365 
01366 
01367 
01368 } // namespace eastl
01369 
01370 
01371 #endif // Header include guard
 All Classes Namespaces Files Functions Variables Typedefs Enumerations Enumerator Friends