From: DaHyeong Kim
Date: Fri, 15 Mar 2013 11:20:46 +0000 (+0900)
Subject: Apply patch for [CVE-2012-2677][boost] ordered_malloc() overflow
X-Git-Tag: submit/trunk/20140419.032036^0
X-Git-Url: http://review.tizen.org/git/?a=commitdiff_plain;h=HEAD;p=external%2Fboost.git

Apply patch for [CVE-2012-2677][boost] ordered_malloc() overflow
---

diff --git a/boost/pool/pool.hpp b/boost/pool/pool.hpp
index 13c9875..672e0b8 100644
--- a/boost/pool/pool.hpp
+++ b/boost/pool/pool.hpp
@@ -26,12 +26,25 @@
 #include <boost/pool/poolfwd.hpp>
 
-// boost::details::pool::ct_lcm
-#include <boost/pool/detail/ct_gcd_lcm.hpp>
-// boost::details::pool::lcm
-#include <boost/pool/detail/gcd_lcm.hpp>
+// std::numeric_limits
+#include <boost/limits.hpp>
+// boost::math::static_lcm
+#include <boost/math/common_factor_ct.hpp>
 // boost::simple_segregated_storage
 #include <boost/pool/simple_segregated_storage.hpp>
+// boost::alignment_of
+#include <boost/type_traits/alignment_of.hpp>
+// BOOST_ASSERT
+#include <boost/assert.hpp>
+
+#ifdef BOOST_POOL_INSTRUMENT
+#include <iostream>
+#include <iomanip>
+#endif
+#ifdef BOOST_POOL_VALGRIND
+#include <set>
+#include <valgrind/memcheck.h>
+#endif
 
 #ifdef BOOST_NO_STDC_NAMESPACE
  namespace std { using ::malloc; using ::free; }
 #endif
@@ -44,41 +57,99 @@
 // parameter.
 // Thanks to Jens Maurer for pointing this out!
 
-namespace boost {
+/*!
+  \file
+  \brief Provides class \ref pool: a fast memory allocator that guarantees proper alignment of all allocated chunks,
+  and which extends and generalizes the framework provided by the simple segregated storage solution.
+  Also provides two UserAllocator classes which can be used in conjuction with \ref pool.
+*/
+
+/*!
+  \mainpage Boost.Pool Memory Allocation Scheme
+
+  \section intro_sec Introduction
+
+  Pool allocation is a memory allocation scheme that is very fast, but limited in its usage.
+
+  This Doxygen-style documentation is complementary to the
+  full Quickbook-generated html and pdf documentation at www.boost.org.
+
+  This page generated from file pool.hpp.
+
+*/
+
+#ifdef BOOST_MSVC
+#pragma warning(push)
+#pragma warning(disable:4127)  // Conditional expression is constant
+#endif
+
+namespace boost
+{
+
+//! \brief Allocator used as the default template parameter for
+//! a UserAllocator
+//! template parameter.  Uses new and delete.
 struct default_user_allocator_new_delete
 {
-  typedef std::size_t size_type;
-  typedef std::ptrdiff_t difference_type;
+  typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
+  typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.
 
   static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes)
-  { return new (std::nothrow) char[bytes]; }
+  { //! Attempts to allocate n bytes from the system. Returns 0 if out-of-memory
+    return new (std::nothrow) char[bytes];
+  }
   static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block)
-  { delete [] block; }
+  { //! Attempts to de-allocate block.
+    //! \pre Block must have been previously returned from a call to UserAllocator::malloc.
+    delete [] block;
+  }
 };
 
+//! \brief UserAllocator
+//! used as template parameter for \ref pool and \ref object_pool.
+//! Uses malloc and free internally.
 struct default_user_allocator_malloc_free
 {
-  typedef std::size_t size_type;
-  typedef std::ptrdiff_t difference_type;
+  typedef std::size_t size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated.
+  typedef std::ptrdiff_t difference_type; //!< A signed integral type that can represent the difference of any two pointers.
static char * malloc BOOST_PREVENT_MACRO_SUBSTITUTION(const size_type bytes) - { return static_cast(std::malloc(bytes)); } + { return static_cast((std::malloc)(bytes)); } static void free BOOST_PREVENT_MACRO_SUBSTITUTION(char * const block) - { std::free(block); } + { (std::free)(block); } }; -namespace details { +namespace details +{ //! Implemention only. -// PODptr is a class that pretends to be a "pointer" to different class types -// that don't really exist. It provides member functions to access the "data" -// of the "object" it points to. Since these "class" types are of variable -// size, and contains some information at the *end* of its memory (for -// alignment reasons), PODptr must contain the size of this "class" as well as -// the pointer to this "object". template class PODptr -{ +{ //! PODptr is a class that pretends to be a "pointer" to different class types + //! that don't really exist. It provides member functions to access the "data" + //! of the "object" it points to. Since these "class" types are of variable + //! size, and contains some information at the *end* of its memory + //! (for alignment reasons), + //! PODptr must contain the size of this "class" as well as the pointer to this "object". + + /*! \details A PODptr holds the location and size of a memory block allocated from the system. + Each memory block is split logically into three sections: + + Chunk area. This section may be different sizes. PODptr does not care what the size of the chunks is, + but it does care (and keep track of) the total size of the chunk area. + + Next pointer. This section is always the same size for a given SizeType. It holds a pointer + to the location of the next memory block in the memory block list, or 0 if there is no such block. + + Next size. This section is always the same size for a given SizeType. It holds the size of the + next memory block in the memory block list. + +The PODptr class just provides cleaner ways of dealing with raw memory blocks. + +A PODptr object is either valid or invalid. An invalid PODptr is analogous to a null pointer. +The default constructor for PODptr will result in an invalid object. +Calling the member function invalidate will result in that object becoming invalid. +The member function valid can be used to test for validity. +*/ public: typedef SizeType size_type; @@ -87,84 +158,178 @@ class PODptr size_type sz; char * ptr_next_size() const - { return (ptr + sz - sizeof(size_type)); } + { + return (ptr + sz - sizeof(size_type)); + } char * ptr_next_ptr() const { return (ptr_next_size() - - pool::ct_lcm::value); + math::static_lcm::value); } public: PODptr(char * const nptr, const size_type nsize) - :ptr(nptr), sz(nsize) { } + :ptr(nptr), sz(nsize) + { + //! A PODptr may be created to point to a memory block by passing + //! the address and size of that memory block into the constructor. + //! A PODptr constructed in this way is valid. + } PODptr() - :ptr(0), sz(0) { } - - bool valid() const { return (begin() != 0); } - void invalidate() { begin() = 0; } - char * & begin() { return ptr; } - char * begin() const { return ptr; } - char * end() const { return ptr_next_ptr(); } - size_type total_size() const { return sz; } + : ptr(0), sz(0) + { //! default constructor for PODptr will result in an invalid object. + } + + bool valid() const + { //! A PODptr object is either valid or invalid. + //! An invalid PODptr is analogous to a null pointer. + //! \returns true if PODptr is valid, false if invalid. 
+ return (begin() != 0); + } + void invalidate() + { //! Make object invalid. + begin() = 0; + } + char * & begin() + { //! Each PODptr keeps the address and size of its memory block. + //! \returns The address of its memory block. + return ptr; + } + char * begin() const + { //! Each PODptr keeps the address and size of its memory block. + //! \return The address of its memory block. + return ptr; + } + char * end() const + { //! \returns begin() plus element_size (a 'past the end' value). + return ptr_next_ptr(); + } + size_type total_size() const + { //! Each PODptr keeps the address and size of its memory block. + //! The address may be read or written by the member functions begin. + //! The size of the memory block may only be read, + //! \returns size of the memory block. + return sz; + } size_type element_size() const - { - return (sz - sizeof(size_type) - - pool::ct_lcm::value); + { //! \returns size of element pointer area. + return static_cast(sz - sizeof(size_type) - + math::static_lcm::value); } size_type & next_size() const - { + { //! + //! \returns next_size. return *(static_cast(static_cast((ptr_next_size())))); } char * & next_ptr() const - { return *(static_cast(static_cast(ptr_next_ptr()))); } + { //! \returns pointer to next pointer area. + return *(static_cast(static_cast(ptr_next_ptr()))); + } PODptr next() const - { return PODptr(next_ptr(), next_size()); } + { //! \returns next PODptr. + return PODptr(next_ptr(), next_size()); + } void next(const PODptr & arg) const - { + { //! Sets next PODptr. next_ptr() = arg.begin(); next_size() = arg.total_size(); } -}; - +}; // class PODptr } // namespace details +#ifndef BOOST_POOL_VALGRIND +/*! + \brief A fast memory allocator that guarantees proper alignment of all allocated chunks. + + \details Whenever an object of type pool needs memory from the system, + it will request it from its UserAllocator template parameter. + The amount requested is determined using a doubling algorithm; + that is, each time more system memory is allocated, + the amount of system memory requested is doubled. + + Users may control the doubling algorithm by using the following extensions: + + Users may pass an additional constructor parameter to pool. + This parameter is of type size_type, + and is the number of chunks to request from the system + the first time that object needs to allocate system memory. + The default is 32. This parameter may not be 0. + + Users may also pass an optional third parameter to pool's + constructor. This parameter is of type size_type, + and sets a maximum size for allocated chunks. When this + parameter takes the default value of 0, then there is no upper + limit on chunk size. + + Finally, if the doubling algorithm results in no memory + being allocated, the pool will backtrack just once, halving + the chunk size and trying again. + + UserAllocator type - the method that the Pool will use to allocate memory from the system. + + There are essentially two ways to use class pool: the client can call \ref malloc() and \ref free() to allocate + and free single chunks of memory, this is the most efficient way to use a pool, but does not allow for + the efficient allocation of arrays of chunks. Alternatively, the client may call \ref ordered_malloc() and \ref + ordered_free(), in which case the free list is maintained in an ordered state, and efficient allocation of arrays + of chunks are possible. However, this latter option can suffer from poor performance when large numbers of + allocations are performed. 
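
  A minimal usage sketch (illustrative only, assuming the default UserAllocator
  parameter declared in poolfwd.hpp):

  \code
  #include <boost/pool/pool.hpp>

  int main()
  {
     boost::pool<> p(sizeof(int));        // each chunk holds sizeof(int) bytes

     void * one = (p.malloc)();           // single chunk, unordered free list
     if (one)
        (p.free)(one);

     void * many = p.ordered_malloc(8);   // 8 contiguous chunks, ordered free list
     if (many)
        p.ordered_free(many, 8);

     return 0;                            // ~pool() frees all blocks via purge_memory()
  }
  \endcode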
+ +*/ template -class pool: protected simple_segregated_storage< - typename UserAllocator::size_type> +class pool: protected simple_segregated_storage < typename UserAllocator::size_type > { public: - typedef UserAllocator user_allocator; - typedef typename UserAllocator::size_type size_type; - typedef typename UserAllocator::difference_type difference_type; + typedef UserAllocator user_allocator; //!< User allocator. + typedef typename UserAllocator::size_type size_type; //!< An unsigned integral type that can represent the size of the largest object to be allocated. + typedef typename UserAllocator::difference_type difference_type; //!< A signed integral type that can represent the difference of any two pointers. private: - BOOST_STATIC_CONSTANT(unsigned, min_alloc_size = - (::boost::details::pool::ct_lcm::value) ); + BOOST_STATIC_CONSTANT(size_type, min_alloc_size = + (::boost::math::static_lcm::value) ); + BOOST_STATIC_CONSTANT(size_type, min_align = + (::boost::math::static_lcm< ::boost::alignment_of::value, ::boost::alignment_of::value>::value) ); - // Returns 0 if out-of-memory - // Called if malloc/ordered_malloc needs to resize the free list - void * malloc_need_resize(); - void * ordered_malloc_need_resize(); + //! \returns 0 if out-of-memory. + //! Called if malloc/ordered_malloc needs to resize the free list. + void * malloc_need_resize(); //! Called if malloc needs to resize the free list. + void * ordered_malloc_need_resize(); //! Called if ordered_malloc needs to resize the free list. protected: - details::PODptr list; + details::PODptr list; //!< List structure holding ordered blocks. - simple_segregated_storage & store() { return *this; } - const simple_segregated_storage & store() const { return *this; } + simple_segregated_storage & store() + { //! \returns pointer to store. + return *this; + } + const simple_segregated_storage & store() const + { //! \returns pointer to store. + return *this; + } const size_type requested_size; size_type next_size; size_type start_size; size_type max_size; - // finds which POD in the list 'chunk' was allocated from + //! finds which POD in the list 'chunk' was allocated from. details::PODptr find_POD(void * const chunk) const; - // is_from() tests a chunk to determine if it belongs in a block + // is_from() tests a chunk to determine if it belongs in a block. static bool is_from(void * const chunk, char * const i, const size_type sizeof_i) - { + { //! \param chunk chunk to check if is from this pool. + //! \param i memory chunk at i with element sizeof_i. + //! \param sizeof_i element size (size of the chunk area of that block, not the total size of that block). + //! \returns true if chunk was allocated or may be returned. + //! as the result of a future allocation. + //! + //! Returns false if chunk was allocated from some other pool, + //! or may be returned as the result of a future allocation from some other pool. + //! Otherwise, the return value is meaningless. + //! + //! Note that this function may not be used to reliably test random pointer values. + // We use std::less_equal and std::less to test 'chunk' // against the array bounds because standard operators // may return unspecified results. @@ -172,35 +337,63 @@ class pool: protected simple_segregated_storage< // defined for pointers to objects that are 1) in the same array, or // 2) subobjects of the same object [5.9/2]. 
// The functor objects guarantee a total order for any pointer [20.3.3/8] -//WAS: -// return (std::less_equal()(static_cast(i), chunk) -// && std::less()(chunk, -// static_cast(i + sizeof_i))); std::less_equal lt_eq; std::less lt; return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i)); } size_type alloc_size() const - { - const unsigned min_size = min_alloc_size; - return details::pool::lcm(requested_size, min_size); + { //! Calculated size of the memory chunks that will be allocated by this Pool. + //! \returns allocated size. + // For alignment reasons, this used to be defined to be lcm(requested_size, sizeof(void *), sizeof(size_type)), + // but is now more parsimonious: just rounding up to the minimum required alignment of our housekeeping data + // when required. This works provided all alignments are powers of two. + size_type s = (std::max)(requested_size, min_alloc_size); + size_type rem = s % min_align; + if(rem) + s += min_align - rem; + BOOST_ASSERT(s >= min_alloc_size); + BOOST_ASSERT(s % min_align == 0); + return s; + } + + size_type max_chunks() const + { //! Calculated maximum number of memory chunks that can be allocated in a single call by this Pool. + size_type partition_size = alloc_size(); + size_type POD_size = math::static_lcm::value + sizeof(size_type); + size_type max_chunks = (std::numeric_limits::max() - POD_size) / alloc_size(); + + return max_chunks; } - // for the sake of code readability :) static void * & nextof(void * const ptr) - { return *(static_cast(ptr)); } + { //! \returns Pointer dereferenced. + //! (Provided and used for the sake of code readability :) + return *(static_cast(ptr)); + } public: - // The second parameter here is an extension! // pre: npartition_size != 0 && nnext_size != 0 explicit pool(const size_type nrequested_size, const size_type nnext_size = 32, const size_type nmax_size = 0) - :list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size),max_size(nmax_size) - { } + : + list(0, 0), requested_size(nrequested_size), next_size(nnext_size), start_size(nnext_size),max_size(nmax_size) + { //! Constructs a new empty Pool that can be used to allocate chunks of size RequestedSize. + //! \param nrequested_size Requested chunk size + //! \param nnext_size parameter is of type size_type, + //! is the number of chunks to request from the system + //! the first time that object needs to allocate system memory. + //! The default is 32. This parameter may not be 0. + //! \param nmax_size is the maximum number of chunks to allocate in one block. + set_next_size(nnext_size); + set_max_size(nmax_size); + } - ~pool() { purge_memory(); } + ~pool() + { //! Destructs the Pool, freeing its list of memory blocks. + purge_memory(); + } // Releases memory blocks that don't have chunks allocated // pre: lists are ordered @@ -211,19 +404,41 @@ class pool: protected simple_segregated_storage< // Returns true if memory was actually deallocated bool purge_memory(); - // These functions are extensions! - size_type get_next_size() const { return next_size; } - void set_next_size(const size_type nnext_size) { next_size = start_size = nnext_size; } - size_type get_max_size() const { return max_size; } - void set_max_size(const size_type nmax_size) { max_size = nmax_size; } - size_type get_requested_size() const { return requested_size; } + size_type get_next_size() const + { //! Number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be 0. + //! 
\returns next_size; + return next_size; + } + void set_next_size(const size_type nnext_size) + { //! Set number of chunks to request from the system the next time that object needs to allocate system memory. This value should never be set to 0. + BOOST_USING_STD_MIN(); + next_size = start_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(nnext_size, max_chunks()); + } + size_type get_max_size() const + { //! \returns max_size. + return max_size; + } + void set_max_size(const size_type nmax_size) + { //! Set max_size. + BOOST_USING_STD_MIN(); + max_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(nmax_size, max_chunks()); + } + size_type get_requested_size() const + { //! \returns the requested size passed into the constructor. + //! (This value will not change during the lifetime of a Pool object). + return requested_size; + } // Both malloc and ordered_malloc do a quick inlined check first for any // free chunks. Only if we need to get another memory block do we call // the non-inlined *_need_resize() functions. // Returns 0 if out-of-memory void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION() - { + { //! Allocates a chunk of memory. Searches in the list of memory blocks + //! for a block that has a free chunk, and returns that free chunk if found. + //! Otherwise, creates a new memory block, adds its free list to pool's free list, + //! \returns a free chunk from that block. + //! If a new memory block cannot be allocated, returns 0. Amortized O(1). // Look for a non-empty storage if (!store().empty()) return (store().malloc)(); @@ -231,7 +446,9 @@ class pool: protected simple_segregated_storage< } void * ordered_malloc() - { + { //! Same as malloc, only merges the free lists, to preserve order. Amortized O(1). + //! \returns a free chunk from that block. + //! If a new memory block cannot be allocated, returns 0. Amortized O(1). // Look for a non-empty storage if (!store().empty()) return (store().malloc)(); @@ -241,21 +458,42 @@ class pool: protected simple_segregated_storage< // Returns 0 if out-of-memory // Allocate a contiguous section of n chunks void * ordered_malloc(size_type n); + //! Same as malloc, only allocates enough contiguous chunks to cover n * requested_size bytes. Amortized O(n). + //! \returns a free chunk from that block. + //! If a new memory block cannot be allocated, returns 0. Amortized O(1). // pre: 'chunk' must have been previously // returned by *this.malloc(). void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunk) - { (store().free)(chunk); } + { //! Deallocates a chunk of memory. Note that chunk may not be 0. O(1). + //! + //! Chunk must have been previously returned by t.malloc() or t.ordered_malloc(). + //! Assumes that chunk actually refers to a block of chunks + //! spanning n * partition_sz bytes. + //! deallocates each chunk in that block. + //! Note that chunk may not be 0. O(n). + (store().free)(chunk); + } // pre: 'chunk' must have been previously // returned by *this.malloc(). void ordered_free(void * const chunk) - { store().ordered_free(chunk); } + { //! Same as above, but is order-preserving. + //! + //! Note that chunk may not be 0. O(N) with respect to the size of the free list. + //! chunk must have been previously returned by t.malloc() or t.ordered_malloc(). + store().ordered_free(chunk); + } // pre: 'chunk' must have been previously // returned by *this.malloc(n). void free BOOST_PREVENT_MACRO_SUBSTITUTION(void * const chunks, const size_type n) - { + { //! Assumes that chunk actually refers to a block of chunks. + //! + //! 
chunk must have been previously returned by t.ordered_malloc(n) + //! spanning n * partition_sz bytes. + //! Deallocates each chunk in that block. + //! Note that chunk may not be 0. O(n). const size_type partition_size = alloc_size(); const size_type total_req_size = n * requested_size; const size_type num_chunks = total_req_size / partition_size + @@ -267,7 +505,12 @@ class pool: protected simple_segregated_storage< // pre: 'chunk' must have been previously // returned by *this.malloc(n). void ordered_free(void * const chunks, const size_type n) - { + { //! Assumes that chunk actually refers to a block of chunks spanning n * partition_sz bytes; + //! deallocates each chunk in that block. + //! + //! Note that chunk may not be 0. Order-preserving. O(N + n) where N is the size of the free list. + //! chunk must have been previously returned by t.malloc() or t.ordered_malloc(). + const size_type partition_size = alloc_size(); const size_type total_req_size = n * requested_size; const size_type num_chunks = total_req_size / partition_size + @@ -278,15 +521,29 @@ class pool: protected simple_segregated_storage< // is_from() tests a chunk to determine if it was allocated from *this bool is_from(void * const chunk) const - { + { //! \returns Returns true if chunk was allocated from u or + //! may be returned as the result of a future allocation from u. + //! Returns false if chunk was allocated from some other pool or + //! may be returned as the result of a future allocation from some other pool. + //! Otherwise, the return value is meaningless. + //! Note that this function may not be used to reliably test random pointer values. return (find_POD(chunk).valid()); } }; +#ifndef BOOST_NO_INCLASS_MEMBER_INITIALIZATION +template +typename pool::size_type const pool::min_alloc_size; +template +typename pool::size_type const pool::min_align; +#endif + template bool pool::release_memory() -{ - // This is the return value: it will be set to true when we actually call +{ //! pool must be ordered. Frees every memory block that doesn't have any allocated chunks. + //! \returns true if at least one memory block was freed. + + // ret is the return value: it will be set to true when we actually call // UserAllocator::free(..) bool ret = false; @@ -371,7 +628,7 @@ bool pool::release_memory() // free_p points to the first free chunk in some next memory block, or // 0 if there is no such chunk. // prev_free_p points to the last free chunk in this memory block. - + // We are just about to advance ptr. Maintain the invariant: // prev is the PODptr whose next() is ptr, or !valid() // if there is no such PODptr @@ -408,7 +665,13 @@ bool pool::release_memory() template bool pool::purge_memory() -{ +{ //! pool must be ordered. + //! Frees every memory block. + //! + //! This function invalidates any pointers previously returned + //! by allocation functions of t. + //! \returns true if at least one memory block was freed. + details::PODptr iter = list; if (!iter.valid()) @@ -435,21 +698,33 @@ bool pool::purge_memory() template void * pool::malloc_need_resize() -{ - // No memory in any of our storages; make a new storage, - const size_type partition_size = alloc_size(); - const size_type POD_size = next_size * partition_size + - details::pool::ct_lcm::value + sizeof(size_type); - char * const ptr = (UserAllocator::malloc)(POD_size); +{ //! No memory in any of our storages; make a new storage, + //! Allocates chunk in newly malloc aftert resize. + //! \returns pointer to chunk. 
+ size_type partition_size = alloc_size(); + size_type POD_size = static_cast(next_size * partition_size + + math::static_lcm::value + sizeof(size_type)); + char * ptr = (UserAllocator::malloc)(POD_size); if (ptr == 0) - return 0; + { + if(next_size > 4) + { + next_size >>= 1; + partition_size = alloc_size(); + POD_size = static_cast(next_size * partition_size + + math::static_lcm::value + sizeof(size_type)); + ptr = (UserAllocator::malloc)(POD_size); + } + if(ptr == 0) + return 0; + } const details::PODptr node(ptr, POD_size); - + BOOST_USING_STD_MIN(); if(!max_size) - next_size <<= 1; - else if( next_size*partition_size/requested_size < max_size) - next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size); + set_next_size(next_size << 1); + else if(next_size < max_size * requested_size / partition_size) + set_next_size(min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size * requested_size / partition_size)); // initialize it, store().add_block(node.begin(), node.element_size(), partition_size); @@ -464,21 +739,32 @@ void * pool::malloc_need_resize() template void * pool::ordered_malloc_need_resize() -{ - // No memory in any of our storages; make a new storage, - const size_type partition_size = alloc_size(); - const size_type POD_size = next_size * partition_size + - details::pool::ct_lcm::value + sizeof(size_type); - char * const ptr = (UserAllocator::malloc)(POD_size); +{ //! No memory in any of our storages; make a new storage, + //! \returns pointer to new chunk. + size_type partition_size = alloc_size(); + size_type POD_size = static_cast(next_size * partition_size + + math::static_lcm::value + sizeof(size_type)); + char * ptr = (UserAllocator::malloc)(POD_size); if (ptr == 0) - return 0; + { + if(next_size > 4) + { + next_size >>= 1; + partition_size = alloc_size(); + POD_size = static_cast(next_size * partition_size + + math::static_lcm::value + sizeof(size_type)); + ptr = (UserAllocator::malloc)(POD_size); + } + if(ptr == 0) + return 0; + } const details::PODptr node(ptr, POD_size); BOOST_USING_STD_MIN(); if(!max_size) - next_size <<= 1; - else if( next_size*partition_size/requested_size < max_size) - next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size); + set_next_size(next_size << 1); + else if(next_size < max_size * requested_size / partition_size) + set_next_size(min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size * requested_size / partition_size)); // initialize it, // (we can use "add_block" here because we know that @@ -511,14 +797,18 @@ void * pool::ordered_malloc_need_resize() node.next(prev.next()); prev.next(node); } - // and return a chunk from it. return (store().malloc)(); } template void * pool::ordered_malloc(const size_type n) -{ +{ //! Gets address of a chunk n, allocating new memory if not already available. + //! \returns Address of chunk n if allocated ok. + //! \returns 0 if not enough memory for n chunks. 
+ if (n > max_chunks()) + return 0; + const size_type partition_size = alloc_size(); const size_type total_req_size = n * requested_size; const size_type num_chunks = total_req_size / partition_size + @@ -526,35 +816,52 @@ void * pool::ordered_malloc(const size_type n) void * ret = store().malloc_n(num_chunks, partition_size); - if (ret != 0) +#ifdef BOOST_POOL_INSTRUMENT + std::cout << "Allocating " << n << " chunks from pool of size " << partition_size << std::endl; +#endif + if ((ret != 0) || (n == 0)) return ret; - // Not enougn memory in our storages; make a new storage, +#ifdef BOOST_POOL_INSTRUMENT + std::cout << "Cache miss, allocating another chunk...\n"; +#endif + + // Not enough memory in our storages; make a new storage, BOOST_USING_STD_MAX(); next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks); - const size_type POD_size = next_size * partition_size + - details::pool::ct_lcm::value + sizeof(size_type); - char * const ptr = (UserAllocator::malloc)(POD_size); + size_type POD_size = static_cast(next_size * partition_size + + math::static_lcm::value + sizeof(size_type)); + char * ptr = (UserAllocator::malloc)(POD_size); if (ptr == 0) - return 0; + { + if(num_chunks < next_size) + { + // Try again with just enough memory to do the job, or at least whatever we + // allocated last time: + next_size >>= 1; + next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks); + POD_size = static_cast(next_size * partition_size + + math::static_lcm::value + sizeof(size_type)); + ptr = (UserAllocator::malloc)(POD_size); + } + if(ptr == 0) + return 0; + } const details::PODptr node(ptr, POD_size); - // Split up block so we can use what wasn't requested - // (we can use "add_block" here because we know that - // the free list is empty, so we don't have to use - // the slower ordered version) + // Split up block so we can use what wasn't requested. if (next_size > num_chunks) store().add_ordered_block(node.begin() + num_chunks * partition_size, node.element_size() - num_chunks * partition_size, partition_size); BOOST_USING_STD_MIN(); if(!max_size) - next_size <<= 1; - else if( next_size*partition_size/requested_size < max_size) - next_size = min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size*requested_size/ partition_size); + set_next_size(next_size << 1); + else if(next_size < max_size * requested_size / partition_size) + set_next_size(min BOOST_PREVENT_MACRO_SUBSTITUTION(next_size << 1, max_size * requested_size / partition_size)); // insert it into the list, - // handle border case + // handle border case. if (!list.valid() || std::greater()(list.begin(), node.begin())) { node.next(list); @@ -566,8 +873,7 @@ void * pool::ordered_malloc(const size_type n) while (true) { - // if we're about to hit the end or - // if we've found where "node" goes + // if we're about to hit the end, or if we've found where "node" goes. if (prev.next_ptr() == 0 || std::greater()(prev.next_ptr(), node.begin())) break; @@ -586,8 +892,9 @@ void * pool::ordered_malloc(const size_type n) template details::PODptr::size_type> pool::find_POD(void * const chunk) const -{ - // We have to find which storage this chunk is from. +{ //! find which PODptr storage memory that this chunk is from. + //! \returns the PODptr that holds this chunk. + // Iterate down list to find which storage this chunk is from. 
details::PODptr iter = list; while (iter.valid()) { @@ -599,6 +906,135 @@ pool::find_POD(void * const chunk) const return iter; } +#else // BOOST_POOL_VALGRIND + +template +class pool +{ +public: + // types + typedef UserAllocator user_allocator; // User allocator. + typedef typename UserAllocator::size_type size_type; // An unsigned integral type that can represent the size of the largest object to be allocated. + typedef typename UserAllocator::difference_type difference_type; // A signed integral type that can represent the difference of any two pointers. + + // construct/copy/destruct + explicit pool(const size_type s, const size_type = 32, const size_type m = 0) : chunk_size(s), max_alloc_size(m) {} + ~pool() + { + purge_memory(); + } + + bool release_memory() + { + bool ret = free_list.empty() ? false : true; + for(std::set::iterator pos = free_list.begin(); pos != free_list.end(); ++pos) + { + (user_allocator::free)(static_cast(*pos)); + } + free_list.clear(); + return ret; + } + bool purge_memory() + { + bool ret = free_list.empty() && used_list.empty() ? false : true; + for(std::set::iterator pos = free_list.begin(); pos != free_list.end(); ++pos) + { + (user_allocator::free)(static_cast(*pos)); + } + free_list.clear(); + for(std::set::iterator pos = used_list.begin(); pos != used_list.end(); ++pos) + { + (user_allocator::free)(static_cast(*pos)); + } + used_list.clear(); + return ret; + } + size_type get_next_size() const + { + return 1; + } + void set_next_size(const size_type){} + size_type get_max_size() const + { + return max_alloc_size; + } + void set_max_size(const size_type s) + { + max_alloc_size = s; + } + size_type get_requested_size() const + { + return chunk_size; + } + void * malloc BOOST_PREVENT_MACRO_SUBSTITUTION() + { + void* ret; + if(free_list.empty()) + { + ret = (user_allocator::malloc)(chunk_size); + VALGRIND_MAKE_MEM_UNDEFINED(ret, chunk_size); + } + else + { + ret = *free_list.begin(); + free_list.erase(free_list.begin()); + VALGRIND_MAKE_MEM_UNDEFINED(ret, chunk_size); + } + used_list.insert(ret); + return ret; + } + void * ordered_malloc() + { + return (this->malloc)(); + } + void * ordered_malloc(size_type n) + { + if(max_alloc_size && (n > max_alloc_size)) + return 0; + void* ret = (user_allocator::malloc)(chunk_size * n); + used_list.insert(ret); + return ret; + } + void free BOOST_PREVENT_MACRO_SUBSTITUTION(void *const chunk) + { + BOOST_ASSERT(used_list.count(chunk) == 1); + BOOST_ASSERT(free_list.count(chunk) == 0); + used_list.erase(chunk); + free_list.insert(chunk); + VALGRIND_MAKE_MEM_NOACCESS(chunk, chunk_size); + } + void ordered_free(void *const chunk) + { + return (this->free)(chunk); + } + void free BOOST_PREVENT_MACRO_SUBSTITUTION(void *const chunk, const size_type) + { + BOOST_ASSERT(used_list.count(chunk) == 1); + BOOST_ASSERT(free_list.count(chunk) == 0); + used_list.erase(chunk); + (user_allocator::free)(static_cast(chunk)); + } + void ordered_free(void *const chunk, const size_type n) + { + (this->free)(chunk, n); + } + bool is_from(void *const chunk) const + { + return used_list.count(chunk) || free_list.count(chunk); + } + +protected: + size_type chunk_size, max_alloc_size; + std::set free_list, used_list; +}; + +#endif + } // namespace boost +#ifdef BOOST_MSVC +#pragma warning(pop) #endif + +#endif // #ifdef BOOST_POOL_HPP +
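
The core of the CVE-2012-2677 change above is the new max_chunks() bound: the block-size
arithmetic in ordered_malloc(n), malloc_need_resize() and ordered_malloc_need_resize() is
capped so that next_size * partition_size plus the housekeeping overhead cannot wrap around
size_type, and ordered_malloc(n) now returns 0 for any request beyond that bound. The
stand-alone sketch below mirrors that guard; the function and parameter names here are
illustrative, not part of the patched header.

#include <cstddef>
#include <limits>

// Illustrative guard mirroring pool<>::max_chunks() and the
// "if (n > max_chunks()) return 0;" check added to ordered_malloc(n).
// A block holds n chunks of partition_size bytes plus a fixed amount of
// housekeeping overhead (next pointer + next size) at its end.
bool request_fits(std::size_t n, std::size_t partition_size, std::size_t overhead)
{
   // Largest chunk count whose total block size still fits in std::size_t.
   const std::size_t max_chunks =
      (std::numeric_limits<std::size_t>::max() - overhead) / partition_size;

   // Anything larger would overflow n * partition_size + overhead and make
   // UserAllocator::malloc() return a block smaller than the caller assumes.
   return n <= max_chunks;
}

set_next_size() and set_max_size() clamp their arguments with the same bound, so the
doubling growth strategy used by malloc_need_resize() can never push the computed
POD_size outside the range of size_type either.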