os/ossrv/ossrv_pub/boost_apis/boost/pool/pool.hpp
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/ossrv/ossrv_pub/boost_apis/boost/pool/pool.hpp	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,580 @@
     1.4 +// Copyright (C) 2000, 2001 Stephen Cleary
     1.5 +//
     1.6 +// Distributed under the Boost Software License, Version 1.0. (See
     1.7 +// accompanying file LICENSE_1_0.txt or copy at
     1.8 +// http://www.boost.org/LICENSE_1_0.txt)
     1.9 +//
    1.10 +// See http://www.boost.org for updates, documentation, and revision history.
    1.11 +
    1.12 +#ifndef BOOST_POOL_HPP
    1.13 +#define BOOST_POOL_HPP
    1.14 +
    1.15 +#include <boost/config.hpp>  // for workarounds
    1.16 +
    1.17 +// std::less, std::less_equal, std::greater
    1.18 +#include <functional>
    1.19 +// new[], delete[], std::nothrow
    1.20 +#include <new>
    1.21 +// std::size_t, std::ptrdiff_t
    1.22 +#include <cstddef>
    1.23 +// std::malloc, std::free
    1.24 +#include <cstdlib>
     1.25 +// std::invalid_argument
     1.26 +#include <stdexcept>
    1.27 +// std::max
    1.28 +#include <algorithm>
    1.29 +
    1.30 +#include <boost/pool/poolfwd.hpp>
    1.31 +
    1.32 +// boost::details::pool::ct_lcm
    1.33 +#include <boost/pool/detail/ct_gcd_lcm.hpp>
    1.34 +// boost::details::pool::lcm
    1.35 +#include <boost/pool/detail/gcd_lcm.hpp>
    1.36 +// boost::simple_segregated_storage
    1.37 +#include <boost/pool/simple_segregated_storage.hpp>
    1.38 +
    1.39 +#ifdef BOOST_NO_STDC_NAMESPACE
    1.40 + namespace std { using ::malloc; using ::free; }
    1.41 +#endif
    1.42 +
    1.43 +// There are a few places in this file where the expression "this->m" is used.
    1.44 +// This expression is used to force instantiation-time name lookup, which I am
    1.45 +//   informed is required for strict Standard compliance.  It's only necessary
    1.46 +//   if "m" is a member of a base class that is dependent on a template
    1.47 +//   parameter.
    1.48 +// Thanks to Jens Maurer for pointing this out!
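// A minimal, hypothetical sketch of the rule (the names "base", "derived",
//  and "m" below are illustrative only, not part of this header):
//
//    template <typename T>
//    struct base { int m; };
//
//    template <typename T>
//    struct derived : base<T>
//    {
//      int get() { return this->m; }  // OK: "this->m" is dependent, so the
//                                     //  name is looked up at instantiation
//      // int bad() { return m; }     // rejected by strict compilers: plain
//                                     //  "m" is not found at definition time
//                                     //  because dependent base classes are
//                                     //  not searched
//    };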
    1.49 +
    1.50 +namespace boost {
    1.51 +
    1.52 +struct default_user_allocator_new_delete
    1.53 +{
    1.54 +  typedef std::size_t size_type;
    1.55 +  typedef std::ptrdiff_t difference_type;
    1.56 +
    1.57 +  static char * malloc(const size_type bytes)
    1.58 +  { return new (std::nothrow) char[bytes]; }
    1.59 +  static void free(char * const block)
    1.60 +  { delete [] block; }
    1.61 +};
    1.62 +
    1.63 +struct default_user_allocator_malloc_free
    1.64 +{
    1.65 +  typedef std::size_t size_type;
    1.66 +  typedef std::ptrdiff_t difference_type;
    1.67 +
    1.68 +  static char * malloc(const size_type bytes)
    1.69 +  { return reinterpret_cast<char *>(std::malloc(bytes)); }
    1.70 +  static void free(char * const block)
    1.71 +  { std::free(block); }
    1.72 +};
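// Any type with the same shape as the two allocators above -- nested
//  size_type and difference_type typedefs plus static malloc/free working on
//  char * -- can be supplied as the UserAllocator template parameter of
//  pool<>.  A hedged sketch (the name "counting_allocator" and its counter
//  are illustrative only, not part of Boost):
//
//    struct counting_allocator
//    {
//      typedef std::size_t size_type;
//      typedef std::ptrdiff_t difference_type;
//
//      static size_type blocks_allocated;   // needs an out-of-class definition
//
//      static char * malloc(const size_type bytes)
//      { ++blocks_allocated; return new (std::nothrow) char[bytes]; }
//      static void free(char * const block)
//      { delete [] block; }
//    };
//
//    // usage: boost::pool<counting_allocator> p(sizeof(int));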
    1.73 +
    1.74 +namespace details {
    1.75 +
    1.76 +// PODptr is a class that pretends to be a "pointer" to different class types
    1.77 +//  that don't really exist.  It provides member functions to access the "data"
    1.78 +//  of the "object" it points to.  Since these "class" types are of variable
     1.79 +//  size, and contain some information at the *end* of their memory (for
    1.80 +//  alignment reasons), PODptr must contain the size of this "class" as well as
    1.81 +//  the pointer to this "object".
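// As implied by the ptr_next_size()/ptr_next_ptr() accessors below, a block
//  referenced by a PODptr is laid out roughly as:
//
//    [ chunks: element_size() bytes ][ next-block pointer ][ next-block size ]
//    ^ ptr                                                          ptr + sz ^
//
//  where the next-block pointer sits in an lcm(sizeof(size_type),
//  sizeof(void *))-byte slot and the next-block size occupies the final
//  sizeof(size_type) bytes of the block.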
    1.82 +template <typename SizeType>
    1.83 +class PODptr
    1.84 +{
    1.85 +  public:
    1.86 +    typedef SizeType size_type;
    1.87 +
    1.88 +  private:
    1.89 +    char * ptr;
    1.90 +    size_type sz;
    1.91 +
    1.92 +    char * ptr_next_size() const
    1.93 +    { return (ptr + sz - sizeof(size_type)); }
    1.94 +    char * ptr_next_ptr() const
    1.95 +    {
    1.96 +      return (ptr_next_size() -
    1.97 +          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    1.98 +    }
    1.99 +
   1.100 +  public:
   1.101 +    PODptr(char * const nptr, const size_type nsize)
   1.102 +    :ptr(nptr), sz(nsize) { }
   1.103 +    PODptr()
   1.104 +    :ptr(0), sz(0) { }
   1.105 +
   1.106 +    bool valid() const { return (begin() != 0); }
   1.107 +    void invalidate() { begin() = 0; }
   1.108 +    char * & begin() { return ptr; }
   1.109 +    char * begin() const { return ptr; }
   1.110 +    char * end() const { return ptr_next_ptr(); }
   1.111 +    size_type total_size() const { return sz; }
   1.112 +    size_type element_size() const
   1.113 +    {
   1.114 +      return (sz - sizeof(size_type) -
   1.115 +          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
   1.116 +    }
   1.117 +
   1.118 +    size_type & next_size() const
   1.119 +    { return *(reinterpret_cast<size_type *>(ptr_next_size())); }
   1.120 +    char * & next_ptr() const
   1.121 +    { return *(reinterpret_cast<char **>(ptr_next_ptr())); }
   1.122 +
   1.123 +    PODptr next() const
   1.124 +    { return PODptr<size_type>(next_ptr(), next_size()); }
   1.125 +    void next(const PODptr & arg) const
   1.126 +    {
   1.127 +      next_ptr() = arg.begin();
   1.128 +      next_size() = arg.total_size();
   1.129 +    }
   1.130 +};
   1.131 +
   1.132 +} // namespace details
   1.133 +
   1.134 +template <typename UserAllocator>
   1.135 +class pool: protected simple_segregated_storage<
   1.136 +    typename UserAllocator::size_type>
   1.137 +{
   1.138 +  public:
   1.139 +    typedef UserAllocator user_allocator;
   1.140 +    typedef typename UserAllocator::size_type size_type;
   1.141 +    typedef typename UserAllocator::difference_type difference_type;
   1.142 +
   1.143 +  private:
   1.144 +    BOOST_STATIC_CONSTANT(unsigned, min_alloc_size =
   1.145 +        (::boost::details::pool::ct_lcm<sizeof(void *), sizeof(size_type)>::value) );
   1.146 +
   1.147 +    // Returns 0 if out-of-memory
   1.148 +    // Called if malloc/ordered_malloc needs to resize the free list
   1.149 +    void * malloc_need_resize();
   1.150 +    void * ordered_malloc_need_resize();
   1.151 +
   1.152 +  protected:
   1.153 +    details::PODptr<size_type> list;
   1.154 +
   1.155 +    simple_segregated_storage<size_type> & store() { return *this; }
   1.156 +    const simple_segregated_storage<size_type> & store() const { return *this; }
   1.157 +    const size_type requested_size;
   1.158 +    size_type next_size;
   1.159 +
   1.160 +    // finds which POD in the list 'chunk' was allocated from
   1.161 +    details::PODptr<size_type> find_POD(void * const chunk) const;
   1.162 +
   1.163 +    // is_from() tests a chunk to determine if it belongs in a block
   1.164 +    static bool is_from(void * const chunk, char * const i,
   1.165 +        const size_type sizeof_i)
   1.166 +    {
   1.167 +      // We use std::less_equal and std::less to test 'chunk'
   1.168 +      //  against the array bounds because standard operators
   1.169 +      //  may return unspecified results.
   1.170 +      // This is to ensure portability.  The operators < <= > >= are only
   1.171 +      //  defined for pointers to objects that are 1) in the same array, or
   1.172 +      //  2) subobjects of the same object [5.9/2].
   1.173 +      // The functor objects guarantee a total order for any pointer [20.3.3/8]
   1.174 +//WAS:
   1.175 +//      return (std::less_equal<void *>()(static_cast<void *>(i), chunk)
   1.176 +//          && std::less<void *>()(chunk,
   1.177 +//              static_cast<void *>(i + sizeof_i)));
   1.178 +      std::less_equal<void *> lt_eq;
   1.179 +      std::less<void *> lt;
   1.180 +      return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
   1.181 +    }
   1.182 +
   1.183 +    size_type alloc_size() const
   1.184 +    {
   1.185 +      const unsigned min_size = min_alloc_size;
   1.186 +      return details::pool::lcm<size_type>(requested_size, min_size);
   1.187 +    }
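    // For example (assuming, purely for illustration, a platform where
    //  sizeof(void *) and sizeof(size_type) are both 8): min_alloc_size is
    //  lcm(8, 8) == 8, so a pool constructed with requested_size == 12 hands
    //  out chunks of alloc_size() == lcm(12, 8) == 24 bytes -- large enough
    //  and suitably aligned to hold a free-list pointer while unallocated.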
   1.188 +
   1.189 +    // for the sake of code readability :)
   1.190 +    static void * & nextof(void * const ptr)
   1.191 +    { return *(static_cast<void **>(ptr)); }
   1.192 +
   1.193 +  public:
   1.194 +    // The second parameter here is an extension!
    1.195 +    // pre: nrequested_size != 0 && nnext_size != 0
   1.196 +    explicit pool(const size_type nrequested_size,
   1.197 +        const size_type nnext_size = 32)
   1.198 +    :list(0, 0), requested_size(nrequested_size), next_size(nnext_size)
   1.199 +    { }
   1.200 +
   1.201 +    ~pool() { purge_memory(); }
   1.202 +
   1.203 +    // Releases memory blocks that don't have chunks allocated
   1.204 +    // pre: lists are ordered
   1.205 +    //  Returns true if memory was actually deallocated
   1.206 +    bool release_memory();
   1.207 +
   1.208 +    // Releases *all* memory blocks, even if chunks are still allocated
   1.209 +    //  Returns true if memory was actually deallocated
   1.210 +    bool purge_memory();
   1.211 +
   1.212 +    // These functions are extensions!
   1.213 +    size_type get_next_size() const { return next_size; }
   1.214 +    void set_next_size(const size_type nnext_size) { next_size = nnext_size; }
   1.215 +
   1.216 +    // Both malloc and ordered_malloc do a quick inlined check first for any
   1.217 +    //  free chunks.  Only if we need to get another memory block do we call
   1.218 +    //  the non-inlined *_need_resize() functions.
   1.219 +    // Returns 0 if out-of-memory
   1.220 +    void * malloc()
   1.221 +    {
   1.222 +      // Look for a non-empty storage
   1.223 +      if (!store().empty())
   1.224 +        return store().malloc();
   1.225 +      return malloc_need_resize();
   1.226 +    }
   1.227 +
   1.228 +    void * ordered_malloc()
   1.229 +    {
   1.230 +      // Look for a non-empty storage
   1.231 +      if (!store().empty())
   1.232 +        return store().malloc();
   1.233 +      return ordered_malloc_need_resize();
   1.234 +    }
   1.235 +
   1.236 +    // Returns 0 if out-of-memory
   1.237 +    // Allocate a contiguous section of n chunks
   1.238 +    void * ordered_malloc(size_type n);
   1.239 +
   1.240 +    // pre: 'chunk' must have been previously
   1.241 +    //        returned by *this.malloc().
   1.242 +    void free(void * const chunk)
   1.243 +    { store().free(chunk); }
   1.244 +
   1.245 +    // pre: 'chunk' must have been previously
   1.246 +    //        returned by *this.malloc().
   1.247 +    void ordered_free(void * const chunk)
   1.248 +    { store().ordered_free(chunk); }
   1.249 +
    1.250 +    // pre: 'chunks' must have been previously
    1.251 +    //        returned by *this.ordered_malloc(n).
   1.252 +    void free(void * const chunks, const size_type n)
   1.253 +    {
   1.254 +      const size_type partition_size = alloc_size();
   1.255 +      const size_type total_req_size = n * requested_size;
   1.256 +      const size_type num_chunks = total_req_size / partition_size +
   1.257 +          ((total_req_size % partition_size) ? true : false);
   1.258 +
   1.259 +      store().free_n(chunks, num_chunks, partition_size);
   1.260 +    }
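    // (Continuing the illustrative numbers above: with requested_size == 12
    //  and a partition_size of 24, free(chunks, 3) computes
    //  total_req_size == 36 and num_chunks == 36/24 + 1 == 2 -- the same
    //  rounding ordered_malloc(n) applies when the run is handed out, so the
    //  whole run is returned to the free list.)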
   1.261 +
    1.262 +    // pre: 'chunks' must have been previously
    1.263 +    //        returned by *this.ordered_malloc(n).
   1.264 +    void ordered_free(void * const chunks, const size_type n)
   1.265 +    {
   1.266 +      const size_type partition_size = alloc_size();
   1.267 +      const size_type total_req_size = n * requested_size;
   1.268 +      const size_type num_chunks = total_req_size / partition_size +
   1.269 +          ((total_req_size % partition_size) ? true : false);
   1.270 +
   1.271 +      store().ordered_free_n(chunks, num_chunks, partition_size);
   1.272 +    }
   1.273 +
   1.274 +    // is_from() tests a chunk to determine if it was allocated from *this
   1.275 +    bool is_from(void * const chunk) const
   1.276 +    {
   1.277 +      return (find_POD(chunk).valid());
   1.278 +    }
   1.279 +};
   1.280 +
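// A hedged usage sketch of the class above (the variable names and sizes are
//  arbitrary; pool<> relies on the default UserAllocator argument declared in
//  poolfwd.hpp):
//
//    boost::pool<> p(sizeof(int));              // chunks big enough for an int
//
//    void * const chunk = p.ordered_malloc();   // 0 on out-of-memory
//    if (chunk != 0)
//      p.ordered_free(chunk);                   // keeps the free list ordered,
//                                               //  as release_memory() expects
//
//    void * const run = p.ordered_malloc(8);    // 8 contiguous chunks, 0 on failure
//    if (run != 0)
//      p.ordered_free(run, 8);
//
//    p.release_memory();                        // frees only blocks with no
//                                               //  live chunks
//    // ~pool() calls purge_memory(), which unconditionally frees the rest
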
   1.281 +template <typename UserAllocator>
   1.282 +bool pool<UserAllocator>::release_memory()
   1.283 +{
   1.284 +  // This is the return value: it will be set to true when we actually call
   1.285 +  //  UserAllocator::free(..)
   1.286 +  bool ret = false;
   1.287 +
   1.288 +  // This is a current & previous iterator pair over the memory block list
   1.289 +  details::PODptr<size_type> ptr = list;
   1.290 +  details::PODptr<size_type> prev;
   1.291 +
   1.292 +  // This is a current & previous iterator pair over the free memory chunk list
   1.293 +  //  Note that "prev_free" in this case does NOT point to the previous memory
   1.294 +  //  chunk in the free list, but rather the last free memory chunk before the
   1.295 +  //  current block.
   1.296 +  void * free = this->first;
   1.297 +  void * prev_free = 0;
   1.298 +
   1.299 +  const size_type partition_size = alloc_size();
   1.300 +
    1.301 +  // Search through all the allocated memory blocks
   1.302 +  while (ptr.valid())
   1.303 +  {
   1.304 +    // At this point:
   1.305 +    //  ptr points to a valid memory block
   1.306 +    //  free points to either:
   1.307 +    //    0 if there are no more free chunks
   1.308 +    //    the first free chunk in this or some next memory block
   1.309 +    //  prev_free points to either:
   1.310 +    //    the last free chunk in some previous memory block
   1.311 +    //    0 if there is no such free chunk
   1.312 +    //  prev is either:
   1.313 +    //    the PODptr whose next() is ptr
   1.314 +    //    !valid() if there is no such PODptr
   1.315 +
   1.316 +    // If there are no more free memory chunks, then every remaining
   1.317 +    //  block is allocated out to its fullest capacity, and we can't
   1.318 +    //  release any more memory
   1.319 +    if (free == 0)
   1.320 +      return ret;
   1.321 +
   1.322 +    // We have to check all the chunks.  If they are *all* free (i.e., present
   1.323 +    //  in the free list), then we can free the block.
   1.324 +    bool all_chunks_free = true;
   1.325 +
   1.326 +    // Iterate 'i' through all chunks in the memory block
   1.327 +    // if free starts in the memory block, be careful to keep it there
   1.328 +    void * saved_free = free;
   1.329 +    for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
   1.330 +    {
   1.331 +      // If this chunk is not free
   1.332 +      if (i != free)
   1.333 +      {
   1.334 +        // We won't be able to free this block
   1.335 +        all_chunks_free = false;
   1.336 +
   1.337 +        // free might have travelled outside ptr
   1.338 +        free = saved_free;
   1.339 +        // Abort searching the chunks; we won't be able to free this
   1.340 +        //  block because a chunk is not free.
   1.341 +        break;
   1.342 +      }
   1.343 +
   1.344 +      // We do not increment prev_free because we are in the same block
   1.345 +      free = nextof(free);
   1.346 +    }
   1.347 +
   1.348 +    // post: if the memory block has any chunks, free points to one of them
   1.349 +    // otherwise, our assertions above are still valid
   1.350 +
   1.351 +    const details::PODptr<size_type> next = ptr.next();
   1.352 +
   1.353 +    if (!all_chunks_free)
   1.354 +    {
   1.355 +      if (is_from(free, ptr.begin(), ptr.element_size()))
   1.356 +      {
   1.357 +        std::less<void *> lt;
   1.358 +        void * const end = ptr.end();
   1.359 +        do
   1.360 +        {
   1.361 +          prev_free = free;
   1.362 +          free = nextof(free);
   1.363 +        } while (free && lt(free, end));
   1.364 +      }
   1.365 +      // This invariant is now restored:
   1.366 +      //     free points to the first free chunk in some next memory block, or
   1.367 +      //       0 if there is no such chunk.
   1.368 +      //     prev_free points to the last free chunk in this memory block.
   1.369 +      
   1.370 +      // We are just about to advance ptr.  Maintain the invariant:
   1.371 +      // prev is the PODptr whose next() is ptr, or !valid()
   1.372 +      // if there is no such PODptr
   1.373 +      prev = ptr;
   1.374 +    }
   1.375 +    else
   1.376 +    {
   1.377 +      // All chunks from this block are free
   1.378 +
   1.379 +      // Remove block from list
   1.380 +      if (prev.valid())
   1.381 +        prev.next(next);
   1.382 +      else
   1.383 +        list = next;
   1.384 +
   1.385 +      // Remove all entries in the free list from this block
   1.386 +      if (prev_free != 0)
   1.387 +        nextof(prev_free) = free;
   1.388 +      else
   1.389 +        this->first = free;
   1.390 +
   1.391 +      // And release memory
   1.392 +      UserAllocator::free(ptr.begin());
   1.393 +      ret = true;
   1.394 +    }
   1.395 +
   1.396 +    // Increment ptr
   1.397 +    ptr = next;
   1.398 +  }
   1.399 +
   1.400 +  return ret;
   1.401 +}
   1.402 +
   1.403 +template <typename UserAllocator>
   1.404 +bool pool<UserAllocator>::purge_memory()
   1.405 +{
   1.406 +  details::PODptr<size_type> iter = list;
   1.407 +
   1.408 +  if (!iter.valid())
   1.409 +    return false;
   1.410 +
   1.411 +  do
   1.412 +  {
   1.413 +    // hold "next" pointer
   1.414 +    const details::PODptr<size_type> next = iter.next();
   1.415 +
   1.416 +    // delete the storage
   1.417 +    UserAllocator::free(iter.begin());
   1.418 +
   1.419 +    // increment iter
   1.420 +    iter = next;
   1.421 +  } while (iter.valid());
   1.422 +
   1.423 +  list.invalidate();
   1.424 +  this->first = 0;
   1.425 +
   1.426 +  return true;
   1.427 +}
   1.428 +
   1.429 +template <typename UserAllocator>
   1.430 +void * pool<UserAllocator>::malloc_need_resize()
   1.431 +{
   1.432 +  // No memory in any of our storages; make a new storage,
   1.433 +  const size_type partition_size = alloc_size();
   1.434 +  const size_type POD_size = next_size * partition_size +
   1.435 +      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
   1.436 +  char * const ptr = UserAllocator::malloc(POD_size);
   1.437 +  if (ptr == 0)
   1.438 +    return 0;
   1.439 +  const details::PODptr<size_type> node(ptr, POD_size);
   1.440 +  next_size <<= 1;
   1.441 +
   1.442 +  //  initialize it,
   1.443 +  store().add_block(node.begin(), node.element_size(), partition_size);
   1.444 +
   1.445 +  //  insert it into the list,
   1.446 +  node.next(list);
   1.447 +  list = node;
   1.448 +
   1.449 +  //  and return a chunk from it.
   1.450 +  return store().malloc();
   1.451 +}
   1.452 +
   1.453 +template <typename UserAllocator>
   1.454 +void * pool<UserAllocator>::ordered_malloc_need_resize()
   1.455 +{
   1.456 +  // No memory in any of our storages; make a new storage,
   1.457 +  const size_type partition_size = alloc_size();
   1.458 +  const size_type POD_size = next_size * partition_size +
   1.459 +      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
   1.460 +  char * const ptr = UserAllocator::malloc(POD_size);
   1.461 +  if (ptr == 0)
   1.462 +    return 0;
   1.463 +  const details::PODptr<size_type> node(ptr, POD_size);
   1.464 +  next_size <<= 1;
   1.465 +
   1.466 +  //  initialize it,
   1.467 +  //  (we can use "add_block" here because we know that
   1.468 +  //  the free list is empty, so we don't have to use
   1.469 +  //  the slower ordered version)
   1.470 +  store().add_block(node.begin(), node.element_size(), partition_size);
   1.471 +
   1.472 +  //  insert it into the list,
   1.473 +  //   handle border case
   1.474 +  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
   1.475 +  {
   1.476 +    node.next(list);
   1.477 +    list = node;
   1.478 +  }
   1.479 +  else
   1.480 +  {
   1.481 +    details::PODptr<size_type> prev = list;
   1.482 +
   1.483 +    while (true)
   1.484 +    {
   1.485 +      // if we're about to hit the end or
   1.486 +      //  if we've found where "node" goes
   1.487 +      if (prev.next_ptr() == 0
   1.488 +          || std::greater<void *>()(prev.next_ptr(), node.begin()))
   1.489 +        break;
   1.490 +
   1.491 +      prev = prev.next();
   1.492 +    }
   1.493 +
   1.494 +    node.next(prev.next());
   1.495 +    prev.next(node);
   1.496 +  }
   1.497 +
   1.498 +  //  and return a chunk from it.
   1.499 +  return store().malloc();
   1.500 +}
   1.501 +
   1.502 +template <typename UserAllocator>
   1.503 +void * pool<UserAllocator>::ordered_malloc(const size_type n)
   1.504 +{
   1.505 +  const size_type partition_size = alloc_size();
   1.506 +  const size_type total_req_size = n * requested_size;
   1.507 +  const size_type num_chunks = total_req_size / partition_size +
   1.508 +      ((total_req_size % partition_size) ? true : false);
   1.509 +
   1.510 +  void * ret = store().malloc_n(num_chunks, partition_size);
   1.511 +
   1.512 +  if (ret != 0)
   1.513 +    return ret;
   1.514 +
    1.515 +  // Not enough memory in our storages; make a new storage,
   1.516 +  BOOST_USING_STD_MAX();
   1.517 +  next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
   1.518 +  const size_type POD_size = next_size * partition_size +
   1.519 +      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
   1.520 +  char * const ptr = UserAllocator::malloc(POD_size);
   1.521 +  if (ptr == 0)
   1.522 +    return 0;
   1.523 +  const details::PODptr<size_type> node(ptr, POD_size);
   1.524 +
   1.525 +  // Split up block so we can use what wasn't requested
   1.526 +  //  (we can use "add_block" here because we know that
   1.527 +  //  the free list is empty, so we don't have to use
   1.528 +  //  the slower ordered version)
   1.529 +  if (next_size > num_chunks)
   1.530 +    store().add_block(node.begin() + num_chunks * partition_size,
   1.531 +        node.element_size() - num_chunks * partition_size, partition_size);
   1.532 +
   1.533 +  next_size <<= 1;
   1.534 +
   1.535 +  //  insert it into the list,
   1.536 +  //   handle border case
   1.537 +  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
   1.538 +  {
   1.539 +    node.next(list);
   1.540 +    list = node;
   1.541 +  }
   1.542 +  else
   1.543 +  {
   1.544 +    details::PODptr<size_type> prev = list;
   1.545 +
   1.546 +    while (true)
   1.547 +    {
   1.548 +      // if we're about to hit the end or
   1.549 +      //  if we've found where "node" goes
   1.550 +      if (prev.next_ptr() == 0
   1.551 +          || std::greater<void *>()(prev.next_ptr(), node.begin()))
   1.552 +        break;
   1.553 +
   1.554 +      prev = prev.next();
   1.555 +    }
   1.556 +
   1.557 +    node.next(prev.next());
   1.558 +    prev.next(node);
   1.559 +  }
   1.560 +
   1.561 +  //  and return it.
   1.562 +  return node.begin();
   1.563 +}
   1.564 +
   1.565 +template <typename UserAllocator>
   1.566 +details::PODptr<typename pool<UserAllocator>::size_type>
   1.567 +pool<UserAllocator>::find_POD(void * const chunk) const
   1.568 +{
   1.569 +  // We have to find which storage this chunk is from.
   1.570 +  details::PODptr<size_type> iter = list;
   1.571 +  while (iter.valid())
   1.572 +  {
   1.573 +    if (is_from(chunk, iter.begin(), iter.element_size()))
   1.574 +      return iter;
   1.575 +    iter = iter.next();
   1.576 +  }
   1.577 +
   1.578 +  return iter;
   1.579 +}
   1.580 +
   1.581 +} // namespace boost
   1.582 +
   1.583 +#endif