os/ossrv/ossrv_pub/boost_apis/boost/pool/pool.hpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200 (2014-06-10)
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (C) 2000, 2001 Stephen Cleary
sl@0
     2
//
sl@0
     3
// Distributed under the Boost Software License, Version 1.0. (See
sl@0
     4
// accompanying file LICENSE_1_0.txt or copy at
sl@0
     5
// http://www.boost.org/LICENSE_1_0.txt)
sl@0
     6
//
sl@0
     7
// See http://www.boost.org for updates, documentation, and revision history.
sl@0
     8
sl@0
     9
#ifndef BOOST_POOL_HPP
sl@0
    10
#define BOOST_POOL_HPP
sl@0
    11
sl@0
    12
#include <boost/config.hpp>  // for workarounds

// std::less, std::less_equal, std::greater
#include <functional>
// std::numeric_limits
#include <limits>
// new[], delete[], std::nothrow
#include <new>
// std::size_t, std::ptrdiff_t
#include <cstddef>
// std::malloc, std::free
#include <cstdlib>
// std::invalid_argument
#include <exception>
// std::max
#include <algorithm>

#include <boost/pool/poolfwd.hpp>

// boost::details::pool::ct_lcm
#include <boost/pool/detail/ct_gcd_lcm.hpp>
// boost::details::pool::lcm
#include <boost/pool/detail/gcd_lcm.hpp>
// boost::simple_segregated_storage
#include <boost/pool/simple_segregated_storage.hpp>
sl@0
    35
sl@0
    36
#ifdef BOOST_NO_STDC_NAMESPACE
sl@0
    37
 namespace std { using ::malloc; using ::free; }
sl@0
    38
#endif
sl@0
    39
sl@0
    40
// There are a few places in this file where the expression "this->m" is used.
sl@0
    41
// This expression is used to force instantiation-time name lookup, which I am
sl@0
    42
//   informed is required for strict Standard compliance.  It's only necessary
sl@0
    43
//   if "m" is a member of a base class that is dependent on a template
sl@0
    44
//   parameter.
sl@0
    45
// Thanks to Jens Maurer for pointing this out!
sl@0
    46
sl@0
    47
namespace boost {
sl@0
    48
sl@0
    49
// Default UserAllocator policy: obtains raw storage from the global
// array operator new and returns it with array operator delete.
// malloc() returns 0 (rather than throwing std::bad_alloc) on failure,
// courtesy of the std::nothrow placement form.
struct default_user_allocator_new_delete
{
  typedef std::size_t size_type;          // unsigned type for byte counts
  typedef std::ptrdiff_t difference_type; // signed type for pointer diffs

  // Allocate 'bytes' bytes of raw storage; returns 0 on failure.
  static char * malloc(const size_type bytes)
  {
    return new (std::nothrow) char[bytes];
  }

  // Release a block previously obtained from malloc() above.
  static void free(char * const block)
  {
    delete [] block;
  }
};
sl@0
    59
sl@0
    60
// Alternative UserAllocator policy: funnels requests straight through to
// std::malloc / std::free instead of new[] / delete[].
struct default_user_allocator_malloc_free
{
  typedef std::size_t size_type;          // unsigned type for byte counts
  typedef std::ptrdiff_t difference_type; // signed type for pointer diffs

  // Allocate 'bytes' bytes of raw storage; returns 0 on failure.
  static char * malloc(const size_type bytes)
  {
    return reinterpret_cast<char *>(std::malloc(bytes));
  }

  // Release a block previously obtained from malloc() above.
  static void free(char * const block)
  {
    std::free(block);
  }
};
sl@0
    70
sl@0
    71
namespace details {
sl@0
    72
sl@0
    73
// PODptr is a class that pretends to be a "pointer" to different class types
sl@0
    74
//  that don't really exist.  It provides member functions to access the "data"
sl@0
    75
//  of the "object" it points to.  Since these "class" types are of variable
sl@0
    76
//  size, and contains some information at the *end* of its memory (for
sl@0
    77
//  alignment reasons), PODptr must contain the size of this "class" as well as
sl@0
    78
//  the pointer to this "object".
sl@0
    79
// PODptr views a raw memory block as: [chunk storage ...][next ptr][next size]
// The last sizeof(size_type) bytes hold the size of the *next* block; the
// aligned slot just before them holds the pointer to the next block.  The
// trailer is sized as lcm(sizeof(size_type), sizeof(void*)) + sizeof(size_type)
// so that both fields are suitably aligned.
template <typename SizeType>
class PODptr
{
  public:
    typedef SizeType size_type;

  private:
    char * ptr;    // start of the block (0 => invalid/"null" PODptr)
    size_type sz;  // total size of the block in bytes, trailer included

    // Address of the trailing "next block size" field.
    char * ptr_next_size() const
    { return (ptr + sz - sizeof(size_type)); }
    // Address of the trailing "next block pointer" field, which sits one
    // lcm-aligned slot before the size field.
    char * ptr_next_ptr() const
    {
      return (ptr_next_size() -
          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

  public:
    // Construct a view over the block at nptr of nsize total bytes.
    PODptr(char * const nptr, const size_type nsize)
    :ptr(nptr), sz(nsize) { }
    // Default-construct an invalid ("null") PODptr.
    PODptr()
    :ptr(0), sz(0) { }

    bool valid() const { return (begin() != 0); }
    void invalidate() { begin() = 0; }
    // begin() doubles as mutable access to the stored block pointer.
    char * & begin() { return ptr; }
    char * begin() const { return ptr; }
    // One-past-the-end of the chunk storage (i.e. start of the trailer).
    char * end() const { return ptr_next_ptr(); }
    size_type total_size() const { return sz; }
    // Bytes usable for chunks: total size minus the trailer fields.
    size_type element_size() const
    {
      return (sz - sizeof(size_type) -
          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

    // Read/write access to the trailer fields of *this* block, which
    // describe the next block in the list.
    size_type & next_size() const
    { return *(reinterpret_cast<size_type *>(ptr_next_size())); }
    char * & next_ptr() const
    { return *(reinterpret_cast<char **>(ptr_next_ptr())); }

    // The next block in the list (invalid PODptr if next_ptr() is 0).
    PODptr next() const
    { return PODptr<size_type>(next_ptr(), next_size()); }
    // Make 'arg' the next block after *this.
    void next(const PODptr & arg) const
    {
      next_ptr() = arg.begin();
      next_size() = arg.total_size();
    }
};
sl@0
   128
sl@0
   129
} // namespace details
sl@0
   130
sl@0
   131
// pool: a fast allocator handing out fixed-size chunks.
//
// It derives (protected) from simple_segregated_storage, which provides the
// free list, and additionally keeps a singly-linked list of the large memory
// blocks obtained from UserAllocator (threaded through details::PODptr
// trailers).  Each chunk is requested_size rounded up by alloc_size() so it
// can always hold a free-list pointer.
template <typename UserAllocator>
class pool: protected simple_segregated_storage<
    typename UserAllocator::size_type>
{
  public:
    typedef UserAllocator user_allocator;
    typedef typename UserAllocator::size_type size_type;
    typedef typename UserAllocator::difference_type difference_type;

  private:
    // Smallest usable chunk size: large enough for a void* and aligned for
    // size_type, hence the compile-time LCM of the two sizes.
    BOOST_STATIC_CONSTANT(unsigned, min_alloc_size =
        (::boost::details::pool::ct_lcm<sizeof(void *), sizeof(size_type)>::value) );

    // Returns 0 if out-of-memory
    // Called if malloc/ordered_malloc needs to resize the free list
    void * malloc_need_resize();
    void * ordered_malloc_need_resize();

  protected:
    // Head of the list of memory blocks obtained from UserAllocator.
    details::PODptr<size_type> list;

    // Named access to the simple_segregated_storage base (the free list).
    simple_segregated_storage<size_type> & store() { return *this; }
    const simple_segregated_storage<size_type> & store() const { return *this; }
    const size_type requested_size;  // chunk size requested by the user, in bytes
    size_type next_size;             // number of chunks the next new block will hold

    // finds which POD in the list 'chunk' was allocated from
    details::PODptr<size_type> find_POD(void * const chunk) const;

    // is_from() tests a chunk to determine if it belongs in a block
    static bool is_from(void * const chunk, char * const i,
        const size_type sizeof_i)
    {
      // We use std::less_equal and std::less to test 'chunk'
      //  against the array bounds because standard operators
      //  may return unspecified results.
      // This is to ensure portability.  The operators < <= > >= are only
      //  defined for pointers to objects that are 1) in the same array, or
      //  2) subobjects of the same object [5.9/2].
      // The functor objects guarantee a total order for any pointer [20.3.3/8]
      std::less_equal<void *> lt_eq;
      std::less<void *> lt;
      return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
    }

    // Actual per-chunk size: lcm of requested_size and min_alloc_size, so
    // every chunk is big enough and aligned to hold the free-list pointer.
    size_type alloc_size() const
    {
      const unsigned min_size = min_alloc_size;
      return details::pool::lcm<size_type>(requested_size, min_size);
    }

    // for the sake of code readability :)
    // A free chunk stores the pointer to the next free chunk in its first
    // sizeof(void *) bytes; nextof() reads/writes that embedded pointer.
    static void * & nextof(void * const ptr)
    { return *(static_cast<void **>(ptr)); }

  public:
    // The second parameter here is an extension!
    // pre: npartition_size != 0 && nnext_size != 0
    explicit pool(const size_type nrequested_size,
        const size_type nnext_size = 32)
    :list(0, 0), requested_size(nrequested_size), next_size(nnext_size)
    { }

    // Destroys the pool, releasing every block it still owns — even blocks
    // with chunks the user has not freed.
    ~pool() { purge_memory(); }

    // Releases memory blocks that don't have chunks allocated
    // pre: lists are ordered
    //  Returns true if memory was actually deallocated
    bool release_memory();

    // Releases *all* memory blocks, even if chunks are still allocated
    //  Returns true if memory was actually deallocated
    bool purge_memory();

    // These functions are extensions!
    size_type get_next_size() const { return next_size; }
    void set_next_size(const size_type nnext_size) { next_size = nnext_size; }

    // Both malloc and ordered_malloc do a quick inlined check first for any
    //  free chunks.  Only if we need to get another memory block do we call
    //  the non-inlined *_need_resize() functions.
    // Returns 0 if out-of-memory
    void * malloc()
    {
      // Look for a non-empty storage
      if (!store().empty())
        return store().malloc();
      return malloc_need_resize();
    }

    void * ordered_malloc()
    {
      // Look for a non-empty storage
      if (!store().empty())
        return store().malloc();
      return ordered_malloc_need_resize();
    }

    // Returns 0 if out-of-memory
    // Allocate a contiguous section of n chunks
    void * ordered_malloc(size_type n);

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void free(void * const chunk)
    { store().free(chunk); }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc().
    void ordered_free(void * const chunk)
    { store().ordered_free(chunk); }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc(n).
    void free(void * const chunks, const size_type n)
    {
      // Recompute how many chunks the contiguous section occupies
      // (round total requested bytes up to a whole number of chunks).
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().free_n(chunks, num_chunks, partition_size);
    }

    // pre: 'chunk' must have been previously
    //        returned by *this.malloc(n).
    void ordered_free(void * const chunks, const size_type n)
    {
      // Same chunk-count computation as free(chunks, n) above.
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().ordered_free_n(chunks, num_chunks, partition_size);
    }

    // is_from() tests a chunk to determine if it was allocated from *this
    bool is_from(void * const chunk) const
    {
      return (find_POD(chunk).valid());
    }
};
sl@0
   277
sl@0
   278
// Frees every memory block whose chunks are ALL on the free list, returning
// them to UserAllocator.  Walks the block list and the free list in lockstep,
// which is why both lists must be ordered (i.e. only the ordered_* interface
// may have been used).  Returns true if at least one block was released.
template <typename UserAllocator>
bool pool<UserAllocator>::release_memory()
{
  // This is the return value: it will be set to true when we actually call
  //  UserAllocator::free(..)
  bool ret = false;

  // This is a current & previous iterator pair over the memory block list
  details::PODptr<size_type> ptr = list;
  details::PODptr<size_type> prev;

  // This is a current & previous iterator pair over the free memory chunk list
  //  Note that "prev_free" in this case does NOT point to the previous memory
  //  chunk in the free list, but rather the last free memory chunk before the
  //  current block.
  void * free = this->first;
  void * prev_free = 0;

  const size_type partition_size = alloc_size();

  // Search through all the allocated memory blocks
  while (ptr.valid())
  {
    // At this point:
    //  ptr points to a valid memory block
    //  free points to either:
    //    0 if there are no more free chunks
    //    the first free chunk in this or some next memory block
    //  prev_free points to either:
    //    the last free chunk in some previous memory block
    //    0 if there is no such free chunk
    //  prev is either:
    //    the PODptr whose next() is ptr
    //    !valid() if there is no such PODptr

    // If there are no more free memory chunks, then every remaining
    //  block is allocated out to its fullest capacity, and we can't
    //  release any more memory
    if (free == 0)
      return ret;

    // We have to check all the chunks.  If they are *all* free (i.e., present
    //  in the free list), then we can free the block.
    bool all_chunks_free = true;

    // Iterate 'i' through all chunks in the memory block
    // if free starts in the memory block, be careful to keep it there
    void * saved_free = free;
    for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
    {
      // If this chunk is not free
      if (i != free)
      {
        // We won't be able to free this block
        all_chunks_free = false;

        // free might have travelled outside ptr
        free = saved_free;
        // Abort searching the chunks; we won't be able to free this
        //  block because a chunk is not free.
        break;
      }

      // We do not increment prev_free because we are in the same block
      free = nextof(free);
    }

    // post: if the memory block has any chunks, free points to one of them
    // otherwise, our assertions above are still valid

    const details::PODptr<size_type> next = ptr.next();

    if (!all_chunks_free)
    {
      // Block survives.  Skip 'free' past any remaining free chunks that
      // lie inside this block so it points into some later block (or is 0).
      if (is_from(free, ptr.begin(), ptr.element_size()))
      {
        std::less<void *> lt;
        void * const end = ptr.end();
        do
        {
          prev_free = free;
          free = nextof(free);
        } while (free && lt(free, end));
      }
      // This invariant is now restored:
      //     free points to the first free chunk in some next memory block, or
      //       0 if there is no such chunk.
      //     prev_free points to the last free chunk in this memory block.
      
      // We are just about to advance ptr.  Maintain the invariant:
      // prev is the PODptr whose next() is ptr, or !valid()
      // if there is no such PODptr
      prev = ptr;
    }
    else
    {
      // All chunks from this block are free

      // Remove block from list
      if (prev.valid())
        prev.next(next);
      else
        list = next;

      // Remove all entries in the free list from this block
      if (prev_free != 0)
        nextof(prev_free) = free;
      else
        this->first = free;

      // And release memory
      UserAllocator::free(ptr.begin());
      ret = true;
    }

    // Increment ptr
    ptr = next;
  }

  return ret;
}
sl@0
   399
sl@0
   400
template <typename UserAllocator>
sl@0
   401
bool pool<UserAllocator>::purge_memory()
sl@0
   402
{
sl@0
   403
  details::PODptr<size_type> iter = list;
sl@0
   404
sl@0
   405
  if (!iter.valid())
sl@0
   406
    return false;
sl@0
   407
sl@0
   408
  do
sl@0
   409
  {
sl@0
   410
    // hold "next" pointer
sl@0
   411
    const details::PODptr<size_type> next = iter.next();
sl@0
   412
sl@0
   413
    // delete the storage
sl@0
   414
    UserAllocator::free(iter.begin());
sl@0
   415
sl@0
   416
    // increment iter
sl@0
   417
    iter = next;
sl@0
   418
  } while (iter.valid());
sl@0
   419
sl@0
   420
  list.invalidate();
sl@0
   421
  this->first = 0;
sl@0
   422
sl@0
   423
  return true;
sl@0
   424
}
sl@0
   425
sl@0
   426
// Slow path for malloc(): the free list is empty, so grow the pool by one
// new memory block, carve it into chunks, and hand out a chunk from it.
// Returns 0 if UserAllocator cannot supply the memory.
template <typename UserAllocator>
void * pool<UserAllocator>::malloc_need_resize()
{
  // No memory in any of our storages; make a new storage,
  const size_type partition_size = alloc_size();
  // Block layout: next_size chunks followed by the PODptr trailer
  // (next-block pointer + next-block size; see details::PODptr).
  // NOTE(review): this multiply/add is unchecked — a huge next_size could
  // wrap size_type; confirm callers keep next_size sane.
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = UserAllocator::malloc(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);
  // Geometric growth: the next block will hold twice as many chunks.
  next_size <<= 1;

  //  initialize it,
  store().add_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  node.next(list);
  list = node;

  //  and return a chunk from it.
  return store().malloc();
}
sl@0
   449
sl@0
   450
// Slow path for ordered_malloc(): the free list is empty, so allocate a new
// memory block, carve it into chunks, and splice it into the block list at
// its address-ordered position.  Returns 0 if UserAllocator cannot supply
// the memory.
template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc_need_resize()
{
  // No memory in any of our storages; make a new storage,
  const size_type partition_size = alloc_size();
  // next_size chunks plus the PODptr trailer (see details::PODptr).
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = UserAllocator::malloc(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);
  // Geometric growth, as in malloc_need_resize().
  next_size <<= 1;

  //  initialize it,
  //  (we can use "add_block" here because we know that
  //  the free list is empty, so we don't have to use
  //  the slower ordered version)
  store().add_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  //   handle border case: empty list, or node belongs before the head
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    // Walk the (address-ordered) block list to find the insertion point.
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end or
      //  if we've found where "node" goes
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }

  //  and return a chunk from it.
  return store().malloc();
}
sl@0
   498
sl@0
   499
template <typename UserAllocator>
sl@0
   500
void * pool<UserAllocator>::ordered_malloc(const size_type n)
sl@0
   501
{
sl@0
   502
  const size_type partition_size = alloc_size();
sl@0
   503
  const size_type total_req_size = n * requested_size;
sl@0
   504
  const size_type num_chunks = total_req_size / partition_size +
sl@0
   505
      ((total_req_size % partition_size) ? true : false);
sl@0
   506
sl@0
   507
  void * ret = store().malloc_n(num_chunks, partition_size);
sl@0
   508
sl@0
   509
  if (ret != 0)
sl@0
   510
    return ret;
sl@0
   511
sl@0
   512
  // Not enougn memory in our storages; make a new storage,
sl@0
   513
  BOOST_USING_STD_MAX();
sl@0
   514
  next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
sl@0
   515
  const size_type POD_size = next_size * partition_size +
sl@0
   516
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
sl@0
   517
  char * const ptr = UserAllocator::malloc(POD_size);
sl@0
   518
  if (ptr == 0)
sl@0
   519
    return 0;
sl@0
   520
  const details::PODptr<size_type> node(ptr, POD_size);
sl@0
   521
sl@0
   522
  // Split up block so we can use what wasn't requested
sl@0
   523
  //  (we can use "add_block" here because we know that
sl@0
   524
  //  the free list is empty, so we don't have to use
sl@0
   525
  //  the slower ordered version)
sl@0
   526
  if (next_size > num_chunks)
sl@0
   527
    store().add_block(node.begin() + num_chunks * partition_size,
sl@0
   528
        node.element_size() - num_chunks * partition_size, partition_size);
sl@0
   529
sl@0
   530
  next_size <<= 1;
sl@0
   531
sl@0
   532
  //  insert it into the list,
sl@0
   533
  //   handle border case
sl@0
   534
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
sl@0
   535
  {
sl@0
   536
    node.next(list);
sl@0
   537
    list = node;
sl@0
   538
  }
sl@0
   539
  else
sl@0
   540
  {
sl@0
   541
    details::PODptr<size_type> prev = list;
sl@0
   542
sl@0
   543
    while (true)
sl@0
   544
    {
sl@0
   545
      // if we're about to hit the end or
sl@0
   546
      //  if we've found where "node" goes
sl@0
   547
      if (prev.next_ptr() == 0
sl@0
   548
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
sl@0
   549
        break;
sl@0
   550
sl@0
   551
      prev = prev.next();
sl@0
   552
    }
sl@0
   553
sl@0
   554
    node.next(prev.next());
sl@0
   555
    prev.next(node);
sl@0
   556
  }
sl@0
   557
sl@0
   558
  //  and return it.
sl@0
   559
  return node.begin();
sl@0
   560
}
sl@0
   561
sl@0
   562
template <typename UserAllocator>
sl@0
   563
details::PODptr<typename pool<UserAllocator>::size_type>
sl@0
   564
pool<UserAllocator>::find_POD(void * const chunk) const
sl@0
   565
{
sl@0
   566
  // We have to find which storage this chunk is from.
sl@0
   567
  details::PODptr<size_type> iter = list;
sl@0
   568
  while (iter.valid())
sl@0
   569
  {
sl@0
   570
    if (is_from(chunk, iter.begin(), iter.element_size()))
sl@0
   571
      return iter;
sl@0
   572
    iter = iter.next();
sl@0
   573
  }
sl@0
   574
sl@0
   575
  return iter;
sl@0
   576
}
sl@0
   577
sl@0
   578
} // namespace boost
sl@0
   579
sl@0
   580
#endif