Update contrib.
// Copyright (C) 2000, 2001 Stephen Cleary
//
// Distributed under the Boost Software License, Version 1.0. (See
// accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt)
//
// See http://www.boost.org for updates, documentation, and revision history.

#ifndef BOOST_POOL_HPP
#define BOOST_POOL_HPP
#include <boost/config.hpp>  // for workarounds

// std::less, std::less_equal, std::greater
#include <functional>
// new[], delete[], std::nothrow
#include <new>
// std::size_t, std::ptrdiff_t
#include <cstddef>
// std::malloc, std::free
#include <cstdlib>
// std::invalid_argument
#include <exception>
// std::max
#include <algorithm>

#include <boost/pool/poolfwd.hpp>

// boost::details::pool::ct_lcm
#include <boost/pool/detail/ct_gcd_lcm.hpp>
// boost::details::pool::lcm
#include <boost/pool/detail/gcd_lcm.hpp>
// boost::simple_segregated_storage
#include <boost/pool/simple_segregated_storage.hpp>

#ifdef BOOST_NO_STDC_NAMESPACE
 namespace std { using ::malloc; using ::free; }
#endif
// There are a few places in this file where the expression "this->m" is used.
// This expression is used to force instantiation-time name lookup, which I am
//  informed is required for strict Standard compliance.  It's only necessary
//  if "m" is a member of a base class that is dependent on a template
//  parameter.
// Thanks to Jens Maurer for pointing this out!

namespace boost {
struct default_user_allocator_new_delete
{
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  static char * malloc(const size_type bytes)
  { return new (std::nothrow) char[bytes]; }
  static void free(char * const block)
  { delete [] block; }
};
struct default_user_allocator_malloc_free
{
  typedef std::size_t size_type;
  typedef std::ptrdiff_t difference_type;

  static char * malloc(const size_type bytes)
  { return reinterpret_cast<char *>(std::malloc(bytes)); }
  static void free(char * const block)
  { std::free(block); }
};
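
// A replacement UserAllocator only needs to match the interface of the two
//  structs above.  A minimal sketch (added for illustration, not part of the
//  original header; "counting_allocator" is hypothetical):
//
//    struct counting_allocator
//    {
//      typedef std::size_t size_type;
//      typedef std::ptrdiff_t difference_type;
//
//      static size_type allocated;  // define in one translation unit
//
//      static char * malloc(const size_type bytes)
//      { allocated += bytes; return new (std::nothrow) char[bytes]; }
//      static void free(char * const block)
//      { delete [] block; }
//    };
//
//  ...used as: boost::pool<counting_allocator> p(sizeof(int));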
namespace details {

// PODptr is a class that pretends to be a "pointer" to different class types
//  that don't really exist.  It provides member functions to access the "data"
//  of the "object" it points to.  Since these "class" types are of variable
//  size and contain some information at the *end* of their memory (for
//  alignment reasons), PODptr must contain the size of this "class" as well
//  as the pointer to this "object".
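
// Added sketch of the layout read by ptr_next_size()/ptr_next_ptr() below
//  (the pointer field is padded out to lcm(sizeof(size_type), sizeof(void *))
//  bytes so that the trailing size field stays aligned):
//
//  +-------------------------------+-------------+-------------+
//  | chunk | chunk | ... | chunk   | next ptr    | next size   |
//  +-------------------------------+-------------+-------------+
//  ^ begin()                       ^ end(), i.e. ptr_next_ptr()
//                                                ^ ptr_next_size()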
template <typename SizeType>
class PODptr
{
  public:
    typedef SizeType size_type;

  private:
    char * ptr;
    size_type sz;

    char * ptr_next_size() const
    { return (ptr + sz - sizeof(size_type)); }
    char * ptr_next_ptr() const
    {
      return (ptr_next_size() -
          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

  public:
    PODptr(char * const nptr, const size_type nsize)
    :ptr(nptr), sz(nsize) { }
    PODptr()
    :ptr(0), sz(0) { }
    bool valid() const { return (begin() != 0); }
    void invalidate() { begin() = 0; }
    char * & begin() { return ptr; }
    char * begin() const { return ptr; }
    char * end() const { return ptr_next_ptr(); }
    size_type total_size() const { return sz; }
    size_type element_size() const
    {
      return (sz - sizeof(size_type) -
          pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value);
    }

    size_type & next_size() const
    { return *(reinterpret_cast<size_type *>(ptr_next_size())); }
    char * & next_ptr() const
    { return *(reinterpret_cast<char **>(ptr_next_ptr())); }

    PODptr next() const
    { return PODptr<size_type>(next_ptr(), next_size()); }
    void next(const PODptr & arg) const
    {
      next_ptr() = arg.begin();
      next_size() = arg.total_size();
    }
};

} // namespace details
template <typename UserAllocator>
class pool: protected simple_segregated_storage<
    typename UserAllocator::size_type>
{
  public:
    typedef UserAllocator user_allocator;
    typedef typename UserAllocator::size_type size_type;
    typedef typename UserAllocator::difference_type difference_type;

  private:
    BOOST_STATIC_CONSTANT(unsigned, min_alloc_size =
        (::boost::details::pool::ct_lcm<sizeof(void *), sizeof(size_type)>::value) );
    // Returns 0 if out-of-memory
    // Called if malloc/ordered_malloc needs to resize the free list
    void * malloc_need_resize();
    void * ordered_malloc_need_resize();

  protected:
    details::PODptr<size_type> list;

    simple_segregated_storage<size_type> & store() { return *this; }
    const simple_segregated_storage<size_type> & store() const { return *this; }
    const size_type requested_size;
    size_type next_size;

    // finds which POD in the list 'chunk' was allocated from
    details::PODptr<size_type> find_POD(void * const chunk) const;
    // is_from() tests a chunk to determine if it belongs in a block
    static bool is_from(void * const chunk, char * const i,
        const size_type sizeof_i)
    {
      // We use std::less_equal and std::less to test 'chunk'
      //  against the array bounds because standard operators
      //  may return unspecified results.
      // This is to ensure portability.  The operators < <= > >= are only
      //  defined for pointers to objects that are 1) in the same array, or
      //  2) subobjects of the same object [5.9/2].
      // The functor objects guarantee a total order for any pointer [20.3.3/8]

      //return (std::less_equal<void *>()(static_cast<void *>(i), chunk)
      //    && std::less<void *>()(chunk,
      //        static_cast<void *>(i + sizeof_i)));
      std::less_equal<void *> lt_eq;
      std::less<void *> lt;
      return (lt_eq(i, chunk) && lt(chunk, i + sizeof_i));
    }
    size_type alloc_size() const
    {
      const unsigned min_size = min_alloc_size;
      return details::pool::lcm<size_type>(requested_size, min_size);
    }
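    // Worked example (added; assumes sizeof(void *) == sizeof(size_type) == 8):
    //  min_alloc_size = lcm(8, 8) = 8, so a pool with requested_size == 4
    //  actually hands out lcm(4, 8) == 8-byte chunks: every free chunk must
    //  be able to hold the free list's "next" pointer while preserving
    //  size_type alignment within the block.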
    // for the sake of code readability :)
    static void * & nextof(void * const ptr)
    { return *(static_cast<void **>(ptr)); }

  public:
    // The second parameter here is an extension!
    // pre: npartition_size != 0 && nnext_size != 0
    explicit pool(const size_type nrequested_size,
        const size_type nnext_size = 32)
    :list(0, 0), requested_size(nrequested_size), next_size(nnext_size)
    { }

    ~pool() { purge_memory(); }
    // Releases memory blocks that don't have chunks allocated
    // pre: lists are ordered
    //  Returns true if memory was actually deallocated
    bool release_memory();

    // Releases *all* memory blocks, even if chunks are still allocated
    //  Returns true if memory was actually deallocated
    bool purge_memory();

    // These functions are extensions!
    size_type get_next_size() const { return next_size; }
    void set_next_size(const size_type nnext_size) { next_size = nnext_size; }
    // Both malloc and ordered_malloc do a quick inlined check first for any
    //  free chunks.  Only if we need to get another memory block do we call
    //  the non-inlined *_need_resize() functions.
    // Returns 0 if out-of-memory
    void * malloc()
    {
      // Look for a non-empty storage
      if (!store().empty())
        return store().malloc();
      return malloc_need_resize();
    }

    void * ordered_malloc()
    {
      // Look for a non-empty storage
      if (!store().empty())
        return store().malloc();
      return ordered_malloc_need_resize();
    }

    // Returns 0 if out-of-memory
    // Allocate a contiguous section of n chunks
    void * ordered_malloc(size_type n);
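
    // Usage sketch (added; boost::pool<> picks up the default UserAllocator
    //  from poolfwd.hpp):
    //
    //    boost::pool<> p(sizeof(int));   // chunks big enough for an int
    //    void * const chunk = p.malloc();
    //    if (chunk)                      // malloc returns 0 on out-of-memory
    //      p.free(chunk);
    //    // anything not freed is reclaimed by ~pool()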
    // pre: 'chunk' must have been previously
    //  returned by *this.malloc().
    void free(void * const chunk)
    { store().free(chunk); }

    // pre: 'chunk' must have been previously
    //  returned by *this.malloc().
    void ordered_free(void * const chunk)
    { store().ordered_free(chunk); }
    // pre: 'chunk' must have been previously
    //  returned by *this.malloc(n).
    void free(void * const chunks, const size_type n)
    {
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().free_n(chunks, num_chunks, partition_size);
    }
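    // Worked example (added): with requested_size == 6 and partition_size
    //  == 8, freeing n == 3 chunks gives total_req_size == 18, so
    //  num_chunks == 18 / 8 + 1 == 3 -- the same round-up that
    //  ordered_malloc(n) performs when it carves out the run.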
    // pre: 'chunk' must have been previously
    //  returned by *this.malloc(n).
    void ordered_free(void * const chunks, const size_type n)
    {
      const size_type partition_size = alloc_size();
      const size_type total_req_size = n * requested_size;
      const size_type num_chunks = total_req_size / partition_size +
          ((total_req_size % partition_size) ? true : false);

      store().ordered_free_n(chunks, num_chunks, partition_size);
    }
    // is_from() tests a chunk to determine if it was allocated from *this
    bool is_from(void * const chunk) const
    {
      return (find_POD(chunk).valid());
    }
};
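
// Usage sketch (added; needs <cassert> in a real program):
//
//   boost::pool<> p(32), q(32);
//   void * const c = p.malloc();
//   assert(p.is_from(c));   // c lies inside one of p's blocks
//   assert(!q.is_from(c));  // q never allocated it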
template <typename UserAllocator>
bool pool<UserAllocator>::release_memory()
{
  // This is the return value: it will be set to true when we actually call
  //  UserAllocator::free(..)
  bool ret = false;

  // This is a current & previous iterator pair over the memory block list
  details::PODptr<size_type> ptr = list;
  details::PODptr<size_type> prev;

  // This is a current & previous iterator pair over the free memory chunk list
  // Note that "prev_free" in this case does NOT point to the previous memory
  //  chunk in the free list, but rather the last free memory chunk before the
  //  current block.
  void * free = this->first;
  void * prev_free = 0;

  const size_type partition_size = alloc_size();

  // Search through all the allocated memory blocks
  while (ptr.valid())
  {
    // At this point:
    //  ptr points to a valid memory block
    //  free points to either:
    //    0 if there are no more free chunks
    //    the first free chunk in this or some next memory block
    //  prev_free points to either:
    //    the last free chunk in some previous memory block
    //    0 if there is no such free chunk
    //  prev is either:
    //    the PODptr whose next() is ptr
    //    !valid() if there is no such PODptr

    // If there are no more free memory chunks, then every remaining
    //  block is allocated out to its fullest capacity, and we can't
    //  release any more memory
    if (free == 0)
      break;
    // We have to check all the chunks.  If they are *all* free (i.e., present
    //  in the free list), then we can free the block.
    bool all_chunks_free = true;

    // Iterate 'i' through all chunks in the memory block
    // if free starts in the memory block, be careful to keep it there
    void * saved_free = free;
    for (char * i = ptr.begin(); i != ptr.end(); i += partition_size)
    {
      // If this chunk is not free
      if (i != free)
      {
        // We won't be able to free this block
        all_chunks_free = false;

        // free might have travelled outside ptr
        free = saved_free;
        // Abort searching the chunks; we won't be able to free this
        //  block because a chunk is not free.
        break;
      }

      // We do not increment prev_free because we are in the same block
      free = nextof(free);
    }

    // post: if the memory block has any chunks, free points to one of them
    //  otherwise, our assertions above are still valid

    const details::PODptr<size_type> next = ptr.next();

    if (!all_chunks_free)
    {
      if (is_from(free, ptr.begin(), ptr.element_size()))
      {
        std::less<void *> lt;
        void * const end = ptr.end();
        do
        {
          prev_free = free;
          free = nextof(free);
        } while (free && lt(free, end));
      }

      // This invariant is now restored:
      //  free points to the first free chunk in some next memory block, or
      //  0 if there is no such chunk.
      //  prev_free points to the last free chunk in this memory block.

      // We are just about to advance ptr.  Maintain the invariant:
      //  prev is the PODptr whose next() is ptr, or !valid()
      //  if there is no such PODptr
      prev = ptr;
    }
    else
    {
      // All chunks from this block are free

      // Remove block from list
      if (prev.valid())
        prev.next(next);
      else
        list = next;

      // Remove all entries in the free list from this block
      if (prev_free != 0)
        nextof(prev_free) = free;
      else
        this->first = free;

      // And release memory
      UserAllocator::free(ptr.begin());
      ret = true;
    }

    // Increment ptr
    ptr = next;
  }

  return ret;
}
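
// Usage sketch (added): release_memory() relies on both the block list and
//  the free list staying sorted, so it only pairs with the ordered interface:
//
//   boost::pool<> p(64);
//   void * const c = p.ordered_malloc();
//   p.ordered_free(c);    // free list stays sorted
//   p.release_memory();   // the now-empty block can be handed back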
template <typename UserAllocator>
bool pool<UserAllocator>::purge_memory()
{
  details::PODptr<size_type> iter = list;

  if (!iter.valid())
    return false;

  do
  {
    // hold "next" pointer
    const details::PODptr<size_type> next = iter.next();

    // delete the storage
    UserAllocator::free(iter.begin());

    // increment iter
    iter = next;
  } while (iter.valid());

  list.invalidate();
  this->first = 0;

  return true;
}
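
// Note (added): unlike release_memory(), purge_memory() ignores whether
//  chunks are still allocated, so afterwards every pointer this pool ever
//  returned is dangling:
//
//   boost::pool<> p(16);
//   void * const c = p.malloc();
//   p.purge_memory();     // c is now invalid; do not use or free it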
template <typename UserAllocator>
void * pool<UserAllocator>::malloc_need_resize()
{
  // No memory in any of our storages; make a new storage,
  const size_type partition_size = alloc_size();
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
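  // Worked example (added; assumes sizeof(void *) == sizeof(size_type) == 8):
  //  with next_size == 32 and partition_size == 8,
  //  POD_size = 32 * 8 + lcm(8, 8) + 8 = 272 bytes: 256 bytes of chunks,
  //  then the next-block pointer, then the next-block size (see PODptr).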
  char * const ptr = UserAllocator::malloc(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);
  next_size <<= 1;

  //  initialize it,
  store().add_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  node.next(list);
  list = node;

  //  and return a chunk from it.
  return store().malloc();
}
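
// Note (added): because next_size is doubled on each resize above,
//  successive blocks hold 32, 64, 128, ... chunks (with the default
//  next_size), so a pool that grows to n chunks makes only O(log n)
//  calls to UserAllocator::malloc.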
template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc_need_resize()
{
  // No memory in any of our storages; make a new storage,
  const size_type partition_size = alloc_size();
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = UserAllocator::malloc(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);
  next_size <<= 1;

  //  initialize it,
  //  (we can use "add_block" here because we know that
  //  the free list is empty, so we don't have to use
  //  the slower ordered version)
  store().add_block(node.begin(), node.element_size(), partition_size);

  //  insert it into the list,
  //   handle border case
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end or
      //  if we've found where "node" goes
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }

  //  and return a chunk from it.
  return store().malloc();
}
template <typename UserAllocator>
void * pool<UserAllocator>::ordered_malloc(const size_type n)
{
  const size_type partition_size = alloc_size();
  const size_type total_req_size = n * requested_size;
  const size_type num_chunks = total_req_size / partition_size +
      ((total_req_size % partition_size) ? true : false);

  void * ret = store().malloc_n(num_chunks, partition_size);
  if (ret != 0)
    return ret;

  // Not enough memory in our storages; make a new storage,
  BOOST_USING_STD_MAX();
  next_size = max BOOST_PREVENT_MACRO_SUBSTITUTION(next_size, num_chunks);
  const size_type POD_size = next_size * partition_size +
      details::pool::ct_lcm<sizeof(size_type), sizeof(void *)>::value + sizeof(size_type);
  char * const ptr = UserAllocator::malloc(POD_size);
  if (ptr == 0)
    return 0;
  const details::PODptr<size_type> node(ptr, POD_size);

  //  Split up block so we can use what wasn't requested
  //  (we can use "add_block" here because we know that
  //  the free list is empty, so we don't have to use
  //  the slower ordered version)
  if (next_size > num_chunks)
    store().add_block(node.begin() + num_chunks * partition_size,
        node.element_size() - num_chunks * partition_size, partition_size);

  next_size <<= 1;

  //  insert it into the list,
  //   handle border case
  if (!list.valid() || std::greater<void *>()(list.begin(), node.begin()))
  {
    node.next(list);
    list = node;
  }
  else
  {
    details::PODptr<size_type> prev = list;

    while (true)
    {
      // if we're about to hit the end or
      //  if we've found where "node" goes
      if (prev.next_ptr() == 0
          || std::greater<void *>()(prev.next_ptr(), node.begin()))
        break;

      prev = prev.next();
    }

    node.next(prev.next());
    prev.next(node);
  }

  //  and return it.
  return node.begin();
}
template <typename UserAllocator>
details::PODptr<typename pool<UserAllocator>::size_type>
pool<UserAllocator>::find_POD(void * const chunk) const
{
  // We have to find which storage this chunk is from.
  details::PODptr<size_type> iter = list;
  while (iter.valid())
  {
    if (is_from(chunk, iter.begin(), iter.element_size()))