/*
 * First public contribution.
 * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Moscow Center for SPARC Technology
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
#include "stlport_prefix.h"

#if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__)) && (!defined (__SYMBIAN32__))
#  include <malloc.h>
//#  define _STLP_MALLOC_USABLE_SIZE(__buf) malloc_usable_size(__buf)
#endif

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
#  include <pthread_alloc>
#  include <cerrno>
#endif

#include <stl/_threads.h>

#include "lock_free_slist.h"
#if defined (__SYMBIAN32__WSD__)
#include "libstdcppwsd.h"

#define __oom_handler get_oom_handler()
#define _S_lock get_allocator_S_lock()
#define _S_heap_size get_S_heap_size()
#define _S_start_free get_S_start_free()
#define _S_end_free get_S_end_free()
#define _S_free_list get_S_free_list()
#define _S_chunk_allocator_lock get_S_chunk_allocator_lock()
#define _S_free_per_thread_states get_S_free_per_thread_states()
#define _S_key get_S_key()
#define _S_key_initialized get_S_key_initialized()
#endif
IMPORT_C void* BackendAlloc(size_t);
IMPORT_C void BackendFree(void*);

EXPORT_C void* backend_allocate(size_t __n)
{
  for (;;) {
    void* p = BackendAlloc(__n);
    if (p)
      return p;

    // set_new_handler uses Dll::Tls, so only this thread's new handler will be
    // changed for the time it is set back. No problems for other threads.
    std::new_handler nh_func = std::set_new_handler(NULL);
    std::set_new_handler(nh_func);

    if (nh_func)
      nh_func();
    else
      __THROW(std::bad_alloc());
  }
}

EXPORT_C void backend_free(void* __p)
{ BackendFree(__p); }
#if defined (__WATCOMC__)
#  pragma warning 367 9
#  pragma warning 368 9
#endif
#if defined (_STLP_SGI_THREADS)
// We test whether threads are in use before locking.
// Perhaps this should be moved into stl_threads.h, but that
// probably makes it harder to avoid the procedure call when
// it isn't needed.
extern "C" {
  extern int __us_rsthread_malloc;
}
#endif
// Specialised debug form of malloc which does not provide "false"
// memory leaks when run with debug CRT libraries.
#if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
#  include <crtdbg.h>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
inline void __stlp_chunck_free(void* __p) { _free_dbg(__p, _CRT_BLOCK); }
#else
#  ifdef _STLP_NODE_ALLOC_USE_MALLOC
#    include <cstdlib>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
inline void __stlp_chunck_free(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
#  else
inline void* __stlp_chunk_malloc(size_t __bytes) {
  return _STLP_STD::__stl_new(__bytes);
}
inline void __stlp_chunck_free(void* __p) {
  _STLP_STD::__stl_delete(__p);
}
#  endif
#endif
#define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)

_STLP_BEGIN_NAMESPACE
class __malloc_alloc_impl {
private:
  static void* _S_oom_malloc(size_t __n) {
    __oom_handler_type __my_malloc_handler;
    void* __result;

    for (;;) {
      __my_malloc_handler = __oom_handler;
      if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
      (*__my_malloc_handler)();
      __result = malloc(__n);
      if (__result) return __result;
    }
#if defined (_STLP_NEED_UNREACHABLE_RETURN)
    return 0;
#endif
  }
#if defined (__SYMBIAN32__WSD__)
  static _STLP_STATIC_MEMBER_DECLSPEC __oom_handler_type& get_oom_handler();
#else
  static __oom_handler_type __oom_handler;
#endif

public:
  // this one is needed for proper simple_alloc wrapping
  typedef char value_type;

  static void* allocate(size_t& __n) {
    void* __result = malloc(__n);
    if (0 == __result) {
      __result = _S_oom_malloc(__n);
    }
#if defined (_STLP_MALLOC_USABLE_SIZE)
    else {
      size_t __new_n = _STLP_MALLOC_USABLE_SIZE(__result);
      /*
      if (__n != __new_n) {
        printf("requested size %d, usable %d\n", __n, __new_n);
      }
      */
      __n = __new_n;
    }
#endif
    return __result;
  }
  static void deallocate(void* __p, size_t /* __n */) { free((char*)__p); }
  static __oom_handler_type set_malloc_handler(__oom_handler_type __f) {
    __oom_handler_type __old = __oom_handler;
    __oom_handler = __f;
    return __old;
  }
#if defined (__SYMBIAN32__WSD__)
  friend void ::stdcpp_allocators_init();
#endif
};

#if !defined (__SYMBIAN32__WSD__)
// malloc_alloc out-of-memory handling
__oom_handler_type __malloc_alloc_impl::__oom_handler = __STATIC_CAST(__oom_handler_type, 0);
#endif
void* _STLP_CALL __malloc_alloc::allocate(size_t& __n)
{ return __malloc_alloc_impl::allocate(__n); }
__oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
{ return __malloc_alloc_impl::set_malloc_handler(__f); }
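
// Illustrative usage sketch (hypothetical, compiled out with #if 0): shows how a
// client is expected to combine set_malloc_handler() with allocate() so that
// _S_oom_malloc() can retry after the handler frees memory. The handler name
// below is an assumption for the example, not part of this file.
#if 0
static void __example_oom_handler() {
  // Release some cached memory here, or abort; simply returning lets
  // _S_oom_malloc() call malloc() again.
}

static void __example_malloc_alloc_usage() {
  __malloc_alloc::set_malloc_handler(__example_oom_handler);
  size_t __sz = 256;                              // may be grown via _STLP_MALLOC_USABLE_SIZE
  void* __mem = __malloc_alloc::allocate(__sz);   // retries through the handler on failure
  __malloc_alloc::deallocate(__mem, __sz);
}
#endif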
// *******************************************************
// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
//
// Important implementation properties:
// 1. If the client requests an object of size > _MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    _S_round_up(requested_size). Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.

#define _STLP_NFREELISTS 16
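
// Worked example (assuming the usual STLport parameters _ALIGN == 8, hence
// _ALIGN_SHIFT == 3, and _MAX_BYTES == 128, which match the 16 free lists above):
//   a request for 13 bytes is rounded up to _S_round_up(13) == (13 + 7) & ~7 == 16
//   and is served from free list _S_FREELIST_INDEX(16) == (16 - 1) >> 3 == 1;
//   a request for 200 bytes exceeds _MAX_BYTES and goes straight to malloc.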
/* On Symbian, STLport is built as a DLL and is also dynamically linked against
 * by applications, so _STLP_USE_DYNAMIC_LIB should always be defined.
 * _STLP_LEAKS_PEDANTIC is defined to prevent memory leaks in __node_alloc
 * when the library is dynamically loaded and unloaded.
 */
#if defined (_STLP_LEAKS_PEDANTIC) && (defined (_STLP_USE_DYNAMIC_LIB) || defined (__SYMBIAN32__))
/* We can only do cleanup of the node allocator memory pool if we are
 * sure that the STLport library is used as a shared one as it guarantees
 * the uniqueness of the node allocator instance. Without that guarantee node
 * allocator instances might exchange memory blocks making the implementation
 * of a cleaning process much more complicated.
 */
#  define _STLP_DO_CLEAN_NODE_ALLOC
#endif
/* When STLport is used without multi-threaded safety we use the node allocator
 * implementation with locks, as the locks become no-ops. The lock-free implementation
 * always uses system-specific atomic operations, which are slower than 'normal'
 * ones.
 */
#if defined (_STLP_THREADS) && \
    defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
/*
 * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
 * for this architecture and compiler. That means we can use the non-blocking
 * implementation of the node-allocation engine.*/
#  define _STLP_USE_LOCK_FREE_IMPLEMENTATION
#endif
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
#  if defined (_STLP_THREADS)

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() {
#    if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#    endif
      _S_lock._M_acquire_lock();
  }
  ~_Node_Alloc_Lock() {
#    if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#    endif
      _S_lock._M_release_lock();
  }
#    if defined (__SYMBIAN32__WSD__)
  static _STLP_STATIC_MUTEX& get_allocator_S_lock();
#    else
  static _STLP_STATIC_MUTEX _S_lock;
#    endif
};

#    if !defined (__SYMBIAN32__WSD__)
_STLP_STATIC_MUTEX _Node_Alloc_Lock::_S_lock _STLP_MUTEX_INITIALIZER;
#    endif

#  else

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() { }
  ~_Node_Alloc_Lock() { }
};

#  endif
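
// Hedged usage sketch (compiled out): the allocator relies on the scoped-lock
// idiom below, so _S_lock is released on every exit path, including exceptions
// (see _M_allocate / _M_deallocate further down).
#if 0
{
  _Node_Alloc_Lock __lock_instance;   // constructor acquires _S_lock (no-op without threads)
  // ... touch _S_free_list / _S_start_free here ...
}                                     // destructor releases the lock on scope exit
#endif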
struct _Node_alloc_obj {
  _Node_alloc_obj * _M_next;
};

class __node_alloc_impl {
  static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
  { return (((__bytes) + (size_t)_ALIGN-1) & ~((size_t)_ALIGN - 1)); }

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_atomic_freelist::item _Obj;
  typedef _STLP_atomic_freelist       _Freelist;
  typedef _STLP_atomic_freelist       _ChunkList;

  // Header of blocks of memory that have been allocated as part of
  // a larger chunk but have not yet been chopped up into nodes.
  struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
    char* _M_end; // pointer to end of free memory
  };
#else
  typedef _Node_alloc_obj      _Obj;
  typedef _Obj* _STLP_VOLATILE _Freelist;
  typedef _Obj*                _ChunkList;
#endif
  // Returns an object of size __n, and optionally adds to size __n free list.
  static _Obj* _S_refill(size_t __n);
  // Allocates a chunk for nobjs of size __p_size. nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
  // Chunk allocation state.
#if defined (__SYMBIAN32__WSD__)
  static _Freelist* get_S_free_list();
#else
  static _Freelist _S_free_list[_STLP_NFREELISTS];
#endif
  // Amount of total allocated memory
#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  static _STLP_VOLATILE __stl_atomic_t _S_heap_size;
#else
#  if defined (__SYMBIAN32__WSD__)
  static size_t& get_S_heap_size();
#  else
  static size_t _S_heap_size;
#  endif
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  // List of blocks of free memory
  static _STLP_atomic_freelist _S_free_mem_blocks;
#else
#  if defined (__SYMBIAN32__WSD__)
  // Start of the current free memory buffer
  static char*& get_S_start_free();
  // End of the current free memory buffer
  static char*& get_S_end_free();
#  else
  // Start of the current free memory buffer
  static char* _S_start_free;
  // End of the current free memory buffer
  static char* _S_end_free;
#  endif
#endif
#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
public:
  // Methods to report alloc/dealloc calls to the counter system.
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
#  else
  typedef __stl_atomic_t _AllocCounter;
#  endif
  static _AllocCounter& _STLP_CALL _S_alloc_counter();
  static void _S_alloc_call();
  static void _S_dealloc_call();

private:
  // Free all the allocated chunks of memory
  static void _S_chunk_dealloc();
  // Beginning of the linked list of allocated chunks of memory
  static _ChunkList _S_chunks;
#endif /* _STLP_DO_CLEAN_NODE_ALLOC */

public:
  /* __n must be > 0 */
  static void* _M_allocate(size_t& __n);
  /* __p may not be 0 */
  static void _M_deallocate(void *__p, size_t __n);

#if defined (__SYMBIAN32__WSD__)
  friend void ::stdcpp_allocators_init();
#endif
};
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj * __r;

  // Acquire the lock here with a constructor call.
  // This ensures that it is released in exit or during stack
  // unwinding.
  _Node_Alloc_Lock __lock_instance;

  if ( (__r = *__my_free_list) != 0 ) {
    *__my_free_list = __r->_M_next;
  } else {
    __r = _S_refill(__n);
  }
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  // lock is released here
  return __r;
}
void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj * __pobj = __STATIC_CAST(_Obj*, __p);

  // acquire lock
  _Node_Alloc_Lock __lock_instance;
  __pobj->_M_next = *__my_free_list;
  *__my_free_list = __pobj;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
  // lock is released here
}
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much.                                        */
/* We assume that size is properly aligned.                         */
/* We hold the allocation lock.                                     */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
  char* __result;
  size_t __total_bytes = _p_size * __nobjs;
  size_t __bytes_left = _S_end_free - _S_start_free;

  if (__bytes_left > 0) {
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    if (__bytes_left >= _p_size) {
      __nobjs = (int)(__bytes_left / _p_size);
      __total_bytes = _p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    // Try to make use of the left-over piece.
    _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
    __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
    *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
  }

  size_t __bytes_to_get =
    2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + sizeof(_Obj)
#  endif
    ;

  _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  if (0 == _S_start_free) {
    _Obj* _STLP_VOLATILE* __my_free_list;
    _Obj* __p;
    // Try to do with what we have. That can't hurt.
    // We do not try smaller requests, since that tends
    // to result in disaster on multi-process machines.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
      __p = *__my_free_list;
      if (0 != __p) {
        *__my_free_list = __p->_M_next;
        _S_start_free = __REINTERPRET_CAST(char*, __p);
        _S_end_free = _S_start_free + __i;
        return _S_chunk_alloc(_p_size, __nobjs);
        // Any leftover piece will eventually make it to the
        // right free list.
      }
    }
    _S_end_free = 0; // In case of exception.
    _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    // (char*)malloc_alloc::allocate(__bytes_to_get);  // alternative kept for reference

    // This should either throw an
    // exception or remedy the situation. Thus we assume it
    // succeeded.
  }

  _S_heap_size += __bytes_to_get;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
  _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
#  endif
  _S_end_free = _S_start_free + __bytes_to_get;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_start_free += sizeof(_Obj);
#  endif
  return _S_chunk_alloc(_p_size, __nobjs);
}
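
// Worked example of the growth policy above (values assumed for illustration,
// ignoring the extra bookkeeping object reserved when _STLP_DO_CLEAN_NODE_ALLOC
// is defined): with _p_size == 16, __nobjs == 20 and an empty pool
// (_S_heap_size == 0), the first call requests 2 * 320 + _S_round_up(0) == 640
// bytes; 320 bytes satisfy the refill and the remaining 320 bytes stay in
// [_S_start_free, _S_end_free) for later requests. As _S_heap_size grows, the
// _S_heap_size >> 4 term makes each new chunk progressively larger.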
/* Returns an object of size __n, and optionally adds to size __n free list.*/
/* We assume that __n is properly aligned.                                  */
/* We hold the allocation lock.                                             */
_Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);

  _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj* __result;
  _Obj* __current_obj;
  _Obj* __next_obj;

  /* Build free list in chunk */
  __result = __REINTERPRET_CAST(_Obj*, __chunk);
  *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
  for (--__nobjs; --__nobjs; ) {
    __current_obj = __next_obj;
    __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
    __current_obj->_M_next = __next_obj;
  }
  __next_obj->_M_next = 0;
  return __result;
}
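
// Sketch of _S_refill() above for __n == 16 and __nobjs == 20 (illustrative
// values): the first 16-byte node (__result) is handed back to the caller, the
// node at __chunk + 16 becomes the new head of the free list, and the remaining
// nodes are threaded through their _M_next fields:
//   [__chunk]    -> returned to the caller
//   [__chunk+16] -> [__chunk+32] -> ... -> [__chunk+304] -> 0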
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ ++_S_alloc_counter(); }

void __node_alloc_impl::_S_dealloc_call() {
  __stl_atomic_t &counter = _S_alloc_counter();
  if (--counter == 0)
  { _S_chunk_dealloc(); }
}

/* We deallocate all the memory chunks */
void __node_alloc_impl::_S_chunk_dealloc() {
  _Obj *__pcur = _S_chunks, *__pnext;
  while (__pcur != 0) {
    __pnext = __pcur->_M_next;
    __stlp_chunck_free(__pcur);
    __pcur = __pnext;
  }
  _S_chunks = 0;
  _S_start_free = _S_end_free = 0;
  _S_heap_size = 0;
  // A reinterpret_cast cannot remove volatility, so a C-style cast is used.
  memset((char*)(&_S_free_list[0]), 0, _STLP_NFREELISTS * sizeof(_Obj*));
}
#  endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#else /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
  if (__r == 0)
  { __r = _S_refill(__n); }

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
}
/* Returns an object of size __n, and optionally adds additional ones to */
/* freelist of objects of size __n.                                      */
/* We assume that __n is properly aligned.                               */
__node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (__nobjs <= 1)
    return __REINTERPRET_CAST(_Obj*, __chunk);

  // Push all new nodes (minus first one) onto freelist
  _Obj* __result   = __REINTERPRET_CAST(_Obj*, __chunk);
  _Obj* __cur_item = __result;
  _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
  for (--__nobjs; __nobjs != 0; --__nobjs) {
    __cur_item = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
    __my_freelist->push(__cur_item);
  }
  return __result;
}
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much.                                        */
/* We assume that size is properly aligned.                         */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We are going to add a small memory block to keep track of the allocated blocks'
  // addresses; we need to do so respecting the memory alignment. The following
  // static assert checks that the reserved block is big enough to store a pointer.
  _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
#  endif
  char* __result = 0;
  __stl_atomic_t __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;

  _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
  if (__block != 0) {
    // We checked a block out and can now mess with it with impunity.
    // We'll put the remainder back into the list if we're done with it below.
    char* __buf_start = __REINTERPRET_CAST(char*, __block);
    __stl_atomic_t __bytes_left = __block->_M_end - __buf_start;

    if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__stl_atomic_t, _p_size))) {
      // There's enough left for at least one object, but not as much as we wanted
      __result      = __buf_start;
      __nobjs       = (int)(__bytes_left / _p_size);
      __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }
    else if (__bytes_left >= __total_bytes) {
      // The block has enough left to satisfy all that was asked for
      __result      = __buf_start;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }

    if (__bytes_left != 0) {
      // There is still some memory left over in block after we satisfied our request.
      if ((__result != 0) && (__bytes_left >= sizeof(_FreeBlockHeader))) {
        // We were able to allocate at least one object and there is still enough
        // left to put remainder back into list.
        _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
        __newblock->_M_end = __block->_M_end;
        _S_free_mem_blocks.push(__newblock);
      }
      else {
        // We were not able to allocate enough for at least one object.
        // Shove into freelist of nearest (rounded-down!) size.
        size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
        if (__rounded_down > 0)
          _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
      }
    }
    if (__result != 0)
      return __result;
  }

  // We couldn't satisfy it from the list of free blocks, get new memory.
  __stl_atomic_t __bytes_to_get = 2 * __total_bytes + __STATIC_CAST(__stl_atomic_t, _S_round_up(_S_heap_size >> 4))
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + _ALIGN
#  endif
    ;

  __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  // Alignment check
  _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

  if (0 == __result) {
    // Allocation failed; try to cannibalize from freelist of a larger object size.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      _Obj* __p = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
      if (0 != __p) {
        if (__i < sizeof(_FreeBlockHeader)) {
          // Not enough to put into list of free blocks, divvy it up here.
          // Use as much as possible for this request and shove remainder into freelist.
          __nobjs = (int)(__i / _p_size);
          __total_bytes = __nobjs * __STATIC_CAST(__stl_atomic_t, _p_size);
          size_t __bytes_left = __i - __total_bytes;
          size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
          if (__rounded_down > 0) {
            _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
          }
          return __REINTERPRET_CAST(char*, __p);
        }
        else {
          // Add node to list of available blocks and recursively allocate from it.
          _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
          __newblock->_M_end = __REINTERPRET_CAST(char*, __p) + __i;
          _S_free_mem_blocks.push(__newblock);
          return _S_chunk_alloc(_p_size, __nobjs);
        }
      }
    }

    // We were not able to find something in a freelist, try to allocate a smaller amount.
    __bytes_to_get = __total_bytes
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
      + _ALIGN
#  endif
      ;

    __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    // Alignment check
    _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

    // This should either throw an exception or remedy the situation.
    // Thus we assume it succeeded.
  }

  _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get);

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We have to track the allocated memory chunks for release on exit.
  _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
  __result       += _ALIGN;
  __bytes_to_get -= _ALIGN;
#  endif

  if (__bytes_to_get > __total_bytes) {
    // Push excess memory allocated in this chunk into list of free memory blocks
    _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
    __freeblock->_M_end = __result + __bytes_to_get;
    _S_free_mem_blocks.push(__freeblock);
  }
  return __result;
}
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }

void __node_alloc_impl::_S_dealloc_call() {
  _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
  if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
    _S_chunk_dealloc();
}
/* We deallocate all the memory chunks */
void __node_alloc_impl::_S_chunk_dealloc() {
  // Note: The _Node_alloc_helper class ensures that this function
  // will only be called when the (shared) library is unloaded or the
  // process is shut down. It's thus not possible that another thread
  // is currently trying to allocate a node (we're not thread-safe here).

  // Clear the free blocks and all freelists. This makes sure that, if for some
  // reason more memory is allocated again during shutdown, it does not come from
  // the chunks released below (it'd also be really nasty to leave references to
  // deallocated memory).
  _S_free_mem_blocks.clear();
  _S_heap_size = 0;

  for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
    _S_free_list[__i].clear();
  }

  // Detach list of chunks and free them all
  _Obj* __chunk = _S_chunks.clear();
  while (__chunk != 0) {
    _Obj* __next = __chunk->_M_next;
    __stlp_chunck_free(__chunk);
    __chunk = __next;
  }
}
#  endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#endif /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
struct __node_alloc_cleaner {
  ~__node_alloc_cleaner()
  { __node_alloc_impl::_S_dealloc_call(); }
};

#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t& _STLP_CALL
#  else
__stl_atomic_t& _STLP_CALL
#  endif
__node_alloc_impl::_S_alloc_counter() {
  static _AllocCounter _S_counter = 1;
  static __node_alloc_cleaner _S_node_alloc_cleaner;
  return _S_counter;
}
#endif /* _STLP_DO_CLEAN_NODE_ALLOC */
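
// Lifecycle note (sketch): _S_counter starts at 1 on behalf of the static
// __node_alloc_cleaner above; every _S_alloc_call() increments it and every
// _S_dealloc_call() decrements it. The pool is therefore released by
// _S_chunk_dealloc() only once the last outstanding node has been returned
// *and* the cleaner's destructor has run at library unload.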
#if !defined (__SYMBIAN32__WSD__)
#  if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_Node_alloc_obj * _STLP_VOLATILE
__node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy. Otherwise it appears to allocate too little
// space for the array.
#  else
_STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
_STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
#  endif

#  if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
char *__node_alloc_impl::_S_start_free = 0;
char *__node_alloc_impl::_S_end_free = 0;
#  endif

#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t
#  else
size_t
#  endif
__node_alloc_impl::_S_heap_size = 0;
#endif /* !__SYMBIAN32__WSD__ */
#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_atomic_freelist __node_alloc_impl::_S_chunks;
#  else
_Node_alloc_obj* __node_alloc_impl::_S_chunks = 0;
#  endif
#endif
_STLP_DECLSPEC void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
{ return __node_alloc_impl::_M_allocate(__n); }

_STLP_DECLSPEC void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
{ __node_alloc_impl::_M_deallocate(__p, __n); }
#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)

#  define _STLP_DATA_ALIGNMENT 8

_STLP_MOVE_TO_PRIV_NAMESPACE

// *******************************************************
// __pthread_alloc implementation
union _Pthread_alloc_obj {
  union _Pthread_alloc_obj * __free_list_link;
  char __client_data[_STLP_DATA_ALIGNMENT]; /* The client sees this. */
};
// Pthread allocators don't appear to the client to have meaningful
// instances. We do in fact need to associate some state with each
// thread. That state is represented by _Pthread_alloc_per_thread_state.

struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  enum { _S_NFREELISTS = _MAX_BYTES / _STLP_DATA_ALIGNMENT };

  // Free list link for list of available per thread structures.
  // When one of these becomes available for reuse due to thread
  // termination, any objects in its free list remain associated
  // with it. The whole structure may then be used by a newly
  // created thread.
  _Pthread_alloc_per_thread_state() : __next(0)
  { memset((void *)__CONST_CAST(_Pthread_alloc_obj**, __free_list), 0, (size_t)_S_NFREELISTS * sizeof(__obj *)); }
  // Returns an object of size __n, and possibly adds to size n free list.
  void *_M_refill(size_t __n);

  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
  _Pthread_alloc_per_thread_state *__next;
  // this data member is only to be used by per_thread_allocator, which returns memory to the originating thread.
  _STLP_mutex _M_lock;
};
// Pthread-specific allocator.
class _Pthread_alloc_impl {
public: // but only for internal use:
  typedef _Pthread_alloc_per_thread_state __state_type;
  typedef char value_type;

  // Allocates a chunk for nobjs of size size. nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char *_S_chunk_alloc(size_t __size, size_t &__nobjs, __state_type*);

  enum { _S_ALIGN = _STLP_DATA_ALIGNMENT };

  static size_t _S_round_up(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) & ~((int)_S_ALIGN - 1)); }
  static size_t _S_freelist_index(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) / (int)_S_ALIGN - 1); }
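
  // Worked example (with _STLP_DATA_ALIGNMENT == 8 as defined above): a 13-byte
  // request is rounded up to _S_round_up(13) == 16 and maps to
  // _S_freelist_index(16) == (16 + 7) / 8 - 1 == 1, i.e. the per-thread free
  // list of 16-byte objects; requests above _MAX_BYTES bypass the per-thread
  // lists and use __malloc_alloc instead.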
private:
  // Chunk allocation state. And other shared state.
  // Protected by _S_chunk_allocator_lock.
#if defined (__SYMBIAN32__WSD__)
public:
  static void pt_wsd_init() {
    get_S_free_per_thread_states() = 0;
    get_S_key() = 0;
    get_S_chunk_allocator_lock()._M_lock.iState = _ENeedsNormalInit;
    get_S_chunk_allocator_lock()._M_lock.iPtr = 0;
    get_S_chunk_allocator_lock()._M_lock.iReentry = 0;
    get_S_key_initialized() = false;
    get_S_start_free() = 0;
    get_S_end_free() = 0;
    get_S_heap_size() = 0;
  }
private:
  static _STLP_STATIC_MUTEX& get_S_chunk_allocator_lock()
  { return get_libcpp_wsd().wsd_pt_S_chunk_allocator_lock; }
  static char*& get_S_start_free()
  { return get_libcpp_wsd().wsd_pt_S_start_free; }
  static char*& get_S_end_free()
  { return get_libcpp_wsd().wsd_pt_S_end_free; }
  static size_t& get_S_heap_size()
  { return get_libcpp_wsd().wsd_pt_S_heap_size; }
  static __state_type*& get_S_free_per_thread_states()
  { return get_libcpp_wsd().wsd_pt_S_free_per_thread_states; }
  static pthread_key_t& get_S_key()
  { return get_libcpp_wsd().wsd_pt_S_key; }
  static bool& get_S_key_initialized()
  { return get_libcpp_wsd().wsd_pt_S_key_initialized; }
#else
  static _STLP_STATIC_MUTEX _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  static __state_type *_S_free_per_thread_states;
  static pthread_key_t _S_key;
  static bool _S_key_initialized;
#endif
  // Pthread key under which per thread state is stored.
  // Allocator instances that are currently unclaimed by any thread.
  static void _S_destructor(void *instance);
  // Function to be called on thread exit to reclaim per thread
  // state.
  static __state_type *_S_new_per_thread_state();
public:
  // Return a recycled or new per thread state.
  static __state_type *_S_get_per_thread_state();
private:
  // ensure that the current thread has an associated
  // per thread state.
  class _M_lock;
  friend class _M_lock;
  class _M_lock {
  public:
    _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
    ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
  };
public:
  /* n must be > 0 */
  static void * allocate(size_t& __n);
  /* p may not be 0 */
  static void deallocate(void *__p, size_t __n);
  // boris : versions for per_thread_allocator
  /* n must be > 0 */
  static void * allocate(size_t& __n, __state_type* __a);
  /* p may not be 0 */
  static void deallocate(void *__p, size_t __n, __state_type* __a);
  static void * reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
};
/* Returns an object of size n, and optionally adds to size n free list.*/
/* We assume that n is properly aligned.                                */
/* We hold the allocation lock.                                         */
void *_Pthread_alloc_per_thread_state::_M_refill(size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  size_t __nobjs = 128;
  char * __chunk = _Pthread_alloc_impl::_S_chunk_alloc(__n, __nobjs, this);
  __obj * volatile * __my_free_list;
  __obj * __result;
  __obj * __current_obj, * __next_obj;
  size_t __i;

  if (1 == __nobjs) {
    return __chunk;
  }

  __my_free_list = __free_list + _Pthread_alloc_impl::_S_freelist_index(__n);

  /* Build free list in chunk */
  __result = (__obj *)__chunk;
  *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
  for (__i = 1; ; ++__i) {
    __current_obj = __next_obj;
    __next_obj = (__obj *)((char *)__next_obj + __n);
    if (__nobjs - 1 == __i) {
      __current_obj -> __free_list_link = 0;
      break;
    } else {
      __current_obj -> __free_list_link = __next_obj;
    }
  }
  return __result;
}
void _Pthread_alloc_impl::_S_destructor(void *__instance) {
  _M_lock __lock_instance; // Need to acquire lock here.
  _Pthread_alloc_per_thread_state* __s = (_Pthread_alloc_per_thread_state*)__instance;
  __s -> __next = _S_free_per_thread_states;
  _S_free_per_thread_states = __s;
}

_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_new_per_thread_state() {
  /* lock already held here. */
  if (0 != _S_free_per_thread_states) {
    _Pthread_alloc_per_thread_state *__result = _S_free_per_thread_states;
    _S_free_per_thread_states = _S_free_per_thread_states -> __next;
    return __result;
  }
  else {
    return _STLP_NEW _Pthread_alloc_per_thread_state;
  }
}
_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_get_per_thread_state() {
  int __ret_code;
  __state_type* __result;

  if (_S_key_initialized && ((__result = (__state_type*) pthread_getspecific(_S_key)) != NULL))
    return __result;

  _M_lock __lock_instance; // Need to acquire lock here.
  if (!_S_key_initialized) {
    if (pthread_key_create(&_S_key, _S_destructor)) {
      __THROW_BAD_ALLOC; // failed
    }
    _S_key_initialized = true;
  }

  __result = _S_new_per_thread_state();
  __ret_code = pthread_setspecific(_S_key, __result);
  if (__ret_code) {
    if (__ret_code == ENOMEM)
      __THROW_BAD_ALLOC;
    else // EINVAL
      __THROW_BAD_ALLOC;
  }
  return __result;
}
/* We allocate memory in large chunks in order to avoid fragmenting */
/* the malloc heap too much.                                        */
/* We assume that size is properly aligned.                         */
char *_Pthread_alloc_impl::_S_chunk_alloc(size_t __p_size, size_t &__nobjs, _Pthread_alloc_per_thread_state *__a) {
  typedef _Pthread_alloc_obj __obj;
  {
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
    _M_lock __lock_instance; // Acquire lock for this routine

    __total_bytes = __p_size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else if (__bytes_left >= __p_size) {
      __nobjs = __bytes_left / __p_size;
      __total_bytes = __p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else {
      size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
      // Try to make use of the left-over piece.
      if (__bytes_left > 0) {
        __obj * volatile * __my_free_list = __a->__free_list + _S_freelist_index(__bytes_left);
        ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
        *__my_free_list = (__obj *)_S_start_free;
      }
#  ifdef _SGI_SOURCE
      // Try to get memory that's aligned on something like a
      // cache line boundary, so as to avoid parceling out
      // parts of the same line to different threads and thus
      // possibly different processors.
      {
        const int __cache_line_size = 128; // probable upper bound
        __bytes_to_get &= ~(__cache_line_size - 1);
        _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
        if (0 == _S_start_free) {
          _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
        }
      }
#  else  /* !SGI_SOURCE */
      _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
#  endif
      _S_heap_size += __bytes_to_get;
      _S_end_free = _S_start_free + __bytes_to_get;
    }
  }
  // lock is released here
  return _S_chunk_alloc(__p_size, __nobjs, __a);
}
void *_Pthread_alloc_impl::allocate(size_t& __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }

  __n = _S_round_up(__n);
  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
}
/* p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q -> __free_list_link = *__my_free_list;
  *__my_free_list = __q;
}
// boris : versions for per_thread_allocator
/* n must be > 0 */
void *_Pthread_alloc_impl::allocate(size_t& __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }
  __n = _S_round_up(__n);

  // boris : here, we have to lock per thread state, as we may be getting memory from
  // a different thread's pool.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
}
/* p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  // boris : here, we have to lock per thread state, as we may be returning memory
  // from a different thread.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q -> __free_list_link = *__my_free_list;
  *__my_free_list = __q;
}
void *_Pthread_alloc_impl::reallocate(void *__p, size_t __old_sz, size_t& __new_sz) {
  void * __result;
  size_t __copy_sz;

  if (__old_sz > _MAX_BYTES && __new_sz > _MAX_BYTES) {
    return realloc(__p, __new_sz);
  }

  if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return __p;
  __result = allocate(__new_sz);
  __copy_sz = __new_sz > __old_sz ? __old_sz : __new_sz;
  memcpy(__result, __p, __copy_sz);
  deallocate(__p, __old_sz);
  return __result;
}
#if !defined (__SYMBIAN32__WSD__)
_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_free_per_thread_states = 0;
pthread_key_t _Pthread_alloc_impl::_S_key = 0;
_STLP_STATIC_MUTEX _Pthread_alloc_impl::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
bool _Pthread_alloc_impl::_S_key_initialized = false;
char *_Pthread_alloc_impl::_S_start_free = 0;
char *_Pthread_alloc_impl::_S_end_free = 0;
size_t _Pthread_alloc_impl::_S_heap_size = 0;
#else
// Accessors for the writable static data (WSD) slots used on Symbian.
inline __oom_handler_type& __malloc_alloc_impl::get_oom_handler()
{ return get_libcpp_wsd().wsd__oom_handler; }

inline __node_alloc_impl::_Freelist* __node_alloc_impl::get_S_free_list()
{ return (__node_alloc_impl::_Freelist*)get_libcpp_wsd().wsd_S_free_list; }

inline size_t& __node_alloc_impl::get_S_heap_size()
{ return get_libcpp_wsd().wsd__node_alloc_impl_S_heap_size; }

inline char*& __node_alloc_impl::get_S_start_free()
{ return get_libcpp_wsd().wsd_S_start_free; }

inline char*& __node_alloc_impl::get_S_end_free()
{ return get_libcpp_wsd().wsd_S_end_free; }

inline _STLP_STATIC_MUTEX& _Node_Alloc_Lock::get_allocator_S_lock()
{ return get_libcpp_wsd().wsd_allocator_S_lock; }
#endif /* __SYMBIAN32__WSD__ */
void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n)
{ return _Pthread_alloc_impl::allocate(__n); }
void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n)
{ _Pthread_alloc_impl::deallocate(__p, __n); }
void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n, __state_type* __a)
{ return _Pthread_alloc_impl::allocate(__n, __a); }
void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n, __state_type* __a)
{ _Pthread_alloc_impl::deallocate(__p, __n, __a); }
void * _STLP_CALL _Pthread_alloc::reallocate(void *__p, size_t __old_sz, size_t& __new_sz)
{ return _Pthread_alloc_impl::reallocate(__p, __old_sz, __new_sz); }
_Pthread_alloc_per_thread_state* _STLP_CALL _Pthread_alloc::_S_get_per_thread_state()
{ return _Pthread_alloc_impl::_S_get_per_thread_state(); }
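
// Hedged usage sketch (compiled out): the wrappers above are the internal entry
// points used by the pthread-based allocator front ends declared in <pthread_alloc>.
// Direct use would look roughly like this; __sz may be rounded up by allocate().
#if 0
static void __example_pthread_alloc_usage() {
  size_t __sz = 24;
  void* __mem = _Pthread_alloc::allocate(__sz);   // served from the calling thread's free lists
  _Pthread_alloc::deallocate(__mem, __sz);        // returned to the calling thread's free lists
}
#endif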
_STLP_MOVE_TO_STD_NAMESPACE

#endif /* _STLP_PTHREADS */

_STLP_END_NAMESPACE
#if defined (__SYMBIAN32__WSD__)
// To be called from the stdcpp initialisation (to set up WSD).
void stdcpp_allocators_init()
{
  // init the out-of-memory handler
  std::__malloc_alloc_impl::get_oom_handler() = NULL;

  // init the _Node_Alloc_Lock mutex
  stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iState = _ENeedsNormalInit;
  stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iPtr = 0;
  stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iReentry = 0;

  // init __node_alloc_impl state
  stlp_priv::__node_alloc_impl::get_S_heap_size() = 0;
  stlp_priv::__node_alloc_impl::get_S_start_free() = 0;
  stlp_priv::__node_alloc_impl::get_S_end_free() = 0;

  // initialize the free lists
  for (int count = 0; count < _STLP_NFREELISTS; count++)
    stlp_priv::__node_alloc_impl::_S_free_list[count] = 0;

  // init _Pthread_alloc_impl state
  stlp_priv::_Pthread_alloc_impl::pt_wsd_init();
}
#endif /* __SYMBIAN32__WSD__ */

#undef _S_FREELIST_INDEX