1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/ossrv/genericopenlibs/cppstdlib/stl/src/allocators.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,1288 @@
1.4 +/*
1.5 + * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
1.6 + *
1.7 + * Copyright (c) 1996,1997
1.8 + * Silicon Graphics Computer Systems, Inc.
1.9 + *
1.10 + * Copyright (c) 1997
1.11 + * Moscow Center for SPARC Technology
1.12 + *
1.13 + * Copyright (c) 1999
1.14 + * Boris Fomitchev
1.15 + *
1.16 + * This material is provided "as is", with absolutely no warranty expressed
1.17 + * or implied. Any use is at your own risk.
1.18 + *
1.19 + * Permission to use or copy this software for any purpose is hereby granted
1.20 + * without fee, provided the above notices are retained on all copies.
1.21 + * Permission to modify the code and to distribute modified code is granted,
1.22 + * provided the above notices are retained, and a notice that the code was
1.23 + * modified is included with the above copyright notice.
1.24 + *
1.25 + */
1.26 +
1.27 +#include "stlport_prefix.h"
1.28 +
1.29 +#include <memory>
1.30 +
1.31 +#if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__)) && (!defined (__SYMBIAN32__))
1.32 +# include <malloc.h>
1.33 +//# define _STLP_MALLOC_USABLE_SIZE(__buf) malloc_usable_size(__buf)
1.34 +#endif
1.35 +
1.36 +#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
1.37 +# include <pthread_alloc>
1.38 +# include <cerrno>
1.39 +#endif
1.40 +
1.41 +#include <stl/_threads.h>
1.42 +
1.43 +#include "lock_free_slist.h"
1.44 +
1.45 +#if defined(__SYMBIAN32__WSD__)
1.46 +#include "libstdcppwsd.h"
1.47 +
1.48 +#define __oom_handler get_oom_handler()
1.49 +#define _S_lock get_allocator_S_lock()
1.50 +#define _S_heap_size get_S_heap_size()
1.51 +#define _S_start_free get_S_start_free()
1.52 +#define _S_end_free get_S_end_free()
1.53 +#define _S_free_list get_S_free_list()
1.54 +#define _S_chunk_allocator_lock get_S_chunk_allocator_lock()
1.55 +#define _S_free_per_thread_states get_S_free_per_thread_states()
1.56 +#define _S_key get_S_key()
1.57 +#define _S_key_initialized get_S_key_initialized()
1.58 +#endif
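+// (In __SYMBIAN32__WSD__ builds the DLL cannot rely on ordinary writable
+// static data, so every file-scope static used below is redirected, via the
+// macros above, to an accessor into the per-process WSD structure declared in
+// libstdcppwsd.h. The rest of this file then reads as if the statics were
+// plain globals.)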
1.59 +
1.60 +#ifdef __SYMBIAN32__
1.61 +extern "C"
1.62 +{
1.63 +IMPORT_C void* BackendAlloc(size_t );
1.64 +IMPORT_C void BackendFree(void* );
1.65 +}
1.66 +
1.67 +
1.68 +EXPORT_C void* backend_allocate(size_t __n)
1.69 + {
1.70 + for (;;)
1.71 + {
1.72 + void* p = BackendAlloc(__n);
1.73 +
1.74 + if (p)
1.75 + {
1.76 + return p;
1.77 + }
1.78 +
1.79 +        // set_new_handler uses Dll::Tls, so only this thread's new handler is
1.80 +        // changed for the short time until it is restored. Other threads are unaffected.
1.81 + std::new_handler nh_func = std::set_new_handler(NULL);
1.82 + std::set_new_handler(nh_func);
1.83 +
1.84 + if (nh_func)
1.85 + {
1.86 + nh_func();
1.87 + }
1.88 + else
1.89 + {
1.90 + __THROW(std::bad_alloc());
1.91 + }
1.92 + }
1.93 + }
1.94 +
1.95 +EXPORT_C void backend_free(void* __p)
1.96 + {
1.97 + BackendFree(__p);
1.98 + }
1.99 +#endif
1.100 +
1.101 +#if defined (__WATCOMC__)
1.102 +# pragma warning 13 9
1.103 +# pragma warning 367 9
1.104 +# pragma warning 368 9
1.105 +#endif
1.106 +
1.107 +#if defined (_STLP_SGI_THREADS)
1.108 + // We test whether threads are in use before locking.
1.109 + // Perhaps this should be moved into stl_threads.h, but that
1.110 + // probably makes it harder to avoid the procedure call when
1.111 + // it isn't needed.
1.112 +extern "C" {
1.113 + extern int __us_rsthread_malloc;
1.114 +}
1.115 +#endif
1.116 +
1.117 +// Specialised debug form of malloc which does not produce "false"
1.118 +// memory-leak reports when run with debug CRT libraries.
1.119 +#if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
1.120 +# include <crtdbg.h>
1.121 +inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
1.122 +inline void __stlp_chunck_free(void* __p) { _free_dbg(__p, _CRT_BLOCK); }
1.123 +#else // !_DEBUG
1.124 +# ifdef _STLP_NODE_ALLOC_USE_MALLOC
1.125 +# include <cstdlib>
1.126 +inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
1.127 +inline void __stlp_chunck_free(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
1.128 +# else
1.129 +inline void* __stlp_chunk_malloc(size_t __bytes) {
1.130 + return _STLP_STD::__stl_new(__bytes);
1.131 +}
1.132 +inline void __stlp_chunck_free(void* __p) {
1.133 + _STLP_STD::__stl_delete(__p);
1.134 +}
1.135 +
1.136 +# endif
1.137 +#endif // !_DEBUG
1.138 +
1.139 +#define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)
1.140 +
1.141 +_STLP_BEGIN_NAMESPACE
1.142 +
1.143 +class __malloc_alloc_impl {
1.144 +private:
1.145 + static void* _S_oom_malloc(size_t __n) {
1.146 + __oom_handler_type __my_malloc_handler;
1.147 + void * __result;
1.148 +
1.149 + for (;;) {
1.150 + __my_malloc_handler = __oom_handler;
1.151 + if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
1.152 + (*__my_malloc_handler)();
1.153 + __result = malloc(__n);
1.154 + if (__result) return(__result);
1.155 + }
1.156 +#if defined (_STLP_NEED_UNREACHABLE_RETURN)
1.157 + return 0;
1.158 +#endif
1.159 + }
1.160 +#if defined(__SYMBIAN32__WSD__)
1.161 + static _STLP_STATIC_MEMBER_DECLSPEC __oom_handler_type& get_oom_handler();
1.162 +#else
1.163 + static __oom_handler_type __oom_handler;
1.164 +#endif
1.165 +public:
1.166 + // this one is needed for proper simple_alloc wrapping
1.167 + typedef char value_type;
1.168 + static void* allocate(size_t& __n) {
1.169 + void* __result = malloc(__n);
1.170 + if (0 == __result) {
1.171 + __result = _S_oom_malloc(__n);
1.172 + }
1.173 +#if defined (_STLP_MALLOC_USABLE_SIZE)
1.174 + else {
1.175 + size_t __new_n = _STLP_MALLOC_USABLE_SIZE(__result);
1.176 + /*
1.177 + if (__n != __new_n) {
1.178 + printf("requested size %d, usable %d\n", __n, __new_n);
1.179 + }
1.180 + */
1.181 + __n = __new_n;
1.182 + }
1.183 +#endif
1.184 + return __result;
1.185 + }
1.186 + static void deallocate(void* __p, size_t /* __n */) { free((char*)__p); }
1.187 + static __oom_handler_type set_malloc_handler(__oom_handler_type __f) {
1.188 + __oom_handler_type __old = __oom_handler;
1.189 + __oom_handler = __f;
1.190 + return __old;
1.191 + }
1.192 +#if defined(__SYMBIAN32__WSD__)
1.193 + friend void ::stdcpp_allocators_init();
1.194 +#endif
1.195 +};
1.196 +
1.197 +#if !defined(__SYMBIAN32__WSD__)
1.198 +// malloc_alloc out-of-memory handling
1.199 +__oom_handler_type __malloc_alloc_impl::__oom_handler = __STATIC_CAST(__oom_handler_type, 0);
1.200 +#endif
1.201 +
1.202 +void* _STLP_CALL __malloc_alloc::allocate(size_t& __n)
1.203 +{ return __malloc_alloc_impl::allocate(__n); }
1.204 +__oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
1.205 +{ return __malloc_alloc_impl::set_malloc_handler(__f); }
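+// How a client is expected to use the hook above: install an out-of-memory
+// handler that frees some application-owned memory (or throws) before malloc
+// is retried by _S_oom_malloc. A minimal sketch only; my_release_caches and
+// sketch_install_oom_handler are illustrative names, not part of the library.
+#if 0   // illustrative sketch, not compiled
+static void my_release_caches() {
+  // Free application-owned memory here so the retried malloc can succeed;
+  // if nothing can be freed, throwing stops _S_oom_malloc's retry loop.
+  __THROW_BAD_ALLOC;
+}
+static void sketch_install_oom_handler() {
+  __oom_handler_type __previous = __malloc_alloc::set_malloc_handler(&my_release_caches);
+  (void)__previous;   // could be restored later with another set_malloc_handler call
+}
+#endif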
1.206 +
1.207 +// *******************************************************
1.208 +// Default node allocator.
1.209 +// With a reasonable compiler, this should be roughly as fast as the
1.210 +// original STL class-specific allocators, but with less fragmentation.
1.211 +//
1.212 +// Important implementation properties:
1.213 +// 1. If the client requests an object of size > _MAX_BYTES, the resulting
1.214 +// object will be obtained directly from malloc.
1.215 +// 2. In all other cases, we allocate an object of size exactly
1.216 +// _S_round_up(requested_size). Thus the client has enough size
1.217 +// information that we can return the object to the proper free list
1.218 +// without permanently losing part of the object.
1.219 +//
1.220 +
1.221 +#define _STLP_NFREELISTS 16
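+// A minimal standalone sketch (illustrative only, assuming the usual
+// configuration of _ALIGN == 8 and _MAX_BYTES == 128) of the size-class
+// arithmetic described above: requests are rounded up to a multiple of 8 and
+// mapped onto one of the 16 free lists.
+#if 0   // illustrative sketch, not compiled
+static size_t sketch_round_up(size_t __bytes)        // 70 -> 72
+{ return (__bytes + 7) & ~size_t(7); }
+static size_t sketch_freelist_index(size_t __bytes)  // 70 -> 8
+{ return (__bytes - 1) >> 3; }
+// E.g. a 70 byte request is served from the 72 byte free list (index 8);
+// requests above 128 bytes bypass the pools and go straight to malloc.
+#endif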
1.222 +
1.223 +/*
1.224 + * On Symbian, STLport is built as a DLL and dynamically linked against by
1.225 + * applications, so _STLP_USE_DYNAMIC_LIB should always be defined.
1.226 + * _STLP_LEAKS_PEDANTIC is defined to prevent memory leaks in __node_alloc
1.227 + * when the library is dynamically loaded and unloaded.
1.228 + */
1.229 +#if defined (_STLP_LEAKS_PEDANTIC) && ( defined (_STLP_USE_DYNAMIC_LIB) || defined (__SYMBIAN32__) )
1.230 +/*
1.231 + * We can only do cleanup of the node allocator memory pool if we are
1.232 + * sure that the STLport library is used as a shared one, as that guarantees
1.233 + * the uniqueness of the node allocator instance. Without that guarantee,
1.234 + * node allocator instances might exchange memory blocks, making the
1.235 + * implementation of a cleaning process much more complicated.
1.236 + */
1.237 +# define _STLP_DO_CLEAN_NODE_ALLOC
1.238 +#endif
1.239 +
1.240 +/* When STLport is used without multi-threaded safety we use the node allocator
1.241 + * implementation with locks, as the locks become no-ops. The lock-free
1.242 + * implementation always uses system-specific atomic operations, which are
1.243 + * slower than 'normal' ones.
1.244 + */
1.245 +#if defined (_STLP_THREADS) && \
1.246 + defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
1.247 +/*
1.248 + * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
1.249 + * for this architecture and compiler. That means we can use the non-blocking
1.250 + * implementation of the node-allocation engine.*/
1.251 +# define _STLP_USE_LOCK_FREE_IMPLEMENTATION
1.252 +#endif
1.253 +
1.254 +#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.255 +# if defined (_STLP_THREADS)
1.256 +
1.257 +class _Node_Alloc_Lock {
1.258 +public:
1.259 + _Node_Alloc_Lock() {
1.260 +# if defined (_STLP_SGI_THREADS)
1.261 + if (__us_rsthread_malloc)
1.262 +# endif
1.263 + _S_lock._M_acquire_lock();
1.264 + }
1.265 +
1.266 + ~_Node_Alloc_Lock() {
1.267 +# if defined (_STLP_SGI_THREADS)
1.268 + if (__us_rsthread_malloc)
1.269 +# endif
1.270 + _S_lock._M_release_lock();
1.271 + }
1.272 +#if defined (__SYMBIAN32__WSD__)
1.273 + static _STLP_STATIC_MUTEX& get_allocator_S_lock();
1.274 +#else
1.275 + static _STLP_STATIC_MUTEX _S_lock;
1.276 +#endif
1.277 +};
1.278 +
1.279 +#if !defined(__SYMBIAN32__WSD__)
1.280 +_STLP_STATIC_MUTEX _Node_Alloc_Lock::_S_lock _STLP_MUTEX_INITIALIZER;
1.281 +#endif
1.282 +
1.283 +# else
1.284 +
1.285 +class _Node_Alloc_Lock {
1.286 +public:
1.287 + _Node_Alloc_Lock() { }
1.288 + ~_Node_Alloc_Lock() { }
1.289 +};
1.290 +
1.291 +# endif
1.292 +
1.293 +struct _Node_alloc_obj {
1.294 + _Node_alloc_obj * _M_next;
1.295 +};
1.296 +#endif
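+// The idea behind _Node_alloc_obj (and the lock-free list items used in the
+// other configuration): a block sitting on a free list stores the link to the
+// next free block inside its own, currently unused, storage, so the
+// bookkeeping costs no extra memory. A minimal standalone sketch of that
+// scheme (sketch_* names are illustrative and not part of the library):
+#if 0   // illustrative sketch, not compiled
+struct sketch_node { sketch_node* _M_next; };
+static sketch_node* sketch_head = 0;
+static void sketch_push(void* __freed_block) {
+  sketch_node* __n = static_cast<sketch_node*>(__freed_block);
+  __n->_M_next = sketch_head;       // reuse the freed bytes as the link
+  sketch_head = __n;
+}
+static void* sketch_pop() {         // returns 0 when the list is empty
+  sketch_node* __n = sketch_head;
+  if (__n) sketch_head = __n->_M_next;
+  return __n;
+}
+#endif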
1.297 +
1.298 +class __node_alloc_impl {
1.299 +_STLP_PRIVATE:
1.300 + static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
1.301 + { return (((__bytes) + (size_t)_ALIGN-1) & ~((size_t)_ALIGN - 1)); }
1.302 +
1.303 +#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.304 + typedef _STLP_atomic_freelist::item _Obj;
1.305 + typedef _STLP_atomic_freelist _Freelist;
1.306 + typedef _STLP_atomic_freelist _ChunkList;
1.307 +
1.308 + // Header of blocks of memory that have been allocated as part of
1.309 + // a larger chunk but have not yet been chopped up into nodes.
1.310 + struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
1.311 + char* _M_end; // pointer to end of free memory
1.312 + };
1.313 +#else
1.314 + typedef _Node_alloc_obj _Obj;
1.315 + typedef _Obj* _STLP_VOLATILE _Freelist;
1.316 + typedef _Obj* _ChunkList;
1.317 +#endif
1.318 +
1.319 +private:
1.320 + // Returns an object of size __n, and optionally adds to size __n free list.
1.321 + static _Obj* _S_refill(size_t __n);
1.322 + // Allocates a chunk for nobjs of size __p_size. nobjs may be reduced
1.323 + // if it is inconvenient to allocate the requested number.
1.324 + static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
1.325 + // Chunk allocation state.
1.326 +#if defined(__SYMBIAN32__WSD__)
1.327 + static _Freelist* get_S_free_list();
1.328 +#else
1.329 + static _Freelist _S_free_list[_STLP_NFREELISTS];
1.330 +#endif
1.331 +
1.332 + // Amount of total allocated memory
1.333 +#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.334 + static _STLP_VOLATILE __stl_atomic_t _S_heap_size;
1.335 +#else
1.336 +#if defined(__SYMBIAN32__WSD__)
1.337 + static size_t& get_S_heap_size();
1.338 +#else
1.339 + static size_t _S_heap_size;
1.340 +#endif
1.341 +#endif
1.342 +
1.343 +#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.344 + // List of blocks of free memory
1.345 + static _STLP_atomic_freelist _S_free_mem_blocks;
1.346 +#else
1.347 +#if defined(__SYMBIAN32__WSD__)
1.348 + // Start of the current free memory buffer
1.349 + static char*& get_S_start_free();
1.350 + // End of the current free memory buffer
1.351 + static char*& get_S_end_free();
1.352 +#else
1.353 + // Start of the current free memory buffer
1.354 + static char* _S_start_free;
1.355 + // End of the current free memory buffer
1.356 + static char* _S_end_free;
1.357 +#endif
1.358 +#endif
1.359 +
1.360 +#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.361 +public:
1.362 + // Methods to report alloc/dealloc calls to the counter system.
1.363 +# if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.364 + typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
1.365 +# else
1.366 + typedef __stl_atomic_t _AllocCounter;
1.367 +# endif
1.368 + static _AllocCounter& _STLP_CALL _S_alloc_counter();
1.369 + static void _S_alloc_call();
1.370 + static void _S_dealloc_call();
1.371 +
1.372 +private:
1.373 +  // Free all the allocated chunks of memory
1.374 + static void _S_chunk_dealloc();
1.375 + // Beginning of the linked list of allocated chunks of memory
1.376 + static _ChunkList _S_chunks;
1.377 +#endif /* _STLP_DO_CLEAN_NODE_ALLOC */
1.378 +
1.379 +public:
1.380 + /* __n must be > 0 */
1.381 + static void* _M_allocate(size_t& __n);
1.382 + /* __p may not be 0 */
1.383 + static void _M_deallocate(void *__p, size_t __n);
1.384 +
1.385 +#if defined(__SYMBIAN32__WSD__)
1.386 + friend void ::stdcpp_allocators_init();
1.387 +#endif
1.388 +};
1.389 +
1.390 +#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.391 +void* __node_alloc_impl::_M_allocate(size_t& __n) {
1.392 + __n = _S_round_up(__n);
1.393 + _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
1.394 + _Obj *__r;
1.395 +
1.396 + // Acquire the lock here with a constructor call.
1.397 +  // This ensures that it is released on exit or during stack
1.398 +  // unwinding.
1.399 + _Node_Alloc_Lock __lock_instance;
1.400 +
1.401 + if ( (__r = *__my_free_list) != 0 ) {
1.402 + *__my_free_list = __r->_M_next;
1.403 + } else {
1.404 + __r = _S_refill(__n);
1.405 + }
1.406 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.407 + _S_alloc_call();
1.408 +# endif
1.409 + // lock is released here
1.410 + return __r;
1.411 +}
1.412 +
1.413 +void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
1.414 + _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
1.415 + _Obj * __pobj = __STATIC_CAST(_Obj*, __p);
1.416 +
1.417 + // acquire lock
1.418 + _Node_Alloc_Lock __lock_instance;
1.419 + __pobj->_M_next = *__my_free_list;
1.420 + *__my_free_list = __pobj;
1.421 +
1.422 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.423 + _S_dealloc_call();
1.424 +# endif
1.425 + // lock is released here
1.426 +}
1.427 +
1.428 +/* We allocate memory in large chunks in order to avoid fragmenting */
1.429 +/* the malloc heap too much. */
1.430 +/* We assume that size is properly aligned. */
1.431 +/* We hold the allocation lock. */
1.432 +char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
1.433 + char* __result;
1.434 + size_t __total_bytes = _p_size * __nobjs;
1.435 + size_t __bytes_left = _S_end_free - _S_start_free;
1.436 +
1.437 + if (__bytes_left > 0) {
1.438 + if (__bytes_left >= __total_bytes) {
1.439 + __result = _S_start_free;
1.440 + _S_start_free += __total_bytes;
1.441 + return __result;
1.442 + }
1.443 +
1.444 + if (__bytes_left >= _p_size) {
1.445 + __nobjs = (int)(__bytes_left / _p_size);
1.446 + __total_bytes = _p_size * __nobjs;
1.447 + __result = _S_start_free;
1.448 + _S_start_free += __total_bytes;
1.449 + return __result;
1.450 + }
1.451 +
1.452 + // Try to make use of the left-over piece.
1.453 + _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
1.454 + __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
1.455 + *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
1.456 + }
1.457 +
1.458 + size_t __bytes_to_get =
1.459 + 2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
1.460 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.461 + + sizeof(_Obj)
1.462 +# endif
1.463 + ;
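+  // Worked example of this growth heuristic, with illustrative numbers: for a
+  // 64 byte node size and __nobjs == 20, __total_bytes is 1280; if the pool
+  // has already obtained 16384 bytes, _S_round_up(_S_heap_size >> 4) adds
+  // another 1024, so roughly 2*1280 + 1024 = 3584 bytes are requested below
+  // (plus sizeof(_Obj) when chunk tracking is enabled). Chunks thus grow as
+  // the heap grows, amortising the number of __stlp_chunk_malloc calls.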
1.464 +
1.465 + _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
1.466 + if (0 == _S_start_free) {
1.467 + _Obj* _STLP_VOLATILE* __my_free_list;
1.468 + _Obj* __p;
1.469 +    // Try to make do with what we have. That can't hurt.
1.470 + // We do not try smaller requests, since that tends
1.471 + // to result in disaster on multi-process machines.
1.472 + for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
1.473 + __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
1.474 + __p = *__my_free_list;
1.475 + if (0 != __p) {
1.476 + *__my_free_list = __p -> _M_next;
1.477 + _S_start_free = __REINTERPRET_CAST(char*, __p);
1.478 + _S_end_free = _S_start_free + __i;
1.479 + return _S_chunk_alloc(_p_size, __nobjs);
1.480 + // Any leftover piece will eventually make it to the
1.481 + // right free list.
1.482 + }
1.483 + }
1.484 + _S_end_free = 0; // In case of exception.
1.485 + _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
1.486 + /*
1.487 + (char*)malloc_alloc::allocate(__bytes_to_get);
1.488 + */
1.489 +
1.490 + // This should either throw an
1.491 + // exception or remedy the situation. Thus we assume it
1.492 + // succeeded.
1.493 + }
1.494 +
1.495 + _S_heap_size += __bytes_to_get;
1.496 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.497 + __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
1.498 + _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
1.499 +# endif
1.500 + _S_end_free = _S_start_free + __bytes_to_get;
1.501 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.502 + _S_start_free += sizeof(_Obj);
1.503 +# endif
1.504 + return _S_chunk_alloc(_p_size, __nobjs);
1.505 +}
1.506 +
1.507 +/* Returns an object of size __n, and optionally adds to size __n free list.*/
1.508 +/* We assume that __n is properly aligned. */
1.509 +/* We hold the allocation lock. */
1.510 +_Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
1.511 + int __nobjs = 20;
1.512 + char* __chunk = _S_chunk_alloc(__n, __nobjs);
1.513 +
1.514 + if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);
1.515 +
1.516 + _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
1.517 + _Obj* __result;
1.518 + _Obj* __current_obj;
1.519 + _Obj* __next_obj;
1.520 +
1.521 + /* Build free list in chunk */
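+  /* The chunk holds __nobjs objects: the first one is returned to the caller
+     and the remaining __nobjs - 1 are chained onto the free list. The loop
+     below links each node to its successor except the last, whose link is
+     set to 0 after the loop. */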
1.522 + __result = __REINTERPRET_CAST(_Obj*, __chunk);
1.523 + *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
1.524 + for (--__nobjs; --__nobjs; ) {
1.525 + __current_obj = __next_obj;
1.526 + __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
1.527 + __current_obj->_M_next = __next_obj;
1.528 + }
1.529 + __next_obj->_M_next = 0;
1.530 + return __result;
1.531 +}
1.532 +
1.533 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.534 +void __node_alloc_impl::_S_alloc_call()
1.535 +{ ++_S_alloc_counter(); }
1.536 +
1.537 +void __node_alloc_impl::_S_dealloc_call() {
1.538 + __stl_atomic_t &counter = _S_alloc_counter();
1.539 + if (--counter == 0)
1.540 + { _S_chunk_dealloc(); }
1.541 +}
1.542 +
1.543 +/* We deallocate all the memory chunks */
1.544 +void __node_alloc_impl::_S_chunk_dealloc() {
1.545 + _Obj *__pcur = _S_chunks, *__pnext;
1.546 + while (__pcur != 0) {
1.547 + __pnext = __pcur->_M_next;
1.548 + __stlp_chunck_free(__pcur);
1.549 + __pcur = __pnext;
1.550 + }
1.551 + _S_chunks = 0;
1.552 + _S_start_free = _S_end_free = 0;
1.553 + _S_heap_size = 0;
1.554 +  // reinterpret_cast cannot remove volatility, so use a C-style cast
1.555 + memset((char*)(&_S_free_list[0]), 0, _STLP_NFREELISTS * sizeof(_Obj*));
1.556 +}
1.557 +# endif /* _STLP_DO_CLEAN_NODE_ALLOC */
1.558 +
1.559 +#else /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
1.560 +
1.561 +void* __node_alloc_impl::_M_allocate(size_t& __n) {
1.562 + __n = _S_round_up(__n);
1.563 + _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
1.564 + if (__r == 0)
1.565 + { __r = _S_refill(__n); }
1.566 +
1.567 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.568 + _S_alloc_call();
1.569 +# endif
1.570 + return __r;
1.571 +}
1.572 +
1.573 +void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
1.574 + _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));
1.575 +
1.576 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.577 + _S_dealloc_call();
1.578 +# endif
1.579 +}
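+// _STLP_atomic_freelist (see lock_free_slist.h) is an intrusive stack whose
+// push/pop are retried compare-and-swap operations on the list head; the real
+// implementation is platform specific and also protects against the ABA
+// problem, which the simplified sketch below ignores. Illustrative only,
+// assuming the GCC __sync builtins are available:
+#if 0   // illustrative sketch, not compiled
+struct sketch_item { sketch_item* _M_next; };
+static sketch_item* volatile sketch_top = 0;
+static void sketch_push(sketch_item* __item) {
+  sketch_item* __old_top;
+  do {
+    __old_top = sketch_top;
+    __item->_M_next = __old_top;
+  } while (!__sync_bool_compare_and_swap(&sketch_top, __old_top, __item));
+}
+#endif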
1.580 +
1.581 +/* Returns an object of size __n, and optionally adds additional ones to */
1.582 +/* freelist of objects of size __n. */
1.583 +/* We assume that __n is properly aligned. */
1.584 +__node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
1.585 + int __nobjs = 20;
1.586 + char* __chunk = _S_chunk_alloc(__n, __nobjs);
1.587 +
1.588 + if (__nobjs <= 1)
1.589 + return __REINTERPRET_CAST(_Obj*, __chunk);
1.590 +
1.591 + // Push all new nodes (minus first one) onto freelist
1.592 + _Obj* __result = __REINTERPRET_CAST(_Obj*, __chunk);
1.593 + _Obj* __cur_item = __result;
1.594 + _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
1.595 + for (--__nobjs; __nobjs != 0; --__nobjs) {
1.596 + __cur_item = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
1.597 + __my_freelist->push(__cur_item);
1.598 + }
1.599 + return __result;
1.600 +}
1.601 +
1.602 +/* We allocate memory in large chunks in order to avoid fragmenting */
1.603 +/* the malloc heap too much. */
1.604 +/* We assume that size is properly aligned. */
1.605 +char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
1.606 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.607 +  // We are going to add a small memory block to keep track of all the allocated
1.608 +  // blocks' addresses; we need to do so while respecting the memory alignment.
1.609 +  // The following static assert checks that the reserved block is big enough to store a pointer.
1.610 + _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
1.611 +# endif
1.612 + char* __result = 0;
1.613 + __stl_atomic_t __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
1.614 +
1.615 + _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
1.616 + if (__block != 0) {
1.617 +    // We checked a block out and can now mess with it with impunity.
1.618 + // We'll put the remainder back into the list if we're done with it below.
1.619 + char* __buf_start = __REINTERPRET_CAST(char*, __block);
1.620 + __stl_atomic_t __bytes_left = __block->_M_end - __buf_start;
1.621 +
1.622 + if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__stl_atomic_t, _p_size))) {
1.623 + // There's enough left for at least one object, but not as much as we wanted
1.624 + __result = __buf_start;
1.625 + __nobjs = (int)(__bytes_left/_p_size);
1.626 + __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
1.627 + __bytes_left -= __total_bytes;
1.628 + __buf_start += __total_bytes;
1.629 + }
1.630 + else if (__bytes_left >= __total_bytes) {
1.631 + // The block has enough left to satisfy all that was asked for
1.632 + __result = __buf_start;
1.633 + __bytes_left -= __total_bytes;
1.634 + __buf_start += __total_bytes;
1.635 + }
1.636 +
1.637 + if (__bytes_left != 0) {
1.638 + // There is still some memory left over in block after we satisfied our request.
1.639 + if ((__result != 0) && (__bytes_left >= sizeof(_FreeBlockHeader))) {
1.640 + // We were able to allocate at least one object and there is still enough
1.641 + // left to put remainder back into list.
1.642 + _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
1.643 + __newblock->_M_end = __block->_M_end;
1.644 + _S_free_mem_blocks.push(__newblock);
1.645 + }
1.646 + else {
1.647 + // We were not able to allocate enough for at least one object.
1.648 + // Shove into freelist of nearest (rounded-down!) size.
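+        // (_S_round_up(__x + 1) - _ALIGN yields the largest multiple of _ALIGN
+        //  that is <= __x; e.g. 20 left-over bytes land on the 16 byte list,
+        //  while fewer than _ALIGN left-over bytes are simply abandoned.)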
1.649 + size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
1.650 + if (__rounded_down > 0)
1.651 + _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
1.652 + }
1.653 + }
1.654 + if (__result != 0)
1.655 + return __result;
1.656 + }
1.657 +
1.658 + // We couldn't satisfy it from the list of free blocks, get new memory.
1.659 + __stl_atomic_t __bytes_to_get = 2 * __total_bytes + __STATIC_CAST(__stl_atomic_t, _S_round_up(_S_heap_size >> 4))
1.660 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.661 + + _ALIGN
1.662 +# endif
1.663 + ;
1.664 +
1.665 + __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
1.666 + // Alignment check
1.667 + _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)
1.668 +
1.669 + if (0 == __result) {
1.670 +    // Allocation failed; try to cannibalize from a freelist of a larger object size.
1.671 + for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
1.672 + _Obj* __p = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
1.673 + if (0 != __p) {
1.674 + if (__i < sizeof(_FreeBlockHeader)) {
1.675 + // Not enough to put into list of free blocks, divvy it up here.
1.676 + // Use as much as possible for this request and shove remainder into freelist.
1.677 + __nobjs = (int)(__i/_p_size);
1.678 + __total_bytes = __nobjs * __STATIC_CAST(__stl_atomic_t, _p_size);
1.679 + size_t __bytes_left = __i - __total_bytes;
1.680 + size_t __rounded_down = _S_round_up(__bytes_left+1) - (size_t)_ALIGN;
1.681 + if (__rounded_down > 0) {
1.682 + _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
1.683 + }
1.684 + return __REINTERPRET_CAST(char*, __p);
1.685 + }
1.686 + else {
1.687 + // Add node to list of available blocks and recursively allocate from it.
1.688 + _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
1.689 + __newblock->_M_end = __REINTERPRET_CAST(char*, __p) + __i;
1.690 + _S_free_mem_blocks.push(__newblock);
1.691 + return _S_chunk_alloc(_p_size, __nobjs);
1.692 + }
1.693 + }
1.694 + }
1.695 +
1.696 + // We were not able to find something in a freelist, try to allocate a smaller amount.
1.697 + __bytes_to_get = __total_bytes
1.698 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.699 + + _ALIGN
1.700 +# endif
1.701 + ;
1.702 + __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
1.703 + // Alignment check
1.704 + _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)
1.705 +
1.706 + // This should either throw an exception or remedy the situation.
1.707 + // Thus we assume it succeeded.
1.708 + }
1.709 +
1.710 + _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get);
1.711 +
1.712 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.713 + // We have to track the allocated memory chunks for release on exit.
1.714 + _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
1.715 + __result += _ALIGN;
1.716 + __bytes_to_get -= _ALIGN;
1.717 +# endif
1.718 +
1.719 + if (__bytes_to_get > __total_bytes) {
1.720 + // Push excess memory allocated in this chunk into list of free memory blocks
1.721 + _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
1.722 + __freeblock->_M_end = __result + __bytes_to_get;
1.723 + _S_free_mem_blocks.push(__freeblock);
1.724 + }
1.725 + return __result;
1.726 +}
1.727 +
1.728 +# if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.729 +void __node_alloc_impl::_S_alloc_call()
1.730 +{ _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }
1.731 +
1.732 +void __node_alloc_impl::_S_dealloc_call() {
1.733 + _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
1.734 + if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
1.735 + _S_chunk_dealloc();
1.736 +}
1.737 +
1.738 +/* We deallocate all the memory chunks */
1.739 +void __node_alloc_impl::_S_chunk_dealloc() {
1.740 +  // Note: The allocation counter and the static __node_alloc_cleaner ensure
1.741 +  // that this function will only be called when the (shared) library is
1.742 +  // unloaded or the process shuts down. It's thus not possible that another thread
1.743 + // is currently trying to allocate a node (we're not thread-safe here).
1.744 + //
1.745 +
1.746 +  // Clear the free blocks and all free lists, so that no references to
1.747 +  // deallocated memory are left behind if, for some reason, more memory
1.748 +  // is allocated again during shutdown.
1.749 + _S_free_mem_blocks.clear();
1.750 + _S_heap_size = 0;
1.751 +
1.752 + for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
1.753 + _S_free_list[__i].clear();
1.754 + }
1.755 +
1.756 + // Detach list of chunks and free them all
1.757 + _Obj* __chunk = _S_chunks.clear();
1.758 + while (__chunk != 0) {
1.759 + _Obj* __next = __chunk->_M_next;
1.760 + __stlp_chunck_free(__chunk);
1.761 + __chunk = __next;
1.762 + }
1.763 +}
1.764 +# endif /* _STLP_DO_CLEAN_NODE_ALLOC */
1.765 +
1.766 +#endif /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
1.767 +
1.768 +#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.769 +struct __node_alloc_cleaner {
1.770 + ~__node_alloc_cleaner()
1.771 + {
1.772 + __node_alloc_impl::_S_dealloc_call();
1.773 + }
1.774 +};
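+// _S_alloc_counter() starts at 1: every allocation increments it and every
+// deallocation decrements it, while the extra initial count is owned by the
+// static __node_alloc_cleaner instance created in _S_alloc_counter() below,
+// whose destructor performs a final _S_dealloc_call() when the translation
+// unit is torn down. The pooled chunks are therefore only returned to the
+// system once all user blocks have been freed and the library is being
+// unloaded, in whichever order that happens.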
1.775 +
1.776 +# if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.777 +_STLP_VOLATILE __stl_atomic_t& _STLP_CALL
1.778 +# else
1.779 +__stl_atomic_t& _STLP_CALL
1.780 +# endif
1.781 +__node_alloc_impl::_S_alloc_counter() {
1.782 + static _AllocCounter _S_counter = 1;
1.783 + static __node_alloc_cleaner _S_node_alloc_cleaner;
1.784 + return _S_counter;
1.785 +}
1.786 +#endif
1.787 +
1.788 +#if !defined(__SYMBIAN32__WSD__)
1.789 +#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.790 +_Node_alloc_obj * _STLP_VOLATILE
1.791 +__node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
1.792 += {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1.793 +// The 16 zeros are necessary to make version 4.1 of the SunPro
1.794 +// compiler happy. Otherwise it appears to allocate too little
1.795 +// space for the array.
1.796 +#else
1.797 +_STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
1.798 +_STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
1.799 +#endif
1.800 +
1.801 +#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.802 +char *__node_alloc_impl::_S_start_free = 0;
1.803 +char *__node_alloc_impl::_S_end_free = 0;
1.804 +#endif
1.805 +
1.806 +#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.807 +_STLP_VOLATILE __stl_atomic_t
1.808 +#else
1.809 +size_t
1.810 +#endif
1.811 +__node_alloc_impl::_S_heap_size = 0;
1.812 +#endif //__SYMBIAN32__WSD__
1.813 +
1.814 +#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
1.815 +# if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
1.816 +_STLP_atomic_freelist __node_alloc_impl::_S_chunks;
1.817 +# else
1.818 +_Node_alloc_obj* __node_alloc_impl::_S_chunks = 0;
1.819 +# endif
1.820 +#endif
1.821 +
1.822 +_STLP_DECLSPEC void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
1.823 +{ return __node_alloc_impl::_M_allocate(__n); }
1.824 +
1.825 +_STLP_DECLSPEC void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
1.826 +{ __node_alloc_impl::_M_deallocate(__p, __n); }
1.827 +
1.828 +#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
1.829 +
1.830 +# define _STLP_DATA_ALIGNMENT 8
1.831 +
1.832 +_STLP_MOVE_TO_PRIV_NAMESPACE
1.833 +
1.834 +// *******************************************************
1.835 +// __perthread_alloc implementation
1.836 +union _Pthread_alloc_obj {
1.837 + union _Pthread_alloc_obj * __free_list_link;
1.838 + char __client_data[_STLP_DATA_ALIGNMENT]; /* The client sees this. */
1.839 +};
1.840 +
1.841 +// Pthread allocators don't appear to the client to have meaningful
1.842 +// instances. We do in fact need to associate some state with each
1.843 +// thread. That state is represented by _Pthread_alloc_per_thread_state.
1.844 +
1.845 +struct _Pthread_alloc_per_thread_state {
1.846 + typedef _Pthread_alloc_obj __obj;
1.847 + enum { _S_NFREELISTS = _MAX_BYTES / _STLP_DATA_ALIGNMENT };
1.848 +
1.849 + // Free list link for list of available per thread structures.
1.850 + // When one of these becomes available for reuse due to thread
1.851 + // termination, any objects in its free list remain associated
1.852 + // with it. The whole structure may then be used by a newly
1.853 + // created thread.
1.854 + _Pthread_alloc_per_thread_state() : __next(0)
1.855 + { memset((void *)__CONST_CAST(_Pthread_alloc_obj**, __free_list), 0, (size_t)_S_NFREELISTS * sizeof(__obj *)); }
1.856 + // Returns an object of size __n, and possibly adds to size n free list.
1.857 + void *_M_refill(size_t __n);
1.858 +
1.859 + _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
1.860 + _Pthread_alloc_per_thread_state *__next;
1.861 + // this data member is only to be used by per_thread_allocator, which returns memory to the originating thread.
1.862 + _STLP_mutex _M_lock;
1.863 +};
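+// The association between a thread and its _Pthread_alloc_per_thread_state
+// uses the standard pthread TLS pattern: one shared pthread_key_t, a
+// pthread_setspecific() call to attach a state to the calling thread, and a
+// key destructor that recycles the state on thread exit. A minimal standalone
+// sketch of that pattern (the library itself guards key creation with its own
+// mutex rather than pthread_once; names and error handling are illustrative):
+#if 0   // illustrative sketch, not compiled
+static pthread_key_t  sketch_key;
+static pthread_once_t sketch_once = PTHREAD_ONCE_INIT;
+static void sketch_on_thread_exit(void* __state) { /* recycle __state here */ }
+static void sketch_make_key()
+{ pthread_key_create(&sketch_key, &sketch_on_thread_exit); }
+static void* sketch_get_state() {
+  pthread_once(&sketch_once, &sketch_make_key);
+  void* __state = pthread_getspecific(sketch_key);
+  if (__state == 0) {
+    __state = operator new(64);               // stand-in for a real state object
+    pthread_setspecific(sketch_key, __state); // visible only to this thread
+  }
+  return __state;
+}
+#endif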
1.864 +
1.865 +// Pthread-specific allocator.
1.866 +class _Pthread_alloc_impl {
1.867 +public: // but only for internal use:
1.868 + typedef _Pthread_alloc_per_thread_state __state_type;
1.869 + typedef char value_type;
1.870 +
1.871 + // Allocates a chunk for nobjs of size size. nobjs may be reduced
1.872 + // if it is inconvenient to allocate the requested number.
1.873 + static char *_S_chunk_alloc(size_t __size, size_t &__nobjs, __state_type*);
1.874 +
1.875 + enum {_S_ALIGN = _STLP_DATA_ALIGNMENT};
1.876 +
1.877 + static size_t _S_round_up(size_t __bytes)
1.878 + { return (((__bytes) + (int)_S_ALIGN - 1) & ~((int)_S_ALIGN - 1)); }
1.879 + static size_t _S_freelist_index(size_t __bytes)
1.880 + { return (((__bytes) + (int)_S_ALIGN - 1) / (int)_S_ALIGN - 1); }
1.881 +
1.882 +private:
1.883 + // Chunk allocation state. And other shared state.
1.884 + // Protected by _S_chunk_allocator_lock.
1.885 +#if defined(__SYMBIAN32__WSD__)
1.886 +public:
1.887 + static void pt_wsd_init() {
1.888 + get_S_free_per_thread_states() = 0;
1.889 + get_S_key() = 0;
1.890 + get_S_chunk_allocator_lock()._M_lock.iState = _ENeedsNormalInit;
1.891 + get_S_chunk_allocator_lock()._M_lock.iPtr = 0;
1.892 + get_S_chunk_allocator_lock()._M_lock.iReentry = 0;
1.893 + get_S_key_initialized() = false;
1.894 + get_S_start_free() = 0;
1.895 + get_S_end_free() = 0;
1.896 + get_S_heap_size() = 0;
1.897 + }
1.898 +private:
1.899 + static _STLP_STATIC_MUTEX& get_S_chunk_allocator_lock()
1.900 + { return get_libcpp_wsd().wsd_pt_S_chunk_allocator_lock; }
1.901 + static char*& get_S_start_free()
1.902 + { return get_libcpp_wsd().wsd_pt_S_start_free; }
1.903 + static char*& get_S_end_free()
1.904 + { return get_libcpp_wsd().wsd_pt_S_end_free; }
1.905 + static size_t& get_S_heap_size()
1.906 + { return get_libcpp_wsd().wsd_pt_S_heap_size; }
1.907 + static __state_type*& get_S_free_per_thread_states()
1.908 + { return get_libcpp_wsd().wsd_pt_S_free_per_thread_states; }
1.909 + static pthread_key_t& get_S_key()
1.910 + { return get_libcpp_wsd().wsd_pt_S_key; }
1.911 + static bool& get_S_key_initialized()
1.912 + { return get_libcpp_wsd().wsd_pt_S_key_initialized; }
1.913 +#else
1.914 + static _STLP_STATIC_MUTEX _S_chunk_allocator_lock;
1.915 + static char *_S_start_free;
1.916 + static char *_S_end_free;
1.917 + static size_t _S_heap_size;
1.918 + static __state_type *_S_free_per_thread_states;
1.919 + static pthread_key_t _S_key;
1.920 + static bool _S_key_initialized;
1.921 +#endif
1.922 +  // (_S_key above is the pthread key under which the per thread state is stored;
1.923 +  //  _S_free_per_thread_states holds instances currently unclaimed by any thread.)
1.924 +  // Function to be called on thread exit to reclaim the per thread state:
1.925 +  static void _S_destructor(void *instance);
1.926 +  // Return a recycled or newly created per thread state (lock must be held):
1.927 +  static __state_type *_S_new_per_thread_state();
1.928 +public:
1.929 +  // Ensure the current thread has an associated per thread state and return it.
1.930 + static __state_type *_S_get_per_thread_state();
1.931 +private:
1.932 +  // Scoped lock helper: acquires _S_chunk_allocator_lock on construction
1.933 +  // and releases it on destruction.
1.934 + class _M_lock;
1.935 + friend class _M_lock;
1.936 + class _M_lock {
1.937 + public:
1.938 + _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
1.939 + ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
1.940 + };
1.941 +
1.942 +public:
1.943 +
1.944 + /* n must be > 0 */
1.945 + static void * allocate(size_t& __n);
1.946 +
1.947 + /* p may not be 0 */
1.948 + static void deallocate(void *__p, size_t __n);
1.949 +
1.950 + // boris : versions for per_thread_allocator
1.951 + /* n must be > 0 */
1.952 + static void * allocate(size_t& __n, __state_type* __a);
1.953 +
1.954 + /* p may not be 0 */
1.955 + static void deallocate(void *__p, size_t __n, __state_type* __a);
1.956 +
1.957 + static void * reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
1.958 +};
1.959 +
1.960 +/* Returns an object of size n, and optionally adds to size n free list.*/
1.961 +/* We assume that n is properly aligned. */
1.962 +/* We hold the allocation lock. */
1.963 +void *_Pthread_alloc_per_thread_state::_M_refill(size_t __n) {
1.964 + typedef _Pthread_alloc_obj __obj;
1.965 + size_t __nobjs = 128;
1.966 + char * __chunk = _Pthread_alloc_impl::_S_chunk_alloc(__n, __nobjs, this);
1.967 + __obj * volatile * __my_free_list;
1.968 + __obj * __result;
1.969 + __obj * __current_obj, * __next_obj;
1.970 + size_t __i;
1.971 +
1.972 + if (1 == __nobjs) {
1.973 + return __chunk;
1.974 + }
1.975 +
1.976 + __my_free_list = __free_list + _Pthread_alloc_impl::_S_freelist_index(__n);
1.977 +
1.978 + /* Build free list in chunk */
1.979 + __result = (__obj *)__chunk;
1.980 + *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
1.981 + for (__i = 1; ; ++__i) {
1.982 + __current_obj = __next_obj;
1.983 + __next_obj = (__obj *)((char *)__next_obj + __n);
1.984 + if (__nobjs - 1 == __i) {
1.985 + __current_obj -> __free_list_link = 0;
1.986 + break;
1.987 + } else {
1.988 + __current_obj -> __free_list_link = __next_obj;
1.989 + }
1.990 + }
1.991 + return __result;
1.992 +}
1.993 +
1.994 +void _Pthread_alloc_impl::_S_destructor(void *__instance) {
1.995 + _M_lock __lock_instance; // Need to acquire lock here.
1.996 + _Pthread_alloc_per_thread_state* __s = (_Pthread_alloc_per_thread_state*)__instance;
1.997 + __s -> __next = _S_free_per_thread_states;
1.998 + _S_free_per_thread_states = __s;
1.999 +}
1.1000 +
1.1001 +_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_new_per_thread_state() {
1.1002 + /* lock already held here. */
1.1003 + if (0 != _S_free_per_thread_states) {
1.1004 + _Pthread_alloc_per_thread_state *__result = _S_free_per_thread_states;
1.1005 + _S_free_per_thread_states = _S_free_per_thread_states -> __next;
1.1006 + return __result;
1.1007 + }
1.1008 + else {
1.1009 + return _STLP_NEW _Pthread_alloc_per_thread_state;
1.1010 + }
1.1011 +}
1.1012 +
1.1013 +_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_get_per_thread_state() {
1.1014 + int __ret_code;
1.1015 + __state_type* __result;
1.1016 +
1.1017 + if (_S_key_initialized && ((__result = (__state_type*) pthread_getspecific(_S_key)) != NULL))
1.1018 + return __result;
1.1019 +
1.1020 + /*REFERENCED*/
1.1021 + _M_lock __lock_instance; // Need to acquire lock here.
1.1022 + if (!_S_key_initialized) {
1.1023 + if (pthread_key_create(&_S_key, _S_destructor)) {
1.1024 + __THROW_BAD_ALLOC; // failed
1.1025 + }
1.1026 + _S_key_initialized = true;
1.1027 + }
1.1028 +
1.1029 + __result = _S_new_per_thread_state();
1.1030 + __ret_code = pthread_setspecific(_S_key, __result);
1.1031 + if (__ret_code) {
1.1032 + if (__ret_code == ENOMEM) {
1.1033 + __THROW_BAD_ALLOC;
1.1034 + } else {
1.1035 + // EINVAL
1.1036 + _STLP_ABORT();
1.1037 + }
1.1038 + }
1.1039 + return __result;
1.1040 +}
1.1041 +
1.1042 +/* We allocate memory in large chunks in order to avoid fragmenting */
1.1043 +/* the malloc heap too much. */
1.1044 +/* We assume that size is properly aligned. */
1.1045 +char *_Pthread_alloc_impl::_S_chunk_alloc(size_t __p_size, size_t &__nobjs, _Pthread_alloc_per_thread_state *__a) {
1.1046 + typedef _Pthread_alloc_obj __obj;
1.1047 + {
1.1048 + char * __result;
1.1049 + size_t __total_bytes;
1.1050 + size_t __bytes_left;
1.1051 + /*REFERENCED*/
1.1052 + _M_lock __lock_instance; // Acquire lock for this routine
1.1053 +
1.1054 + __total_bytes = __p_size * __nobjs;
1.1055 + __bytes_left = _S_end_free - _S_start_free;
1.1056 + if (__bytes_left >= __total_bytes) {
1.1057 + __result = _S_start_free;
1.1058 + _S_start_free += __total_bytes;
1.1059 + return __result;
1.1060 + } else if (__bytes_left >= __p_size) {
1.1061 + __nobjs = __bytes_left/__p_size;
1.1062 + __total_bytes = __p_size * __nobjs;
1.1063 + __result = _S_start_free;
1.1064 + _S_start_free += __total_bytes;
1.1065 + return __result;
1.1066 + } else {
1.1067 + size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
1.1068 + // Try to make use of the left-over piece.
1.1069 + if (__bytes_left > 0) {
1.1070 + __obj * volatile * __my_free_list = __a->__free_list + _S_freelist_index(__bytes_left);
1.1071 + ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
1.1072 + *__my_free_list = (__obj *)_S_start_free;
1.1073 + }
1.1074 +# ifdef _SGI_SOURCE
1.1075 + // Try to get memory that's aligned on something like a
1.1076 + // cache line boundary, so as to avoid parceling out
1.1077 + // parts of the same line to different threads and thus
1.1078 + // possibly different processors.
1.1079 + {
1.1080 + const int __cache_line_size = 128; // probable upper bound
1.1081 + __bytes_to_get &= ~(__cache_line_size-1);
1.1082 + _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
1.1083 + if (0 == _S_start_free) {
1.1084 + _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
1.1085 + }
1.1086 + }
1.1087 +# else /* !SGI_SOURCE */
1.1088 + _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
1.1089 +# endif
1.1090 + _S_heap_size += __bytes_to_get;
1.1091 + _S_end_free = _S_start_free + __bytes_to_get;
1.1092 + }
1.1093 + }
1.1094 + // lock is released here
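+  // The recursive call below re-enters with the lock released; it re-acquires
+  // the lock and will normally succeed immediately from the buffer that was
+  // just refilled (or repeat the refill if another thread consumed it first).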
1.1095 + return _S_chunk_alloc(__p_size, __nobjs, __a);
1.1096 +}
1.1097 +
1.1098 +
1.1099 +/* n must be > 0 */
1.1100 +void *_Pthread_alloc_impl::allocate(size_t& __n) {
1.1101 + typedef _Pthread_alloc_obj __obj;
1.1102 + __obj * volatile * __my_free_list;
1.1103 + __obj * __result;
1.1104 + __state_type* __a;
1.1105 +
1.1106 + if (__n > _MAX_BYTES) {
1.1107 + return __malloc_alloc::allocate(__n);
1.1108 + }
1.1109 +
1.1110 + __n = _S_round_up(__n);
1.1111 + __a = _S_get_per_thread_state();
1.1112 +
1.1113 + __my_free_list = __a->__free_list + _S_freelist_index(__n);
1.1114 + __result = *__my_free_list;
1.1115 + if (__result == 0) {
1.1116 + void *__r = __a->_M_refill(__n);
1.1117 + return __r;
1.1118 + }
1.1119 + *__my_free_list = __result->__free_list_link;
1.1120 + return __result;
1.1121 +}
1.1122 +
1.1123 +/* p may not be 0 */
1.1124 +void _Pthread_alloc_impl::deallocate(void *__p, size_t __n) {
1.1125 + typedef _Pthread_alloc_obj __obj;
1.1126 + __obj *__q = (__obj *)__p;
1.1127 + __obj * volatile * __my_free_list;
1.1128 + __state_type* __a;
1.1129 +
1.1130 + if (__n > _MAX_BYTES) {
1.1131 + __malloc_alloc::deallocate(__p, __n);
1.1132 + return;
1.1133 + }
1.1134 +
1.1135 + __a = _S_get_per_thread_state();
1.1136 +
1.1137 + __my_free_list = __a->__free_list + _S_freelist_index(__n);
1.1138 + __q -> __free_list_link = *__my_free_list;
1.1139 + *__my_free_list = __q;
1.1140 +}
1.1141 +
1.1142 +// boris : versions for per_thread_allocator
1.1143 +/* n must be > 0 */
1.1144 +void *_Pthread_alloc_impl::allocate(size_t& __n, __state_type* __a) {
1.1145 + typedef _Pthread_alloc_obj __obj;
1.1146 + __obj * volatile * __my_free_list;
1.1147 + __obj * __result;
1.1148 +
1.1149 + if (__n > _MAX_BYTES) {
1.1150 + return __malloc_alloc::allocate(__n);
1.1151 + }
1.1152 + __n = _S_round_up(__n);
1.1153 +
1.1154 +  // boris : here, we have to lock the per thread state, as we may be getting memory from
1.1155 +  // a different thread's pool.
1.1156 + _STLP_auto_lock __lock(__a->_M_lock);
1.1157 +
1.1158 + __my_free_list = __a->__free_list + _S_freelist_index(__n);
1.1159 + __result = *__my_free_list;
1.1160 + if (__result == 0) {
1.1161 + void *__r = __a->_M_refill(__n);
1.1162 + return __r;
1.1163 + }
1.1164 + *__my_free_list = __result->__free_list_link;
1.1165 + return __result;
1.1166 +}
1.1167 +
1.1168 +/* p may not be 0 */
1.1169 +void _Pthread_alloc_impl::deallocate(void *__p, size_t __n, __state_type* __a) {
1.1170 + typedef _Pthread_alloc_obj __obj;
1.1171 + __obj *__q = (__obj *)__p;
1.1172 + __obj * volatile * __my_free_list;
1.1173 +
1.1174 + if (__n > _MAX_BYTES) {
1.1175 + __malloc_alloc::deallocate(__p, __n);
1.1176 + return;
1.1177 + }
1.1178 +
1.1179 +  // boris : here, we have to lock the per thread state, as we may be returning memory from
1.1180 +  // a different thread.
1.1181 + _STLP_auto_lock __lock(__a->_M_lock);
1.1182 +
1.1183 + __my_free_list = __a->__free_list + _S_freelist_index(__n);
1.1184 + __q -> __free_list_link = *__my_free_list;
1.1185 + *__my_free_list = __q;
1.1186 +}
1.1187 +
1.1188 +void *_Pthread_alloc_impl::reallocate(void *__p, size_t __old_sz, size_t& __new_sz) {
1.1189 + void * __result;
1.1190 + size_t __copy_sz;
1.1191 +
1.1192 + if (__old_sz > _MAX_BYTES && __new_sz > _MAX_BYTES) {
1.1193 + return realloc(__p, __new_sz);
1.1194 + }
1.1195 +
1.1196 + if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return __p;
1.1197 + __result = allocate(__new_sz);
1.1198 + __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
1.1199 + memcpy(__result, __p, __copy_sz);
1.1200 + deallocate(__p, __old_sz);
1.1201 + return __result;
1.1202 +}
1.1203 +#if !defined(__SYMBIAN32__WSD__)
1.1204 +_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_free_per_thread_states = 0;
1.1205 +pthread_key_t _Pthread_alloc_impl::_S_key = 0;
1.1206 +_STLP_STATIC_MUTEX _Pthread_alloc_impl::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
1.1207 +bool _Pthread_alloc_impl::_S_key_initialized = false;
1.1208 +char *_Pthread_alloc_impl::_S_start_free = 0;
1.1209 +char *_Pthread_alloc_impl::_S_end_free = 0;
1.1210 +size_t _Pthread_alloc_impl::_S_heap_size = 0;
1.1211 +#else
1.1212 +
1.1213 +inline __oom_handler_type& __malloc_alloc_impl::get_oom_handler()
1.1214 + {
1.1215 + return get_libcpp_wsd().wsd__oom_handler;
1.1216 + }
1.1217 +
1.1218 +inline __node_alloc_impl::_Freelist* __node_alloc_impl::get_S_free_list()
1.1219 + {
1.1220 + return (__node_alloc_impl::_Freelist*)get_libcpp_wsd().wsd_S_free_list;
1.1221 + }
1.1222 +
1.1223 +inline size_t& __node_alloc_impl::get_S_heap_size()
1.1224 + {
1.1225 + return get_libcpp_wsd().wsd__node_alloc_impl_S_heap_size;
1.1226 + }
1.1227 +
1.1228 +inline char*& __node_alloc_impl::get_S_start_free()
1.1229 + {
1.1230 + return get_libcpp_wsd().wsd_S_start_free;
1.1231 + }
1.1232 +
1.1233 +inline char*& __node_alloc_impl::get_S_end_free()
1.1234 + {
1.1235 + return get_libcpp_wsd().wsd_S_end_free;
1.1236 + }
1.1237 +
1.1238 +inline _STLP_STATIC_MUTEX& _Node_Alloc_Lock::get_allocator_S_lock()
1.1239 + {
1.1240 + return get_libcpp_wsd().wsd_allocator_S_lock;
1.1241 + }
1.1242 +
1.1243 +#endif
1.1244 +
1.1245 +void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n)
1.1246 +{ return _Pthread_alloc_impl::allocate(__n); }
1.1247 +void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n)
1.1248 +{ _Pthread_alloc_impl::deallocate(__p, __n); }
1.1249 +void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n, __state_type* __a)
1.1250 +{ return _Pthread_alloc_impl::allocate(__n, __a); }
1.1251 +void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n, __state_type* __a)
1.1252 +{ _Pthread_alloc_impl::deallocate(__p, __n, __a); }
1.1253 +void * _STLP_CALL _Pthread_alloc::reallocate(void *__p, size_t __old_sz, size_t& __new_sz)
1.1254 +{ return _Pthread_alloc_impl::reallocate(__p, __old_sz, __new_sz); }
1.1255 +_Pthread_alloc_per_thread_state* _STLP_CALL _Pthread_alloc::_S_get_per_thread_state()
1.1256 +{ return _Pthread_alloc_impl::_S_get_per_thread_state(); }
1.1257 +
1.1258 +_STLP_MOVE_TO_STD_NAMESPACE
1.1259 +
1.1260 +#endif
1.1261 +
1.1262 +_STLP_END_NAMESPACE
1.1263 +
1.1264 +
1.1265 +#if defined(__SYMBIAN32__WSD__)
1.1266 +// To be called from the stdcpp initialization (to initialize WSD).
1.1267 +void stdcpp_allocators_init()
1.1268 + {
1.1269 + // init oom handler
1.1270 + std::__malloc_alloc_impl::get_oom_handler() = NULL;
1.1271 +
1.1272 + // lock init
1.1273 + stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iState = _ENeedsNormalInit;
1.1274 + stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iPtr = 0;
1.1275 + stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iReentry = 0;
1.1276 +
1.1277 + // init _node_alloc_impl::x
1.1278 + stlp_priv::__node_alloc_impl::get_S_heap_size() = 0;
1.1279 + stlp_priv::__node_alloc_impl::get_S_start_free() = 0;
1.1280 + stlp_priv::__node_alloc_impl::get_S_end_free() = 0;
1.1281 +
1.1282 + // initialize free list
1.1283 + for (int count = 0; count < _STLP_NFREELISTS; count++)
1.1284 + stlp_priv::__node_alloc_impl::_S_free_list[count] = 0;
1.1285 +
1.1286 + //pthread_alloc_impl
1.1287 + stlp_priv::_Pthread_alloc_impl::pt_wsd_init();
1.1288 + }
1.1289 +#endif
1.1290 +
1.1291 +#undef _S_FREELIST_INDEX