os/ossrv/genericopenlibs/cppstdlib/stl/src/allocators.cpp
     1 /*
     2  * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
     3  *
     4  * Copyright (c) 1996,1997
     5  * Silicon Graphics Computer Systems, Inc.
     6  *
     7  * Copyright (c) 1997
     8  * Moscow Center for SPARC Technology
     9  *
    10  * Copyright (c) 1999
    11  * Boris Fomitchev
    12  *
    13  * This material is provided "as is", with absolutely no warranty expressed
    14  * or implied. Any use is at your own risk.
    15  *
    16  * Permission to use or copy this software for any purpose is hereby granted
    17  * without fee, provided the above notices are retained on all copies.
    18  * Permission to modify the code and to distribute modified code is granted,
    19  * provided the above notices are retained, and a notice that the code was
    20  * modified is included with the above copyright notice.
    21  *
    22  */
    23 
    24 #include "stlport_prefix.h"
    25 
    26 #include <memory>
    27 
    28 #if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__)) && (!defined (__SYMBIAN32__))
    29 #  include <malloc.h>
    30 //#  define _STLP_MALLOC_USABLE_SIZE(__buf) malloc_usable_size(__buf)
    31 #endif
    32 
    33 #if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
    34 #  include <pthread_alloc>
    35 #  include <cerrno>
    36 #endif
    37 
    38 #include <stl/_threads.h>
    39 
    40 #include "lock_free_slist.h"
    41 
    42 #if defined(__SYMBIAN32__WSD__)
    43 #include "libstdcppwsd.h"
    44 
    45 #define __oom_handler 	get_oom_handler()
    46 #define _S_lock 		get_allocator_S_lock()
    47 #define _S_heap_size 	get_S_heap_size()
    48 #define _S_start_free 	get_S_start_free()
    49 #define _S_end_free		get_S_end_free()
    50 #define _S_free_list 	get_S_free_list()
    51 #define _S_chunk_allocator_lock 	get_S_chunk_allocator_lock()
    52 #define _S_free_per_thread_states	get_S_free_per_thread_states()
    53 #define _S_key						get_S_key()
    54 #define _S_key_initialized			get_S_key_initialized()
    55 #endif
    56 
    57 #ifdef __SYMBIAN32__
    58 extern "C"
    59 {
    60 IMPORT_C void* BackendAlloc(size_t );
    61 IMPORT_C void BackendFree(void* );
    62 }
    63 
    64 
    65 EXPORT_C void* backend_allocate(size_t __n)
    66     {
    67     for (;;)
    68         {
    69         void* p = BackendAlloc(__n);
    70 
    71         if (p)
    72             {
    73             return p; 
    74             }
    75         
    76         // set_new_handler uses Dll::Tls, so only this thread's new handler is changed
    77         // for the short time before it is restored below. Other threads are unaffected.
    78         std::new_handler nh_func  = std::set_new_handler(NULL);
    79         std::set_new_handler(nh_func);
    80 
    81         if (nh_func)
    82             {
    83             nh_func();
    84             }
    85         else
    86             {
    87             __THROW(std::bad_alloc());
    88             }
    89         }
    90     }
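// ---------------------------------------------------------------------------
// Added illustrative sketch (not part of the original source, kept out of the
// build with #if 0): the retry loop in backend_allocate() above keeps calling
// BackendAlloc() for as long as a new_handler is installed, so a client can
// make allocation recoverable by registering a handler that releases memory.
// All demo_* names below are hypothetical and exist only for this example.
#if 0
#include <new>

namespace demo
    {
    // Hypothetical stand-in: a real application would free cached memory here
    // and return true if anything could be released.
    bool demo_release_some_cache() { return false; }

    void demo_retrying_new_handler()
        {
        // Called (indirectly) by backend_allocate() when BackendAlloc() fails:
        // either free some memory so the next BackendAlloc() can succeed, or
        // deregister ourselves so the next iteration throws std::bad_alloc.
        if (!demo_release_some_cache())
            {
            std::set_new_handler(0);
            }
        }

    void demo_install()
        {
        std::set_new_handler(demo_retrying_new_handler);
        }
    }
#endif
// ---------------------------------------------------------------------------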
    91 
    92 EXPORT_C void  backend_free(void* __p)
    93     {
    94     BackendFree(__p);
    95     }
    96 #endif
    97 
    98 #if defined (__WATCOMC__)
    99 #  pragma warning 13 9
   100 #  pragma warning 367 9
   101 #  pragma warning 368 9
   102 #endif
   103 
   104 #if defined (_STLP_SGI_THREADS)
   105   // We test whether threads are in use before locking.
   106   // Perhaps this should be moved into stl_threads.h, but that
   107   // probably makes it harder to avoid the procedure call when
   108   // it isn't needed.
   109 extern "C" {
   110   extern int __us_rsthread_malloc;
   111 }
   112 #endif
   113 
   114 // Specialised debug form of malloc which does not report "false"
   115 // memory leaks when run with the debug CRT libraries.
   116 #if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
   117 #  include <crtdbg.h>
   118 inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
   119 inline void __stlp_chunck_free(void* __p) { _free_dbg(__p, _CRT_BLOCK); }
   120 #else  // !_DEBUG
   121 #  ifdef _STLP_NODE_ALLOC_USE_MALLOC
   122 #    include <cstdlib>
   123 inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
   124 inline void __stlp_chunck_free(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
   125 #  else
   126 inline void* __stlp_chunk_malloc(size_t __bytes) {
   127     return _STLP_STD::__stl_new(__bytes);
   128 }
   129 inline void __stlp_chunck_free(void* __p) {
   130     _STLP_STD::__stl_delete(__p);     
   131 }
   132  
   133 #  endif
   134 #endif  // !_DEBUG
   135 
   136 #define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)
   137 
   138 _STLP_BEGIN_NAMESPACE
   139 
   140 class __malloc_alloc_impl {
   141 private:
   142   static void* _S_oom_malloc(size_t __n) {
   143     __oom_handler_type __my_malloc_handler;
   144     void * __result;
   145 
   146     for (;;) {
   147       __my_malloc_handler = __oom_handler;
   148       if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
   149       (*__my_malloc_handler)();
   150       __result = malloc(__n);
   151       if (__result) return(__result);
   152     }
   153 #if defined (_STLP_NEED_UNREACHABLE_RETURN)
   154     return 0;
   155 #endif
   156   }
   157 #if defined(__SYMBIAN32__WSD__)  
   158   static _STLP_STATIC_MEMBER_DECLSPEC __oom_handler_type& get_oom_handler();
   159 #else
   160   static __oom_handler_type __oom_handler;
   161 #endif
   162 public:
   163   // this one is needed for proper simple_alloc wrapping
   164   typedef char value_type;
   165   static void* allocate(size_t& __n) {
   166     void* __result = malloc(__n);
   167     if (0 == __result) {
   168       __result = _S_oom_malloc(__n);
   169     }
   170 #if defined (_STLP_MALLOC_USABLE_SIZE)
   171     else {
   172       size_t __new_n = _STLP_MALLOC_USABLE_SIZE(__result);
   173       /*
   174       if (__n != __new_n) {
   175         printf("requested size %d, usable %d\n", __n, __new_n);
   176       }
   177       */
   178       __n = __new_n;
   179     }
   180 #endif
   181     return __result;
   182   }
   183   static void deallocate(void* __p, size_t /* __n */) { free((char*)__p); }
   184   static __oom_handler_type set_malloc_handler(__oom_handler_type __f) {
   185     __oom_handler_type __old = __oom_handler;
   186     __oom_handler = __f;
   187     return __old;
   188   }
   189 #if defined(__SYMBIAN32__WSD__)  
   190   friend void ::stdcpp_allocators_init();
   191 #endif
   192 };
   193 
   194 #if !defined(__SYMBIAN32__WSD__)
   195 // malloc_alloc out-of-memory handling
   196 __oom_handler_type __malloc_alloc_impl::__oom_handler = __STATIC_CAST(__oom_handler_type, 0);
   197 #endif
   198 
   199 void* _STLP_CALL __malloc_alloc::allocate(size_t& __n)
   200 { return __malloc_alloc_impl::allocate(__n); }
   201 __oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
   202 { return __malloc_alloc_impl::set_malloc_handler(__f); }
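// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): a minimal standalone
// model of the __malloc_alloc_impl logic above -- call malloc(), and on
// failure keep invoking the registered out-of-memory handler until malloc()
// succeeds or no handler is installed, in which case bad_alloc is thrown.
// All demo_* names are assumptions for this example only.
#if 0
#include <cstddef>
#include <cstdlib>
#include <new>

typedef void (*demo_oom_handler_t)();

static demo_oom_handler_t demo_handler = 0;        // stands in for __oom_handler

static void* demo_malloc_with_handler(std::size_t __n)
{
  void* __p = std::malloc(__n);
  while (__p == 0) {
    if (demo_handler == 0)
      throw std::bad_alloc();                      // mirrors __THROW_BAD_ALLOC
    (*demo_handler)();                             // handler is expected to release memory
    __p = std::malloc(__n);
  }
  return __p;
}
#endif
// ---------------------------------------------------------------------------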
   203 
   204 // *******************************************************
   205 // Default node allocator.
   206 // With a reasonable compiler, this should be roughly as fast as the
   207 // original STL class-specific allocators, but with less fragmentation.
   208 //
   209 // Important implementation properties:
   210 // 1. If the client requests an object of size > _MAX_BYTES, the resulting
   211 //    object will be obtained directly from malloc.
   212 // 2. In all other cases, we allocate an object of size exactly
   213 //    _S_round_up(requested_size).  Thus the client has enough size
   214 //    information that we can return the object to the proper free list
   215 //    without permanently losing part of the object.
   216 //
   217 
   218 #define _STLP_NFREELISTS 16
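// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): worked example of the
// size-class arithmetic used by _S_round_up and _S_FREELIST_INDEX, assuming
// the classic SGI parameters _ALIGN == 8, _ALIGN_SHIFT == 3, _MAX_BYTES == 128
// (an assumption; the actual values come from stl/_alloc.h).
#if 0
#include <cstddef>

static std::size_t demo_round_up(std::size_t __bytes)        // mirrors _S_round_up
{ return (__bytes + (std::size_t)8 - 1) & ~((std::size_t)8 - 1); }

static std::size_t demo_freelist_index(std::size_t __bytes)  // mirrors _S_FREELIST_INDEX
{ return (__bytes - 1) >> 3; }

// A request for 13 bytes is rounded up to 16 and served from bucket 1; a
// request for 64 bytes stays 64 and maps to bucket 7; 128 bytes map to
// bucket 15, the last of the _STLP_NFREELISTS buckets. Anything above
// _MAX_BYTES bypasses the free lists and goes straight to malloc.
//   demo_round_up(13)  == 16,  demo_freelist_index(16)  == 1
//   demo_round_up(64)  == 64,  demo_freelist_index(64)  == 7
//   demo_round_up(128) == 128, demo_freelist_index(128) == 15
#endif
// ---------------------------------------------------------------------------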
   219 
   220 /*
   221  * On Symbian, STLport is built as a DLL and dynamically linked against by
   222  * applications, so _STLP_USE_DYNAMIC_LIB should always be defined.
   223  * _STLP_LEAKS_PEDANTIC is defined to prevent memory leaks in __node_alloc
   224  * when the library is dynamically loaded and unloaded.
   225  */
   226 #if defined (_STLP_LEAKS_PEDANTIC) && ( defined (_STLP_USE_DYNAMIC_LIB) || defined (__SYMBIAN32__) )
   227 /*
   228  * We can only clean up the node allocator memory pool if we are sure
   229  * that the STLport library is used as a shared one, as that guarantees
   230  * a single node allocator instance. Without that guarantee, node
   231  * allocator instances might exchange memory blocks, making the
   232  * implementation of a cleanup process much more complicated.
   233  */
   234 #  define _STLP_DO_CLEAN_NODE_ALLOC
   235 #endif
   236 
   237 /* When STLport is built without multi-threaded safety we use the node allocator
   238  * implementation with locks, since the locks become no-ops. The lock-free
   239  * implementation always uses system-specific atomic operations, which are slower
   240  * than 'normal' ones.
   241  */
   242 #if defined (_STLP_THREADS) && \
   243     defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
   244 /*
   245  * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
   246  * for this architecture and compiler.  That means we can use the non-blocking
   247  * implementation of the node-allocation engine.*/
   248 #  define _STLP_USE_LOCK_FREE_IMPLEMENTATION
   249 #endif
   250 
   251 #if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   252 #  if defined (_STLP_THREADS)
   253 
   254 class _Node_Alloc_Lock {
   255 public:
   256   _Node_Alloc_Lock() {
   257 #  if defined (_STLP_SGI_THREADS)
   258     if (__us_rsthread_malloc)
   259 #  endif
   260       _S_lock._M_acquire_lock();
   261   }
   262 
   263   ~_Node_Alloc_Lock() {
   264 #  if defined (_STLP_SGI_THREADS)
   265     if (__us_rsthread_malloc)
   266 #  endif
   267         _S_lock._M_release_lock();
   268   }
   269 #if defined (__SYMBIAN32__WSD__)
   270   static _STLP_STATIC_MUTEX& get_allocator_S_lock();
   271 #else
   272   static _STLP_STATIC_MUTEX _S_lock;
   273 #endif
   274 };
   275 
   276 #if !defined(__SYMBIAN32__WSD__)
   277 _STLP_STATIC_MUTEX _Node_Alloc_Lock::_S_lock _STLP_MUTEX_INITIALIZER;
   278 #endif
   279 
   280 #  else
   281 
   282 class _Node_Alloc_Lock {
   283 public:
   284   _Node_Alloc_Lock() { }
   285   ~_Node_Alloc_Lock() { }
   286 };
   287 
   288 #  endif
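// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): the _Node_Alloc_Lock
// class above is a scope guard -- the constructor acquires the mutex and the
// destructor releases it, so every exit path (including exceptions thrown by
// _S_refill/_S_chunk_alloc) unlocks. A plain pthread equivalent, with all
// demo_* names being assumptions for this example:
#if 0
#include <pthread.h>

class demo_scoped_lock {
public:
  explicit demo_scoped_lock(pthread_mutex_t& __m) : _M_mut(__m)
  { pthread_mutex_lock(&_M_mut); }
  ~demo_scoped_lock()
  { pthread_mutex_unlock(&_M_mut); }   // runs on normal return and during unwinding
private:
  pthread_mutex_t& _M_mut;
  demo_scoped_lock(const demo_scoped_lock&);              // non-copyable (pre-C++11 idiom)
  demo_scoped_lock& operator=(const demo_scoped_lock&);
};

static pthread_mutex_t demo_mutex = PTHREAD_MUTEX_INITIALIZER;

static void demo_critical_section()
{
  demo_scoped_lock __guard(demo_mutex);   // same pattern as __lock_instance below
  // ... touch the shared free lists here; the mutex is released when
  // __guard goes out of scope.
}
#endif
// ---------------------------------------------------------------------------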
   289 
   290 struct _Node_alloc_obj {
   291   _Node_alloc_obj * _M_next;
   292 };
   293 #endif
   294 
   295 class __node_alloc_impl {
   296 _STLP_PRIVATE:
   297   static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
   298   { return (((__bytes) + (size_t)_ALIGN-1) & ~((size_t)_ALIGN - 1)); }
   299 
   300 #if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   301   typedef _STLP_atomic_freelist::item   _Obj;
   302   typedef _STLP_atomic_freelist         _Freelist;
   303   typedef _STLP_atomic_freelist         _ChunkList;
   304 
   305   // Header of blocks of memory that have been allocated as part of
   306   // a larger chunk but have not yet been chopped up into nodes.
   307   struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
   308     char* _M_end;     // pointer to end of free memory
   309   };
   310 #else
   311   typedef _Node_alloc_obj       _Obj;
   312   typedef _Obj* _STLP_VOLATILE  _Freelist;
   313   typedef _Obj*                 _ChunkList;
   314 #endif
   315 
   316 private:
   317   // Returns an object of size __n, and optionally adds entries to the size-__n free list.
   318   static _Obj* _S_refill(size_t __n);
   319   // Allocates a chunk for nobjs of size __p_size.  nobjs may be reduced
   320   // if it is inconvenient to allocate the requested number.
   321   static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
   322   // Chunk allocation state.
   323 #if defined(__SYMBIAN32__WSD__)  
   324   static _Freelist* get_S_free_list();
   325 #else
   326   static _Freelist _S_free_list[_STLP_NFREELISTS];
   327 #endif
   328   
   329   // Amount of total allocated memory
   330 #if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   331   static _STLP_VOLATILE __stl_atomic_t _S_heap_size;
   332 #else
   333 #if defined(__SYMBIAN32__WSD__)
   334   static size_t& get_S_heap_size();
   335 #else
   336   static size_t _S_heap_size;
   337 #endif
   338 #endif
   339 
   340 #if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   341   // List of blocks of free memory
   342   static _STLP_atomic_freelist  _S_free_mem_blocks;
   343 #else
   344 #if defined(__SYMBIAN32__WSD__)
   345   // Start of the current free memory buffer
   346   static char*& get_S_start_free();
   347   // End of the current free memory buffer
   348   static char*& get_S_end_free();
   349 #else
   350   // Start of the current free memory buffer
   351   static char* _S_start_free;
   352   // End of the current free memory buffer
   353   static char* _S_end_free;
   354 #endif
   355 #endif
   356 
   357 #if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   358 public:
   359   // Methods to report alloc/dealloc calls to the counter system.
   360 #  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   361   typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
   362 #  else
   363   typedef __stl_atomic_t _AllocCounter;
   364 #  endif
   365   static _AllocCounter& _STLP_CALL _S_alloc_counter();
   366   static void _S_alloc_call();
   367   static void _S_dealloc_call();
   368 
   369 private:
   370   // Free all the allocated chunks of memory
   371   static void _S_chunk_dealloc();
   372   // Beginning of the linked list of allocated chunks of memory
   373   static _ChunkList _S_chunks;
   374 #endif /* _STLP_DO_CLEAN_NODE_ALLOC */
   375 
   376 public:
   377   /* __n must be > 0      */
   378   static void* _M_allocate(size_t& __n);
   379   /* __p may not be 0 */
   380   static void _M_deallocate(void *__p, size_t __n);
   381   
   382 #if defined(__SYMBIAN32__WSD__)
   383 	friend void ::stdcpp_allocators_init();
   384 #endif
   385 };
   386 
   387 #if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   388 void* __node_alloc_impl::_M_allocate(size_t& __n) {
   389   __n = _S_round_up(__n);
   390   _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
   391   _Obj *__r;
   392 
   393   // Acquire the lock here with a constructor call.
   394   // This ensures that it is released in exit or during stack
   395   // unwinding.
   396   _Node_Alloc_Lock __lock_instance;
   397 
   398   if ( (__r  = *__my_free_list) != 0 ) {
   399     *__my_free_list = __r->_M_next;
   400   } else {
   401     __r = _S_refill(__n);
   402   }
   403 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   404   _S_alloc_call();
   405 #  endif
   406   // lock is released here
   407   return __r;
   408 }
   409 
   410 void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
   411   _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
   412   _Obj * __pobj = __STATIC_CAST(_Obj*, __p);
   413 
   414   // acquire lock
   415   _Node_Alloc_Lock __lock_instance;
   416   __pobj->_M_next = *__my_free_list;
   417   *__my_free_list = __pobj;
   418 
   419 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   420   _S_dealloc_call();
   421 #  endif
   422   // lock is released here
   423 }
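// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): the contract of the raw
// interface above. _M_allocate() may round __n up and reports the rounded size
// back through the reference; _M_deallocate() must be given that same size,
// because the size alone identifies the free list a node belongs to. The
// namespace qualification of __node_alloc is omitted here and depends on the
// STLport configuration.
#if 0
#include <cstring>

static void demo_node_alloc_roundtrip()
{
  size_t __sz = 13;                                // will be rounded up internally
  void* __p = __node_alloc::_M_allocate(__sz);     // __sz now holds the rounded size
  memset(__p, 0, __sz);                            // the full rounded size is usable
  __node_alloc::_M_deallocate(__p, __sz);          // must pass the same (rounded) size back
}
#endif
// ---------------------------------------------------------------------------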
   424 
   425 /* We allocate memory in large chunks in order to avoid fragmenting     */
   426 /* the malloc heap too much.                                            */
   427 /* We assume that size is properly aligned.                             */
   428 /* We hold the allocation lock.                                         */
   429 char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
   430   char* __result;
   431   size_t __total_bytes = _p_size * __nobjs;
   432   size_t __bytes_left = _S_end_free - _S_start_free;
   433 
   434   if (__bytes_left > 0) {
   435     if (__bytes_left >= __total_bytes) {
   436       __result = _S_start_free;
   437       _S_start_free += __total_bytes;
   438       return __result;
   439     }
   440 
   441     if (__bytes_left >= _p_size) {
   442       __nobjs = (int)(__bytes_left / _p_size);
   443       __total_bytes = _p_size * __nobjs;
   444       __result = _S_start_free;
   445       _S_start_free += __total_bytes;
   446       return __result;
   447     }
   448 
   449     // Try to make use of the left-over piece.
   450     _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
   451     __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
   452     *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
   453   }
   454 
   455   size_t __bytes_to_get =
   456     2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
   457 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   458     + sizeof(_Obj)
   459 #  endif
   460     ;
   461 
   462   _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
   463   if (0 == _S_start_free) {
   464     _Obj* _STLP_VOLATILE* __my_free_list;
   465     _Obj* __p;
   466     // Try to make do with what we have.  That can't hurt.
   467     // We do not try smaller requests, since that tends
   468     // to result in disaster on multi-process machines.
   469     for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
   470       __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
   471       __p = *__my_free_list;
   472       if (0 != __p) {
   473         *__my_free_list = __p -> _M_next;
   474         _S_start_free = __REINTERPRET_CAST(char*, __p);
   475         _S_end_free = _S_start_free + __i;
   476         return _S_chunk_alloc(_p_size, __nobjs);
   477         // Any leftover piece will eventually make it to the
   478         // right free list.
   479       }
   480     }
   481     _S_end_free = 0;    // In case of exception.
   482     _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
   483     /*
   484     (char*)malloc_alloc::allocate(__bytes_to_get);
   485     */
   486 
   487     // This should either throw an
   488     // exception or remedy the situation.  Thus we assume it
   489     // succeeded.
   490   }
   491 
   492   _S_heap_size += __bytes_to_get;
   493 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   494   __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
   495   _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
   496 #  endif
   497   _S_end_free = _S_start_free + __bytes_to_get;
   498 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   499   _S_start_free += sizeof(_Obj);
   500 #  endif
   501   return _S_chunk_alloc(_p_size, __nobjs);
   502 }
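// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): worked example of the
// growth heuristic above, assuming _ALIGN == 8 and ignoring the optional
// _STLP_DO_CLEAN_NODE_ALLOC header. For a refill of 20 nodes of 16 bytes when
// 4096 bytes have already been handed out:
//   __total_bytes  = 16 * 20                         = 320
//   __bytes_to_get = 2 * 320 + _S_round_up(4096 >> 4) = 640 + 256 = 896
// i.e. each refill grabs roughly twice what it immediately needs plus 1/16 of
// the heap allocated so far, so chunk sizes grow with the program's usage.
#if 0
#include <cstddef>

static std::size_t demo_bytes_to_get(std::size_t __p_size, int __nobjs,
                                     std::size_t __heap_size)
{
  std::size_t __total = __p_size * __nobjs;
  std::size_t __extra = ((__heap_size >> 4) + 7) & ~(std::size_t)7;  // _S_round_up, _ALIGN == 8
  return 2 * __total + __extra;          // demo_bytes_to_get(16, 20, 4096) == 896
}
#endif
// ---------------------------------------------------------------------------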
   503 
   504 /* Returns an object of size __n, and optionally adds to size __n free list.*/
   505 /* We assume that __n is properly aligned.                                  */
   506 /* We hold the allocation lock.                                             */
   507 _Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
   508   int __nobjs = 20;
   509   char* __chunk = _S_chunk_alloc(__n, __nobjs);
   510 
   511   if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);
   512 
   513   _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
   514   _Obj* __result;
   515   _Obj* __current_obj;
   516   _Obj* __next_obj;
   517 
   518   /* Build free list in chunk */
   519   __result = __REINTERPRET_CAST(_Obj*, __chunk);
   520   *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
   521   for (--__nobjs; --__nobjs; ) {
   522     __current_obj = __next_obj;
   523     __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
   524     __current_obj->_M_next = __next_obj;
   525   }
   526   __next_obj->_M_next = 0;
   527   return __result;
   528 }
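// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): a standalone version of
// what _S_refill does with the chunk it gets back -- carve a raw buffer into
// __nobjs fixed-size nodes, hand the first node to the caller and thread the
// remaining nodes into a singly linked free list. Assumes __nobjs >= 2 and
// __node_size >= sizeof(demo_node*), matching the real preconditions.
#if 0
#include <cstddef>

struct demo_node { demo_node* _M_next; };

static demo_node* demo_carve(char* __buf, std::size_t __node_size, int __nobjs,
                             demo_node** __free_list)
{
  demo_node* __first = reinterpret_cast<demo_node*>(__buf);            // returned to caller
  demo_node* __cur   = reinterpret_cast<demo_node*>(__buf + __node_size);
  *__free_list = __cur;                                                // rest goes on the list
  for (int __i = 2; __i < __nobjs; ++__i) {
    demo_node* __next = reinterpret_cast<demo_node*>(__buf + __i * __node_size);
    __cur->_M_next = __next;
    __cur = __next;
  }
  __cur->_M_next = 0;                                                  // terminate the list
  return __first;
}
#endif
// ---------------------------------------------------------------------------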
   529 
   530 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   531 void __node_alloc_impl::_S_alloc_call()
   532 { ++_S_alloc_counter(); }
   533 
   534 void __node_alloc_impl::_S_dealloc_call() {
   535   __stl_atomic_t &counter = _S_alloc_counter();
   536   if (--counter == 0)
   537   { _S_chunk_dealloc(); }
   538 }
   539 
   540 /* We deallocate all the memory chunks      */
   541 void __node_alloc_impl::_S_chunk_dealloc() {
   542   _Obj *__pcur = _S_chunks, *__pnext;
   543   while (__pcur != 0) {
   544     __pnext = __pcur->_M_next;
   545     __stlp_chunck_free(__pcur);
   546     __pcur = __pnext;
   547   }
   548   _S_chunks = 0;
   549   _S_start_free = _S_end_free = 0;
   550   _S_heap_size = 0;
   551   // reinterpret_cast cannot remove volatility, so a C-style cast is used here.
   552   memset((char*)(&_S_free_list[0]), 0, _STLP_NFREELISTS * sizeof(_Obj*));
   553 }
   554 #  endif /* _STLP_DO_CLEAN_NODE_ALLOC */
   555 
   556 #else /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
   557 
   558 void* __node_alloc_impl::_M_allocate(size_t& __n) {
   559   __n = _S_round_up(__n);
   560   _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
   561   if (__r  == 0)
   562   { __r = _S_refill(__n); }
   563 
   564 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   565   _S_alloc_call();
   566 #  endif
   567   return __r;
   568 }
   569 
   570 void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
   571   _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));
   572 
   573 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   574   _S_dealloc_call();
   575 #  endif
   576 }
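// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): a conceptual model of
// the lock-free freelist (_STLP_atomic_freelist from lock_free_slist.h) that
// the push()/pop() calls above rely on -- a Treiber stack whose operations
// retry a compare-and-swap on the head pointer. This simplified version uses
// the GCC __sync builtins and ignores the ABA problem that the real
// implementation must handle (e.g. via a tagged / double-width CAS).
#if 0
struct demo_item { demo_item* _M_next; };

struct demo_lock_free_stack {
  demo_item* volatile _M_head;

  void push(demo_item* __item) {
    demo_item* __old;
    do {
      __old = _M_head;
      __item->_M_next = __old;
    } while (!__sync_bool_compare_and_swap(&_M_head, __old, __item));
  }

  demo_item* pop() {
    demo_item* __old;
    demo_item* __next;
    do {
      __old = _M_head;
      if (__old == 0)
        return 0;                        // empty
      __next = __old->_M_next;           // ABA hazard in real code
    } while (!__sync_bool_compare_and_swap(&_M_head, __old, __next));
    return __old;
  }
};
#endif
// ---------------------------------------------------------------------------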
   577 
   578 /* Returns an object of size __n, and optionally adds additional ones to    */
   579 /* freelist of objects of size __n.                                         */
   580 /* We assume that __n is properly aligned.                                  */
   581 __node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
   582   int __nobjs = 20;
   583   char* __chunk = _S_chunk_alloc(__n, __nobjs);
   584 
   585   if (__nobjs <= 1)
   586     return __REINTERPRET_CAST(_Obj*, __chunk);
   587 
   588   // Push all new nodes (minus first one) onto freelist
   589   _Obj* __result   = __REINTERPRET_CAST(_Obj*, __chunk);
   590   _Obj* __cur_item = __result;
   591   _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
   592   for (--__nobjs; __nobjs != 0; --__nobjs) {
   593     __cur_item  = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
   594     __my_freelist->push(__cur_item);
   595   }
   596   return __result;
   597 }
   598 
   599 /* We allocate memory in large chunks in order to avoid fragmenting     */
   600 /* the malloc heap too much.                                            */
   601 /* We assume that size is properly aligned.                             */
   602 char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
   603 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   604   // We are going to add a small memory block to keep track of all the allocated
   605   // blocks' addresses; we need to do so while respecting the memory alignment.
   606   // The following static assert checks that the reserved block is big enough to store a pointer.
   607   _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
   608 #  endif
   609   char*  __result       = 0;
   610   __stl_atomic_t __total_bytes  = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
   611 
   612   _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
   613   if (__block != 0) {
   614     // We checked a block out and can now mess with it with impunity.
   615     // We'll put the remainder back into the list if we're done with it below.
   616     char*  __buf_start  = __REINTERPRET_CAST(char*, __block);
   617     __stl_atomic_t __bytes_left = __block->_M_end - __buf_start;
   618 
   619     if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__stl_atomic_t, _p_size))) {
   620       // There's enough left for at least one object, but not as much as we wanted
   621       __result      = __buf_start;
   622       __nobjs       = (int)(__bytes_left/_p_size);
   623       __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
   624       __bytes_left -= __total_bytes;
   625       __buf_start  += __total_bytes;
   626     }
   627     else if (__bytes_left >= __total_bytes) {
   628       // The block has enough left to satisfy all that was asked for
   629       __result      = __buf_start;
   630       __bytes_left -= __total_bytes;
   631       __buf_start  += __total_bytes;
   632     }
   633 
   634     if (__bytes_left != 0) {
   635       // There is still some memory left over in block after we satisfied our request.
   636       if ((__result != 0) && (__bytes_left >= sizeof(_FreeBlockHeader))) {
   637         // We were able to allocate at least one object and there is still enough
   638         // left to put remainder back into list.
   639         _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
   640         __newblock->_M_end  = __block->_M_end;
   641         _S_free_mem_blocks.push(__newblock);
   642       }
   643       else {
   644         // We were not able to allocate enough for at least one object.
   645         // Shove into freelist of nearest (rounded-down!) size.
   646         size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
   647         if (__rounded_down > 0)
   648           _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
   649       }
   650     }
   651     if (__result != 0)
   652       return __result;
   653   }
   654 
   655   // We couldn't satisfy it from the list of free blocks, get new memory.
   656   __stl_atomic_t __bytes_to_get = 2 * __total_bytes + __STATIC_CAST(__stl_atomic_t, _S_round_up(_S_heap_size >> 4))
   657 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   658     + _ALIGN
   659 #  endif
   660     ;
   661 
   662   __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
   663   // Alignment check
   664   _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)
   665 
   666   if (0 == __result) {
   667     // Allocation failed; try to cannibalize from the freelist of a larger object size.
   668     for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
   669       _Obj* __p  = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
   670       if (0 != __p) {
   671         if (__i < sizeof(_FreeBlockHeader)) {
   672           // Not enough to put into list of free blocks, divvy it up here.
   673           // Use as much as possible for this request and shove remainder into freelist.
   674           __nobjs = (int)(__i/_p_size);
   675           __total_bytes = __nobjs * __STATIC_CAST(__stl_atomic_t, _p_size);
   676           size_t __bytes_left = __i - __total_bytes;
   677           size_t __rounded_down = _S_round_up(__bytes_left+1) - (size_t)_ALIGN;
   678           if (__rounded_down > 0) {
   679             _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
   680           }
   681           return __REINTERPRET_CAST(char*, __p);
   682         }
   683         else {
   684           // Add node to list of available blocks and recursively allocate from it.
   685           _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
   686           __newblock->_M_end  = __REINTERPRET_CAST(char*, __p) + __i;
   687           _S_free_mem_blocks.push(__newblock);
   688           return _S_chunk_alloc(_p_size, __nobjs);
   689         }
   690       }
   691     }
   692 
   693     // We were not able to find anything in a freelist; try to allocate a smaller amount.
   694     __bytes_to_get  = __total_bytes
   695 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   696       + _ALIGN
   697 #  endif
   698       ;
   699     __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
   700     // Alignment check
   701     _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)
   702 
   703     // This should either throw an exception or remedy the situation.
   704     // Thus we assume it succeeded.
   705   }
   706 
   707   _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get);
   708 
   709 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   710   // We have to track the allocated memory chunks for release on exit.
   711   _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
   712   __result       += _ALIGN;
   713   __bytes_to_get -= _ALIGN;
   714 #  endif
   715 
   716   if (__bytes_to_get > __total_bytes) {
   717     // Push excess memory allocated in this chunk into list of free memory blocks
   718     _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
   719     __freeblock->_M_end  = __result + __bytes_to_get;
   720     _S_free_mem_blocks.push(__freeblock);
   721   }
   722   return __result;
   723 }
   724 
   725 #  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   726 void __node_alloc_impl::_S_alloc_call()
   727 { _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }
   728 
   729 void __node_alloc_impl::_S_dealloc_call() {
   730   _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
   731   if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
   732     _S_chunk_dealloc();
   733 }
   734 
   735 /* We deallocate all the memory chunks      */
   736 void __node_alloc_impl::_S_chunk_dealloc() {
   737   // Note: The _Node_alloc_helper class ensures that this function
   738   // will only be called when the (shared) library is unloaded or the
   739   // process is shut down.  It's thus not possible that another thread
   740   // is currently trying to allocate a node (we're not thread-safe here).
   741   //
   742 
   743   // Clear the free blocks and all freelists.  This keeps the allocator in a
   744   // consistent state in case more memory is allocated again during shutdown
   745   // (it would also be really nasty to leave references to deallocated memory).
   746   _S_free_mem_blocks.clear();
   747   _S_heap_size      = 0;
   748 
   749   for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
   750     _S_free_list[__i].clear();
   751   }
   752 
   753   // Detach list of chunks and free them all
   754   _Obj* __chunk = _S_chunks.clear();
   755   while (__chunk != 0) {
   756     _Obj* __next = __chunk->_M_next;
   757     __stlp_chunck_free(__chunk);
   758     __chunk  = __next;
   759   }
   760 }
   761 #  endif /* _STLP_DO_CLEAN_NODE_ALLOC */
   762 
   763 #endif /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */
   764 
   765 #if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   766 struct __node_alloc_cleaner {
   767   ~__node_alloc_cleaner()
   768       {
   769       __node_alloc_impl::_S_dealloc_call(); 
   770       }
   771 };
   772 
   773 #  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   774 _STLP_VOLATILE __stl_atomic_t& _STLP_CALL
   775 #  else
   776 __stl_atomic_t& _STLP_CALL
   777 #  endif
   778 __node_alloc_impl::_S_alloc_counter() {
   779   static _AllocCounter _S_counter = 1;
   780   static __node_alloc_cleaner _S_node_alloc_cleaner;
   781   return _S_counter;
   782 }
   783 #endif
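// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): a minimal model of the
// counter-plus-cleaner idiom above. The counter starts at 1; every allocation
// increments it and every deallocation decrements it; the static cleaner
// object contributes the final decrement during static destruction, so the
// pool is released only once both the last node has been freed and the
// library/program is shutting down. All demo_* names are for this example.
#if 0
#include <cstdio>

static int demo_counter = 1;                        // stands in for _S_alloc_counter()

static void demo_release_pool()  { std::printf("pool released\n"); }
static void demo_alloc_call()    { ++demo_counter; }
static void demo_dealloc_call()  { if (--demo_counter == 0) demo_release_pool(); }

struct demo_cleaner {
  ~demo_cleaner() { demo_dealloc_call(); }          // mirrors __node_alloc_cleaner
};
static demo_cleaner demo_cleaner_instance;          // destroyed at exit / library unload
#endif
// ---------------------------------------------------------------------------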
   784 
   785 #if !defined(__SYMBIAN32__WSD__)
   786 #if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   787 _Node_alloc_obj * _STLP_VOLATILE
   788 __node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
   789 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
   790 // The 16 zeros are necessary to make version 4.1 of the SunPro
   791 // compiler happy.  Otherwise it appears to allocate too little
   792 // space for the array.
   793 #else
   794 _STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
   795 _STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
   796 #endif
   797 
   798 #if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   799 char *__node_alloc_impl::_S_start_free = 0;
   800 char *__node_alloc_impl::_S_end_free = 0;
   801 #endif
   802 
   803 #if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   804 _STLP_VOLATILE __stl_atomic_t
   805 #else
   806 size_t
   807 #endif
   808 __node_alloc_impl::_S_heap_size = 0;
   809 #endif //__SYMBIAN32__WSD__
   810 
   811 #if defined (_STLP_DO_CLEAN_NODE_ALLOC)
   812 #  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
   813 _STLP_atomic_freelist __node_alloc_impl::_S_chunks;
   814 #  else
   815 _Node_alloc_obj* __node_alloc_impl::_S_chunks  = 0;
   816 #  endif
   817 #endif
   818 
   819 _STLP_DECLSPEC void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
   820 { return __node_alloc_impl::_M_allocate(__n); }
   821 
   822 _STLP_DECLSPEC void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
   823 { __node_alloc_impl::_M_deallocate(__p, __n); }
   824 
   825 #if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
   826 
   827 #  define _STLP_DATA_ALIGNMENT 8
   828 
   829 _STLP_MOVE_TO_PRIV_NAMESPACE
   830 
   831 // *******************************************************
   832 // __perthread_alloc implementation
   833 union _Pthread_alloc_obj {
   834   union _Pthread_alloc_obj * __free_list_link;
   835   char __client_data[_STLP_DATA_ALIGNMENT];    /* The client sees this.    */
   836 };
   837 
   838 // Pthread allocators don't appear to the client to have meaningful
   839 // instances.  We do in fact need to associate some state with each
   840 // thread.  That state is represented by _Pthread_alloc_per_thread_state.
   841 
   842 struct _Pthread_alloc_per_thread_state {
   843   typedef _Pthread_alloc_obj __obj;
   844   enum { _S_NFREELISTS = _MAX_BYTES / _STLP_DATA_ALIGNMENT };
   845 
   846   // Free list link for list of available per thread structures.
   847   // When one of these becomes available for reuse due to thread
   848   // termination, any objects in its free list remain associated
   849   // with it.  The whole structure may then be used by a newly
   850   // created thread.
   851   _Pthread_alloc_per_thread_state() : __next(0)
   852   { memset((void *)__CONST_CAST(_Pthread_alloc_obj**, __free_list), 0, (size_t)_S_NFREELISTS * sizeof(__obj *)); }
   853   // Returns an object of size __n, and possibly adds to size n free list.
   854   void *_M_refill(size_t __n);
   855 
   856   _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
   857   _Pthread_alloc_per_thread_state *__next;
   858   // this data member is only to be used by per_thread_allocator, which returns memory to the originating thread.
   859   _STLP_mutex _M_lock;
   860 };
   861 
   862 // Pthread-specific allocator.
   863 class _Pthread_alloc_impl {
   864 public: // but only for internal use:
   865   typedef _Pthread_alloc_per_thread_state __state_type;
   866   typedef char value_type;
   867 
   868   // Allocates a chunk for nobjs of size size.  nobjs may be reduced
   869   // if it is inconvenient to allocate the requested number.
   870   static char *_S_chunk_alloc(size_t __size, size_t &__nobjs, __state_type*);
   871 
   872   enum {_S_ALIGN = _STLP_DATA_ALIGNMENT};
   873 
   874   static size_t _S_round_up(size_t __bytes)
   875   { return (((__bytes) + (int)_S_ALIGN - 1) & ~((int)_S_ALIGN - 1)); }
   876   static size_t _S_freelist_index(size_t __bytes)
   877   { return (((__bytes) + (int)_S_ALIGN - 1) / (int)_S_ALIGN - 1); }
   878 
   879 private:
   880   // Chunk allocation state. And other shared state.
   881   // Protected by _S_chunk_allocator_lock.
   882 #if defined(__SYMBIAN32__WSD__)
   883 public:	
   884 	static void pt_wsd_init() {
   885 		get_S_free_per_thread_states() = 0;
   886 		get_S_key() = 0;
   887 		get_S_chunk_allocator_lock()._M_lock.iState   = _ENeedsNormalInit;
   888 		get_S_chunk_allocator_lock()._M_lock.iPtr     = 0;
   889 		get_S_chunk_allocator_lock()._M_lock.iReentry = 0;
   890 		get_S_key_initialized() = false;
   891 		get_S_start_free() = 0;
   892 		get_S_end_free() = 0;
   893 		get_S_heap_size() = 0;
   894 	}
   895 private:
   896   static _STLP_STATIC_MUTEX& get_S_chunk_allocator_lock()
   897 	{ return get_libcpp_wsd().wsd_pt_S_chunk_allocator_lock; }
   898   static char*& get_S_start_free()
   899   	{ return get_libcpp_wsd().wsd_pt_S_start_free; }
   900   static char*& get_S_end_free()
   901   	{ return get_libcpp_wsd().wsd_pt_S_end_free; }
   902   static size_t& get_S_heap_size()
   903   	{ return get_libcpp_wsd().wsd_pt_S_heap_size; }
   904   static __state_type*& get_S_free_per_thread_states()
   905 	{ return get_libcpp_wsd().wsd_pt_S_free_per_thread_states; }
   906   static pthread_key_t& get_S_key()
   907   	{ return get_libcpp_wsd().wsd_pt_S_key; }
   908   static bool& get_S_key_initialized()
   909   	{ return get_libcpp_wsd().wsd_pt_S_key_initialized; }
   910 #else
   911   static _STLP_STATIC_MUTEX _S_chunk_allocator_lock;
   912   static char *_S_start_free;
   913   static char *_S_end_free;
   914   static size_t _S_heap_size;
   915   static __state_type *_S_free_per_thread_states;
   916   static pthread_key_t _S_key;
   917   static bool _S_key_initialized;
   918 #endif
   919   // _S_key is the pthread key under which the per thread state is stored;
   920   // _S_free_per_thread_states holds instances currently unclaimed by any thread.
   921   // Function to be called on thread exit to reclaim the per thread state:
   922   static void _S_destructor(void *instance);
   923   // Returns a recycled or newly created per thread state (lock must be held):
   924   static __state_type *_S_new_per_thread_state();
   925 public:
   926   // Return a recycled or new per thread state.
   927   static __state_type *_S_get_per_thread_state();
   928 private:
   929         // ensure that the current thread has an associated
   930         // per thread state.
   931   class _M_lock;
   932   friend class _M_lock;
   933   class _M_lock {
   934   public:
   935     _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
   936     ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
   937   };
   938 
   939 public:
   940 
   941   /* n must be > 0      */
   942   static void * allocate(size_t& __n);
   943 
   944   /* p may not be 0 */
   945   static void deallocate(void *__p, size_t __n);
   946 
   947   // boris : versions for per_thread_allocator
   948   /* n must be > 0      */
   949   static void * allocate(size_t& __n, __state_type* __a);
   950 
   951   /* p may not be 0 */
   952   static void deallocate(void *__p, size_t __n, __state_type* __a);
   953 
   954   static void * reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
   955 };
   956 
   957 /* Returns an object of size n, and optionally adds to size n free list.*/
   958 /* We assume that n is properly aligned.                                */
   959 /* We hold the allocation lock.                                         */
   960 void *_Pthread_alloc_per_thread_state::_M_refill(size_t __n) {
   961   typedef _Pthread_alloc_obj __obj;
   962   size_t __nobjs = 128;
   963   char * __chunk = _Pthread_alloc_impl::_S_chunk_alloc(__n, __nobjs, this);
   964   __obj * volatile * __my_free_list;
   965   __obj * __result;
   966   __obj * __current_obj, * __next_obj;
   967   size_t __i;
   968 
   969   if (1 == __nobjs)  {
   970     return __chunk;
   971   }
   972 
   973   __my_free_list = __free_list + _Pthread_alloc_impl::_S_freelist_index(__n);
   974 
   975   /* Build free list in chunk */
   976   __result = (__obj *)__chunk;
   977   *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
   978   for (__i = 1; ; ++__i) {
   979     __current_obj = __next_obj;
   980     __next_obj = (__obj *)((char *)__next_obj + __n);
   981     if (__nobjs - 1 == __i) {
   982       __current_obj -> __free_list_link = 0;
   983       break;
   984     } else {
   985       __current_obj -> __free_list_link = __next_obj;
   986     }
   987   }
   988   return __result;
   989 }
   990 
   991 void _Pthread_alloc_impl::_S_destructor(void *__instance) {
   992   _M_lock __lock_instance;  // Need to acquire lock here.
   993   _Pthread_alloc_per_thread_state* __s = (_Pthread_alloc_per_thread_state*)__instance;
   994   __s -> __next = _S_free_per_thread_states;
   995   _S_free_per_thread_states = __s;
   996 }
   997 
   998 _Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_new_per_thread_state() {
   999   /* lock already held here.  */
  1000   if (0 != _S_free_per_thread_states) {
  1001     _Pthread_alloc_per_thread_state *__result = _S_free_per_thread_states;
  1002     _S_free_per_thread_states = _S_free_per_thread_states -> __next;
  1003     return __result;
  1004   }
  1005   else {
  1006     return _STLP_NEW _Pthread_alloc_per_thread_state;
  1007   }
  1008 }
  1009 
  1010 _Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_get_per_thread_state() {
  1011   int __ret_code;
  1012   __state_type* __result;
  1013 
  1014   if (_S_key_initialized && ((__result = (__state_type*) pthread_getspecific(_S_key)) != NULL))
  1015     return __result;
  1016 
  1017   /*REFERENCED*/
  1018   _M_lock __lock_instance;  // Need to acquire lock here.
  1019   if (!_S_key_initialized) {
  1020     if (pthread_key_create(&_S_key, _S_destructor)) {
  1021       __THROW_BAD_ALLOC;  // failed
  1022     }
  1023     _S_key_initialized = true;
  1024   }
  1025 
  1026   __result = _S_new_per_thread_state();
  1027   __ret_code = pthread_setspecific(_S_key, __result);
  1028   if (__ret_code) {
  1029     if (__ret_code == ENOMEM) {
  1030       __THROW_BAD_ALLOC;
  1031     } else {
  1032   // EINVAL
  1033       _STLP_ABORT();
  1034     }
  1035   }
  1036   return __result;
  1037 }
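// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): the plain pthread TLS
// pattern behind _S_get_per_thread_state() -- create the key once, register a
// destructor that runs at thread exit, and lazily create per-thread state on
// first use. pthread_once is used here for brevity where the code above
// reuses the chunk-allocator mutex. All demo_* names are assumptions.
#if 0
#include <pthread.h>
#include <cstdlib>

struct demo_state { int _M_dummy; };

static pthread_key_t  demo_key;
static pthread_once_t demo_once = PTHREAD_ONCE_INIT;

static void demo_thread_exit_hook(void* __p) { std::free(__p); }   // reclaim at thread exit
static void demo_make_key() { pthread_key_create(&demo_key, demo_thread_exit_hook); }

static demo_state* demo_get_state()
{
  pthread_once(&demo_once, demo_make_key);
  demo_state* __s = static_cast<demo_state*>(pthread_getspecific(demo_key));
  if (__s == 0) {
    __s = static_cast<demo_state*>(std::calloc(1, sizeof(demo_state)));
    pthread_setspecific(demo_key, __s);
  }
  return __s;
}
#endif
// ---------------------------------------------------------------------------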
  1038 
  1039 /* We allocate memory in large chunks in order to avoid fragmenting     */
  1040 /* the malloc heap too much.                                            */
  1041 /* We assume that size is properly aligned.                             */
  1042 char *_Pthread_alloc_impl::_S_chunk_alloc(size_t __p_size, size_t &__nobjs, _Pthread_alloc_per_thread_state *__a) {
  1043   typedef _Pthread_alloc_obj __obj;
  1044   {
  1045     char * __result;
  1046     size_t __total_bytes;
  1047     size_t __bytes_left;
  1048     /*REFERENCED*/
  1049     _M_lock __lock_instance;         // Acquire lock for this routine
  1050 
  1051     __total_bytes = __p_size * __nobjs;
  1052     __bytes_left = _S_end_free - _S_start_free;
  1053     if (__bytes_left >= __total_bytes) {
  1054       __result = _S_start_free;
  1055       _S_start_free += __total_bytes;
  1056       return __result;
  1057     } else if (__bytes_left >= __p_size) {
  1058       __nobjs = __bytes_left/__p_size;
  1059       __total_bytes = __p_size * __nobjs;
  1060       __result = _S_start_free;
  1061       _S_start_free += __total_bytes;
  1062       return __result;
  1063     } else {
  1064       size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
  1065       // Try to make use of the left-over piece.
  1066       if (__bytes_left > 0) {
  1067         __obj * volatile * __my_free_list = __a->__free_list + _S_freelist_index(__bytes_left);
  1068         ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
  1069         *__my_free_list = (__obj *)_S_start_free;
  1070       }
  1071 #  ifdef _SGI_SOURCE
  1072       // Try to get memory that's aligned on something like a
  1073       // cache line boundary, so as to avoid parceling out
  1074       // parts of the same line to different threads and thus
  1075       // possibly different processors.
  1076       {
  1077         const int __cache_line_size = 128;  // probable upper bound
  1078         __bytes_to_get &= ~(__cache_line_size-1);
  1079         _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
  1080         if (0 == _S_start_free) {
  1081           _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
  1082         }
  1083       }
  1084 #  else  /* !SGI_SOURCE */
  1085       _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
  1086 #  endif
  1087       _S_heap_size += __bytes_to_get;
  1088       _S_end_free = _S_start_free + __bytes_to_get;
  1089     }
  1090   }
  1091   // lock is released here
  1092   return _S_chunk_alloc(__p_size, __nobjs, __a);
  1093 }
  1094 
  1095 
  1096 /* n must be > 0      */
  1097 void *_Pthread_alloc_impl::allocate(size_t& __n) {
  1098   typedef _Pthread_alloc_obj __obj;
  1099   __obj * volatile * __my_free_list;
  1100   __obj * __result;
  1101   __state_type* __a;
  1102 
  1103   if (__n > _MAX_BYTES) {
  1104     return __malloc_alloc::allocate(__n);
  1105   }
  1106 
  1107   __n = _S_round_up(__n);
  1108   __a = _S_get_per_thread_state();
  1109 
  1110   __my_free_list = __a->__free_list + _S_freelist_index(__n);
  1111   __result = *__my_free_list;
  1112   if (__result == 0) {
  1113     void *__r = __a->_M_refill(__n);
  1114     return __r;
  1115   }
  1116   *__my_free_list = __result->__free_list_link;
  1117   return __result;
  1118 }
  1119 
  1120 /* p may not be 0 */
  1121 void _Pthread_alloc_impl::deallocate(void *__p, size_t __n) {
  1122   typedef _Pthread_alloc_obj __obj;
  1123   __obj *__q = (__obj *)__p;
  1124   __obj * volatile * __my_free_list;
  1125   __state_type* __a;
  1126 
  1127   if (__n > _MAX_BYTES) {
  1128       __malloc_alloc::deallocate(__p, __n);
  1129       return;
  1130   }
  1131 
  1132   __a = _S_get_per_thread_state();
  1133 
  1134   __my_free_list = __a->__free_list + _S_freelist_index(__n);
  1135   __q -> __free_list_link = *__my_free_list;
  1136   *__my_free_list = __q;
  1137 }
  1138 
  1139 // boris : versions for per_thread_allocator
  1140 /* n must be > 0      */
  1141 void *_Pthread_alloc_impl::allocate(size_t& __n, __state_type* __a) {
  1142   typedef _Pthread_alloc_obj __obj;
  1143   __obj * volatile * __my_free_list;
  1144   __obj * __result;
  1145 
  1146   if (__n > _MAX_BYTES) {
  1147     return __malloc_alloc::allocate(__n);
  1148   }
  1149   __n = _S_round_up(__n);
  1150 
  1151   // boris : here, we have to lock the per thread state, as we may be getting memory
  1152   // from a different thread's pool.
  1153   _STLP_auto_lock __lock(__a->_M_lock);
  1154 
  1155   __my_free_list = __a->__free_list + _S_freelist_index(__n);
  1156   __result = *__my_free_list;
  1157   if (__result == 0) {
  1158     void *__r = __a->_M_refill(__n);
  1159     return __r;
  1160   }
  1161   *__my_free_list = __result->__free_list_link;
  1162   return __result;
  1163 }
  1164 
  1165 /* p may not be 0 */
  1166 void _Pthread_alloc_impl::deallocate(void *__p, size_t __n, __state_type* __a) {
  1167   typedef _Pthread_alloc_obj __obj;
  1168   __obj *__q = (__obj *)__p;
  1169   __obj * volatile * __my_free_list;
  1170 
  1171   if (__n > _MAX_BYTES) {
  1172     __malloc_alloc::deallocate(__p, __n);
  1173     return;
  1174   }
  1175 
  1176   // boris : here, we have to lock the per thread state, as we may be returning memory
  1177   // to a different thread's pool.
  1178   _STLP_auto_lock __lock(__a->_M_lock);
  1179 
  1180   __my_free_list = __a->__free_list + _S_freelist_index(__n);
  1181   __q -> __free_list_link = *__my_free_list;
  1182   *__my_free_list = __q;
  1183 }
  1184 
  1185 void *_Pthread_alloc_impl::reallocate(void *__p, size_t __old_sz, size_t& __new_sz) {
  1186   void * __result;
  1187   size_t __copy_sz;
  1188 
  1189   if (__old_sz > _MAX_BYTES && __new_sz > _MAX_BYTES) {
  1190     return realloc(__p, __new_sz);
  1191   }
  1192 
  1193   if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return __p;
  1194   __result = allocate(__new_sz);
  1195   __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
  1196   memcpy(__result, __p, __copy_sz);
  1197   deallocate(__p, __old_sz);
  1198   return __result;
  1199 }
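// ---------------------------------------------------------------------------
// Added illustrative sketch (excluded from the build): a hypothetical client
// view of this engine through the pthread_allocator<T> adaptor declared in
// <pthread_alloc> (the adaptor name is an assumption; it forwards to
// _Pthread_alloc above). Small node allocations are served from the calling
// thread's own free lists, so the common path takes no lock.
#if 0
#include <pthread_alloc>
#include <list>

static void demo_per_thread_list()
{
  std::list<int, std::pthread_allocator<int> > __l;
  for (int __i = 0; __i < 1000; ++__i)
    __l.push_back(__i);      // node memory comes from this thread's pool
}
#endif
// ---------------------------------------------------------------------------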
  1200 #if !defined(__SYMBIAN32__WSD__)
  1201 _Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_free_per_thread_states = 0;
  1202 pthread_key_t _Pthread_alloc_impl::_S_key = 0;
  1203 _STLP_STATIC_MUTEX _Pthread_alloc_impl::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
  1204 bool _Pthread_alloc_impl::_S_key_initialized = false;
  1205 char *_Pthread_alloc_impl::_S_start_free = 0;
  1206 char *_Pthread_alloc_impl::_S_end_free = 0;
  1207 size_t _Pthread_alloc_impl::_S_heap_size = 0;
  1208 #else
  1209 
  1210 inline __oom_handler_type& __malloc_alloc_impl::get_oom_handler()
  1211 	{
  1212 	return get_libcpp_wsd().wsd__oom_handler;
  1213 	}
  1214 
  1215 inline __node_alloc_impl::_Freelist* __node_alloc_impl::get_S_free_list()
  1216 	{
  1217 	return (__node_alloc_impl::_Freelist*)get_libcpp_wsd().wsd_S_free_list;
  1218 	}
  1219 
  1220 inline size_t& __node_alloc_impl::get_S_heap_size()
  1221 	{
  1222 	return get_libcpp_wsd().wsd__node_alloc_impl_S_heap_size;
  1223 	}
  1224 
  1225 inline char*& __node_alloc_impl::get_S_start_free()
  1226 	{
  1227 	return get_libcpp_wsd().wsd_S_start_free;
  1228 	}
  1229 
  1230 inline char*& __node_alloc_impl::get_S_end_free()
  1231 	{
  1232 	return get_libcpp_wsd().wsd_S_end_free;
  1233 	}
  1234 
  1235 inline _STLP_STATIC_MUTEX& _Node_Alloc_Lock::get_allocator_S_lock()
  1236 	{
  1237 	return get_libcpp_wsd().wsd_allocator_S_lock;
  1238 	}
  1239 
  1240 #endif
  1241 
  1242 void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n)
  1243 { return _Pthread_alloc_impl::allocate(__n); }
  1244 void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n)
  1245 { _Pthread_alloc_impl::deallocate(__p, __n); }
  1246 void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n, __state_type* __a)
  1247 { return _Pthread_alloc_impl::allocate(__n, __a); }
  1248 void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n, __state_type* __a)
  1249 { _Pthread_alloc_impl::deallocate(__p, __n, __a); }
  1250 void * _STLP_CALL _Pthread_alloc::reallocate(void *__p, size_t __old_sz, size_t& __new_sz)
  1251 { return _Pthread_alloc_impl::reallocate(__p, __old_sz, __new_sz); }
  1252 _Pthread_alloc_per_thread_state* _STLP_CALL _Pthread_alloc::_S_get_per_thread_state()
  1253 { return _Pthread_alloc_impl::_S_get_per_thread_state(); }
  1254 
  1255 _STLP_MOVE_TO_STD_NAMESPACE
  1256 
  1257 #endif
  1258 
  1259 _STLP_END_NAMESPACE
  1260 
  1261 
  1262 #if defined(__SYMBIAN32__WSD__)
  1263 // To be called from the stdcpp library initialization (to initialize WSD).
  1264 void stdcpp_allocators_init()
  1265 	{
  1266 	// init oom handler	
  1267 	std::__malloc_alloc_impl::get_oom_handler() = NULL;
  1268 	
  1269 	// lock init
  1270 	stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iState   = _ENeedsNormalInit;
  1271 	stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iPtr     = 0;
  1272 	stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iReentry = 0;
  1273 
  1274 	// init _node_alloc_impl::x
  1275 	stlp_priv::__node_alloc_impl::get_S_heap_size() = 0;	
  1276 	stlp_priv::__node_alloc_impl::get_S_start_free() = 0;
  1277 	stlp_priv::__node_alloc_impl::get_S_end_free()	  = 0;
  1278 	
  1279 	// initialize free list
  1280 	for (int count = 0; count < _STLP_NFREELISTS; count++)
  1281 		stlp_priv::__node_alloc_impl::_S_free_list[count] = 0;
  1282 
  1283 	//pthread_alloc_impl
  1284 	stlp_priv::_Pthread_alloc_impl::pt_wsd_init();
  1285 	}
  1286 #endif
  1287 
  1288 #undef _S_FREELIST_INDEX