os/ossrv/genericopenlibs/cppstdlib/stl/src/allocators.cpp
/*
 * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1997
 * Moscow Center for SPARC Technology
 *
 * Copyright (c) 1999
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

#include "stlport_prefix.h"

#include <memory>

#if defined (__GNUC__) && (defined (__CYGWIN__) || defined (__MINGW32__)) && (!defined (__SYMBIAN32__))
#  include <malloc.h>
//#  define _STLP_MALLOC_USABLE_SIZE(__buf) malloc_usable_size(__buf)
#endif

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)
#  include <pthread_alloc>
#  include <cerrno>
#endif

#include <stl/_threads.h>

#include "lock_free_slist.h"

#if defined(__SYMBIAN32__WSD__)
#include "libstdcppwsd.h"

#define __oom_handler 	get_oom_handler()
#define _S_lock 		get_allocator_S_lock()
#define _S_heap_size 	get_S_heap_size()
#define _S_start_free 	get_S_start_free()
#define _S_end_free		get_S_end_free()
#define _S_free_list 	get_S_free_list()
#define _S_chunk_allocator_lock 	get_S_chunk_allocator_lock()
#define _S_free_per_thread_states	get_S_free_per_thread_states()
#define _S_key						get_S_key()
#define _S_key_initialized			get_S_key_initialized()
#endif

#ifdef __SYMBIAN32__
extern "C"
{
IMPORT_C void* BackendAlloc(size_t );
IMPORT_C void BackendFree(void* );
}


EXPORT_C void* backend_allocate(size_t __n)
    {
    for (;;)
        {
        void* p = BackendAlloc(__n);

        if (p)
            {
            return p;
            }

        // set_new_handler uses Dll::Tls, so only this thread's new handler is
        // changed until it is set back.  No problem for other threads.
        std::new_handler nh_func  = std::set_new_handler(NULL);
        std::set_new_handler(nh_func);

        if (nh_func)
            {
            nh_func();
            }
        else
            {
            __THROW(std::bad_alloc());
            }
        }
    }

EXPORT_C void  backend_free(void* __p)
    {
    BackendFree(__p);
    }
#endif

#if defined (__WATCOMC__)
#  pragma warning 13 9
#  pragma warning 367 9
#  pragma warning 368 9
#endif

#if defined (_STLP_SGI_THREADS)
  // We test whether threads are in use before locking.
  // Perhaps this should be moved into stl_threads.h, but that
  // probably makes it harder to avoid the procedure call when
  // it isn't needed.
extern "C" {
  extern int __us_rsthread_malloc;
}
#endif

// Specialised debug form of malloc which does not provide "false"
// memory leaks when run with debug CRT libraries.
#if defined (_STLP_MSVC) && (_STLP_MSVC >= 1020 && defined (_STLP_DEBUG_ALLOC)) && !defined (_STLP_WCE)
#  include <crtdbg.h>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
inline void __stlp_chunck_free(void* __p) { _free_dbg(__p, _CRT_BLOCK); }
#else  // !_DEBUG
#  ifdef _STLP_NODE_ALLOC_USE_MALLOC
#    include <cstdlib>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
inline void __stlp_chunck_free(void* __p) { _STLP_VENDOR_CSTD::free(__p); }
#  else
inline void* __stlp_chunk_malloc(size_t __bytes) {
    return _STLP_STD::__stl_new(__bytes);
}
inline void __stlp_chunck_free(void* __p) {
    _STLP_STD::__stl_delete(__p);
}

#  endif
#endif  // !_DEBUG

#define _S_FREELIST_INDEX(__bytes) ((__bytes - size_t(1)) >> (int)_ALIGN_SHIFT)

_STLP_BEGIN_NAMESPACE

class __malloc_alloc_impl {
private:
  static void* _S_oom_malloc(size_t __n) {
    __oom_handler_type __my_malloc_handler;
    void * __result;

    for (;;) {
      __my_malloc_handler = __oom_handler;
      if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
      (*__my_malloc_handler)();
      __result = malloc(__n);
      if (__result) return(__result);
    }
#if defined (_STLP_NEED_UNREACHABLE_RETURN)
    return 0;
#endif
  }
#if defined(__SYMBIAN32__WSD__)
  static _STLP_STATIC_MEMBER_DECLSPEC __oom_handler_type& get_oom_handler();
#else
  static __oom_handler_type __oom_handler;
#endif
public:
  // this one is needed for proper simple_alloc wrapping
  typedef char value_type;
  static void* allocate(size_t& __n) {
    void* __result = malloc(__n);
    if (0 == __result) {
      __result = _S_oom_malloc(__n);
    }
#if defined (_STLP_MALLOC_USABLE_SIZE)
    else {
      size_t __new_n = _STLP_MALLOC_USABLE_SIZE(__result);
      /*
      if (__n != __new_n) {
        printf("requested size %d, usable %d\n", __n, __new_n);
      }
      */
      __n = __new_n;
    }
#endif
    return __result;
  }
  static void deallocate(void* __p, size_t /* __n */) { free((char*)__p); }
  static __oom_handler_type set_malloc_handler(__oom_handler_type __f) {
    __oom_handler_type __old = __oom_handler;
    __oom_handler = __f;
    return __old;
  }
#if defined(__SYMBIAN32__WSD__)
  friend void ::stdcpp_allocators_init();
#endif
};

#if !defined(__SYMBIAN32__WSD__)
// malloc_alloc out-of-memory handling
__oom_handler_type __malloc_alloc_impl::__oom_handler = __STATIC_CAST(__oom_handler_type, 0);
#endif

void* _STLP_CALL __malloc_alloc::allocate(size_t& __n)
{ return __malloc_alloc_impl::allocate(__n); }
__oom_handler_type _STLP_CALL __malloc_alloc::set_malloc_handler(__oom_handler_type __f)
{ return __malloc_alloc_impl::set_malloc_handler(__f); }

// *******************************************************
// Default node allocator.
// With a reasonable compiler, this should be roughly as fast as the
// original STL class-specific allocators, but with less fragmentation.
//
// Important implementation properties:
// 1. If the client requests an object of size > _MAX_BYTES, the resulting
//    object will be obtained directly from malloc.
// 2. In all other cases, we allocate an object of size exactly
//    _S_round_up(requested_size).  Thus the client has enough size
//    information that we can return the object to the proper free list
//    without permanently losing part of the object.
//

#define _STLP_NFREELISTS 16

/*
 * On Symbian, STLport is built as a DLL and dynamically linked against by
 * applications, so _STLP_USE_DYNAMIC_LIB should always be defined.
 * _STLP_LEAKS_PEDANTIC is defined to prevent memory leaks in __node_alloc
 * when the library is dynamically loaded and unloaded.
 */
#if defined (_STLP_LEAKS_PEDANTIC) && ( defined (_STLP_USE_DYNAMIC_LIB) || defined (__SYMBIAN32__) )
/*
 * We can only do cleanup of the node allocator memory pool if we are
 * sure that the STLport library is used as a shared one, as that guarantees
 * the uniqueness of the node allocator instance.  Without that guarantee,
 * node allocator instances might exchange memory blocks, making the
 * implementation of a cleaning process much more complicated.
 */
#  define _STLP_DO_CLEAN_NODE_ALLOC
#endif

/* When STLport is used without multi-threaded safety we use the node allocator
 * implementation with locks, as the locks become no-ops.  The lock-free
 * implementation always uses system-specific atomic operations, which are
 * slower than 'normal' ones.
 */
#if defined (_STLP_THREADS) && \
    defined (_STLP_HAS_ATOMIC_FREELIST) && defined (_STLP_ATOMIC_ADD)
/*
 * We have an implementation of the atomic freelist (_STLP_atomic_freelist)
 * for this architecture and compiler.  That means we can use the non-blocking
 * implementation of the node-allocation engine.*/
#  define _STLP_USE_LOCK_FREE_IMPLEMENTATION
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
#  if defined (_STLP_THREADS)

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() {
#  if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#  endif
      _S_lock._M_acquire_lock();
  }

  ~_Node_Alloc_Lock() {
#  if defined (_STLP_SGI_THREADS)
    if (__us_rsthread_malloc)
#  endif
        _S_lock._M_release_lock();
  }
#if defined (__SYMBIAN32__WSD__)
  static _STLP_STATIC_MUTEX& get_allocator_S_lock();
#else
  static _STLP_STATIC_MUTEX _S_lock;
#endif
};

#if !defined(__SYMBIAN32__WSD__)
_STLP_STATIC_MUTEX _Node_Alloc_Lock::_S_lock _STLP_MUTEX_INITIALIZER;
#endif

#  else

class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() { }
  ~_Node_Alloc_Lock() { }
};

#  endif

struct _Node_alloc_obj {
  _Node_alloc_obj * _M_next;
};
#endif

class __node_alloc_impl {
_STLP_PRIVATE:
  static inline size_t _STLP_CALL _S_round_up(size_t __bytes)
  { return (((__bytes) + (size_t)_ALIGN-1) & ~((size_t)_ALIGN - 1)); }

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_atomic_freelist::item   _Obj;
  typedef _STLP_atomic_freelist         _Freelist;
  typedef _STLP_atomic_freelist         _ChunkList;

  // Header of blocks of memory that have been allocated as part of
  // a larger chunk but have not yet been chopped up into nodes.
  struct _FreeBlockHeader : public _STLP_atomic_freelist::item {
    char* _M_end;     // pointer to end of free memory
  };
#else
  typedef _Node_alloc_obj       _Obj;
  typedef _Obj* _STLP_VOLATILE  _Freelist;
  typedef _Obj*                 _ChunkList;
#endif

private:
  // Returns an object of size __n, and optionally adds to size __n free list.
  static _Obj* _S_refill(size_t __n);
  // Allocates a chunk for nobjs of size __p_size.  nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char* _S_chunk_alloc(size_t __p_size, int& __nobjs);
  // Chunk allocation state.
#if defined(__SYMBIAN32__WSD__)
  static _Freelist* get_S_free_list();
#else
  static _Freelist _S_free_list[_STLP_NFREELISTS];
#endif

  // Amount of total allocated memory
#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  static _STLP_VOLATILE __stl_atomic_t _S_heap_size;
#else
#if defined(__SYMBIAN32__WSD__)
  static size_t& get_S_heap_size();
#else
  static size_t _S_heap_size;
#endif
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  // List of blocks of free memory
  static _STLP_atomic_freelist  _S_free_mem_blocks;
#else
#if defined(__SYMBIAN32__WSD__)
  // Start of the current free memory buffer
  static char*& get_S_start_free();
  // End of the current free memory buffer
  static char*& get_S_end_free();
#else
  // Start of the current free memory buffer
  static char* _S_start_free;
  // End of the current free memory buffer
  static char* _S_end_free;
#endif
#endif

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
public:
  // Methods to report alloc/dealloc calls to the counter system.
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
  typedef _STLP_VOLATILE __stl_atomic_t _AllocCounter;
#  else
  typedef __stl_atomic_t _AllocCounter;
#  endif
  static _AllocCounter& _STLP_CALL _S_alloc_counter();
  static void _S_alloc_call();
  static void _S_dealloc_call();

private:
  // Free all the allocated chunks of memory
  static void _S_chunk_dealloc();
  // Beginning of the linked list of allocated chunks of memory
  static _ChunkList _S_chunks;
#endif /* _STLP_DO_CLEAN_NODE_ALLOC */

public:
  /* __n must be > 0      */
  static void* _M_allocate(size_t& __n);
  /* __p may not be 0 */
  static void _M_deallocate(void *__p, size_t __n);

#if defined(__SYMBIAN32__WSD__)
	friend void ::stdcpp_allocators_init();
#endif
};

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj *__r;

  // Acquire the lock here with a constructor call.
  // This ensures that it is released in exit or during stack
  // unwinding.
  _Node_Alloc_Lock __lock_instance;

  if ( (__r  = *__my_free_list) != 0 ) {
    *__my_free_list = __r->_M_next;
  } else {
    __r = _S_refill(__n);
  }
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  // lock is released here
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj * __pobj = __STATIC_CAST(_Obj*, __p);

  // acquire lock
  _Node_Alloc_Lock __lock_instance;
  __pobj->_M_next = *__my_free_list;
  *__my_free_list = __pobj;

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
  // lock is released here
}

/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
/* We hold the allocation lock.                                         */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
  char* __result;
  size_t __total_bytes = _p_size * __nobjs;
  size_t __bytes_left = _S_end_free - _S_start_free;

  if (__bytes_left > 0) {
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    if (__bytes_left >= _p_size) {
      __nobjs = (int)(__bytes_left / _p_size);
      __total_bytes = _p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    }

    // Try to make use of the left-over piece.
    _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__bytes_left);
    __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = *__my_free_list;
    *__my_free_list = __REINTERPRET_CAST(_Obj*, _S_start_free);
  }

  size_t __bytes_to_get =
    2 * __total_bytes + _S_round_up(_S_heap_size >> 4)
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + sizeof(_Obj)
#  endif
    ;

  _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  if (0 == _S_start_free) {
    _Obj* _STLP_VOLATILE* __my_free_list;
    _Obj* __p;
    // Try to do with what we have.  That can't hurt.
    // We do not try smaller requests, since that tends
    // to result in disaster on multi-process machines.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
      __p = *__my_free_list;
      if (0 != __p) {
        *__my_free_list = __p -> _M_next;
        _S_start_free = __REINTERPRET_CAST(char*, __p);
        _S_end_free = _S_start_free + __i;
        return _S_chunk_alloc(_p_size, __nobjs);
        // Any leftover piece will eventually make it to the
        // right free list.
      }
    }
    _S_end_free = 0;    // In case of exception.
    _S_start_free = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    /*
    (char*)malloc_alloc::allocate(__bytes_to_get);
    */

    // This should either throw an
    // exception or remedy the situation.  Thus we assume it
    // succeeded.
  }

  _S_heap_size += __bytes_to_get;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  __REINTERPRET_CAST(_Obj*, _S_start_free)->_M_next = _S_chunks;
  _S_chunks = __REINTERPRET_CAST(_Obj*, _S_start_free);
#  endif
  _S_end_free = _S_start_free + __bytes_to_get;
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_start_free += sizeof(_Obj);
#  endif
  return _S_chunk_alloc(_p_size, __nobjs);
}

/* Returns an object of size __n, and optionally adds to size __n free list.*/
/* We assume that __n is properly aligned.                                  */
/* We hold the allocation lock.                                             */
_Node_alloc_obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (1 == __nobjs) return __REINTERPRET_CAST(_Obj*, __chunk);

  _Obj* _STLP_VOLATILE* __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  _Obj* __result;
  _Obj* __current_obj;
  _Obj* __next_obj;

  /* Build free list in chunk */
  __result = __REINTERPRET_CAST(_Obj*, __chunk);
  *__my_free_list = __next_obj = __REINTERPRET_CAST(_Obj*, __chunk + __n);
  for (--__nobjs; --__nobjs; ) {
    __current_obj = __next_obj;
    __next_obj = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __next_obj) + __n);
    __current_obj->_M_next = __next_obj;
  }
  __next_obj->_M_next = 0;
  return __result;
}

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ ++_S_alloc_counter(); }

void __node_alloc_impl::_S_dealloc_call() {
  __stl_atomic_t &counter = _S_alloc_counter();
  if (--counter == 0)
  { _S_chunk_dealloc(); }
}

/* We deallocate all the memory chunks      */
void __node_alloc_impl::_S_chunk_dealloc() {
  _Obj *__pcur = _S_chunks, *__pnext;
  while (__pcur != 0) {
    __pnext = __pcur->_M_next;
    __stlp_chunck_free(__pcur);
    __pcur = __pnext;
  }
  _S_chunks = 0;
  _S_start_free = _S_end_free = 0;
  _S_heap_size = 0;
  // reinterpret_cast can't remove the volatile qualifier, so use a C-style cast.
  memset((char*)(&_S_free_list[0]), 0, _STLP_NFREELISTS * sizeof(_Obj*));
}
#  endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#else /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */

void* __node_alloc_impl::_M_allocate(size_t& __n) {
  __n = _S_round_up(__n);
  _Obj* __r = _S_free_list[_S_FREELIST_INDEX(__n)].pop();
  if (__r  == 0)
  { __r = _S_refill(__n); }

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_alloc_call();
#  endif
  return __r;
}

void __node_alloc_impl::_M_deallocate(void *__p, size_t __n) {
  _S_free_list[_S_FREELIST_INDEX(__n)].push(__STATIC_CAST(_Obj*, __p));

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  _S_dealloc_call();
#  endif
}

/* Returns an object of size __n, and optionally adds additional ones to    */
/* freelist of objects of size __n.                                         */
/* We assume that __n is properly aligned.                                  */
__node_alloc_impl::_Obj* __node_alloc_impl::_S_refill(size_t __n) {
  int __nobjs = 20;
  char* __chunk = _S_chunk_alloc(__n, __nobjs);

  if (__nobjs <= 1)
    return __REINTERPRET_CAST(_Obj*, __chunk);

  // Push all new nodes (minus first one) onto freelist
  _Obj* __result   = __REINTERPRET_CAST(_Obj*, __chunk);
  _Obj* __cur_item = __result;
  _Freelist* __my_freelist = _S_free_list + _S_FREELIST_INDEX(__n);
  for (--__nobjs; __nobjs != 0; --__nobjs) {
    __cur_item  = __REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __cur_item) + __n);
    __my_freelist->push(__cur_item);
  }
  return __result;
}

/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
char* __node_alloc_impl::_S_chunk_alloc(size_t _p_size, int& __nobjs) {
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We are going to add a small memory block to keep track of all the
  // allocated blocks' addresses, and we need to do so respecting the memory
  // alignment.  The following static assert checks that the reserved block is
  // big enough to store a pointer.
  _STLP_STATIC_ASSERT(sizeof(_Obj) <= _ALIGN)
#  endif
  char*  __result       = 0;
  __stl_atomic_t __total_bytes  = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;

  _FreeBlockHeader* __block = __STATIC_CAST(_FreeBlockHeader*, _S_free_mem_blocks.pop());
  if (__block != 0) {
    // We checked a block out and can now mess with it with impunity.
    // We'll put the remainder back into the list if we're done with it below.
    char*  __buf_start  = __REINTERPRET_CAST(char*, __block);
    __stl_atomic_t __bytes_left = __block->_M_end - __buf_start;

    if ((__bytes_left < __total_bytes) && (__bytes_left >= __STATIC_CAST(__stl_atomic_t, _p_size))) {
      // There's enough left for at least one object, but not as much as we wanted
      __result      = __buf_start;
      __nobjs       = (int)(__bytes_left/_p_size);
      __total_bytes = __STATIC_CAST(__stl_atomic_t, _p_size) * __nobjs;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }
    else if (__bytes_left >= __total_bytes) {
      // The block has enough left to satisfy all that was asked for
      __result      = __buf_start;
      __bytes_left -= __total_bytes;
      __buf_start  += __total_bytes;
    }

    if (__bytes_left != 0) {
      // There is still some memory left over in block after we satisfied our request.
      if ((__result != 0) && (__bytes_left >= sizeof(_FreeBlockHeader))) {
        // We were able to allocate at least one object and there is still enough
        // left to put remainder back into list.
        _FreeBlockHeader* __newblock = __REINTERPRET_CAST(_FreeBlockHeader*, __buf_start);
        __newblock->_M_end  = __block->_M_end;
        _S_free_mem_blocks.push(__newblock);
      }
      else {
        // We were not able to allocate enough for at least one object.
        // Shove into freelist of nearest (rounded-down!) size.
        size_t __rounded_down = _S_round_up(__bytes_left + 1) - (size_t)_ALIGN;
        if (__rounded_down > 0)
          _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push((_Obj*)__buf_start);
      }
    }
    if (__result != 0)
      return __result;
  }

  // We couldn't satisfy it from the list of free blocks, get new memory.
  __stl_atomic_t __bytes_to_get = 2 * __total_bytes + __STATIC_CAST(__stl_atomic_t, _S_round_up(_S_heap_size >> 4))
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
    + _ALIGN
#  endif
    ;

  __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
  // Alignment check
  _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

  if (0 == __result) {
    // Allocation failed; try to cannibalize from a freelist of a larger object size.
    for (size_t __i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
      _Obj* __p  = _S_free_list[_S_FREELIST_INDEX(__i)].pop();
      if (0 != __p) {
        if (__i < sizeof(_FreeBlockHeader)) {
          // Not enough to put into list of free blocks, divvy it up here.
          // Use as much as possible for this request and shove remainder into freelist.
          __nobjs = (int)(__i/_p_size);
          __total_bytes = __nobjs * __STATIC_CAST(__stl_atomic_t, _p_size);
          size_t __bytes_left = __i - __total_bytes;
          size_t __rounded_down = _S_round_up(__bytes_left+1) - (size_t)_ALIGN;
          if (__rounded_down > 0) {
            _S_free_list[_S_FREELIST_INDEX(__rounded_down)].push(__REINTERPRET_CAST(_Obj*, __REINTERPRET_CAST(char*, __p) + __total_bytes));
          }
          return __REINTERPRET_CAST(char*, __p);
        }
        else {
          // Add node to list of available blocks and recursively allocate from it.
          _FreeBlockHeader* __newblock = (_FreeBlockHeader*)__p;
          __newblock->_M_end  = __REINTERPRET_CAST(char*, __p) + __i;
          _S_free_mem_blocks.push(__newblock);
          return _S_chunk_alloc(_p_size, __nobjs);
        }
      }
    }

    // We were not able to find something in a freelist, try to allocate a smaller amount.
    __bytes_to_get  = __total_bytes
#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
      + _ALIGN
#  endif
      ;
    __result = __STATIC_CAST(char*, __stlp_chunk_malloc(__bytes_to_get));
    // Alignment check
    _STLP_VERBOSE_ASSERT(((__REINTERPRET_CAST(size_t, __result) & __STATIC_CAST(size_t, _ALIGN - 1)) == 0), _StlMsg_DBA_DELETED_TWICE)

    // This should either throw an exception or remedy the situation.
    // Thus we assume it succeeded.
  }

  _STLP_ATOMIC_ADD(&_S_heap_size, __bytes_to_get);

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
  // We have to track the allocated memory chunks for release on exit.
  _S_chunks.push(__REINTERPRET_CAST(_Obj*, __result));
  __result       += _ALIGN;
  __bytes_to_get -= _ALIGN;
#  endif

  if (__bytes_to_get > __total_bytes) {
    // Push excess memory allocated in this chunk into list of free memory blocks
    _FreeBlockHeader* __freeblock = __REINTERPRET_CAST(_FreeBlockHeader*, __result + __total_bytes);
    __freeblock->_M_end  = __result + __bytes_to_get;
    _S_free_mem_blocks.push(__freeblock);
  }
  return __result;
}

#  if defined (_STLP_DO_CLEAN_NODE_ALLOC)
void __node_alloc_impl::_S_alloc_call()
{ _STLP_ATOMIC_INCREMENT(&_S_alloc_counter()); }

void __node_alloc_impl::_S_dealloc_call() {
  _STLP_VOLATILE __stl_atomic_t *pcounter = &_S_alloc_counter();
  if (_STLP_ATOMIC_DECREMENT(pcounter) == 0)
    _S_chunk_dealloc();
}

/* We deallocate all the memory chunks      */
void __node_alloc_impl::_S_chunk_dealloc() {
  // Note: The _Node_alloc_helper class ensures that this function
  // will only be called when the (shared) library is unloaded or the
  // process is shut down.  It is thus not possible that another thread
  // is currently trying to allocate a node (we're not thread-safe here).
  //

  // Clear the free blocks and all freelists.  This makes sure the allocator
  // starts from a clean state if, for some reason, more memory is allocated
  // again during shutdown (it would also be really nasty to leave references
  // to deallocated memory).
  _S_free_mem_blocks.clear();
  _S_heap_size      = 0;

  for (size_t __i = 0; __i < _STLP_NFREELISTS; ++__i) {
    _S_free_list[__i].clear();
  }

  // Detach list of chunks and free them all
  _Obj* __chunk = _S_chunks.clear();
  while (__chunk != 0) {
    _Obj* __next = __chunk->_M_next;
    __stlp_chunck_free(__chunk);
    __chunk  = __next;
  }
}
#  endif /* _STLP_DO_CLEAN_NODE_ALLOC */

#endif /* !defined(_STLP_USE_LOCK_FREE_IMPLEMENTATION) */

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
struct __node_alloc_cleaner {
  ~__node_alloc_cleaner()
      {
      __node_alloc_impl::_S_dealloc_call();
      }
};

#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t& _STLP_CALL
#  else
__stl_atomic_t& _STLP_CALL
#  endif
__node_alloc_impl::_S_alloc_counter() {
  static _AllocCounter _S_counter = 1;
  static __node_alloc_cleaner _S_node_alloc_cleaner;
  return _S_counter;
}
#endif

#if !defined(__SYMBIAN32__WSD__)
#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_Node_alloc_obj * _STLP_VOLATILE
__node_alloc_impl::_S_free_list[_STLP_NFREELISTS]
= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy.  Otherwise it appears to allocate too little
// space for the array.
#else
_STLP_atomic_freelist __node_alloc_impl::_S_free_list[_STLP_NFREELISTS];
_STLP_atomic_freelist __node_alloc_impl::_S_free_mem_blocks;
#endif

#if !defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
char *__node_alloc_impl::_S_start_free = 0;
char *__node_alloc_impl::_S_end_free = 0;
#endif

#if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_VOLATILE __stl_atomic_t
#else
size_t
#endif
__node_alloc_impl::_S_heap_size = 0;
#endif //__SYMBIAN32__WSD__

#if defined (_STLP_DO_CLEAN_NODE_ALLOC)
#  if defined (_STLP_USE_LOCK_FREE_IMPLEMENTATION)
_STLP_atomic_freelist __node_alloc_impl::_S_chunks;
#  else
_Node_alloc_obj* __node_alloc_impl::_S_chunks  = 0;
#  endif
#endif

_STLP_DECLSPEC void * _STLP_CALL __node_alloc::_M_allocate(size_t& __n)
{ return __node_alloc_impl::_M_allocate(__n); }

_STLP_DECLSPEC void _STLP_CALL __node_alloc::_M_deallocate(void *__p, size_t __n)
{ __node_alloc_impl::_M_deallocate(__p, __n); }

#if defined (_STLP_PTHREADS) && !defined (_STLP_NO_THREADS)

#  define _STLP_DATA_ALIGNMENT 8

_STLP_MOVE_TO_PRIV_NAMESPACE

// *******************************************************
// __perthread_alloc implementation
union _Pthread_alloc_obj {
  union _Pthread_alloc_obj * __free_list_link;
  char __client_data[_STLP_DATA_ALIGNMENT];    /* The client sees this.    */
};

// Pthread allocators don't appear to the client to have meaningful
// instances.  We do in fact need to associate some state with each
// thread.  That state is represented by _Pthread_alloc_per_thread_state.

struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  enum { _S_NFREELISTS = _MAX_BYTES / _STLP_DATA_ALIGNMENT };

  // Free list link for list of available per thread structures.
  // When one of these becomes available for reuse due to thread
  // termination, any objects in its free list remain associated
  // with it.  The whole structure may then be used by a newly
  // created thread.
  _Pthread_alloc_per_thread_state() : __next(0)
  { memset((void *)__CONST_CAST(_Pthread_alloc_obj**, __free_list), 0, (size_t)_S_NFREELISTS * sizeof(__obj *)); }
  // Returns an object of size __n, and possibly adds to size n free list.
  void *_M_refill(size_t __n);

  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
  _Pthread_alloc_per_thread_state *__next;
  // this data member is only to be used by per_thread_allocator, which returns memory to the originating thread.
  _STLP_mutex _M_lock;
};

// Pthread-specific allocator.
class _Pthread_alloc_impl {
public: // but only for internal use:
  typedef _Pthread_alloc_per_thread_state __state_type;
  typedef char value_type;

  // Allocates a chunk for nobjs of size size.  nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char *_S_chunk_alloc(size_t __size, size_t &__nobjs, __state_type*);

  enum {_S_ALIGN = _STLP_DATA_ALIGNMENT};

  static size_t _S_round_up(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) & ~((int)_S_ALIGN - 1)); }
  static size_t _S_freelist_index(size_t __bytes)
  { return (((__bytes) + (int)_S_ALIGN - 1) / (int)_S_ALIGN - 1); }

private:
  // Chunk allocation state. And other shared state.
  // Protected by _S_chunk_allocator_lock.
#if defined(__SYMBIAN32__WSD__)
public:
	static void pt_wsd_init() {
		get_S_free_per_thread_states() = 0;
		get_S_key() = 0;
		get_S_chunk_allocator_lock()._M_lock.iState   = _ENeedsNormalInit;
		get_S_chunk_allocator_lock()._M_lock.iPtr     = 0;
		get_S_chunk_allocator_lock()._M_lock.iReentry = 0;
		get_S_key_initialized() = false;
		get_S_start_free() = 0;
		get_S_end_free() = 0;
		get_S_heap_size() = 0;
	}
private:
  static _STLP_STATIC_MUTEX& get_S_chunk_allocator_lock()
	{ return get_libcpp_wsd().wsd_pt_S_chunk_allocator_lock; }
  static char*& get_S_start_free()
  	{ return get_libcpp_wsd().wsd_pt_S_start_free; }
  static char*& get_S_end_free()
  	{ return get_libcpp_wsd().wsd_pt_S_end_free; }
  static size_t& get_S_heap_size()
  	{ return get_libcpp_wsd().wsd_pt_S_heap_size; }
  static __state_type*& get_S_free_per_thread_states()
	{ return get_libcpp_wsd().wsd_pt_S_free_per_thread_states; }
  static pthread_key_t& get_S_key()
  	{ return get_libcpp_wsd().wsd_pt_S_key; }
  static bool& get_S_key_initialized()
  	{ return get_libcpp_wsd().wsd_pt_S_key_initialized; }
#else
  static _STLP_STATIC_MUTEX _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  static __state_type *_S_free_per_thread_states;
  static pthread_key_t _S_key;
  static bool _S_key_initialized;
#endif
  // Pthread key under which per thread state is stored.
  // Allocator instances that are currently unclaimed by any thread.
  static void _S_destructor(void *instance);
  // Function to be called on thread exit to reclaim per thread
  // state.
  static __state_type *_S_new_per_thread_state();
public:
  // Return a recycled or new per thread state.
  static __state_type *_S_get_per_thread_state();
private:
        // ensure that the current thread has an associated
        // per thread state.
  class _M_lock;
  friend class _M_lock;
  class _M_lock {
  public:
    _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
    ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
  };

public:

  /* n must be > 0      */
  static void * allocate(size_t& __n);

  /* p may not be 0 */
  static void deallocate(void *__p, size_t __n);

  // boris : versions for per_thread_allocator
  /* n must be > 0      */
  static void * allocate(size_t& __n, __state_type* __a);

  /* p may not be 0 */
  static void deallocate(void *__p, size_t __n, __state_type* __a);

  static void * reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
};

/* Returns an object of size n, and optionally adds to size n free list.*/
/* We assume that n is properly aligned.                                */
/* We hold the allocation lock.                                         */
void *_Pthread_alloc_per_thread_state::_M_refill(size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  size_t __nobjs = 128;
  char * __chunk = _Pthread_alloc_impl::_S_chunk_alloc(__n, __nobjs, this);
  __obj * volatile * __my_free_list;
  __obj * __result;
  __obj * __current_obj, * __next_obj;
  size_t __i;

  if (1 == __nobjs)  {
    return __chunk;
  }

  __my_free_list = __free_list + _Pthread_alloc_impl::_S_freelist_index(__n);

  /* Build free list in chunk */
  __result = (__obj *)__chunk;
  *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
  for (__i = 1; ; ++__i) {
    __current_obj = __next_obj;
    __next_obj = (__obj *)((char *)__next_obj + __n);
    if (__nobjs - 1 == __i) {
      __current_obj -> __free_list_link = 0;
      break;
    } else {
      __current_obj -> __free_list_link = __next_obj;
    }
  }
  return __result;
}

void _Pthread_alloc_impl::_S_destructor(void *__instance) {
  _M_lock __lock_instance;  // Need to acquire lock here.
  _Pthread_alloc_per_thread_state* __s = (_Pthread_alloc_per_thread_state*)__instance;
  __s -> __next = _S_free_per_thread_states;
  _S_free_per_thread_states = __s;
}

_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_new_per_thread_state() {
  /* lock already held here.  */
  if (0 != _S_free_per_thread_states) {
    _Pthread_alloc_per_thread_state *__result = _S_free_per_thread_states;
    _S_free_per_thread_states = _S_free_per_thread_states -> __next;
    return __result;
  }
  else {
    return _STLP_NEW _Pthread_alloc_per_thread_state;
  }
}

_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_get_per_thread_state() {
  int __ret_code;
  __state_type* __result;

  if (_S_key_initialized && ((__result = (__state_type*) pthread_getspecific(_S_key)) != NULL))
    return __result;

  /*REFERENCED*/
  _M_lock __lock_instance;  // Need to acquire lock here.
  if (!_S_key_initialized) {
    if (pthread_key_create(&_S_key, _S_destructor)) {
      __THROW_BAD_ALLOC;  // failed
    }
    _S_key_initialized = true;
  }

  __result = _S_new_per_thread_state();
  __ret_code = pthread_setspecific(_S_key, __result);
  if (__ret_code) {
    if (__ret_code == ENOMEM) {
      __THROW_BAD_ALLOC;
    } else {
  // EINVAL
      _STLP_ABORT();
    }
  }
  return __result;
}

/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
char *_Pthread_alloc_impl::_S_chunk_alloc(size_t __p_size, size_t &__nobjs, _Pthread_alloc_per_thread_state *__a) {
  typedef _Pthread_alloc_obj __obj;
  {
    char * __result;
    size_t __total_bytes;
    size_t __bytes_left;
    /*REFERENCED*/
    _M_lock __lock_instance;         // Acquire lock for this routine

    __total_bytes = __p_size * __nobjs;
    __bytes_left = _S_end_free - _S_start_free;
    if (__bytes_left >= __total_bytes) {
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else if (__bytes_left >= __p_size) {
      __nobjs = __bytes_left/__p_size;
      __total_bytes = __p_size * __nobjs;
      __result = _S_start_free;
      _S_start_free += __total_bytes;
      return __result;
    } else {
      size_t __bytes_to_get = 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
      // Try to make use of the left-over piece.
      if (__bytes_left > 0) {
        __obj * volatile * __my_free_list = __a->__free_list + _S_freelist_index(__bytes_left);
        ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
        *__my_free_list = (__obj *)_S_start_free;
      }
#  ifdef _SGI_SOURCE
      // Try to get memory that's aligned on something like a
      // cache line boundary, so as to avoid parceling out
      // parts of the same line to different threads and thus
      // possibly different processors.
      {
        const int __cache_line_size = 128;  // probable upper bound
        __bytes_to_get &= ~(__cache_line_size-1);
        _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
        if (0 == _S_start_free) {
          _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
        }
      }
#  else  /* !SGI_SOURCE */
      _S_start_free = (char *)__malloc_alloc::allocate(__bytes_to_get);
#  endif
      _S_heap_size += __bytes_to_get;
      _S_end_free = _S_start_free + __bytes_to_get;
    }
  }
  // lock is released here
  return _S_chunk_alloc(__p_size, __nobjs, __a);
}


/* n must be > 0      */
void *_Pthread_alloc_impl::allocate(size_t& __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }

  __n = _S_round_up(__n);
  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
};

/* p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;
  __state_type* __a;

  if (__n > _MAX_BYTES) {
      __malloc_alloc::deallocate(__p, __n);
      return;
  }

  __a = _S_get_per_thread_state();

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q -> __free_list_link = *__my_free_list;
  *__my_free_list = __q;
}

// boris : versions for per_thread_allocator
/* n must be > 0      */
void *_Pthread_alloc_impl::allocate(size_t& __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj * volatile * __my_free_list;
  __obj * __result;

  if (__n > _MAX_BYTES) {
    return __malloc_alloc::allocate(__n);
  }
  __n = _S_round_up(__n);

  // boris : here, we have to lock per thread state, as we may be getting memory from
  // different thread pool.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __result = *__my_free_list;
  if (__result == 0) {
    void *__r = __a->_M_refill(__n);
    return __r;
  }
  *__my_free_list = __result->__free_list_link;
  return __result;
};

/* p may not be 0 */
void _Pthread_alloc_impl::deallocate(void *__p, size_t __n, __state_type* __a) {
  typedef _Pthread_alloc_obj __obj;
  __obj *__q = (__obj *)__p;
  __obj * volatile * __my_free_list;

  if (__n > _MAX_BYTES) {
    __malloc_alloc::deallocate(__p, __n);
    return;
  }

  // boris : here, we have to lock per thread state, as we may be returning memory from
  // different thread.
  _STLP_auto_lock __lock(__a->_M_lock);

  __my_free_list = __a->__free_list + _S_freelist_index(__n);
  __q -> __free_list_link = *__my_free_list;
  *__my_free_list = __q;
}

void *_Pthread_alloc_impl::reallocate(void *__p, size_t __old_sz, size_t& __new_sz) {
  void * __result;
  size_t __copy_sz;

  if (__old_sz > _MAX_BYTES && __new_sz > _MAX_BYTES) {
    return realloc(__p, __new_sz);
  }

  if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return __p;
  __result = allocate(__new_sz);
  __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
  memcpy(__result, __p, __copy_sz);
  deallocate(__p, __old_sz);
  return __result;
}
#if !defined(__SYMBIAN32__WSD__)
_Pthread_alloc_per_thread_state* _Pthread_alloc_impl::_S_free_per_thread_states = 0;
pthread_key_t _Pthread_alloc_impl::_S_key = 0;
_STLP_STATIC_MUTEX _Pthread_alloc_impl::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
bool _Pthread_alloc_impl::_S_key_initialized = false;
char *_Pthread_alloc_impl::_S_start_free = 0;
char *_Pthread_alloc_impl::_S_end_free = 0;
size_t _Pthread_alloc_impl::_S_heap_size = 0;
#else

inline __oom_handler_type& __malloc_alloc_impl::get_oom_handler()
	{
	return get_libcpp_wsd().wsd__oom_handler;
	}

inline __node_alloc_impl::_Freelist* __node_alloc_impl::get_S_free_list()
	{
	return (__node_alloc_impl::_Freelist*)get_libcpp_wsd().wsd_S_free_list;
	}

inline size_t& __node_alloc_impl::get_S_heap_size()
	{
	return get_libcpp_wsd().wsd__node_alloc_impl_S_heap_size;
	}

inline char*& __node_alloc_impl::get_S_start_free()
	{
	return get_libcpp_wsd().wsd_S_start_free;
	}

inline char*& __node_alloc_impl::get_S_end_free()
	{
	return get_libcpp_wsd().wsd_S_end_free;
	}

inline _STLP_STATIC_MUTEX& _Node_Alloc_Lock::get_allocator_S_lock()
	{
	return get_libcpp_wsd().wsd_allocator_S_lock;
	}

#endif

void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n)
{ return _Pthread_alloc_impl::allocate(__n); }
void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n)
{ _Pthread_alloc_impl::deallocate(__p, __n); }
void * _STLP_CALL _Pthread_alloc::allocate(size_t& __n, __state_type* __a)
{ return _Pthread_alloc_impl::allocate(__n, __a); }
void _STLP_CALL _Pthread_alloc::deallocate(void *__p, size_t __n, __state_type* __a)
{ _Pthread_alloc_impl::deallocate(__p, __n, __a); }
void * _STLP_CALL _Pthread_alloc::reallocate(void *__p, size_t __old_sz, size_t& __new_sz)
{ return _Pthread_alloc_impl::reallocate(__p, __old_sz, __new_sz); }
_Pthread_alloc_per_thread_state* _STLP_CALL _Pthread_alloc::_S_get_per_thread_state()
{ return _Pthread_alloc_impl::_S_get_per_thread_state(); }

_STLP_MOVE_TO_STD_NAMESPACE

#endif

_STLP_END_NAMESPACE


#if defined(__SYMBIAN32__WSD__)
// to be called from an stdcpp init.  (to init WSD)
void stdcpp_allocators_init()
	{
	// init oom handler
	std::__malloc_alloc_impl::get_oom_handler() = NULL;

	// lock init
	stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iState   = _ENeedsNormalInit;
	stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iPtr     = 0;
	stlp_priv::_Node_Alloc_Lock::get_allocator_S_lock()._M_lock.iReentry = 0;

	// init _node_alloc_impl::x
	stlp_priv::__node_alloc_impl::get_S_heap_size() = 0;
	stlp_priv::__node_alloc_impl::get_S_start_free() = 0;
	stlp_priv::__node_alloc_impl::get_S_end_free()	  = 0;

	// initialize free list
	for (int count = 0; count < _STLP_NFREELISTS; count++)
		stlp_priv::__node_alloc_impl::_S_free_list[count] = 0;

	//pthread_alloc_impl
	stlp_priv::_Pthread_alloc_impl::pt_wsd_init();
	}
#endif

#undef _S_FREELIST_INDEX