epoc32/include/stdapis/stlportv5/stl/_alloc.c
/*
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1997
 * Moscow Center for SPARC Technology
 *
 * Copyright (c) 1999
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */
#ifndef _STLP_ALLOC_C
#define _STLP_ALLOC_C

#ifdef __WATCOMC__
#pragma warning 13 9
#pragma warning 367 9
#pragma warning 368 9
#endif

#ifndef _STLP_INTERNAL_ALLOC_H
#  include <stl/_alloc.h>
#endif

# if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION)

# ifdef _STLP_SGI_THREADS
// We test whether threads are in use before locking.
// Perhaps this should be moved into stl_threads.h, but that
// probably makes it harder to avoid the procedure call when
// it isn't needed.
extern "C" {
  extern int __us_rsthread_malloc;
}
# endif

// Specialised debug form of malloc which does not produce "false"
// memory leaks when run with debug CRT libraries.
#if defined(_STLP_MSVC) && (_STLP_MSVC>=1020 && defined(_STLP_DEBUG_ALLOC)) && ! defined (_STLP_WINCE)
#  include <crtdbg.h>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
#else  // !(MSVC debug CRT allocation)
# ifdef _STLP_NODE_ALLOC_USE_MALLOC
#  include <cstdlib>
inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
# else
inline void* __stlp_chunk_malloc(size_t __bytes) { return _STLP_STD::__stl_new(__bytes); }
# endif
#endif  // !(MSVC debug CRT allocation)
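
// Note: in the default configuration (neither branch above taken) chunks
// come from _STLP_STD::__stl_new, which stock STLport implements as a thin
// wrapper over ::operator new; under that assumption a new-handler
// installed by the application also governs node-allocator growth.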


#define _S_FREELIST_INDEX(__bytes) ((__bytes-size_t(1))>>(int)_ALIGN_SHIFT)
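
// Illustration (assuming the stock STLport values _ALIGN == 8 and
// _ALIGN_SHIFT == 3): requests map to free-list slots by rounded-up size:
//   _S_FREELIST_INDEX(1)   == 0   // 1..8 bytes share list 0
//   _S_FREELIST_INDEX(9)   == 1   // 9..16 bytes share list 1
//   _S_FREELIST_INDEX(128) == 15  // the largest small-object bucket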

_STLP_BEGIN_NAMESPACE

#ifndef _STLP_NO_NODE_ALLOC

template <int __inst>
void *  _STLP_CALL __malloc_alloc<__inst>::_S_oom_malloc(size_t __n)
{
  __oom_handler_type __my_malloc_handler;
  void * __result;

  for (;;) {
    __my_malloc_handler = __oom_handler;
    if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
    (*__my_malloc_handler)();
    __result = malloc(__n);
    if (__result) return __result;
  }
#if defined(_STLP_NEED_UNREACHABLE_RETURN)
  return 0;
#endif
}
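
// Usage sketch (hypothetical client code; assumes the set_malloc_handler()
// member declared for __malloc_alloc in _alloc.h): the loop above keeps
// re-invoking the installed handler until malloc() succeeds, mirroring
// set_new_handler() semantics.
//
//   static void __release_caches() { /* drop application-level caches */ }
//   ...
//   __malloc_alloc<0>::set_malloc_handler(&__release_caches);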

#endif /* _STLP_NO_NODE_ALLOC */

template <class _Alloc>
void *  _STLP_CALL __debug_alloc<_Alloc>::allocate(size_t __n) {
  size_t __real_n = __n + __extra_before_chunk() + __extra_after_chunk();
  __alloc_header *__result = (__alloc_header *)__allocator_type::allocate(__real_n);
  memset((char*)__result, __shred_byte, __real_n*sizeof(value_type));
  __result->__magic = __magic;
  __result->__type_size = sizeof(value_type);
  __result->_M_size = (_STLP_UINT32_T)__n;
  return ((char*)__result) + (long)__extra_before;
}
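
// Block layout (illustration):
//
//   | __alloc_header | front pad | user data (__n elements) | back pad |
//   ^ __real_p                   ^ pointer returned to the caller
//
// Pads and payload are pre-filled with __shred_byte; deallocate() checks
// both pads to catch buffer underruns and overruns.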

template <class _Alloc>
void  _STLP_CALL
__debug_alloc<_Alloc>::deallocate(void *__p, size_t __n) {
  __alloc_header * __real_p = (__alloc_header*)((char *)__p - (long)__extra_before;
  // check integrity
  _STLP_VERBOSE_ASSERT(__real_p->__magic != __deleted_magic, _StlMsg_DBA_DELETED_TWICE)
  _STLP_VERBOSE_ASSERT(__real_p->__magic == __magic, _StlMsg_DBA_NEVER_ALLOCATED)
  _STLP_VERBOSE_ASSERT(__real_p->__type_size == 1, _StlMsg_DBA_TYPE_MISMATCH)
  _STLP_VERBOSE_ASSERT(__real_p->_M_size == __n, _StlMsg_DBA_SIZE_MISMATCH)
  // check pads on both sides
  unsigned char* __tmp;
  for (__tmp = (unsigned char*)(__real_p+1); __tmp < (unsigned char*)__p; __tmp++) {
    _STLP_VERBOSE_ASSERT(*__tmp==__shred_byte, _StlMsg_DBA_UNDERRUN)
  }

  size_t __real_n = __n + __extra_before_chunk() + __extra_after_chunk();

  for (__tmp = ((unsigned char*)__p)+__n*sizeof(value_type);
       __tmp < ((unsigned char*)__real_p)+__real_n; __tmp++) {
    _STLP_VERBOSE_ASSERT(*__tmp==__shred_byte, _StlMsg_DBA_OVERRUN)
  }

  // mark the header and shred the payload so double deletes and stale
  // pointers are caught
  __real_p->__magic = __deleted_magic;
  memset((char*)__p, __shred_byte, __n*sizeof(value_type));
  __allocator_type::deallocate(__real_p, __real_n);
}

#ifndef _STLP_NO_NODE_ALLOC

template <bool __threads, int __inst>
class _Node_Alloc_Lock {
public:
  _Node_Alloc_Lock() {
#  ifdef _STLP_SGI_THREADS
    if (__threads && __us_rsthread_malloc)
#  else /* !_STLP_SGI_THREADS */
    if (__threads)
#  endif
      _S_lock._M_acquire_lock();
  }

  ~_Node_Alloc_Lock() {
#  ifdef _STLP_SGI_THREADS
    if (__threads && __us_rsthread_malloc)
#  else /* !_STLP_SGI_THREADS */
    if (__threads)
#  endif
      _S_lock._M_release_lock();
  }

  static _STLP_STATIC_MUTEX _S_lock;
};
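
// RAII illustration: serialising a free-list operation only requires
// scoping a lock instance; the mutex is released on every exit path,
// including exception unwinding.
//
//   {
//     _Node_Alloc_Lock<__threads, __inst> __lock_instance;
//     // ... manipulate _S_free_list ...
//   }  // released here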


template <bool __threads, int __inst>
void* _STLP_CALL
__node_alloc<__threads, __inst>::_M_allocate(size_t __n) {
  void*  __r;
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  // Acquire the lock here with a constructor call.
  // This ensures that it is released on exit or during stack unwinding.
  /*REFERENCED*/
  _Node_Alloc_Lock<__threads, __inst> __lock_instance;
  if ((__r = *__my_free_list) != 0) {
    *__my_free_list = ((_Obj*)__r)->_M_free_list_link;
  } else {
    __r = _S_refill(__n);
  }
  // lock is released here
  return __r;
}

template <bool __threads, int __inst>
void _STLP_CALL
__node_alloc<__threads, __inst>::_M_deallocate(void *__p, size_t __n) {
  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
  // Acquire the lock here with a constructor call; it is released when
  // the destructor runs.
  /*REFERENCED*/
  _Node_Alloc_Lock<__threads, __inst> __lock_instance;
  ((_Obj *)__p)->_M_free_list_link = *__my_free_list;
  *__my_free_list = (_Obj *)__p;
  // lock is released here
}
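
// Illustration: each free list is a LIFO stack of equal-sized nodes.
// _M_deallocate pushes at the head and _M_allocate pops from it, so
// alternating allocations and deallocations of one size class reuse the
// same node without touching the heap.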

/* We allocate memory in large chunks in order to avoid fragmenting     */
/* the malloc heap too much.                                            */
/* We assume that size is properly aligned.                             */
/* We hold the allocation lock.                                         */
template <bool __threads, int __inst>
char* _STLP_CALL
__node_alloc<__threads, __inst>::_S_chunk_alloc(size_t _p_size,
                                                int& __nobjs)
{
  char* __result;
  size_t __total_bytes = _p_size * __nobjs;
  size_t __bytes_left = _S_end_free - _S_start_free;

  if (__bytes_left >= __total_bytes) {
    __result = _S_start_free;
    _S_start_free += __total_bytes;
    return __result;
  } else if (__bytes_left >= _p_size) {
    __nobjs = (int)(__bytes_left/_p_size);
    __total_bytes = _p_size * __nobjs;
    __result = _S_start_free;
    _S_start_free += __total_bytes;
    return __result;
  } else {
    size_t __bytes_to_get =
      2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
    // Try to make use of the left-over piece.
    if (__bytes_left > 0) {
      _Obj* _STLP_VOLATILE* __my_free_list =
        _S_free_list + _S_FREELIST_INDEX(__bytes_left);

      ((_Obj*)_S_start_free)->_M_free_list_link = *__my_free_list;
      *__my_free_list = (_Obj*)_S_start_free;
    }
    _S_start_free = (char*)__stlp_chunk_malloc(__bytes_to_get);
    if (0 == _S_start_free) {
      size_t __i;
      _Obj* _STLP_VOLATILE* __my_free_list;
      _Obj* __p;
      // Try to make do with what we have.  That can't
      // hurt.  We do not try smaller requests, since that tends
      // to result in disaster on multi-process machines.
      for (__i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
        __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
        __p = *__my_free_list;
        if (0 != __p) {
          *__my_free_list = __p->_M_free_list_link;
          _S_start_free = (char*)__p;
          _S_end_free = _S_start_free + __i;
          return _S_chunk_alloc(_p_size, __nobjs);
          // Any leftover piece will eventually make it to the
          // right free list.
        }
      }
      _S_end_free = 0;  // In case of exception.
      _S_start_free = (char*)__stlp_chunk_malloc(__bytes_to_get);
      // This should either throw an
      // exception or remedy the situation.  Thus we assume it
      // succeeded.
    }
    _S_heap_size += __bytes_to_get;
    _S_end_free = _S_start_free + __bytes_to_get;
    return _S_chunk_alloc(_p_size, __nobjs);
  }
}
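
// Worked example (illustration; 32-byte nodes, __nobjs == 20, empty pool):
// __total_bytes == 640 and __bytes_left == 0, so on a fresh heap the chunk
// size is 2 * 640 + _S_round_up(0 >> 4) == 1280 bytes.  The first 640 bytes
// satisfy the current request; the remaining 640 stay in
// [_S_start_free, _S_end_free) and serve later requests before
// __stlp_chunk_malloc is called again.  The _S_heap_size >> 4 term makes
// successive chunks grow with total consumption.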


/* Returns an object of size __n, and optionally adds to size __n free list. */
/* We assume that __n is properly aligned.                                   */
/* We hold the allocation lock.                                              */
template <bool __threads, int __inst>
void* _STLP_CALL
__node_alloc<__threads, __inst>::_S_refill(size_t __n)
{
  int __nobjs = 20;
  __n = _S_round_up(__n);
  char* __chunk = _S_chunk_alloc(__n, __nobjs);
  _Obj* _STLP_VOLATILE* __my_free_list;
  _Obj* __result;
  _Obj* __current_obj;
  _Obj* __next_obj;
  int __i;

  if (1 == __nobjs) return __chunk;
  __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);

  /* Build free list in chunk */
  __result = (_Obj*)__chunk;
  *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
  for (__i = 1; ; __i++) {
    __current_obj = __next_obj;
    __next_obj = (_Obj*)((char*)__next_obj + __n);
    if (__nobjs - 1 == __i) {
      __current_obj->_M_free_list_link = 0;
      break;
    } else {
      __current_obj->_M_free_list_link = __next_obj;
    }
  }
  return __result;
}
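
// Illustration: for __n == 64 the chunk normally holds 20 slots.  Slot 1
// is returned to the caller; slots 2..20 are threaded onto
// _S_free_list[_S_FREELIST_INDEX(64)], each _M_free_list_link pointing at
// the next slot and the last set to 0.  If _S_chunk_alloc could deliver
// only one object (__nobjs == 1), the chunk is returned directly and no
// list is built.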

# if ( _STLP_STATIC_TEMPLATE_DATA > 0 )
// malloc_alloc out-of-memory handling
template <int __inst>
__oom_handler_type __malloc_alloc<__inst>::__oom_handler = (__oom_handler_type)0;

#ifdef _STLP_THREADS
template <bool __threads, int __inst>
_STLP_STATIC_MUTEX
_Node_Alloc_Lock<__threads, __inst>::_S_lock _STLP_MUTEX_INITIALIZER;
#endif

template <bool __threads, int __inst>
_Node_alloc_obj * _STLP_VOLATILE
__node_alloc<__threads, __inst>::_S_free_list[_STLP_NFREELISTS]
= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
// The 16 zeros are necessary to make version 4.1 of the SunPro
// compiler happy.  Otherwise it appears to allocate too little
// space for the array.

template <bool __threads, int __inst>
char *__node_alloc<__threads, __inst>::_S_start_free = 0;

template <bool __threads, int __inst>
char *__node_alloc<__threads, __inst>::_S_end_free = 0;

template <bool __threads, int __inst>
size_t __node_alloc<__threads, __inst>::_S_heap_size = 0;

# else /* ( _STLP_STATIC_TEMPLATE_DATA > 0 ) */

__DECLARE_INSTANCE(__oom_handler_type, __malloc_alloc<0>::__oom_handler, =0);

# define _STLP_ALLOC_NOTHREADS __node_alloc<false, 0>
# define _STLP_ALLOC_THREADS   __node_alloc<true, 0>
# define _STLP_ALLOC_NOTHREADS_LOCK _Node_Alloc_Lock<false, 0>
# define _STLP_ALLOC_THREADS_LOCK   _Node_Alloc_Lock<true, 0>

__DECLARE_INSTANCE(char *, _STLP_ALLOC_NOTHREADS::_S_start_free, =0);
__DECLARE_INSTANCE(char *, _STLP_ALLOC_NOTHREADS::_S_end_free, =0);
__DECLARE_INSTANCE(size_t, _STLP_ALLOC_NOTHREADS::_S_heap_size, =0);
__DECLARE_INSTANCE(_Node_alloc_obj * _STLP_VOLATILE,
                   _STLP_ALLOC_NOTHREADS::_S_free_list[_STLP_NFREELISTS],
                   ={0});
__DECLARE_INSTANCE(char *, _STLP_ALLOC_THREADS::_S_start_free, =0);
__DECLARE_INSTANCE(char *, _STLP_ALLOC_THREADS::_S_end_free, =0);
__DECLARE_INSTANCE(size_t, _STLP_ALLOC_THREADS::_S_heap_size, =0);
__DECLARE_INSTANCE(_Node_alloc_obj * _STLP_VOLATILE,
                   _STLP_ALLOC_THREADS::_S_free_list[_STLP_NFREELISTS],
                   ={0});
__DECLARE_INSTANCE(_STLP_STATIC_MUTEX,
                   _STLP_ALLOC_NOTHREADS_LOCK::_S_lock,
                   _STLP_MUTEX_INITIALIZER);
__DECLARE_INSTANCE(_STLP_STATIC_MUTEX,
                   _STLP_ALLOC_THREADS_LOCK::_S_lock,
                   _STLP_MUTEX_INITIALIZER);

# undef _STLP_ALLOC_THREADS_LOCK
# undef _STLP_ALLOC_NOTHREADS_LOCK
# undef _STLP_ALLOC_THREADS
# undef _STLP_ALLOC_NOTHREADS

# endif /* _STLP_STATIC_TEMPLATE_DATA */

#endif /* _STLP_NO_NODE_ALLOC */

_STLP_END_NAMESPACE

# undef _S_FREELIST_INDEX

# endif /* _STLP_EXPOSE_GLOBALS_IMPLEMENTATION */

#endif /* _STLP_ALLOC_C */

// Local Variables:
// mode:C++
// End: