1.1 --- a/epoc32/include/stdapis/stlportv5/stl/_alloc.c Wed Mar 31 12:27:01 2010 +0100
1.2 +++ b/epoc32/include/stdapis/stlportv5/stl/_alloc.c Wed Mar 31 12:33:34 2010 +0100
1.3 @@ -6,13 +6,13 @@
1.4 * Copyright (c) 1997
1.5 * Moscow Center for SPARC Technology
1.6 *
1.7 - * Copyright (c) 1999
1.8 + * Copyright (c) 1999
1.9 * Boris Fomitchev
1.10 *
1.11 * This material is provided "as is", with absolutely no warranty expressed
1.12 * or implied. Any use is at your own risk.
1.13 *
1.14 - * Permission to use or copy this software for any purpose is hereby granted
1.15 + * Permission to use or copy this software for any purpose is hereby granted
1.16 * without fee, provided the above notices are retained on all copies.
1.17 * Permission to modify the code and to distribute modified code is granted,
1.18 * provided the above notices are retained, and a notice that the code was
1.19 @@ -22,76 +22,29 @@
1.20 #ifndef _STLP_ALLOC_C
1.21 #define _STLP_ALLOC_C
1.22
1.23 -#ifdef __WATCOMC__
1.24 -#pragma warning 13 9
1.25 -#pragma warning 367 9
1.26 -#pragma warning 368 9
1.27 -#endif
1.28 -
1.29 #ifndef _STLP_INTERNAL_ALLOC_H
1.30 # include <stl/_alloc.h>
1.31 #endif
1.32
1.33 -# if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION)
1.34 -
1.35 -# ifdef _STLP_SGI_THREADS
1.36 - // We test whether threads are in use before locking.
1.37 - // Perhaps this should be moved into stl_threads.h, but that
1.38 - // probably makes it harder to avoid the procedure call when
1.39 - // it isn't needed.
1.40 - extern "C" {
1.41 - extern int __us_rsthread_malloc;
1.42 - }
1.43 -# endif
1.44 -
1.45 -
1.46 -// Specialised debug form of malloc which does not provide "false"
1.47 -// memory leaks when run with debug CRT libraries.
1.48 -#if defined(_STLP_MSVC) && (_STLP_MSVC>=1020 && defined(_STLP_DEBUG_ALLOC)) && ! defined (_STLP_WINCE)
1.49 -# include <crtdbg.h>
1.50 -inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
1.51 -#else // !_DEBUG
1.52 -# ifdef _STLP_NODE_ALLOC_USE_MALLOC
1.53 -# include <cstdlib>
1.54 -inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
1.55 -# else
1.56 -inline void* __stlp_chunk_malloc(size_t __bytes) { return _STLP_STD::__stl_new(__bytes); }
1.57 -# endif
1.58 -#endif // !_DEBUG
1.59 -
1.60 -
1.61 -#define _S_FREELIST_INDEX(__bytes) ((__bytes-size_t(1))>>(int)_ALIGN_SHIFT)
1.62 +#if defined (__WATCOMC__)
1.63 +# pragma warning 13 9
1.64 +# pragma warning 367 9
1.65 +# pragma warning 368 9
1.66 +#endif
1.67
1.68 _STLP_BEGIN_NAMESPACE
1.69
1.70 -#ifndef _STLP_NO_NODE_ALLOC
1.71 -
1.72 -template <int __inst>
1.73 -void * _STLP_CALL __malloc_alloc<__inst>::_S_oom_malloc(size_t __n)
1.74 -{
1.75 - __oom_handler_type __my_malloc_handler;
1.76 - void * __result;
1.77 -
1.78 - for (;;) {
1.79 - __my_malloc_handler = __oom_handler;
1.80 - if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
1.81 - (*__my_malloc_handler)();
1.82 - __result = malloc(__n);
1.83 - if (__result) return(__result);
1.84 +template <class _Alloc>
1.85 +void * _STLP_CALL __debug_alloc<_Alloc>::allocate(size_t __n) {
1.86 + size_t __total_extra = __extra_before_chunk() + __extra_after_chunk();
1.87 + size_t __real_n = __n + __total_extra;
1.88 + if (__real_n < __n) {
1.89 + //It means size_t overflowed; __n must be very large, so let's hope
1.90 + //that allocating it will raise a bad_alloc exception:
1.91 + __real_n = __n + (__total_extra - __real_n - 1);
1.92 }
1.93 -#if defined(_STLP_NEED_UNREACHABLE_RETURN)
1.94 - return 0;
1.95 -#endif
1.96 -
1.97 -}
1.98 -
1.99 -#endif
1.100 -
1.101 -template <class _Alloc>
1.102 -void * _STLP_CALL __debug_alloc<_Alloc>::allocate(size_t __n) {
1.103 - size_t __real_n = __n + __extra_before_chunk() + __extra_after_chunk();
1.104 __alloc_header *__result = (__alloc_header *)__allocator_type::allocate(__real_n);
1.105 - memset((char*)__result, __shred_byte, __real_n*sizeof(value_type));
1.106 + memset((char*)__result, __shred_byte, __real_n * sizeof(value_type));
1.107 __result->__magic = __magic;
1.108 __result->__type_size = sizeof(value_type);
1.109 __result->_M_size = (_STLP_UINT32_T)__n;
1.110 @@ -109,268 +62,25 @@
1.111 _STLP_VERBOSE_ASSERT(__real_p->_M_size == __n, _StlMsg_DBA_SIZE_MISMATCH)
1.112 // check pads on both sides
1.113 unsigned char* __tmp;
1.114 - for (__tmp= (unsigned char*)(__real_p+1); __tmp < (unsigned char*)__p; __tmp++) {
1.115 - _STLP_VERBOSE_ASSERT(*__tmp==__shred_byte, _StlMsg_DBA_UNDERRUN)
1.116 - }
1.117 -
1.118 - size_t __real_n= __n + __extra_before_chunk() + __extra_after_chunk();
1.119 -
1.120 - for (__tmp= ((unsigned char*)__p)+__n*sizeof(value_type);
1.121 - __tmp < ((unsigned char*)__real_p)+__real_n ; __tmp++) {
1.122 - _STLP_VERBOSE_ASSERT(*__tmp==__shred_byte, _StlMsg_DBA_OVERRUN)
1.123 - }
1.124 -
1.125 + for (__tmp = (unsigned char*)(__real_p + 1); __tmp < (unsigned char*)__p; ++__tmp) {
1.126 + _STLP_VERBOSE_ASSERT(*__tmp == __shred_byte, _StlMsg_DBA_UNDERRUN)
1.127 + }
1.128 +
1.129 + size_t __real_n = __n + __extra_before_chunk() + __extra_after_chunk();
1.130 +
1.131 + for (__tmp= ((unsigned char*)__p) + __n * sizeof(value_type);
1.132 + __tmp < ((unsigned char*)__real_p) + __real_n ; ++__tmp) {
1.133 + _STLP_VERBOSE_ASSERT(*__tmp == __shred_byte, _StlMsg_DBA_OVERRUN)
1.134 + }
1.135 +
1.136 // that may be unfortunate, just in case
1.137 - __real_p->__magic=__deleted_magic;
1.138 - memset((char*)__p, __shred_byte, __n*sizeof(value_type));
1.139 + __real_p->__magic = __deleted_magic;
1.140 + memset((char*)__p, __shred_byte, __n * sizeof(value_type));
1.141 __allocator_type::deallocate(__real_p, __real_n);
1.142 }
1.143
1.144 -#ifndef _STLP_NO_NODE_ALLOC
1.145 -
1.146 -// # ifdef _STLP_THREADS
1.147 -
1.148 -template <bool __threads, int __inst>
1.149 -class _Node_Alloc_Lock {
1.150 -public:
1.151 - _Node_Alloc_Lock() {
1.152 -
1.153 -# ifdef _STLP_SGI_THREADS
1.154 - if (__threads && __us_rsthread_malloc)
1.155 -# else /* !_STLP_SGI_THREADS */
1.156 - if (__threads)
1.157 -# endif
1.158 - _S_lock._M_acquire_lock();
1.159 - }
1.160 -
1.161 - ~_Node_Alloc_Lock() {
1.162 -# ifdef _STLP_SGI_THREADS
1.163 - if (__threads && __us_rsthread_malloc)
1.164 -# else /* !_STLP_SGI_THREADS */
1.165 - if (__threads)
1.166 -# endif
1.167 - _S_lock._M_release_lock();
1.168 - }
1.169 -
1.170 - static _STLP_STATIC_MUTEX _S_lock;
1.171 -};
1.172 -
1.173 -// # endif /* _STLP_THREADS */
1.174 -
1.175 -
1.176 -template <bool __threads, int __inst>
1.177 -void* _STLP_CALL
1.178 -__node_alloc<__threads, __inst>::_M_allocate(size_t __n) {
1.179 - void* __r;
1.180 - _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
1.181 - // # ifdef _STLP_THREADS
1.182 - /*REFERENCED*/
1.183 - _Node_Alloc_Lock<__threads, __inst> __lock_instance;
1.184 - // # endif
1.185 - // Acquire the lock here with a constructor call.
1.186 - // This ensures that it is released in exit or during stack
1.187 - // unwinding.
1.188 - if ( (__r = *__my_free_list) != 0 ) {
1.189 - *__my_free_list = ((_Obj*)__r) -> _M_free_list_link;
1.190 - } else {
1.191 - __r = _S_refill(__n);
1.192 - }
1.193 - // lock is released here
1.194 - return __r;
1.195 -}
1.196 -
1.197 -template <bool __threads, int __inst>
1.198 -void _STLP_CALL
1.199 -__node_alloc<__threads, __inst>::_M_deallocate(void *__p, size_t __n) {
1.200 - _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
1.201 - // # ifdef _STLP_THREADS
1.202 - /*REFERENCED*/
1.203 - _Node_Alloc_Lock<__threads, __inst> __lock_instance;
1.204 - // # endif /* _STLP_THREADS */
1.205 - // acquire lock
1.206 - ((_Obj *)__p) -> _M_free_list_link = *__my_free_list;
1.207 - *__my_free_list = (_Obj *)__p;
1.208 - // lock is released here
1.209 -}
1.210 -
1.211 -/* We allocate memory in large chunks in order to avoid fragmenting */
1.212 -/* the malloc heap too much. */
1.213 -/* We assume that size is properly aligned. */
1.214 -/* We hold the allocation lock. */
1.215 -template <bool __threads, int __inst>
1.216 -char* _STLP_CALL
1.217 -__node_alloc<__threads, __inst>::_S_chunk_alloc(size_t _p_size,
1.218 - int& __nobjs)
1.219 -{
1.220 - char* __result;
1.221 - size_t __total_bytes = _p_size * __nobjs;
1.222 - size_t __bytes_left = _S_end_free - _S_start_free;
1.223 -
1.224 - if (__bytes_left >= __total_bytes) {
1.225 - __result = _S_start_free;
1.226 - _S_start_free += __total_bytes;
1.227 - return(__result);
1.228 - } else if (__bytes_left >= _p_size) {
1.229 - __nobjs = (int)(__bytes_left/_p_size);
1.230 - __total_bytes = _p_size * __nobjs;
1.231 - __result = _S_start_free;
1.232 - _S_start_free += __total_bytes;
1.233 - return(__result);
1.234 - } else {
1.235 - size_t __bytes_to_get =
1.236 - 2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
1.237 - // Try to make use of the left-over piece.
1.238 - if (__bytes_left > 0) {
1.239 - _Obj* _STLP_VOLATILE* __my_free_list =
1.240 - _S_free_list + _S_FREELIST_INDEX(__bytes_left);
1.241 -
1.242 - ((_Obj*)_S_start_free) -> _M_free_list_link = *__my_free_list;
1.243 - *__my_free_list = (_Obj*)_S_start_free;
1.244 - }
1.245 - _S_start_free = (char*)__stlp_chunk_malloc(__bytes_to_get);
1.246 - if (0 == _S_start_free) {
1.247 - size_t __i;
1.248 - _Obj* _STLP_VOLATILE* __my_free_list;
1.249 - _Obj* __p;
1.250 - // Try to make do with what we have. That can't
1.251 - // hurt. We do not try smaller requests, since that tends
1.252 - // to result in disaster on multi-process machines.
1.253 - for (__i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
1.254 - __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
1.255 - __p = *__my_free_list;
1.256 - if (0 != __p) {
1.257 - *__my_free_list = __p -> _M_free_list_link;
1.258 - _S_start_free = (char*)__p;
1.259 - _S_end_free = _S_start_free + __i;
1.260 - return(_S_chunk_alloc(_p_size, __nobjs));
1.261 - // Any leftover piece will eventually make it to the
1.262 - // right free list.
1.263 - }
1.264 - }
1.265 - _S_end_free = 0; // In case of exception.
1.266 - _S_start_free = (char*)__stlp_chunk_malloc(__bytes_to_get);
1.267 - /*
1.268 - (char*)malloc_alloc::allocate(__bytes_to_get);
1.269 - */
1.270 -
1.271 - // This should either throw an
1.272 - // exception or remedy the situation. Thus we assume it
1.273 - // succeeded.
1.274 - }
1.275 - _S_heap_size += __bytes_to_get;
1.276 - _S_end_free = _S_start_free + __bytes_to_get;
1.277 - return(_S_chunk_alloc(_p_size, __nobjs));
1.278 - }
1.279 -}
1.280 -
1.281 -
1.282 -/* Returns an object of size __n, and optionally adds to size __n free list.*/
1.283 -/* We assume that __n is properly aligned. */
1.284 -/* We hold the allocation lock. */
1.285 -template <bool __threads, int __inst>
1.286 -void* _STLP_CALL
1.287 -__node_alloc<__threads, __inst>::_S_refill(size_t __n)
1.288 -{
1.289 - int __nobjs = 20;
1.290 - __n = _S_round_up(__n);
1.291 - char* __chunk = _S_chunk_alloc(__n, __nobjs);
1.292 - _Obj* _STLP_VOLATILE* __my_free_list;
1.293 - _Obj* __result;
1.294 - _Obj* __current_obj;
1.295 - _Obj* __next_obj;
1.296 - int __i;
1.297 -
1.298 - if (1 == __nobjs) return(__chunk);
1.299 - __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
1.300 -
1.301 - /* Build free list in chunk */
1.302 - __result = (_Obj*)__chunk;
1.303 - *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
1.304 - for (__i = 1; ; __i++) {
1.305 - __current_obj = __next_obj;
1.306 - __next_obj = (_Obj*)((char*)__next_obj + __n);
1.307 - if (__nobjs - 1 == __i) {
1.308 - __current_obj -> _M_free_list_link = 0;
1.309 - break;
1.310 - } else {
1.311 - __current_obj -> _M_free_list_link = __next_obj;
1.312 - }
1.313 - }
1.314 - return(__result);
1.315 -}
1.316 -
1.317 -# if ( _STLP_STATIC_TEMPLATE_DATA > 0 )
1.318 -// malloc_alloc out-of-memory handling
1.319 -template <int __inst>
1.320 -__oom_handler_type __malloc_alloc<__inst>::__oom_handler=(__oom_handler_type)0 ;
1.321 -
1.322 -#ifdef _STLP_THREADS
1.323 - template <bool __threads, int __inst>
1.324 - _STLP_STATIC_MUTEX
1.325 - _Node_Alloc_Lock<__threads, __inst>::_S_lock _STLP_MUTEX_INITIALIZER;
1.326 -#endif
1.327 -
1.328 -template <bool __threads, int __inst>
1.329 -_Node_alloc_obj * _STLP_VOLATILE
1.330 -__node_alloc<__threads, __inst>::_S_free_list[_STLP_NFREELISTS]
1.331 -= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
1.332 -// The 16 zeros are necessary to make version 4.1 of the SunPro
1.333 -// compiler happy. Otherwise it appears to allocate too little
1.334 -// space for the array.
1.335 -
1.336 -template <bool __threads, int __inst>
1.337 -char *__node_alloc<__threads, __inst>::_S_start_free = 0;
1.338 -
1.339 -template <bool __threads, int __inst>
1.340 -char *__node_alloc<__threads, __inst>::_S_end_free = 0;
1.341 -
1.342 -template <bool __threads, int __inst>
1.343 -size_t __node_alloc<__threads, __inst>::_S_heap_size = 0;
1.344 -
1.345 -
1.346 -# else /* ( _STLP_STATIC_TEMPLATE_DATA > 0 ) */
1.347 -
1.348 -__DECLARE_INSTANCE(__oom_handler_type, __malloc_alloc<0>::__oom_handler, =0);
1.349 -
1.350 -# define _STLP_ALLOC_NOTHREADS __node_alloc<false, 0>
1.351 -# define _STLP_ALLOC_THREADS __node_alloc<true, 0>
1.352 -# define _STLP_ALLOC_NOTHREADS_LOCK _Node_Alloc_Lock<false, 0>
1.353 -# define _STLP_ALLOC_THREADS_LOCK _Node_Alloc_Lock<true, 0>
1.354 -
1.355 -__DECLARE_INSTANCE(char *, _STLP_ALLOC_NOTHREADS::_S_start_free,=0);
1.356 -__DECLARE_INSTANCE(char *, _STLP_ALLOC_NOTHREADS::_S_end_free,=0);
1.357 -__DECLARE_INSTANCE(size_t, _STLP_ALLOC_NOTHREADS::_S_heap_size,=0);
1.358 -__DECLARE_INSTANCE(_Node_alloc_obj * _STLP_VOLATILE,
1.359 - _STLP_ALLOC_NOTHREADS::_S_free_list[_STLP_NFREELISTS],
1.360 - ={0});
1.361 -__DECLARE_INSTANCE(char *, _STLP_ALLOC_THREADS::_S_start_free,=0);
1.362 -__DECLARE_INSTANCE(char *, _STLP_ALLOC_THREADS::_S_end_free,=0);
1.363 -__DECLARE_INSTANCE(size_t, _STLP_ALLOC_THREADS::_S_heap_size,=0);
1.364 -__DECLARE_INSTANCE(_Node_alloc_obj * _STLP_VOLATILE,
1.365 - _STLP_ALLOC_THREADS::_S_free_list[_STLP_NFREELISTS],
1.366 - ={0});
1.367 -// # ifdef _STLP_THREADS
1.368 -__DECLARE_INSTANCE(_STLP_STATIC_MUTEX,
1.369 - _STLP_ALLOC_NOTHREADS_LOCK::_S_lock,
1.370 - _STLP_MUTEX_INITIALIZER);
1.371 -__DECLARE_INSTANCE(_STLP_STATIC_MUTEX,
1.372 - _STLP_ALLOC_THREADS_LOCK::_S_lock,
1.373 - _STLP_MUTEX_INITIALIZER);
1.374 -// # endif
1.375 -
1.376 -# undef _STLP_ALLOC_THREADS
1.377 -# undef _STLP_ALLOC_NOTHREADS
1.378 -
1.379 -# endif /* _STLP_STATIC_TEMPLATE_DATA */
1.380 -
1.381 -#endif
1.382 -
1.383 _STLP_END_NAMESPACE
1.384
1.385 -# undef _S_FREELIST_INDEX
1.386 -
1.387 -# endif /* _STLP_EXPOSE_GLOBALS_IMPLEMENTATION */
1.388 -
1.389 #endif /* _STLP_ALLOC_C */
1.390
1.391 // Local Variables: