--- a/epoc32/include/stdapis/stlport/stl/_alloc.c	Tue Nov 24 13:55:44 2009 +0000
+++ b/epoc32/include/stdapis/stlport/stl/_alloc.c	Tue Mar 16 16:12:26 2010 +0000
@@ -1,1 +1,402 @@
-_alloc.c
+/*
+ *
+ * Copyright (c) 1996,1997
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Copyright (c) 1997
+ * Moscow Center for SPARC Technology
+ *
+ * Copyright (c) 1999
+ * Boris Fomitchev
+ *
+ * This material is provided "as is", with absolutely no warranty expressed
+ * or implied. Any use is at your own risk.
+ *
+ * Permission to use or copy this software for any purpose is hereby granted
+ * without fee, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+#ifndef _STLP_ALLOC_C
+#define _STLP_ALLOC_C
+
+#ifdef __WATCOMC__
+#pragma warning 13 9
+#pragma warning 367 9
+#pragma warning 368 9
+#endif
+
+#ifndef _STLP_INTERNAL_ALLOC_H
+# include <stl/_alloc.h>
+#endif
+
+# if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION)
+
+# ifdef _STLP_SGI_THREADS
+  // We test whether threads are in use before locking.
+  // Perhaps this should be moved into stl_threads.h, but that
+  // probably makes it harder to avoid the procedure call when
+  // it isn't needed.
+  extern "C" {
+    extern int __us_rsthread_malloc;
+  }
+# endif
+
+
+// Specialised debug form of malloc which does not report "false"
+// memory leaks when run with the debug CRT libraries.
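+// Note: _STLP_CHECK_NULL_ALLOC (from stl/_alloc.h) is expected to both
+// test the pointer for 0 and return it, which is why the one-line
+// bodies below have no explicit return statement.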
+#if defined(_STLP_MSVC) && (_STLP_MSVC>=1020 && defined(_STLP_DEBUG_ALLOC)) && ! defined (_STLP_WINCE)
+# include <crtdbg.h>
+inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_malloc_dbg(__bytes, _CRT_BLOCK, __FILE__, __LINE__)); }
+#else // !_DEBUG
+# ifdef _STLP_NODE_ALLOC_USE_MALLOC
+# include <cstdlib>
+inline void* __stlp_chunk_malloc(size_t __bytes) { _STLP_CHECK_NULL_ALLOC(_STLP_VENDOR_CSTD::malloc(__bytes)); }
+# else
+inline void* __stlp_chunk_malloc(size_t __bytes) { return _STLP_STD::__stl_new(__bytes); }
+# endif
+#endif // !_DEBUG
+
+
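+// Maps a request of __bytes to its free-list slot. With the usual
+// _ALIGN == 8 / _ALIGN_SHIFT == 3 this sends requests of 1-8 bytes to
+// list 0, 9-16 bytes to list 1, and so on up to _MAX_BYTES.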
+#define _S_FREELIST_INDEX(__bytes) (((__bytes)-size_t(1))>>(int)_ALIGN_SHIFT)
+
+_STLP_BEGIN_NAMESPACE
+
+#ifndef _STLP_NO_NODE_ALLOC
+
+template <int __inst>
+void * _STLP_CALL __malloc_alloc<__inst>::_S_oom_malloc(size_t __n)
+{
+  __oom_handler_type __my_malloc_handler;
+  void * __result;
+
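+  // Retry loop: each pass invokes the user-installed out-of-memory
+  // handler (expected to release some memory) and retries malloc;
+  // if no handler is installed we bail out via __THROW_BAD_ALLOC.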
+  for (;;) {
+    __my_malloc_handler = __oom_handler;
+    if (0 == __my_malloc_handler) { __THROW_BAD_ALLOC; }
+    (*__my_malloc_handler)();
+    __result = malloc(__n);
+    if (__result) return(__result);
+  }
+#if defined(_STLP_NEED_UNREACHABLE_RETURN)
+  return 0;
+#endif
+
+}
+
+#endif
+
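+// Debug allocation layout: a header recording magic, element size and
+// count, then a front guard zone, the user block, and a back guard
+// zone. The whole chunk is filled with __shred_byte so deallocate()
+// can detect buffer underruns and overruns.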
+template <class _Alloc>
+void * _STLP_CALL __debug_alloc<_Alloc>::allocate(size_t __n) {
+  size_t __real_n = __n + __extra_before_chunk() + __extra_after_chunk();
+  __alloc_header *__result = (__alloc_header *)__allocator_type::allocate(__real_n);
+  memset((char*)__result, __shred_byte, __real_n*sizeof(value_type));
+  __result->__magic = __magic;
+  __result->__type_size = sizeof(value_type);
+  __result->_M_size = (_STLP_UINT32_T)__n;
+  return ((char*)__result) + (long)__extra_before;
+}
+
+template <class _Alloc>
+void _STLP_CALL
+__debug_alloc<_Alloc>::deallocate(void *__p, size_t __n) {
+  __alloc_header * __real_p = (__alloc_header*)((char *)__p - (long)__extra_before);
+  // check integrity
+  _STLP_VERBOSE_ASSERT(__real_p->__magic != __deleted_magic, _StlMsg_DBA_DELETED_TWICE)
+  _STLP_VERBOSE_ASSERT(__real_p->__magic == __magic, _StlMsg_DBA_NEVER_ALLOCATED)
+  _STLP_VERBOSE_ASSERT(__real_p->__type_size == 1, _StlMsg_DBA_TYPE_MISMATCH)
+  _STLP_VERBOSE_ASSERT(__real_p->_M_size == __n, _StlMsg_DBA_SIZE_MISMATCH)
+  // check pads on both sides
+  unsigned char* __tmp;
+  for (__tmp = (unsigned char*)(__real_p+1); __tmp < (unsigned char*)__p; __tmp++) {
+    _STLP_VERBOSE_ASSERT(*__tmp==__shred_byte, _StlMsg_DBA_UNDERRUN)
+  }
+
+  size_t __real_n = __n + __extra_before_chunk() + __extra_after_chunk();
+
+  for (__tmp = ((unsigned char*)__p) + __n*sizeof(value_type);
+       __tmp < ((unsigned char*)__real_p) + __real_n; __tmp++) {
+    _STLP_VERBOSE_ASSERT(*__tmp==__shred_byte, _StlMsg_DBA_OVERRUN)
+  }
+
+  // mark the block as freed so a second deallocation will be caught
+  __real_p->__magic = __deleted_magic;
+  memset((char*)__p, __shred_byte, __n*sizeof(value_type));
+  __allocator_type::deallocate(__real_p, __real_n);
+}
+
+#ifndef _STLP_NO_NODE_ALLOC
+
+// # ifdef _STLP_THREADS
+
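+// Scoped lock (RAII): the constructor acquires the shared free-list
+// mutex and the destructor releases it, so the lock is dropped even
+// if an exception unwinds the stack. When __threads is false both
+// branches are dead code and the guard is effectively a no-op.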
+template <bool __threads, int __inst>
+class _Node_Alloc_Lock {
+public:
+  _Node_Alloc_Lock() {
+
+# ifdef _STLP_SGI_THREADS
+    if (__threads && __us_rsthread_malloc)
+# else /* !_STLP_SGI_THREADS */
+    if (__threads)
+# endif
+      _S_lock._M_acquire_lock();
+  }
+
+  ~_Node_Alloc_Lock() {
+# ifdef _STLP_SGI_THREADS
+    if (__threads && __us_rsthread_malloc)
+# else /* !_STLP_SGI_THREADS */
+    if (__threads)
+# endif
+      _S_lock._M_release_lock();
+  }
+
+  static _STLP_STATIC_MUTEX _S_lock;
+};
+
+// # endif /* _STLP_THREADS */
+
+
+template <bool __threads, int __inst>
+void* _STLP_CALL
+__node_alloc<__threads, __inst>::_M_allocate(size_t __n) {
+  void* __r;
+  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
+  // # ifdef _STLP_THREADS
+  /*REFERENCED*/
+  _Node_Alloc_Lock<__threads, __inst> __lock_instance;
+  // # endif
+  // Acquire the lock here with a constructor call.
+  // This ensures that it is released on exit or during stack
+  // unwinding.
+  if ((__r = *__my_free_list) != 0) {
+    *__my_free_list = ((_Obj*)__r)->_M_free_list_link;
+  } else {
+    __r = _S_refill(__n);
+  }
+  // lock is released here
+  return __r;
+}
+
+template <bool __threads, int __inst>
+void _STLP_CALL
+__node_alloc<__threads, __inst>::_M_deallocate(void *__p, size_t __n) {
+  _Obj * _STLP_VOLATILE * __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
+  // # ifdef _STLP_THREADS
+  /*REFERENCED*/
+  _Node_Alloc_Lock<__threads, __inst> __lock_instance;
+  // # endif /* _STLP_THREADS */
+  // acquire lock
+  ((_Obj *)__p)->_M_free_list_link = *__my_free_list;
+  *__my_free_list = (_Obj *)__p;
+  // lock is released here
+}
+
+/* We allocate memory in large chunks in order to avoid fragmenting */
+/* the malloc heap too much. */
+/* We assume that size is properly aligned. */
+/* We hold the allocation lock. */
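+/* Growth policy: when the pool cannot supply even one object, we ask */
+/* malloc for twice the request plus 1/16th of the heap obtained so */
+/* far, so chunk size grows with total usage. */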
+template <bool __threads, int __inst>
+char* _STLP_CALL
+__node_alloc<__threads, __inst>::_S_chunk_alloc(size_t _p_size,
+                                                int& __nobjs)
+{
+  char* __result;
+  size_t __total_bytes = _p_size * __nobjs;
+  size_t __bytes_left = _S_end_free - _S_start_free;
+
+  if (__bytes_left >= __total_bytes) {
+    __result = _S_start_free;
+    _S_start_free += __total_bytes;
+    return(__result);
+  } else if (__bytes_left >= _p_size) {
+    __nobjs = (int)(__bytes_left/_p_size);
+    __total_bytes = _p_size * __nobjs;
+    __result = _S_start_free;
+    _S_start_free += __total_bytes;
+    return(__result);
+  } else {
+    size_t __bytes_to_get =
+      2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
+    // Try to make use of the left-over piece.
+    if (__bytes_left > 0) {
+      _Obj* _STLP_VOLATILE* __my_free_list =
+        _S_free_list + _S_FREELIST_INDEX(__bytes_left);
+
+      ((_Obj*)_S_start_free)->_M_free_list_link = *__my_free_list;
+      *__my_free_list = (_Obj*)_S_start_free;
+    }
+    _S_start_free = (char*)__stlp_chunk_malloc(__bytes_to_get);
+    if (0 == _S_start_free) {
+      size_t __i;
+      _Obj* _STLP_VOLATILE* __my_free_list;
+      _Obj* __p;
+      // Try to make do with what we have. That can't
+      // hurt. We do not try smaller requests, since that tends
+      // to result in disaster on multi-process machines.
+      for (__i = _p_size; __i <= (size_t)_MAX_BYTES; __i += (size_t)_ALIGN) {
+        __my_free_list = _S_free_list + _S_FREELIST_INDEX(__i);
+        __p = *__my_free_list;
+        if (0 != __p) {
+          *__my_free_list = __p->_M_free_list_link;
+          _S_start_free = (char*)__p;
+          _S_end_free = _S_start_free + __i;
+          return(_S_chunk_alloc(_p_size, __nobjs));
+          // Any leftover piece will eventually make it to the
+          // right free list.
+        }
+      }
+      _S_end_free = 0; // In case of exception.
+      _S_start_free = (char*)__stlp_chunk_malloc(__bytes_to_get);
+      /*
+      (char*)malloc_alloc::allocate(__bytes_to_get);
+      */
+
+      // This should either throw an exception
+      // or remedy the situation. Thus we assume
+      // it succeeded.
+    }
+    _S_heap_size += __bytes_to_get;
+    _S_end_free = _S_start_free + __bytes_to_get;
+    return(_S_chunk_alloc(_p_size, __nobjs));
+  }
+}
+
+
+/* Returns an object of size __n, and optionally adds nodes to the size-__n free list. */
+/* We assume that __n is properly aligned. */
+/* We hold the allocation lock. */
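+/* Typically carves the new chunk into 20 objects: the first is */
+/* handed to the caller and the rest go onto the free list. */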
+template <bool __threads, int __inst>
+void* _STLP_CALL
+__node_alloc<__threads, __inst>::_S_refill(size_t __n)
+{
+  int __nobjs = 20;
+  __n = _S_round_up(__n);
+  char* __chunk = _S_chunk_alloc(__n, __nobjs);
+  _Obj* _STLP_VOLATILE* __my_free_list;
+  _Obj* __result;
+  _Obj* __current_obj;
+  _Obj* __next_obj;
+  int __i;
+
+  if (1 == __nobjs) return(__chunk);
+  __my_free_list = _S_free_list + _S_FREELIST_INDEX(__n);
+
+  /* Build free list in chunk */
+  __result = (_Obj*)__chunk;
+  *__my_free_list = __next_obj = (_Obj*)(__chunk + __n);
+  for (__i = 1; ; __i++) {
+    __current_obj = __next_obj;
+    __next_obj = (_Obj*)((char*)__next_obj + __n);
+    if (__nobjs - 1 == __i) {
+      __current_obj->_M_free_list_link = 0;
+      break;
+    } else {
+      __current_obj->_M_free_list_link = __next_obj;
+    }
+  }
+  return(__result);
+}
+
+# if ( _STLP_STATIC_TEMPLATE_DATA > 0 )
+// malloc_alloc out-of-memory handling
+template <int __inst>
+__oom_handler_type __malloc_alloc<__inst>::__oom_handler = (__oom_handler_type)0;
+
+#ifdef _STLP_THREADS
+template <bool __threads, int __inst>
+_STLP_STATIC_MUTEX
+_Node_Alloc_Lock<__threads, __inst>::_S_lock _STLP_MUTEX_INITIALIZER;
+#endif
+
+template <bool __threads, int __inst>
+_Node_alloc_obj * _STLP_VOLATILE
+__node_alloc<__threads, __inst>::_S_free_list[_STLP_NFREELISTS]
+= {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+// The 16 zeros are necessary to make version 4.1 of the SunPro
+// compiler happy. Otherwise it appears to allocate too little
+// space for the array.
+
+template <bool __threads, int __inst>
+char *__node_alloc<__threads, __inst>::_S_start_free = 0;
+
+template <bool __threads, int __inst>
+char *__node_alloc<__threads, __inst>::_S_end_free = 0;
+
+template <bool __threads, int __inst>
+size_t __node_alloc<__threads, __inst>::_S_heap_size = 0;
+
+
+# else /* ( _STLP_STATIC_TEMPLATE_DATA > 0 ) */
+
+__DECLARE_INSTANCE(__oom_handler_type, __malloc_alloc<0>::__oom_handler, =0);
+
+# define _STLP_ALLOC_NOTHREADS __node_alloc<false, 0>
+# define _STLP_ALLOC_THREADS __node_alloc<true, 0>
+# define _STLP_ALLOC_NOTHREADS_LOCK _Node_Alloc_Lock<false, 0>
+# define _STLP_ALLOC_THREADS_LOCK _Node_Alloc_Lock<true, 0>
+
+__DECLARE_INSTANCE(char *, _STLP_ALLOC_NOTHREADS::_S_start_free, =0);
+__DECLARE_INSTANCE(char *, _STLP_ALLOC_NOTHREADS::_S_end_free, =0);
+__DECLARE_INSTANCE(size_t, _STLP_ALLOC_NOTHREADS::_S_heap_size, =0);
+__DECLARE_INSTANCE(_Node_alloc_obj * _STLP_VOLATILE,
+                   _STLP_ALLOC_NOTHREADS::_S_free_list[_STLP_NFREELISTS],
+                   ={0});
+__DECLARE_INSTANCE(char *, _STLP_ALLOC_THREADS::_S_start_free, =0);
+__DECLARE_INSTANCE(char *, _STLP_ALLOC_THREADS::_S_end_free, =0);
+__DECLARE_INSTANCE(size_t, _STLP_ALLOC_THREADS::_S_heap_size, =0);
+__DECLARE_INSTANCE(_Node_alloc_obj * _STLP_VOLATILE,
+                   _STLP_ALLOC_THREADS::_S_free_list[_STLP_NFREELISTS],
+                   ={0});
+// # ifdef _STLP_THREADS
+__DECLARE_INSTANCE(_STLP_STATIC_MUTEX,
+                   _STLP_ALLOC_NOTHREADS_LOCK::_S_lock,
+                   _STLP_MUTEX_INITIALIZER);
+__DECLARE_INSTANCE(_STLP_STATIC_MUTEX,
+                   _STLP_ALLOC_THREADS_LOCK::_S_lock,
+                   _STLP_MUTEX_INITIALIZER);
+// # endif
+
+# undef _STLP_ALLOC_THREADS
+# undef _STLP_ALLOC_NOTHREADS
+# undef _STLP_ALLOC_THREADS_LOCK
+# undef _STLP_ALLOC_NOTHREADS_LOCK
+
+# endif /* _STLP_STATIC_TEMPLATE_DATA */
+
+#endif
+
+_STLP_END_NAMESPACE
+
+# undef _S_FREELIST_INDEX
+
+# endif /* _STLP_EXPOSE_GLOBALS_IMPLEMENTATION */
+
+#endif /* _STLP_ALLOC_C */
+
+// Local Variables:
+// mode:C++
+// End: