--- a/epoc32/include/stdapis/stlport/stl/_pthread_alloc.c	Tue Nov 24 13:55:44 2009 +0000
+++ b/epoc32/include/stdapis/stlport/stl/_pthread_alloc.c	Tue Mar 16 16:12:26 2010 +0000
@@ -1,1 +1,283 @@
-_pthread_alloc.c
+/*
+ *
+ * Copyright (c) 1996,1997
+ * Silicon Graphics Computer Systems, Inc.
+ *
+ * Copyright (c) 1997
+ * Moscow Center for SPARC Technology
+ *
+ * Copyright (c) 1999
+ * Boris Fomitchev
+ *
+ * This material is provided "as is", with absolutely no warranty expressed
+ * or implied. Any use is at your own risk.
+ *
+ * Permission to use or copy this software for any purpose is hereby granted
+ * without fee, provided the above notices are retained on all copies.
+ * Permission to modify the code and to distribute modified code is granted,
+ * provided the above notices are retained, and a notice that the code was
+ * modified is included with the above copyright notice.
+ *
+ */
+#ifndef _STLP_PTHREAD_ALLOC_C
+#define _STLP_PTHREAD_ALLOC_C
+
+#ifdef __WATCOMC__
+#pragma warning 13 9
+#pragma warning 367 9
+#pragma warning 368 9
+#endif
+
+#ifndef _STLP_PTHREAD_ALLOC_H
+# include <stl/_pthread_alloc.h>
+#endif
+
+# if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION)
+
+# include <cerrno>
+
+_STLP_BEGIN_NAMESPACE
+
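+// Key destructor: invoked by the pthreads runtime when a thread exits;
+// it returns that thread's state object to the shared free list.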
+template <size_t _Max_size>
+void _Pthread_alloc<_Max_size>::_S_destructor(void * __instance)
+{
+    _M_lock __lock_instance; // Need to acquire lock here.
+    _Pthread_alloc_per_thread_state<_Max_size>* __s =
+        (_Pthread_alloc_per_thread_state<_Max_size> *)__instance;
+    __s -> __next = _S_free_per_thread_states;
+    _S_free_per_thread_states = __s;
+}
+
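+// Reuse a state object retired by an exited thread when possible;
+// otherwise default-construct a fresh one.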
+template <size_t _Max_size>
+_Pthread_alloc_per_thread_state<_Max_size> *
+_Pthread_alloc<_Max_size>::_S_new_per_thread_state()
+{
+    /* lock already held here. */
+    if (0 != _S_free_per_thread_states) {
+        _Pthread_alloc_per_thread_state<_Max_size> *__result =
+            _S_free_per_thread_states;
+        _S_free_per_thread_states = _S_free_per_thread_states -> __next;
+        return __result;
+    } else {
+        return (_Pthread_alloc_per_thread_state<_Max_size>*) \
+            _STLP_PLACEMENT_NEW (_Pthread_alloc_per_thread_state<_Max_size>);
+    }
+}
+
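+// Fetch the calling thread's state via pthread TLS, creating the key
+// on the first call ever and a state object on first use per thread.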
+template <size_t _Max_size>
+_Pthread_alloc_per_thread_state<_Max_size> *
+_Pthread_alloc<_Max_size>::_S_get_per_thread_state()
+{
+
+    int __ret_code;
+    __state_type* __result;
+
+    if (_S_key_initialized && (__result = (__state_type*) pthread_getspecific(_S_key)))
+        return __result;
+
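+    // Slow path: acquire the lock, create the key on the very first
+    // call, then install a fresh state object for this thread.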
+    /*REFERENCED*/
+    _M_lock __lock_instance; // Need to acquire lock here.
+    if (!_S_key_initialized) {
+        if (pthread_key_create(&_S_key, _S_destructor)) {
+            __THROW_BAD_ALLOC; // failed
+        }
+        _S_key_initialized = true;
+    }
+
+    __result = _S_new_per_thread_state();
+    __ret_code = pthread_setspecific(_S_key, __result);
+    if (__ret_code) {
+        if (__ret_code == ENOMEM) {
+            __THROW_BAD_ALLOC;
+        } else {
+            // EINVAL
+            _STLP_ABORT();
+        }
+    }
+    return __result;
+}
+
+/* We allocate memory in large chunks in order to avoid fragmenting */
+/* the malloc heap too much. */
+/* We assume that size is properly aligned. */
+template <size_t _Max_size>
+char *_Pthread_alloc<_Max_size>
+::_S_chunk_alloc(size_t __p_size, size_t &__nobjs)
+{
+    {
+        char * __result;
+        size_t __total_bytes;
+        size_t __bytes_left;
+        /*REFERENCED*/
+        _M_lock __lock_instance; // Acquire lock for this routine
+
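+        // Three cases: the current chunk can satisfy the whole request,
+        // it can supply at least one object, or it is exhausted and a
+        // new chunk must be obtained from the system.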
+        __total_bytes = __p_size * __nobjs;
+        __bytes_left = _S_end_free - _S_start_free;
+        if (__bytes_left >= __total_bytes) {
+            __result = _S_start_free;
+            _S_start_free += __total_bytes;
+            return(__result);
+        } else if (__bytes_left >= __p_size) {
+            __nobjs = __bytes_left/__p_size;
+            __total_bytes = __p_size * __nobjs;
+            __result = _S_start_free;
+            _S_start_free += __total_bytes;
+            return(__result);
+        } else {
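+            // Grow geometrically: twice the request plus a sixteenth of
+            // the heap allocated so far, rounded up to the alignment.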
+            size_t __bytes_to_get =
+                2 * __total_bytes + _S_round_up(_S_heap_size >> 4);
+            // Try to make use of the left-over piece.
+            if (__bytes_left > 0) {
+                _Pthread_alloc_per_thread_state<_Max_size>* __a =
+                    (_Pthread_alloc_per_thread_state<_Max_size>*)
+                    pthread_getspecific(_S_key);
+                __obj * volatile * __my_free_list =
+                    __a->__free_list + _S_freelist_index(__bytes_left);
+
+                ((__obj *)_S_start_free) -> __free_list_link = *__my_free_list;
+                *__my_free_list = (__obj *)_S_start_free;
+            }
+# ifdef _SGI_SOURCE
+            // Try to get memory that's aligned on something like a
+            // cache line boundary, so as to avoid parceling out
+            // parts of the same line to different threads and thus
+            // possibly different processors.
+            {
+                const int __cache_line_size = 128; // probable upper bound
+                __bytes_to_get &= ~(__cache_line_size-1);
+                _S_start_free = (char *)memalign(__cache_line_size, __bytes_to_get);
+                if (0 == _S_start_free) {
+                    _S_start_free = (char *)__malloc_alloc<0>::allocate(__bytes_to_get);
+                }
+            }
+# else /* !_SGI_SOURCE */
+            _S_start_free = (char *)__malloc_alloc<0>::allocate(__bytes_to_get);
+# endif
+            _S_heap_size += __bytes_to_get;
+            _S_end_free = _S_start_free + __bytes_to_get;
+        }
+    }
+    // lock is released here
+    return(_S_chunk_alloc(__p_size, __nobjs));
+}
+
+
+/* Returns an object of size n, and optionally adds to size n free list.*/
+/* We assume that n is properly aligned. */
+/* We hold the allocation lock. */
+template <size_t _Max_size>
+void *_Pthread_alloc_per_thread_state<_Max_size>
+::_M_refill(size_t __n)
+{
+    size_t __nobjs = 128;
+    char * __chunk =
+        _Pthread_alloc<_Max_size>::_S_chunk_alloc(__n, __nobjs);
+    __obj * volatile * __my_free_list;
+    __obj * __result;
+    __obj * __current_obj, * __next_obj;
+    int __i;
+
+    if (1 == __nobjs) {
+        return(__chunk);
+    }
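+    // More than one object obtained: hand back the first and thread
+    // the remainder onto this size class's free list.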
+    __my_free_list = __free_list
+        + _Pthread_alloc<_Max_size>::_S_freelist_index(__n);
+
+    /* Build free list in chunk */
+    __result = (__obj *)__chunk;
+    *__my_free_list = __next_obj = (__obj *)(__chunk + __n);
+    for (__i = 1; ; __i++) {
+        __current_obj = __next_obj;
+        __next_obj = (__obj *)((char *)__next_obj + __n);
+        if (__nobjs - 1 == __i) {
+            __current_obj -> __free_list_link = 0;
+            break;
+        } else {
+            __current_obj -> __free_list_link = __next_obj;
+        }
+    }
+    return(__result);
+}
+
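+// Resize __p. Requests where both old and new sizes exceed _Max_size
+// were satisfied directly by malloc, so they can be forwarded to
+// realloc(); anything else is copied into a freshly allocated block.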
+template <size_t _Max_size>
+void *_Pthread_alloc<_Max_size>
+::reallocate(void *__p, size_t __old_sz, size_t __new_sz)
+{
+    void * __result;
+    size_t __copy_sz;
+
+    if (__old_sz > _Max_size
+        && __new_sz > _Max_size) {
+        return(realloc(__p, __new_sz));
+    }
+    if (_S_round_up(__old_sz) == _S_round_up(__new_sz)) return(__p);
+    __result = allocate(__new_sz);
+    __copy_sz = __new_sz > __old_sz? __old_sz : __new_sz;
+    memcpy(__result, __p, __copy_sz);
+    deallocate(__p, __old_sz);
+    return(__result);
+}
+
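+// Static data member definitions: plain definitions when the compiler
+// supports static template data, the __DECLARE_INSTANCE macro otherwise.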
+#if defined (_STLP_STATIC_TEMPLATE_DATA) && (_STLP_STATIC_TEMPLATE_DATA > 0)
+
+template <size_t _Max_size>
+_Pthread_alloc_per_thread_state<_Max_size> * _Pthread_alloc<_Max_size>::_S_free_per_thread_states = 0;
+
+template <size_t _Max_size>
+pthread_key_t _Pthread_alloc<_Max_size>::_S_key = 0;
+
+template <size_t _Max_size>
+bool _Pthread_alloc<_Max_size>::_S_key_initialized = false;
+
+template <size_t _Max_size>
+_STLP_mutex_base _Pthread_alloc<_Max_size>::_S_chunk_allocator_lock _STLP_MUTEX_INITIALIZER;
+
+template <size_t _Max_size>
+char *_Pthread_alloc<_Max_size>::_S_start_free = 0;
+
+template <size_t _Max_size>
+char *_Pthread_alloc<_Max_size>::_S_end_free = 0;
+
+template <size_t _Max_size>
+size_t _Pthread_alloc<_Max_size>::_S_heap_size = 0;
+
+# else
+
+__DECLARE_INSTANCE(template <size_t _Max_size> _Pthread_alloc_per_thread_state<_Max_size> *, _Pthread_alloc<_Max_size>::_S_free_per_thread_states, = 0);
+__DECLARE_INSTANCE(template <size_t _Max_size> pthread_key_t, _Pthread_alloc<_Max_size>::_S_key, = 0);
+__DECLARE_INSTANCE(template <size_t _Max_size> bool, _Pthread_alloc<_Max_size>::_S_key_initialized, = false);
+__DECLARE_INSTANCE(template <size_t _Max_size> char *, _Pthread_alloc<_Max_size>::_S_start_free, = 0);
+__DECLARE_INSTANCE(template <size_t _Max_size> char *, _Pthread_alloc<_Max_size>::_S_end_free, = 0);
+__DECLARE_INSTANCE(template <size_t _Max_size> size_t, _Pthread_alloc<_Max_size>::_S_heap_size, = 0);
+
+# endif
+
+_STLP_END_NAMESPACE
+
+# endif /* _STLP_EXPOSE_GLOBALS_IMPLEMENTATION */
+
+#endif /* _STLP_PTHREAD_ALLOC_C */
+
+// Local Variables:
+// mode:C++
+// End: