1.1 --- a/epoc32/include/stdapis/stlportv5/stl/_pthread_alloc.h Wed Mar 31 12:27:01 2010 +0100
1.2 +++ b/epoc32/include/stdapis/stlportv5/stl/_pthread_alloc.h Wed Mar 31 12:33:34 2010 +0100
1.3 @@ -9,13 +9,13 @@
1.4 * Copyright (c) 1997
1.5 * Moscow Center for SPARC Technology
1.6 *
1.7 - * Copyright (c) 1999
1.8 + * Copyright (c) 1999
1.9 * Boris Fomitchev
1.10 *
1.11 * This material is provided "as is", with absolutely no warranty expressed
1.12 * or implied. Any use is at your own risk.
1.13 *
1.14 - * Permission to use or copy this software for any purpose is hereby granted
1.15 + * Permission to use or copy this software for any purpose is hereby granted
1.16 * without fee, provided the above notices are retained on all copies.
1.17 * Permission to modify the code and to distribute modified code is granted,
1.18 * provided the above notices are retained, and a notice that the code was
1.19 @@ -26,225 +26,75 @@
1.20 #ifndef _STLP_PTHREAD_ALLOC_H
1.21 #define _STLP_PTHREAD_ALLOC_H
1.22
1.23 -// Pthread-specific node allocator.
1.24 -// This is similar to the default allocator, except that free-list
1.25 -// information is kept separately for each thread, avoiding locking.
1.26 -// This should be reasonably fast even in the presence of threads.
1.27 -// The down side is that storage may not be well-utilized.
1.28 -// It is not an error to allocate memory in thread A and deallocate
1.29 -// it in thread B. But this effectively transfers ownership of the memory,
1.30 -// so that it can only be reallocated by thread B. Thus this can effectively
1.31 -// result in a storage leak if it's done on a regular basis.
1.32 -// It can also result in frequent sharing of
1.33 -// cache lines among processors, with potentially serious performance
1.34 -// consequences.
1.35 +/*
1.36 + * Pthread-specific node allocator.
1.37 + * This is similar to the default allocator, except that free-list
1.38 + * information is kept separately for each thread, avoiding locking.
1.39 + * This should be reasonably fast even in the presence of threads.
1.40 + * The down side is that storage may not be well-utilized.
1.41 + * It is not an error to allocate memory in thread A and deallocate
1.42 + * it in thread B. But this effectively transfers ownership of the memory,
1.43 + * so that it can only be reallocated by thread B. Thus this can effectively
1.44 + * result in a storage leak if it's done on a regular basis.
1.45 + * It can also result in frequent sharing of
1.46 + * cache lines among processors, with potentially serious performance
1.47 + * consequences.
1.48 + */
1.49
1.50 -#include <pthread.h>
1.51 +#if !defined (_STLP_PTHREADS)
1.52 +# error POSIX specific allocator implementation. Your system does not seem to \
1.53 +have this interface so please comment the _STLP_USE_PERTHREAD_ALLOC macro \
1.54 +or report to the STLport forum.
1.55 +#endif
1.56 +
1.57 +#if defined (_STLP_USE_NO_IOSTREAMS)
1.58 +# error You cannot use per thread allocator implementation without building \
1.59 +STLport libraries.
1.60 +#endif
1.61
1.62 #ifndef _STLP_INTERNAL_ALLOC_H
1.63 -#include <stl/_alloc.h>
1.64 -#endif
1.65 -
1.66 -#ifndef __RESTRICT
1.67 -# define __RESTRICT
1.68 +# include <stl/_alloc.h>
1.69 #endif
1.70
1.71 _STLP_BEGIN_NAMESPACE
1.72
1.73 -#define _STLP_DATA_ALIGNMENT 8
1.74 +_STLP_MOVE_TO_PRIV_NAMESPACE
1.75
1.76 -union _Pthread_alloc_obj {
1.77 - union _Pthread_alloc_obj * __free_list_link;
1.78 - char __client_data[_STLP_DATA_ALIGNMENT]; /* The client sees this. */
1.79 -};
1.80 -
1.81 -// Pthread allocators don't appear to the client to have meaningful
1.82 -// instances. We do in fact need to associate some state with each
1.83 -// thread. That state is represented by
1.84 -// _Pthread_alloc_per_thread_state<_Max_size>.
1.85 -
1.86 -template<size_t _Max_size>
1.87 -struct _Pthread_alloc_per_thread_state {
1.88 - typedef _Pthread_alloc_obj __obj;
1.89 - enum { _S_NFREELISTS = _Max_size/_STLP_DATA_ALIGNMENT };
1.90 -
1.91 - // Free list link for list of available per thread structures.
1.92 - // When one of these becomes available for reuse due to thread
1.93 - // termination, any objects in its free list remain associated
1.94 - // with it. The whole structure may then be used by a newly
1.95 - // created thread.
1.96 - _Pthread_alloc_per_thread_state() : __next(0)
1.97 - {
1.98 - memset((void *)__free_list, 0, (size_t)_S_NFREELISTS * sizeof(__obj *));
1.99 - }
1.100 - // Returns an object of size __n, and possibly adds to size n free list.
1.101 - void *_M_refill(size_t __n);
1.102 -
1.103 - _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
1.104 - _Pthread_alloc_per_thread_state<_Max_size> * __next;
1.105 - // this data member is only to be used by per_thread_allocator, which returns memory to the originating thread.
1.106 - _STLP_mutex _M_lock;
1.107 -
1.108 - };
1.109 +struct _Pthread_alloc_per_thread_state;
1.110
1.111 // Pthread-specific allocator.
1.112 -// The argument specifies the largest object size allocated from per-thread
1.113 -// free lists. Larger objects are allocated using malloc_alloc.
1.114 -// Max_size must be a power of 2.
1.115 -template < __DFL_NON_TYPE_PARAM(size_t, _Max_size, _MAX_BYTES) >
1.116 -class _Pthread_alloc {
1.117 -
1.118 +class _STLP_CLASS_DECLSPEC _Pthread_alloc {
1.119 public: // but only for internal use:
1.120 -
1.121 - typedef _Pthread_alloc_obj __obj;
1.122 - typedef _Pthread_alloc_per_thread_state<_Max_size> __state_type;
1.123 + typedef _Pthread_alloc_per_thread_state __state_type;
1.124 typedef char value_type;
1.125
1.126 - // Allocates a chunk for nobjs of size size. nobjs may be reduced
1.127 - // if it is inconvenient to allocate the requested number.
1.128 - static char *_S_chunk_alloc(size_t __size, size_t &__nobjs);
1.129 -
1.130 - enum {_S_ALIGN = _STLP_DATA_ALIGNMENT};
1.131 -
1.132 - static size_t _S_round_up(size_t __bytes) {
1.133 - return (((__bytes) + (int)_S_ALIGN-1) & ~((int)_S_ALIGN - 1));
1.134 - }
1.135 - static size_t _S_freelist_index(size_t __bytes) {
1.136 - return (((__bytes) + (int)_S_ALIGN-1)/(int)_S_ALIGN - 1);
1.137 - }
1.138 -
1.139 -private:
1.140 - // Chunk allocation state. And other shared state.
1.141 - // Protected by _S_chunk_allocator_lock.
1.142 - static _STLP_mutex_base _S_chunk_allocator_lock;
1.143 - static char *_S_start_free;
1.144 - static char *_S_end_free;
1.145 - static size_t _S_heap_size;
1.146 - static _Pthread_alloc_per_thread_state<_Max_size>* _S_free_per_thread_states;
1.147 - static pthread_key_t _S_key;
1.148 - static bool _S_key_initialized;
1.149 - // Pthread key under which per thread state is stored.
1.150 - // Allocator instances that are currently unclaimed by any thread.
1.151 - static void _S_destructor(void *instance);
1.152 - // Function to be called on thread exit to reclaim per thread
1.153 - // state.
1.154 - static _Pthread_alloc_per_thread_state<_Max_size> *_S_new_per_thread_state();
1.155 public:
1.156 - // Return a recycled or new per thread state.
1.157 - static _Pthread_alloc_per_thread_state<_Max_size> *_S_get_per_thread_state();
1.158 -private:
1.159 - // ensure that the current thread has an associated
1.160 - // per thread state.
1.161 - class _M_lock;
1.162 - friend class _M_lock;
1.163 - class _M_lock {
1.164 - public:
1.165 - _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
1.166 - ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
1.167 - };
1.168 -
1.169 -public:
1.170 + // Return a recycled or new per thread state.
1.171 + static __state_type * _STLP_CALL _S_get_per_thread_state();
1.172
1.173 /* n must be > 0 */
1.174 - static void * allocate(size_t __n)
1.175 - {
1.176 - __obj * volatile * __my_free_list;
1.177 - __obj * __RESTRICT __result;
1.178 - __state_type* __a;
1.179 -
1.180 - if (__n > _Max_size) {
1.181 - return(__malloc_alloc<0>::allocate(__n));
1.182 - }
1.183 -
1.184 - __a = _S_get_per_thread_state();
1.185 -
1.186 - __my_free_list = __a -> __free_list + _S_freelist_index(__n);
1.187 - __result = *__my_free_list;
1.188 - if (__result == 0) {
1.189 - void *__r = __a -> _M_refill(_S_round_up(__n));
1.190 - return __r;
1.191 - }
1.192 - *__my_free_list = __result -> __free_list_link;
1.193 - return (__result);
1.194 - };
1.195 + static void * _STLP_CALL allocate(size_t& __n);
1.196
1.197 /* p may not be 0 */
1.198 - static void deallocate(void *__p, size_t __n)
1.199 - {
1.200 - __obj *__q = (__obj *)__p;
1.201 - __obj * volatile * __my_free_list;
1.202 - __state_type* __a;
1.203 -
1.204 - if (__n > _Max_size) {
1.205 - __malloc_alloc<0>::deallocate(__p, __n);
1.206 - return;
1.207 - }
1.208 -
1.209 - __a = _S_get_per_thread_state();
1.210 -
1.211 - __my_free_list = __a->__free_list + _S_freelist_index(__n);
1.212 - __q -> __free_list_link = *__my_free_list;
1.213 - *__my_free_list = __q;
1.214 - }
1.215 + static void _STLP_CALL deallocate(void *__p, size_t __n);
1.216
1.217 // boris : versions for per_thread_allocator
1.218 /* n must be > 0 */
1.219 - static void * allocate(size_t __n, __state_type* __a)
1.220 - {
1.221 - __obj * volatile * __my_free_list;
1.222 - __obj * __RESTRICT __result;
1.223 -
1.224 - if (__n > _Max_size) {
1.225 - return(__malloc_alloc<0>::allocate(__n));
1.226 - }
1.227 -
1.228 - // boris : here, we have to lock per thread state, as we may be getting memory from
1.229 - // different thread pool.
1.230 - _STLP_mutex_lock __lock(__a->_M_lock);
1.231 -
1.232 - __my_free_list = __a -> __free_list + _S_freelist_index(__n);
1.233 - __result = *__my_free_list;
1.234 - if (__result == 0) {
1.235 - void *__r = __a -> _M_refill(_S_round_up(__n));
1.236 - return __r;
1.237 - }
1.238 - *__my_free_list = __result -> __free_list_link;
1.239 - return (__result);
1.240 - };
1.241 + static void * _STLP_CALL allocate(size_t& __n, __state_type* __a);
1.242
1.243 /* p may not be 0 */
1.244 - static void deallocate(void *__p, size_t __n, __state_type* __a)
1.245 - {
1.246 - __obj *__q = (__obj *)__p;
1.247 - __obj * volatile * __my_free_list;
1.248 + static void _STLP_CALL deallocate(void *__p, size_t __n, __state_type* __a);
1.249
1.250 - if (__n > _Max_size) {
1.251 - __malloc_alloc<0>::deallocate(__p, __n);
1.252 - return;
1.253 - }
1.254 + static void * _STLP_CALL reallocate(void *__p, size_t __old_sz, size_t& __new_sz);
1.255 +};
1.256
1.257 - // boris : here, we have to lock per thread state, as we may be returning memory from
1.258 - // different thread.
1.259 - _STLP_mutex_lock __lock(__a->_M_lock);
1.260 +_STLP_MOVE_TO_STD_NAMESPACE
1.261
1.262 - __my_free_list = __a->__free_list + _S_freelist_index(__n);
1.263 - __q -> __free_list_link = *__my_free_list;
1.264 - *__my_free_list = __q;
1.265 - }
1.266 -
1.267 - static void * reallocate(void *__p, size_t __old_sz, size_t __new_sz);
1.268 -
1.269 -} ;
1.270 -
1.271 -# if defined (_STLP_USE_TEMPLATE_EXPORT)
1.272 -_STLP_EXPORT_TEMPLATE_CLASS _Pthread_alloc<_MAX_BYTES>;
1.273 -# endif
1.274 -
1.275 -typedef _Pthread_alloc<_MAX_BYTES> __pthread_alloc;
1.276 +typedef _STLP_PRIV _Pthread_alloc __pthread_alloc;
1.277 typedef __pthread_alloc pthread_alloc;
1.278
1.279 template <class _Tp>
1.280 -class pthread_allocator {
1.281 +class pthread_allocator : public __stlport_class<pthread_allocator<_Tp> > {
1.282 typedef pthread_alloc _S_Alloc; // The underlying allocator.
1.283 public:
1.284 typedef size_t size_type;
1.285 @@ -266,7 +116,7 @@
1.286
1.287 #if defined (_STLP_MEMBER_TEMPLATES) /* && defined (_STLP_FUNCTION_PARTIAL_ORDER) */
1.288 template <class _OtherType> pthread_allocator(const pthread_allocator<_OtherType>&)
1.289 - _STLP_NOTHROW {}
1.290 + _STLP_NOTHROW {}
1.291 #endif
1.292
1.293 ~pthread_allocator() _STLP_NOTHROW {}
1.294 @@ -277,19 +127,64 @@
1.295 // __n is permitted to be 0. The C++ standard says nothing about what
1.296 // the return value is when __n == 0.
1.297 _Tp* allocate(size_type __n, const void* = 0) {
1.298 - return __n != 0 ? __STATIC_CAST(_Tp*,_S_Alloc::allocate(__n * sizeof(_Tp)))
1.299 - : 0;
1.300 + if (__n > max_size()) {
1.301 + __THROW_BAD_ALLOC;
1.302 + }
1.303 + if (__n != 0) {
1.304 + size_type __buf_size = __n * sizeof(value_type);
1.305 + _Tp* __ret = __REINTERPRET_CAST(value_type*, _S_Alloc::allocate(__buf_size));
1.306 +#if defined (_STLP_DEBUG_UNINITIALIZED) && !defined (_STLP_DEBUG_ALLOC)
1.307 + if (__ret != 0) {
1.308 + memset((char*)__ret, _STLP_SHRED_BYTE, __buf_size);
1.309 + }
1.310 +#endif
1.311 + return __ret;
1.312 + }
1.313 + else
1.314 + return 0;
1.315 }
1.316
1.317 - // p is not permitted to be a null pointer.
1.318 - void deallocate(pointer __p, size_type __n)
1.319 - { _S_Alloc::deallocate(__p, __n * sizeof(_Tp)); }
1.320 + void deallocate(pointer __p, size_type __n) {
1.321 + _STLP_ASSERT( (__p == 0) == (__n == 0) )
1.322 + if (__p != 0) {
1.323 +#if defined (_STLP_DEBUG_UNINITIALIZED) && !defined (_STLP_DEBUG_ALLOC)
1.324 + memset((char*)__p, _STLP_SHRED_BYTE, __n * sizeof(value_type));
1.325 +#endif
1.326 + _S_Alloc::deallocate(__p, __n * sizeof(value_type));
1.327 + }
1.328 + }
1.329
1.330 - size_type max_size() const _STLP_NOTHROW
1.331 - { return size_t(-1) / sizeof(_Tp); }
1.332 + size_type max_size() const _STLP_NOTHROW
1.333 + { return size_t(-1) / sizeof(_Tp); }
1.334
1.335 void construct(pointer __p, const _Tp& __val) { _STLP_PLACEMENT_NEW (__p) _Tp(__val); }
1.336 void destroy(pointer _p) { _p->~_Tp(); }
1.337 +
1.338 +#if defined (_STLP_NO_EXTENSIONS)
1.339 + /* STLport extension giving rounded size of an allocated memory buffer
1.340 + * This method does not have to be part of a user defined allocator implementation
1.341 + * and won't even be called if no such function is provided.
1.342 + */
1.343 +protected:
1.344 +#endif
1.345 + _Tp* allocate(size_type __n, size_type& __allocated_n) {
1.346 + if (__n > max_size()) {
1.347 + __THROW_BAD_ALLOC;
1.348 + }
1.349 + if (__n != 0) {
1.350 + size_type __buf_size = __n * sizeof(value_type);
1.351 + _Tp* __ret = __REINTERPRET_CAST(value_type*, _S_Alloc::allocate(__buf_size));
1.352 +#if defined (_STLP_DEBUG_UNINITIALIZED) && !defined (_STLP_DEBUG_ALLOC)
1.353 + if (__ret != 0) {
1.354 + memset((char*)__ret, _STLP_SHRED_BYTE, __buf_size);
1.355 + }
1.356 +#endif
1.357 + __allocated_n = __buf_size / sizeof(value_type);
1.358 + return __ret;
1.359 + }
1.360 + else
1.361 + return 0;
1.362 + }
1.363 };
1.364
1.365 _STLP_TEMPLATE_NULL
1.366 @@ -309,58 +204,79 @@
1.367
1.368 template <class _T1, class _T2>
1.369 inline bool operator==(const pthread_allocator<_T1>&,
1.370 - const pthread_allocator<_T2>& a2)
1.371 -{
1.372 - return true;
1.373 -}
1.374 + const pthread_allocator<_T2>& a2)
1.375 +{ return true; }
1.376
1.377 #ifdef _STLP_FUNCTION_TMPL_PARTIAL_ORDER
1.378 template <class _T1, class _T2>
1.379 inline bool operator!=(const pthread_allocator<_T1>&,
1.380 const pthread_allocator<_T2>&)
1.381 -{
1.382 - return false;
1.383 -}
1.384 +{ return false; }
1.385 #endif
1.386
1.387
1.388 -#ifdef _STLP_CLASS_PARTIAL_SPECIALIZATION
1.389 +#if defined (_STLP_CLASS_PARTIAL_SPECIALIZATION)
1.390
1.391 -# ifdef _STLP_USE_RAW_SGI_ALLOCATORS
1.392 -template <class _Tp, size_t _Max_size>
1.393 -struct _Alloc_traits<_Tp, _Pthread_alloc<_Max_size> >
1.394 -{
1.395 - typedef __allocator<_Tp, _Pthread_alloc<_Max_size> >
1.396 - allocator_type;
1.397 -};
1.398 -# endif
1.399 +# if defined (_STLP_USE_RAW_SGI_ALLOCATORS)
1.400 +template <class _Tp>
1.401 +struct _Alloc_traits<_Tp, _Pthread_alloc>
1.402 +{ typedef __allocator<_Tp, _Pthread_alloc> allocator_type; };
1.403 +# endif
1.404
1.405 template <class _Tp, class _Atype>
1.406 struct _Alloc_traits<_Tp, pthread_allocator<_Atype> >
1.407 -{
1.408 - typedef pthread_allocator<_Tp> allocator_type;
1.409 -};
1.410 +{ typedef pthread_allocator<_Tp> allocator_type; };
1.411
1.412 #endif
1.413
1.414 -#if !defined (_STLP_USE_NESTED_TCLASS_THROUGHT_TPARAM)
1.415 +#if defined (_STLP_DONT_SUPPORT_REBIND_MEMBER_TEMPLATE)
1.416
1.417 template <class _Tp1, class _Tp2>
1.418 inline pthread_allocator<_Tp2>&
1.419 -__stl_alloc_rebind(pthread_allocator<_Tp1>& __x, const _Tp2*) {
1.420 - return (pthread_allocator<_Tp2>&)__x;
1.421 -}
1.422 +__stl_alloc_rebind(pthread_allocator<_Tp1>& __x, const _Tp2*)
1.423 +{ return (pthread_allocator<_Tp2>&)__x; }
1.424
1.425 template <class _Tp1, class _Tp2>
1.426 inline pthread_allocator<_Tp2>
1.427 -__stl_alloc_create(pthread_allocator<_Tp1>&, const _Tp2*) {
1.428 - return pthread_allocator<_Tp2>();
1.429 -}
1.430 +__stl_alloc_create(pthread_allocator<_Tp1>&, const _Tp2*)
1.431 +{ return pthread_allocator<_Tp2>(); }
1.432
1.433 -#endif /* _STLP_USE_NESTED_TCLASS_THROUGHT_TPARAM */
1.434 +#endif
1.435 +
1.436 +_STLP_MOVE_TO_PRIV_NAMESPACE
1.437 +
1.438 +template <class _Tp>
1.439 +struct __pthread_alloc_type_traits {
1.440 + typedef typename _IsSTLportClass<pthread_allocator<_Tp> >::_Ret _STLportAlloc;
1.441 + //The default allocator implementation which is recognized thanks to the
1.442 + //__stlport_class inheritance is a stateless object so:
1.443 + typedef _STLportAlloc has_trivial_default_constructor;
1.444 + typedef _STLportAlloc has_trivial_copy_constructor;
1.445 + typedef _STLportAlloc has_trivial_assignment_operator;
1.446 + typedef _STLportAlloc has_trivial_destructor;
1.447 + typedef _STLportAlloc is_POD_type;
1.448 +};
1.449 +
1.450 +_STLP_MOVE_TO_STD_NAMESPACE
1.451 +
1.452 +#if defined (_STLP_CLASS_PARTIAL_SPECIALIZATION)
1.453 +template <class _Tp>
1.454 +struct __type_traits<pthread_allocator<_Tp> > : _STLP_PRIV __pthread_alloc_type_traits<_Tp> {};
1.455 +#else
1.456 +_STLP_TEMPLATE_NULL
1.457 +struct __type_traits<pthread_allocator<char> > : _STLP_PRIV __pthread_alloc_type_traits<char> {};
1.458 +# if defined (_STLP_HAS_WCHAR_T)
1.459 +_STLP_TEMPLATE_NULL
1.460 +struct __type_traits<pthread_allocator<wchar_t> > : _STLP_PRIV __pthread_alloc_type_traits<wchar_t> {};
1.461 +# endif
1.462 +# if defined (_STLP_USE_PTR_SPECIALIZATIONS)
1.463 +_STLP_TEMPLATE_NULL
1.464 +struct __type_traits<pthread_allocator<void*> > : _STLP_PRIV __pthread_alloc_type_traits<void*> {};
1.465 +# endif
1.466 +#endif
1.467
1.468 //
1.469 -// per_thread_allocator<> : this allocator always return memory to the same thread
1.470 +// per_thread_allocator<> : this allocator always return memory to the same thread
1.471 // it was allocated from.
1.472 //
1.473
1.474 @@ -383,14 +299,14 @@
1.475 };
1.476 #endif
1.477
1.478 - per_thread_allocator() _STLP_NOTHROW {
1.479 + per_thread_allocator() _STLP_NOTHROW {
1.480 _M_state = _S_Alloc::_S_get_per_thread_state();
1.481 }
1.482 per_thread_allocator(const per_thread_allocator<_Tp>& __a) _STLP_NOTHROW : _M_state(__a._M_state){}
1.483
1.484 #if defined (_STLP_MEMBER_TEMPLATES) /* && defined (_STLP_FUNCTION_PARTIAL_ORDER) */
1.485 template <class _OtherType> per_thread_allocator(const per_thread_allocator<_OtherType>& __a)
1.486 - _STLP_NOTHROW : _M_state(__a._M_state) {}
1.487 + _STLP_NOTHROW : _M_state(__a._M_state) {}
1.488 #endif
1.489
1.490 ~per_thread_allocator() _STLP_NOTHROW {}
1.491 @@ -401,21 +317,67 @@
1.492 // __n is permitted to be 0. The C++ standard says nothing about what
1.493 // the return value is when __n == 0.
1.494 _Tp* allocate(size_type __n, const void* = 0) {
1.495 - return __n != 0 ? __STATIC_CAST(_Tp*,_S_Alloc::allocate(__n * sizeof(_Tp), _M_state)): 0;
1.496 + if (__n > max_size()) {
1.497 + __THROW_BAD_ALLOC;
1.498 + }
1.499 + if (__n != 0) {
1.500 + size_type __buf_size = __n * sizeof(value_type);
1.501 + _Tp* __ret = __REINTERPRET_CAST(_Tp*, _S_Alloc::allocate(__buf_size, _M_state));
1.502 +#if defined (_STLP_DEBUG_UNINITIALIZED) && !defined (_STLP_DEBUG_ALLOC)
1.503 + if (__ret != 0) {
1.504 + memset((char*)__ret, _STLP_SHRED_BYTE, __buf_size);
1.505 + }
1.506 +#endif
1.507 + return __ret;
1.508 + }
1.509 + else
1.510 + return 0;
1.511 }
1.512
1.513 - // p is not permitted to be a null pointer.
1.514 - void deallocate(pointer __p, size_type __n)
1.515 - { _S_Alloc::deallocate(__p, __n * sizeof(_Tp), _M_state); }
1.516 + void deallocate(pointer __p, size_type __n) {
1.517 + _STLP_ASSERT( (__p == 0) == (__n == 0) )
1.518 + if (__p != 0) {
1.519 +#if defined (_STLP_DEBUG_UNINITIALIZED) && !defined (_STLP_DEBUG_ALLOC)
1.520 + memset((char*)__p, _STLP_SHRED_BYTE, __n * sizeof(value_type));
1.521 +#endif
1.522 + _S_Alloc::deallocate(__p, __n * sizeof(value_type), _M_state);
1.523 + }
1.524 + }
1.525
1.526 - size_type max_size() const _STLP_NOTHROW
1.527 - { return size_t(-1) / sizeof(_Tp); }
1.528 + size_type max_size() const _STLP_NOTHROW
1.529 + { return size_t(-1) / sizeof(_Tp); }
1.530
1.531 void construct(pointer __p, const _Tp& __val) { _STLP_PLACEMENT_NEW (__p) _Tp(__val); }
1.532 void destroy(pointer _p) { _p->~_Tp(); }
1.533
1.534 // state is being kept here
1.535 __state_type* _M_state;
1.536 +
1.537 +#if defined (_STLP_NO_EXTENSIONS)
1.538 + /* STLport extension giving rounded size of an allocated memory buffer
1.539 + * This method does not have to be part of a user defined allocator implementation
1.540 + * and won't even be called if no such function is provided.
1.541 + */
1.542 +protected:
1.543 +#endif
1.544 + _Tp* allocate(size_type __n, size_type& __allocated_n) {
1.545 + if (__n > max_size()) {
1.546 + __THROW_BAD_ALLOC;
1.547 + }
1.548 + if (__n != 0) {
1.549 + size_type __buf_size = __n * sizeof(value_type);
1.550 + _Tp* __ret = __REINTERPRET_CAST(value_type*, _S_Alloc::allocate(__buf_size, _M_state));
1.551 +#if defined (_STLP_DEBUG_UNINITIALIZED) && !defined (_STLP_DEBUG_ALLOC)
1.552 + if (__ret != 0) {
1.553 + memset((char*)__ret, _STLP_SHRED_BYTE, __buf_size);
1.554 + }
1.555 +#endif
1.556 + __allocated_n = __buf_size / sizeof(value_type);
1.557 + return __ret;
1.558 + }
1.559 + else
1.560 + return 0;
1.561 + }
1.562 };
1.563
1.564 _STLP_TEMPLATE_NULL
1.565 @@ -435,53 +397,74 @@
1.566
1.567 template <class _T1, class _T2>
1.568 inline bool operator==(const per_thread_allocator<_T1>& __a1,
1.569 - const per_thread_allocator<_T2>& __a2)
1.570 -{
1.571 - return __a1._M_state == __a2._M_state;
1.572 -}
1.573 + const per_thread_allocator<_T2>& __a2)
1.574 +{ return __a1._M_state == __a2._M_state; }
1.575
1.576 #ifdef _STLP_FUNCTION_TMPL_PARTIAL_ORDER
1.577 template <class _T1, class _T2>
1.578 inline bool operator!=(const per_thread_allocator<_T1>& __a1,
1.579 const per_thread_allocator<_T2>& __a2)
1.580 -{
1.581 - return __a1._M_state != __a2._M_state;
1.582 -}
1.583 +{ return __a1._M_state != __a2._M_state; }
1.584 #endif
1.585
1.586
1.587 -#ifdef _STLP_CLASS_PARTIAL_SPECIALIZATION
1.588 +#if defined (_STLP_CLASS_PARTIAL_SPECIALIZATION)
1.589
1.590 template <class _Tp, class _Atype>
1.591 struct _Alloc_traits<_Tp, per_thread_allocator<_Atype> >
1.592 -{
1.593 - typedef per_thread_allocator<_Tp> allocator_type;
1.594 -};
1.595 +{ typedef per_thread_allocator<_Tp> allocator_type; };
1.596
1.597 #endif
1.598
1.599 -#if !defined (_STLP_USE_NESTED_TCLASS_THROUGHT_TPARAM)
1.600 +#if defined (_STLP_DONT_SUPPORT_REBIND_MEMBER_TEMPLATE)
1.601
1.602 template <class _Tp1, class _Tp2>
1.603 inline per_thread_allocator<_Tp2>&
1.604 -__stl_alloc_rebind(per_thread_allocator<_Tp1>& __x, const _Tp2*) {
1.605 - return (per_thread_allocator<_Tp2>&)__x;
1.606 -}
1.607 +__stl_alloc_rebind(per_thread_allocator<_Tp1>& __x, const _Tp2*)
1.608 +{ return (per_thread_allocator<_Tp2>&)__x; }
1.609
1.610 template <class _Tp1, class _Tp2>
1.611 inline per_thread_allocator<_Tp2>
1.612 -__stl_alloc_create(per_thread_allocator<_Tp1>&, const _Tp2*) {
1.613 - return per_thread_allocator<_Tp2>();
1.614 -}
1.615 +__stl_alloc_create(per_thread_allocator<_Tp1>&, const _Tp2*)
1.616 +{ return per_thread_allocator<_Tp2>(); }
1.617
1.618 -#endif /* _STLP_USE_NESTED_TCLASS_THROUGHT_TPARAM */
1.619 +#endif /* _STLP_DONT_SUPPORT_REBIND_MEMBER_TEMPLATE */
1.620 +
1.621 +_STLP_MOVE_TO_PRIV_NAMESPACE
1.622 +
1.623 +template <class _Tp>
1.624 +struct __perthread_alloc_type_traits {
1.625 + typedef typename _IsSTLportClass<per_thread_allocator<_Tp> >::_Ret _STLportAlloc;
1.626 + //The default allocator implementation which is recognized thanks to the
1.627 + //__stlport_class inheritance is a stateless object so:
1.628 + typedef __false_type has_trivial_default_constructor;
1.629 + typedef _STLportAlloc has_trivial_copy_constructor;
1.630 + typedef _STLportAlloc has_trivial_assignment_operator;
1.631 + typedef _STLportAlloc has_trivial_destructor;
1.632 + typedef __false_type is_POD_type;
1.633 +};
1.634 +
1.635 +_STLP_MOVE_TO_STD_NAMESPACE
1.636 +
1.637 +#if defined (_STLP_CLASS_PARTIAL_SPECIALIZATION)
1.638 +template <class _Tp>
1.639 +struct __type_traits<per_thread_allocator<_Tp> > : _STLP_PRIV __perthread_alloc_type_traits<_Tp> {};
1.640 +#else
1.641 +_STLP_TEMPLATE_NULL
1.642 +struct __type_traits<per_thread_allocator<char> > : _STLP_PRIV __perthread_alloc_type_traits<char> {};
1.643 +# if defined (_STLP_HAS_WCHAR_T)
1.644 +_STLP_TEMPLATE_NULL
1.645 +struct __type_traits<per_thread_allocator<wchar_t> > : _STLP_PRIV __perthread_alloc_type_traits<wchar_t> {};
1.646 +# endif
1.647 +# if defined (_STLP_USE_PTR_SPECIALIZATIONS)
1.648 +_STLP_TEMPLATE_NULL
1.649 +struct __type_traits<per_thread_allocator<void*> > : _STLP_PRIV __perthread_alloc_type_traits<void*> {};
1.650 +# endif
1.651 +#endif
1.652 +
1.653
1.654 _STLP_END_NAMESPACE
1.655
1.656 -# if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION) && !defined (_STLP_LINK_TIME_INSTANTIATION)
1.657 -# include <stl/_pthread_alloc.c>
1.658 -# endif
1.659 -
1.660 #endif /* _STLP_PTHREAD_ALLOC */
1.661
1.662 // Local Variables: