williamr@4
|
1 |
/*
|
williamr@4
|
2 |
*
|
williamr@4
|
3 |
* Copyright (c) 1994
|
williamr@4
|
4 |
* Hewlett-Packard Company
|
williamr@4
|
5 |
*
|
williamr@4
|
6 |
* Copyright (c) 1996,1997
|
williamr@4
|
7 |
* Silicon Graphics Computer Systems, Inc.
|
williamr@4
|
8 |
*
|
williamr@4
|
9 |
* Copyright (c) 1997
|
williamr@4
|
10 |
* Moscow Center for SPARC Technology
|
williamr@4
|
11 |
*
|
williamr@4
|
12 |
* Copyright (c) 1999
|
williamr@4
|
13 |
* Boris Fomitchev
|
williamr@4
|
14 |
*
|
williamr@4
|
15 |
* This material is provided "as is", with absolutely no warranty expressed
|
williamr@4
|
16 |
* or implied. Any use is at your own risk.
|
williamr@4
|
17 |
*
|
williamr@4
|
18 |
* Permission to use or copy this software for any purpose is hereby granted
|
williamr@4
|
19 |
* without fee, provided the above notices are retained on all copies.
|
williamr@4
|
20 |
* Permission to modify the code and to distribute modified code is granted,
|
williamr@4
|
21 |
* provided the above notices are retained, and a notice that the code was
|
williamr@4
|
22 |
* modified is included with the above copyright notice.
|
williamr@4
|
23 |
*
|
williamr@4
|
24 |
*/
|
williamr@4
|
25 |
|
williamr@4
|
26 |
#ifndef _STLP_PTHREAD_ALLOC_H
|
williamr@4
|
27 |
#define _STLP_PTHREAD_ALLOC_H
|
williamr@4
|
28 |
|
williamr@4
|
29 |
// Pthread-specific node allocator.
|
williamr@4
|
30 |
// This is similar to the default allocator, except that free-list
|
williamr@4
|
31 |
// information is kept separately for each thread, avoiding locking.
|
williamr@4
|
32 |
// This should be reasonably fast even in the presence of threads.
|
williamr@4
|
33 |
// The down side is that storage may not be well-utilized.
|
williamr@4
|
34 |
// It is not an error to allocate memory in thread A and deallocate
|
williamr@4
|
35 |
// it in thread B. But this effectively transfers ownership of the memory,
|
williamr@4
|
36 |
// so that it can only be reallocated by thread B. Thus this can effectively
|
williamr@4
|
37 |
// result in a storage leak if it's done on a regular basis.
|
williamr@4
|
38 |
// It can also result in frequent sharing of
|
williamr@4
|
39 |
// cache lines among processors, with potentially serious performance
|
williamr@4
|
40 |
// consequences.
|
williamr@4
|
41 |
|
williamr@4
|
42 |
#include <pthread.h>
|
williamr@4
|
43 |
|
williamr@4
|
44 |
#ifndef _STLP_INTERNAL_ALLOC_H
|
williamr@4
|
45 |
#include <stl/_alloc.h>
|
williamr@4
|
46 |
#endif
|
williamr@4
|
47 |
|
williamr@4
|
48 |
#ifndef __RESTRICT
|
williamr@4
|
49 |
# define __RESTRICT
|
williamr@4
|
50 |
#endif
|
williamr@4
|
51 |
|
williamr@4
|
52 |
_STLP_BEGIN_NAMESPACE
|
williamr@4
|
53 |
|
williamr@4
|
54 |
#define _STLP_DATA_ALIGNMENT 8
|
williamr@4
|
55 |
|
williamr@4
|
56 |
/* Free-list node used by the pthread allocators.  While a block sits on a
 * free list its storage holds the link to the next free block; once it is
 * handed out, the same storage is reused as raw client data.  The
 * __client_data member forces the minimum block size / alignment to
 * _STLP_DATA_ALIGNMENT bytes. */
union _Pthread_alloc_obj {
    union _Pthread_alloc_obj * __free_list_link;
    char __client_data[_STLP_DATA_ALIGNMENT];    /* The client sees this. */
};
|
williamr@4
|
60 |
|
williamr@4
|
61 |
// Pthread allocators don't appear to the client to have meaningful
|
williamr@4
|
62 |
// instances. We do in fact need to associate some state with each
|
williamr@4
|
63 |
// thread. That state is represented by
|
williamr@4
|
64 |
// _Pthread_alloc_per_thread_state<_Max_size>.
|
williamr@4
|
65 |
|
williamr@4
|
66 |
// Per-thread allocation state.  Each thread owns one of these; it carries
// one free list per block size class (sizes are multiples of
// _STLP_DATA_ALIGNMENT up to _Max_size), so ordinary allocation and
// deallocation need no locking.
template<size_t _Max_size>
struct _Pthread_alloc_per_thread_state {
  typedef _Pthread_alloc_obj __obj;
  // Number of size classes served from per-thread free lists.
  enum { _S_NFREELISTS = _Max_size/_STLP_DATA_ALIGNMENT };

  // Free list link for list of available per thread structures.
  // When one of these becomes available for reuse due to thread
  // termination, any objects in its free list remain associated
  // with it.  The whole structure may then be used by a newly
  // created thread.
  // Constructor: all free lists start empty, not linked to a successor.
  _Pthread_alloc_per_thread_state() : __next(0)
  {
    memset((void *)__free_list, 0, (size_t)_S_NFREELISTS * sizeof(__obj *));
  }
  // Returns an object of size __n, and possibly adds to size n free list.
  // (Defined in stl/_pthread_alloc.c; __n must already be rounded up.)
  void *_M_refill(size_t __n);

  // One head pointer per size class; volatile because another thread may
  // observe it through per_thread_allocator.
  _Pthread_alloc_obj* volatile __free_list[_S_NFREELISTS];
  // Link in the list of reclaimed states awaiting reuse by a new thread.
  _Pthread_alloc_per_thread_state<_Max_size> * __next;
  // this data member is only to be used by per_thread_allocator, which
  // returns memory to the originating thread.
  _STLP_mutex _M_lock;

};
|
williamr@4
|
89 |
|
williamr@4
|
90 |
// Pthread-specific allocator.
|
williamr@4
|
91 |
// The argument specifies the largest object size allocated from per-thread
|
williamr@4
|
92 |
// free lists. Larger objects are allocated using malloc_alloc.
|
williamr@4
|
93 |
// Max_size must be a power of 2.
|
williamr@4
|
94 |
// Pthread-specific allocator.
// The argument specifies the largest object size allocated from per-thread
// free lists.  Larger objects are allocated using malloc_alloc.
// Max_size must be a power of 2.
template < __DFL_NON_TYPE_PARAM(size_t, _Max_size, _MAX_BYTES) >
class _Pthread_alloc {

public: // but only for internal use:

  typedef _Pthread_alloc_obj __obj;
  typedef _Pthread_alloc_per_thread_state<_Max_size> __state_type;
  typedef char value_type;

  // Allocates a chunk for nobjs of size size.  nobjs may be reduced
  // if it is inconvenient to allocate the requested number.
  static char *_S_chunk_alloc(size_t __size, size_t &__nobjs);

  enum {_S_ALIGN = _STLP_DATA_ALIGNMENT};

  // Round __bytes up to the next multiple of _S_ALIGN (power of two).
  static size_t _S_round_up(size_t __bytes) {
    return (((__bytes) + (int)_S_ALIGN-1) & ~((int)_S_ALIGN - 1));
  }
  // Map a request of __bytes to the index of the free list that serves it.
  static size_t _S_freelist_index(size_t __bytes) {
    return (((__bytes) + (int)_S_ALIGN-1)/(int)_S_ALIGN - 1);
  }

private:
  // Chunk allocation state.  And other shared state.
  // Protected by _S_chunk_allocator_lock.
  static _STLP_mutex_base _S_chunk_allocator_lock;
  static char *_S_start_free;
  static char *_S_end_free;
  static size_t _S_heap_size;
  // Allocator instances that are currently unclaimed by any thread.
  static _Pthread_alloc_per_thread_state<_Max_size>* _S_free_per_thread_states;
  // Pthread key under which per thread state is stored.
  static pthread_key_t _S_key;
  static bool _S_key_initialized;
  // Function to be called on thread exit to reclaim per thread
  // state.
  static void _S_destructor(void *instance);
  // Build a brand-new per-thread state (used when none can be recycled).
  static _Pthread_alloc_per_thread_state<_Max_size> *_S_new_per_thread_state();
public:
  // Return a recycled or new per thread state; ensures that the current
  // thread has an associated per thread state.
  static _Pthread_alloc_per_thread_state<_Max_size> *_S_get_per_thread_state();
private:
  class _M_lock;
  friend class _M_lock;
  // Scoped guard for _S_chunk_allocator_lock (acquire in ctor, release in
  // dtor, so the lock is released on every exit path).
  class _M_lock {
      public:
        _M_lock () { _S_chunk_allocator_lock._M_acquire_lock(); }
        ~_M_lock () { _S_chunk_allocator_lock._M_release_lock(); }
  };

public:

  /* n must be > 0 */
  // Lock-free fast path: pop from the calling thread's own free list;
  // oversized requests fall through to malloc_alloc.
  static void * allocate(size_t __n)
  {
    __obj * volatile * __my_free_list;
    __obj * __RESTRICT __result;
    __state_type* __a;

    if (__n > _Max_size) {
        return(__malloc_alloc<0>::allocate(__n));
    }

    __a = _S_get_per_thread_state();

    __my_free_list = __a -> __free_list + _S_freelist_index(__n);
    __result = *__my_free_list;
    if (__result == 0) {
        // Free list empty: refill it from the shared chunk pool.
        void *__r = __a -> _M_refill(_S_round_up(__n));
        return __r;
    }
    *__my_free_list = __result -> __free_list_link;
    return (__result);
  };

  /* p may not be 0 */
  // Push the block onto the *calling* thread's free list — ownership of the
  // memory migrates to the deallocating thread (see file header comment).
  static void deallocate(void *__p, size_t __n)
  {
    __obj *__q = (__obj *)__p;
    __obj * volatile * __my_free_list;
    __state_type* __a;

    if (__n > _Max_size) {
        __malloc_alloc<0>::deallocate(__p, __n);
        return;
    }

    __a = _S_get_per_thread_state();

    __my_free_list = __a->__free_list + _S_freelist_index(__n);
    __q -> __free_list_link = *__my_free_list;
    *__my_free_list = __q;
  }

  // boris : versions for per_thread_allocator
  /* n must be > 0 */
  // Like allocate(__n) but draws from an explicit state __a, which may
  // belong to another thread — hence the per-state lock.
  static void * allocate(size_t __n, __state_type* __a)
  {
    __obj * volatile * __my_free_list;
    __obj * __RESTRICT __result;

    if (__n > _Max_size) {
        return(__malloc_alloc<0>::allocate(__n));
    }

    // boris : here, we have to lock per thread state, as we may be getting
    // memory from different thread pool.
    _STLP_mutex_lock __lock(__a->_M_lock);

    __my_free_list = __a -> __free_list + _S_freelist_index(__n);
    __result = *__my_free_list;
    if (__result == 0) {
        void *__r = __a -> _M_refill(_S_round_up(__n));
        return __r;
    }
    *__my_free_list = __result -> __free_list_link;
    return (__result);
  };

  /* p may not be 0 */
  // Like deallocate(__p, __n) but returns the block to the originating
  // state __a rather than to the calling thread's state.
  static void deallocate(void *__p, size_t __n, __state_type* __a)
  {
    __obj *__q = (__obj *)__p;
    __obj * volatile * __my_free_list;

    if (__n > _Max_size) {
        __malloc_alloc<0>::deallocate(__p, __n);
        return;
    }

    // boris : here, we have to lock per thread state, as we may be returning
    // memory from different thread.
    _STLP_mutex_lock __lock(__a->_M_lock);

    __my_free_list = __a->__free_list + _S_freelist_index(__n);
    __q -> __free_list_link = *__my_free_list;
    *__my_free_list = __q;
  }

  // Defined in stl/_pthread_alloc.c.
  static void * reallocate(void *__p, size_t __old_sz, size_t __new_sz);

} ;
|
williamr@4
|
238 |
|
williamr@4
|
239 |
# if defined (_STLP_USE_TEMPLATE_EXPORT)
|
williamr@4
|
240 |
_STLP_EXPORT_TEMPLATE_CLASS _Pthread_alloc<_MAX_BYTES>;
|
williamr@4
|
241 |
# endif
|
williamr@4
|
242 |
|
williamr@4
|
243 |
typedef _Pthread_alloc<_MAX_BYTES> __pthread_alloc;
|
williamr@4
|
244 |
typedef __pthread_alloc pthread_alloc;
|
williamr@4
|
245 |
|
williamr@4
|
246 |
// Standard-conforming allocator adaptor over the shared __pthread_alloc
// pool.  All instances are interchangeable (stateless); memory allocated in
// one thread may be deallocated in another (with the ownership-transfer
// caveat described at the top of this file).
template <class _Tp>
class pthread_allocator {
  typedef pthread_alloc _S_Alloc;          // The underlying allocator.
public:
  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

#ifdef _STLP_MEMBER_TEMPLATE_CLASSES
  template <class _NewType> struct rebind {
    typedef pthread_allocator<_NewType> other;
  };
#endif

  pthread_allocator() _STLP_NOTHROW {}
  pthread_allocator(const pthread_allocator<_Tp>& a) _STLP_NOTHROW {}

#if defined (_STLP_MEMBER_TEMPLATES) /* && defined (_STLP_FUNCTION_PARTIAL_ORDER) */
  template <class _OtherType> pthread_allocator(const pthread_allocator<_OtherType>&)
    _STLP_NOTHROW {}
#endif

  ~pthread_allocator() _STLP_NOTHROW {}

  pointer address(reference __x) const { return &__x; }
  const_pointer address(const_reference __x) const { return &__x; }

  // __n is permitted to be 0.  The C++ standard says nothing about what
  // the return value is when __n == 0.
  _Tp* allocate(size_type __n, const void* = 0) {
    return __n != 0 ? __STATIC_CAST(_Tp*,_S_Alloc::allocate(__n * sizeof(_Tp)))
                    : 0;
  }

  // p is not permitted to be a null pointer.
  void deallocate(pointer __p, size_type __n)
    { _S_Alloc::deallocate(__p, __n * sizeof(_Tp)); }

  size_type max_size() const _STLP_NOTHROW
    { return size_t(-1) / sizeof(_Tp); }

  void construct(pointer __p, const _Tp& __val) { _STLP_PLACEMENT_NEW (__p) _Tp(__val); }
  void destroy(pointer _p) { _p->~_Tp(); }
};
|
williamr@4
|
294 |
|
williamr@4
|
295 |
// Specialization for void: provides only the nested typedefs (void cannot
// be allocated, referenced, or constructed).
_STLP_TEMPLATE_NULL
class _STLP_CLASS_DECLSPEC pthread_allocator<void> {
public:
  typedef size_t      size_type;
  typedef ptrdiff_t   difference_type;
  typedef void*       pointer;
  typedef const void* const_pointer;
  typedef void        value_type;
#ifdef _STLP_MEMBER_TEMPLATE_CLASSES
  template <class _NewType> struct rebind {
    typedef pthread_allocator<_NewType> other;
  };
#endif
};
|
williamr@4
|
309 |
|
williamr@4
|
310 |
template <class _T1, class _T2>
|
williamr@4
|
311 |
inline bool operator==(const pthread_allocator<_T1>&,
|
williamr@4
|
312 |
const pthread_allocator<_T2>& a2)
|
williamr@4
|
313 |
{
|
williamr@4
|
314 |
return true;
|
williamr@4
|
315 |
}
|
williamr@4
|
316 |
|
williamr@4
|
317 |
#ifdef _STLP_FUNCTION_TMPL_PARTIAL_ORDER
// Symmetric counterpart of operator== above: stateless allocators are
// never unequal.
template <class _T1, class _T2>
inline bool operator!=(const pthread_allocator<_T1>&,
                       const pthread_allocator<_T2>&)
{ return false; }
#endif
|
williamr@4
|
325 |
|
williamr@4
|
326 |
|
williamr@4
|
327 |
#ifdef _STLP_CLASS_PARTIAL_SPECIALIZATION
|
williamr@4
|
328 |
|
williamr@4
|
329 |
# ifdef _STLP_USE_RAW_SGI_ALLOCATORS
|
williamr@4
|
330 |
template <class _Tp, size_t _Max_size>
|
williamr@4
|
331 |
struct _Alloc_traits<_Tp, _Pthread_alloc<_Max_size> >
|
williamr@4
|
332 |
{
|
williamr@4
|
333 |
typedef __allocator<_Tp, _Pthread_alloc<_Max_size> >
|
williamr@4
|
334 |
allocator_type;
|
williamr@4
|
335 |
};
|
williamr@4
|
336 |
# endif
|
williamr@4
|
337 |
|
williamr@4
|
338 |
template <class _Tp, class _Atype>
|
williamr@4
|
339 |
struct _Alloc_traits<_Tp, pthread_allocator<_Atype> >
|
williamr@4
|
340 |
{
|
williamr@4
|
341 |
typedef pthread_allocator<_Tp> allocator_type;
|
williamr@4
|
342 |
};
|
williamr@4
|
343 |
|
williamr@4
|
344 |
#endif
|
williamr@4
|
345 |
|
williamr@4
|
346 |
#if !defined (_STLP_USE_NESTED_TCLASS_THROUGHT_TPARAM)

// Rebind workaround for compilers lacking member-template classes: since
// pthread_allocator is stateless, reinterpreting the reference to the
// target instantiation is safe here.
template <class _Tp1, class _Tp2>
inline pthread_allocator<_Tp2>&
__stl_alloc_rebind(pthread_allocator<_Tp1>& __x, const _Tp2*) {
  return (pthread_allocator<_Tp2>&)__x;
}

// Create a pthread_allocator for _Tp2 from one for _Tp1 (stateless, so a
// default-constructed instance suffices).
template <class _Tp1, class _Tp2>
inline pthread_allocator<_Tp2>
__stl_alloc_create(pthread_allocator<_Tp1>&, const _Tp2*) {
  return pthread_allocator<_Tp2>();
}

#endif /* _STLP_USE_NESTED_TCLASS_THROUGHT_TPARAM */
|
williamr@4
|
361 |
|
williamr@4
|
362 |
//
|
williamr@4
|
363 |
// per_thread_allocator<> : this allocator always return memory to the same thread
|
williamr@4
|
364 |
// it was allocated from.
|
williamr@4
|
365 |
//
|
williamr@4
|
366 |
|
williamr@4
|
367 |
//
// per_thread_allocator<> : this allocator always return memory to the same
// thread it was allocated from.
//

// Unlike pthread_allocator, this adaptor is *stateful*: it captures the
// per-thread pool of the constructing thread in _M_state and routes every
// allocate/deallocate back to that pool (taking the pool's lock, since the
// caller may be a different thread).
template <class _Tp>
class per_thread_allocator {
  typedef pthread_alloc _S_Alloc;          // The underlying allocator.
  typedef pthread_alloc::__state_type __state_type;
public:
  typedef size_t     size_type;
  typedef ptrdiff_t  difference_type;
  typedef _Tp*       pointer;
  typedef const _Tp* const_pointer;
  typedef _Tp&       reference;
  typedef const _Tp& const_reference;
  typedef _Tp        value_type;

#ifdef _STLP_MEMBER_TEMPLATE_CLASSES
  template <class _NewType> struct rebind {
    typedef per_thread_allocator<_NewType> other;
  };
#endif

  // Bind this allocator to the current thread's pool.
  per_thread_allocator() _STLP_NOTHROW {
    _M_state = _S_Alloc::_S_get_per_thread_state();
  }
  // Copies share the original's pool, whatever thread they run in.
  per_thread_allocator(const per_thread_allocator<_Tp>& __a) _STLP_NOTHROW : _M_state(__a._M_state){}

#if defined (_STLP_MEMBER_TEMPLATES) /* && defined (_STLP_FUNCTION_PARTIAL_ORDER) */
  template <class _OtherType> per_thread_allocator(const per_thread_allocator<_OtherType>& __a)
    _STLP_NOTHROW : _M_state(__a._M_state) {}
#endif

  ~per_thread_allocator() _STLP_NOTHROW {}

  pointer address(reference __x) const { return &__x; }
  const_pointer address(const_reference __x) const { return &__x; }

  // __n is permitted to be 0.  The C++ standard says nothing about what
  // the return value is when __n == 0.
  _Tp* allocate(size_type __n, const void* = 0) {
    return __n != 0 ? __STATIC_CAST(_Tp*,_S_Alloc::allocate(__n * sizeof(_Tp), _M_state)): 0;
  }

  // p is not permitted to be a null pointer.
  void deallocate(pointer __p, size_type __n)
    { _S_Alloc::deallocate(__p, __n * sizeof(_Tp), _M_state); }

  size_type max_size() const _STLP_NOTHROW
    { return size_t(-1) / sizeof(_Tp); }

  void construct(pointer __p, const _Tp& __val) { _STLP_PLACEMENT_NEW (__p) _Tp(__val); }
  void destroy(pointer _p) { _p->~_Tp(); }

  // state is being kept here
  __state_type* _M_state;
};
|
williamr@4
|
420 |
|
williamr@4
|
421 |
// Specialization for void: typedefs only (void cannot be allocated,
// referenced, or constructed).
_STLP_TEMPLATE_NULL
class _STLP_CLASS_DECLSPEC per_thread_allocator<void> {
public:
  typedef size_t      size_type;
  typedef ptrdiff_t   difference_type;
  typedef void*       pointer;
  typedef const void* const_pointer;
  typedef void        value_type;
#ifdef _STLP_MEMBER_TEMPLATE_CLASSES
  template <class _NewType> struct rebind {
    typedef per_thread_allocator<_NewType> other;
  };
#endif
};
|
williamr@4
|
435 |
|
williamr@4
|
436 |
template <class _T1, class _T2>
|
williamr@4
|
437 |
inline bool operator==(const per_thread_allocator<_T1>& __a1,
|
williamr@4
|
438 |
const per_thread_allocator<_T2>& __a2)
|
williamr@4
|
439 |
{
|
williamr@4
|
440 |
return __a1._M_state == __a2._M_state;
|
williamr@4
|
441 |
}
|
williamr@4
|
442 |
|
williamr@4
|
443 |
#ifdef _STLP_FUNCTION_TMPL_PARTIAL_ORDER
// Negation of operator== above: unequal iff the two allocators are bound
// to different per-thread pools.
template <class _T1, class _T2>
inline bool operator!=(const per_thread_allocator<_T1>& __lhs,
                       const per_thread_allocator<_T2>& __rhs)
{ return __lhs._M_state != __rhs._M_state; }
#endif
|
williamr@4
|
451 |
|
williamr@4
|
452 |
|
williamr@4
|
453 |
#ifdef _STLP_CLASS_PARTIAL_SPECIALIZATION
|
williamr@4
|
454 |
|
williamr@4
|
455 |
template <class _Tp, class _Atype>
|
williamr@4
|
456 |
struct _Alloc_traits<_Tp, per_thread_allocator<_Atype> >
|
williamr@4
|
457 |
{
|
williamr@4
|
458 |
typedef per_thread_allocator<_Tp> allocator_type;
|
williamr@4
|
459 |
};
|
williamr@4
|
460 |
|
williamr@4
|
461 |
#endif
|
williamr@4
|
462 |
|
williamr@4
|
463 |
#if !defined (_STLP_USE_NESTED_TCLASS_THROUGHT_TPARAM)

// Rebind workaround for compilers lacking member-template classes.
// NOTE(review): the cast relies on per_thread_allocator<_Tp1> and
// per_thread_allocator<_Tp2> having identical layout (a single
// __state_type* member) — true for this definition.
template <class _Tp1, class _Tp2>
inline per_thread_allocator<_Tp2>&
__stl_alloc_rebind(per_thread_allocator<_Tp1>& __x, const _Tp2*) {
  return (per_thread_allocator<_Tp2>&)__x;
}

// Create a per_thread_allocator for _Tp2; default construction binds it to
// the *calling* thread's pool (the _Tp1 argument's state is not copied).
template <class _Tp1, class _Tp2>
inline per_thread_allocator<_Tp2>
__stl_alloc_create(per_thread_allocator<_Tp1>&, const _Tp2*) {
  return per_thread_allocator<_Tp2>();
}

#endif /* _STLP_USE_NESTED_TCLASS_THROUGHT_TPARAM */
|
williamr@4
|
478 |
|
williamr@4
|
479 |
_STLP_END_NAMESPACE
|
williamr@4
|
480 |
|
williamr@4
|
481 |
# if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION) && !defined (_STLP_LINK_TIME_INSTANTIATION)
|
williamr@4
|
482 |
# include <stl/_pthread_alloc.c>
|
williamr@4
|
483 |
# endif
|
williamr@4
|
484 |
|
williamr@4
|
485 |
#endif /* _STLP_PTHREAD_ALLOC_H */
|
williamr@4
|
486 |
|
williamr@4
|
487 |
// Local Variables:
|
williamr@4
|
488 |
// mode:C++
|
williamr@4
|
489 |
// End:
|