/*
 * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
 *
 * Copyright (c) 1997-1999
 * Silicon Graphics Computer Systems, Inc.
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
// WARNING: This is an internal header file, included by other C++
// standard library headers. You should not attempt to use this header
// file directly.
26 #ifndef _STLP_INTERNAL_THREADS_H
27 #define _STLP_INTERNAL_THREADS_H
29 // Supported threading models are native SGI, pthreads, uithreads
30 // (similar to pthreads, but based on an earlier draft of the Posix
31 // threads standard), and Win32 threads. Uithread support by Jochen
32 // Schlick, 1999, and Solaris threads generalized to them.
34 #ifndef _STLP_INTERNAL_CSTDDEF
35 # include <stl/_cstddef.h>
38 #ifndef _STLP_INTERNAL_CSTDLIB
39 # include <stl/_cstdlib.h>
42 // On SUN and Mac OS X gcc, zero-initialization works just fine...
43 #if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
44 # define _STLP_MUTEX_INITIALIZER
/* This header defines the following atomic operations that a platform should
 * try to support as much as possible. Atomic operations are exposed as macros
 * in order to easily test for their existence. They are:
 * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
 * increment *__ptr by 1 and returns the new value
 * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
 * decrement *__ptr by 1 and returns the new value
 * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
 * assign __val to *__target and returns former *__target value
 * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
 * assign __ptr to *__target and returns former *__target value
 * __stl_atomic_t _STLP_ATOMIC_ADD(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
 * does *__target = *__target + __val and returns the old *__target value
 */
62 #if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
63 typedef long __stl_atomic_t;
65 /* Don't import whole namespace!!!! - ptr */
66 // # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
67 // // using _STLP_VENDOR_CSTD::size_t;
68 // using namespace _STLP_VENDOR_CSTD;
70 typedef size_t __stl_atomic_t;
73 #if defined (_STLP_THREADS)
75 # if defined (_STLP_SGI_THREADS)
78 // Hack for SGI o32 compilers.
79 # if !defined(__add_and_fetch) && \
80 (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
81 # define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
82 # define __test_and_set(__l,__v) test_and_set(__l,__v)
85 # if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
86 # define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
88 # define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
91 # define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
92 # define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
94 # elif defined (_STLP_PTHREADS)
97 # if !defined (_STLP_USE_PTHREAD_SPINLOCK)
98 # if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
99 # define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
101 //HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
102 # if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
103 # define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
105 # define _STLP_PTHREAD_ATTR_DEFAULT 0
107 # else // _STLP_USE_PTHREAD_SPINLOCK
108 # if defined (__OpenBSD__)
109 # include <spinlock.h>
111 # endif // _STLP_USE_PTHREAD_SPINLOCK
# if defined (__GNUC__) && defined (__i386__)

/* Hand-written i386 atomic primitives for gcc, used when the platform
 * threading section above did not already provide them. Each one is a
 * locked xadd: it returns the value *before* the addition, so the
 * wrappers adjust the result to return the *new* value as documented
 * at the top of this header. The "cc" and "memory" clobbers keep the
 * compiler from caching *p across the operation.
 */
#  if !defined (_STLP_ATOMIC_INCREMENT)
// Atomically performs ++*p and returns the new value.
inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl %1, %0;"
    :"=m" (*p), "=r" (result)
    :"m"  (*p), "1"  (1)
    :"cc", "memory");
  return result + 1;
}
#   define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
#  endif

#  if !defined (_STLP_ATOMIC_DECREMENT)
// Atomically performs --*p and returns the new value.
inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl %1, %0;"
    :"=m" (*p), "=r" (result)
    :"m"  (*p), "1"  (-1)
    :"cc", "memory");
  return result - 1;
}
#   define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
#  endif

#  if !defined (_STLP_ATOMIC_ADD)
// Atomically performs *p += addend and returns the new value.
inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl %1, %0;"
    :"=m" (*p), "=r" (result)
    :"m"  (*p), "1"  (addend)
    :"cc", "memory");
  return result + addend;
}
#   define _STLP_ATOMIC_ADD(__dst, __val) (_STLP_atomic_add_gcc_x86((long volatile*)__dst, (long)__val))
#  endif

# endif /* if defined(__GNUC__) && defined(__i386__) */
156 # elif defined (_STLP_WIN32THREADS)
158 # if !defined (_STLP_ATOMIC_INCREMENT)
159 # if !defined (_STLP_NEW_PLATFORM_SDK)
160 # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__CONST_CAST(long*, __x))
161 # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__CONST_CAST(long*, __x))
162 # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__CONST_CAST(long*, __x), __y)
164 # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__x)
165 # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__x)
166 # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__x, __y)
168 # define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y) STLPInterlockedExchangePointer(__x, __y)
170 * The following functionnality is only available since Windows 98, those that are targeting previous OSes
171 * should define _WIN32_WINDOWS to a value lower that the one of Win 98, see Platform SDK documentation for
174 # if defined (_STLP_NEW_PLATFORM_SDK) && (!defined (_STLP_WIN32_VERSION) || (_STLP_WIN32_VERSION >= 0x0410))
175 # define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
179 # elif defined (__DECC) || defined (__DECCXX)
181 # include <machine/builtins.h>
182 # define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
183 # define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
184 # define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
186 # elif defined(_STLP_SPARC_SOLARIS_THREADS)
188 # include <stl/_sparc_atomic.h>
190 # elif defined (_STLP_UITHREADS)
192 // this inclusion is potential hazard to bring up all sorts
193 // of old-style headers. Let's assume vendor already know how
194 // to deal with that.
195 # ifndef _STLP_INTERNAL_CTIME
196 # include <stl/_ctime.h>
198 # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
199 using _STLP_VENDOR_CSTD::time_t;
205 # elif defined (_STLP_BETHREADS)
210 # define _STLP_MUTEX_INITIALIZER = { 0 }
212 # elif defined (_STLP_NWTHREADS)
214 # include <nwthread.h>
215 # include <nwsemaph.h>
217 # elif defined(_STLP_OS2THREADS)
219 # if defined (__GNUC__)
220 # define INCL_DOSSEMAPHORES
223 // This section serves to replace os2.h for VisualAge C++
224 typedef unsigned long ULONG;
225 # if !defined (__HEV__) /* INCL_SEMAPHORE may also define HEV */
230 typedef ULONG APIRET;
233 typedef const char* PCSZ;
234 typedef ULONG BOOL32;
235 APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
236 APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
237 APIRET _System DosReleaseMutexSem(HMTX hmtx);
238 APIRET _System DosCloseMutexSem(HMTX hmtx);
239 # define _STLP_MUTEX_INITIALIZER = { 0 }
246 # define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
247 # define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
248 /* We do not grant other atomic operations as they are useless if STLport do not have
253 #if !defined (_STLP_MUTEX_INITIALIZER)
254 # if defined(_STLP_ATOMIC_EXCHANGE)
255 # define _STLP_MUTEX_INITIALIZER = { 0 }
256 # elif defined(_STLP_UITHREADS)
257 # define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
259 # define _STLP_MUTEX_INITIALIZER
264 _STLP_BEGIN_NAMESPACE
#if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
// Helper struct. This is a workaround for various compilers that don't
// handle static variables in inline functions properly.
// The out-of-line definitions of _M_do_lock/_S_nsec_sleep live in
// stl/_threads.c (included at the bottom of this header unless
// _STLP_LINK_TIME_INSTANTIATION is set).
template <int __inst>
struct _STLP_mutex_spin {
  enum { __low_max = 30, __high_max = 1000 };
  // Low if we suspect uniprocessor, high for multiprocessor.
  //Note: For SYMBIAN Emulator, these entries are to be considered WSD.
  //Still, EWSD solution can't be applied since it's templated.
  static unsigned __max;   // current spin ceiling, adapted at runtime
  static unsigned __last;  // spin count used by the last successful lock
  // Spin (with exponential backoff sleeps) until *__lock is acquired.
  static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
  // Sleep for roughly 2^__log_nsec nanoseconds between spin rounds.
  static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
};
#endif // !_STLP_USE_PTHREAD_SPINLOCK
// Locking class. Note that this class *does not have a constructor*.
// It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
// or dynamically, by explicitly calling the _M_initialize member function.
// (This is similar to the ways that a pthreads mutex can be initialized.)
// There are explicit member functions for acquiring and releasing the lock.

// There is no constructor because static initialization is essential for
// some uses, and only a class aggregate (see section 8.5.1 of the C++
// standard) can be initialized that way. That means we must have no
// constructors, no base classes, no virtual functions, and no private or
// protected members.

// For non-static cases, clients should use _STLP_mutex.
296 struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
297 #if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
298 // It should be relatively easy to get this to work on any modern Unix.
299 volatile __stl_atomic_t _M_lock;
302 #if defined (_STLP_THREADS)
303 # if defined (_STLP_ATOMIC_EXCHANGE)
304 inline void _M_initialize() { _M_lock = 0; }
305 inline void _M_destroy() {}
307 void _M_acquire_lock() {
308 _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
311 inline void _M_release_lock() {
312 volatile __stl_atomic_t* __lock = &_M_lock;
313 # if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
316 # elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
317 (defined (_ABIN32) || defined(_ABI64))
318 __lock_release(__lock);
319 # elif defined (_STLP_SPARC_SOLARIS_THREADS)
320 # if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
321 asm("membar #StoreStore ; membar #LoadStore");
328 // This is not sufficient on many multiprocessors, since
329 // writes to protected variables and the lock may be reordered.
332 # elif defined (_STLP_PTHREADS)
333 # if defined (_STLP_USE_PTHREAD_SPINLOCK)
334 # if !defined (__OpenBSD__)
335 pthread_spinlock_t _M_lock;
336 inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
337 inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
339 // sorry, but no static initializer for pthread_spinlock_t;
340 // this will not work for compilers that has problems with call
341 // constructor of static object...
343 // _STLP_mutex_base()
344 // { pthread_spin_init( &_M_lock, 0 ); }
346 // ~_STLP_mutex_base()
347 // { pthread_spin_destroy( &_M_lock ); }
349 inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
350 inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
351 # else // __OpenBSD__
353 inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
354 inline void _M_destroy() { }
355 inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
356 inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
357 # endif // __OpenBSD__
358 # else // !_STLP_USE_PTHREAD_SPINLOCK
359 pthread_mutex_t _M_lock;
360 inline void _M_initialize()
361 { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
362 inline void _M_destroy()
363 { pthread_mutex_destroy(&_M_lock); }
364 inline void _M_acquire_lock() {
365 # if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
366 if (!_M_lock.field1) _M_initialize();
368 pthread_mutex_lock(&_M_lock);
370 inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
371 # endif // !_STLP_USE_PTHREAD_SPINLOCK
373 # elif defined (_STLP_UITHREADS)
375 inline void _M_initialize()
376 { mutex_init(&_M_lock, 0, NULL); }
377 inline void _M_destroy()
378 { mutex_destroy(&_M_lock); }
379 inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
380 inline void _M_release_lock() { mutex_unlock(&_M_lock); }
382 # elif defined (_STLP_OS2THREADS)
384 inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
385 inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
386 inline void _M_acquire_lock() {
387 if (!_M_lock) _M_initialize();
388 DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
390 inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
391 # elif defined (_STLP_BETHREADS)
393 inline void _M_initialize() {
394 sem = create_sem(1, "STLPort");
397 inline void _M_destroy() {
398 int t = delete_sem(sem);
399 assert(t == B_NO_ERROR);
401 inline void _M_acquire_lock();
402 inline void _M_release_lock() {
403 status_t t = release_sem(sem);
404 assert(t == B_NO_ERROR);
406 # elif defined (_STLP_NWTHREADS)
408 inline void _M_initialize()
409 { _M_lock = OpenLocalSemaphore(1); }
410 inline void _M_destroy()
411 { CloseLocalSemaphore(_M_lock); }
412 inline void _M_acquire_lock()
413 { WaitOnLocalSemaphore(_M_lock); }
414 inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
415 # else //*ty 11/24/2001 - added configuration check
416 # error "Unknown thread facility configuration"
418 #else /* No threads */
419 inline void _M_initialize() {}
420 inline void _M_destroy() {}
421 inline void _M_acquire_lock() {}
422 inline void _M_release_lock() {}
423 #endif // _STLP_PTHREADS
426 // Locking class. The constructor initializes the lock, the destructor destroys it.
427 // Well - behaving class, does not need static initializer
429 class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
431 inline _STLP_mutex () { _M_initialize(); }
432 inline ~_STLP_mutex () { _M_destroy(); }
434 _STLP_mutex(const _STLP_mutex&);
435 void operator=(const _STLP_mutex&);
438 // A locking class that uses _STLP_STATIC_MUTEX. The constructor takes
439 // a reference to an _STLP_STATIC_MUTEX, and acquires a lock. The destructor
440 // releases the lock.
441 // It's not clear that this is exactly the right functionality.
442 // It will probably change in the future.
444 struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
445 _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
446 { _M_lock._M_acquire_lock(); }
448 { _M_lock._M_release_lock(); }
451 _STLP_STATIC_MUTEX& _M_lock;
452 void operator=(const _STLP_auto_lock&);
453 _STLP_auto_lock(const _STLP_auto_lock&);
457 * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
458 * _M_ref_count, and member functions _M_incr and _M_decr, which perform
459 * atomic preincrement/predecrement. The constructor initializes
462 class _STLP_CLASS_DECLSPEC _Refcount_Base {
463 // The data member _M_ref_count
464 #if defined (__DMC__)
467 _STLP_VOLATILE __stl_atomic_t _M_ref_count;
469 #if defined (_STLP_THREADS) && \
470 (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \
471 (defined (_STLP_WIN32_VERSION) && (_STLP_WIN32_VERSION <= 0x0400)))
472 # define _STLP_USE_MUTEX
473 _STLP_mutex _M_mutex;
478 _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
480 // _M_incr and _M_decr
481 #if defined (_STLP_THREADS)
482 # if !defined (_STLP_USE_MUTEX)
483 __stl_atomic_t _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
484 __stl_atomic_t _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
486 # undef _STLP_USE_MUTEX
487 __stl_atomic_t _M_incr() {
488 _STLP_auto_lock l(_M_mutex);
489 return ++_M_ref_count;
491 __stl_atomic_t _M_decr() {
492 _STLP_auto_lock l(_M_mutex);
493 return --_M_ref_count;
496 #else /* No threads */
497 __stl_atomic_t _M_incr() { return ++_M_ref_count; }
498 __stl_atomic_t _M_decr() { return --_M_ref_count; }
504 #ifdef __SYMBIAN32__WSD__
505 _STLP_DECLSPEC std::_STLP_STATIC_MUTEX& exp_get_threads_S_swap_lock();
506 _STLP_DECLSPEC std::_STLP_STATIC_MUTEX& exp_get_threads_0_S_swap_lock();
509 _STLP_BEGIN_NAMESPACE
511 /* Atomic swap on __stl_atomic_t
512 * This is guaranteed to behave as though it were atomic only if all
513 * possibly concurrent updates use _Atomic_swap.
514 * In some cases the operation is emulated with a lock.
515 * Idem for _Atomic_swap_ptr
517 /* Helper struct to handle following cases:
518 * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*) atomic
519 * exchange can be done on pointers
520 * - on platform without atomic operation swap is done in a critical section,
521 * portable but inefficient.
523 template <int __use_ptr_atomic_swap>
524 class _Atomic_swap_struct {
526 #if defined (_STLP_THREADS) && \
527 !defined (_STLP_ATOMIC_EXCHANGE) && \
528 (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
529 defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
530 # define _STLP_USE_ATOMIC_SWAP_MUTEX
531 #if !defined(__SYMBIAN32__WSD__)
532 static _STLP_STATIC_MUTEX _S_swap_lock;
534 static _STLP_STATIC_MUTEX& get_threads_S_swap_lock()
535 { return ::exp_get_threads_S_swap_lock(); }
536 # define _S_swap_lock get_threads_S_swap_lock()
540 static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
541 #if defined (_STLP_THREADS)
542 # if defined (_STLP_ATOMIC_EXCHANGE)
543 return _STLP_ATOMIC_EXCHANGE(__p, __q);
544 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
545 _S_swap_lock._M_acquire_lock();
546 __stl_atomic_t __result = *__p;
548 _S_swap_lock._M_release_lock();
551 # error Missing atomic swap implementation
555 __stl_atomic_t __result = *__p;
558 #endif // _STLP_THREADS
561 static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
562 #if defined (_STLP_THREADS)
563 # if defined (_STLP_ATOMIC_EXCHANGE_PTR)
564 return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
565 # elif defined (_STLP_ATOMIC_EXCHANGE)
566 _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
567 return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
568 __REINTERPRET_CAST(__stl_atomic_t, __q))
570 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
571 _S_swap_lock._M_acquire_lock();
572 void *__result = *__p;
574 _S_swap_lock._M_release_lock();
577 # error Missing pointer atomic swap implementation
581 void *__result = *__p;
587 #if defined(__SYMBIAN32__WSD__)
592 class _Atomic_swap_struct<0> {
594 #if defined (_STLP_THREADS) && \
595 (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
596 (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
597 defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
598 # define _STLP_USE_ATOMIC_SWAP_MUTEX
599 #if !defined(__SYMBIAN32__WSD__)
600 static _STLP_STATIC_MUTEX _S_swap_lock;
602 static _STLP_STATIC_MUTEX& get_threads_0_S_swap_lock()
603 { return ::exp_get_threads_0_S_swap_lock(); }
604 # define _S_swap_lock get_threads_0_S_swap_lock()
608 static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
609 #if defined (_STLP_THREADS)
610 # if defined (_STLP_ATOMIC_EXCHANGE)
611 return _STLP_ATOMIC_EXCHANGE(__p, __q);
612 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
613 /* This should be portable, but performance is expected
614 * to be quite awful. This really needs platform specific
617 _S_swap_lock._M_acquire_lock();
618 __stl_atomic_t __result = *__p;
620 _S_swap_lock._M_release_lock();
623 # error Missing atomic swap implementation
627 __stl_atomic_t __result = *__p;
630 #endif // _STLP_THREADS
633 static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
634 #if defined (_STLP_THREADS)
635 # if defined (_STLP_ATOMIC_EXCHANGE_PTR)
636 return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
637 # elif defined (_STLP_ATOMIC_EXCHANGE)
638 _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
639 return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
640 __REINTERPRET_CAST(__stl_atomic_t, __q))
642 # elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
643 _S_swap_lock._M_acquire_lock();
644 void *__result = *__p;
646 _S_swap_lock._M_release_lock();
649 # error Missing pointer atomic swap implementation
653 void *__result = *__p;
659 #if defined(__SYMBIAN32__WSD__)
663 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
664 # pragma warning (push)
665 # pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
668 inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
669 const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
670 return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
673 inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
674 const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
675 return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
678 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
679 # pragma warning (pop)
#if defined (_STLP_BETHREADS)
// BeOS: semaphores cannot be statically initialized, so a template-static
// global mutex (with an is_init flag toggled by its ctor/dtor) guards the
// lazy creation of each _STLP_mutex_base's semaphore.
template <int __inst>
struct _STLP_beos_static_lock_data {
  static bool is_init;  // true while the global mutex below is alive
  struct mutex_t : public _STLP_mutex {
    mutex_t()
    { _STLP_beos_static_lock_data<0>::is_init = true; }
    ~mutex_t()
    { _STLP_beos_static_lock_data<0>::is_init = false; }
  };
  static mutex_t mut;
};

template <int __inst>
bool _STLP_beos_static_lock_data<__inst>::is_init = false;
template <int __inst>
typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;

inline void _STLP_mutex_base::_M_acquire_lock() {
  if (sem == 0) {
    // we need to initialise on demand here
    // to prevent race conditions use our global
    // mutex if it's available:
    if (_STLP_beos_static_lock_data<0>::is_init) {
      _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
      if (sem == 0) _M_initialize();  // double-checked under the global lock
    }
    else {
      // no lock available, we must still be
      // in startup code, THERE MUST BE ONE THREAD
      // ONLY active at this point.
      _M_initialize();
    }
  }
  status_t t;
  t = acquire_sem(sem);
  assert(t == B_NO_ERROR);
}
#endif
724 #if !defined (_STLP_LINK_TIME_INSTANTIATION)
725 # include <stl/_threads.c>
728 #endif /* _STLP_INTERNAL_THREADS_H */