// (stray line, apparently an imported commit message, not part of the header:)
// Attempt to represent the S^2->S^3 header reorganisation as a series of "hg rename" operations
2 * Copyright (c) 1997-1999
3 * Silicon Graphics Computer Systems, Inc.
8 * This material is provided "as is", with absolutely no warranty expressed
9 * or implied. Any use is at your own risk.
11 * Permission to use or copy this software for any purpose is hereby granted
12 * without fee, provided the above notices are retained on all copies.
13 * Permission to modify the code and to distribute modified code is granted,
14 * provided the above notices are retained, and a notice that the code was
15 * modified is included with the above copyright notice.
19 // WARNING: This is an internal header file, included by other C++
20 // standard library headers. You should not attempt to use this header
22 // Stl_config.h should be included before this file.
// NOTE(review): this file is an incomplete excerpt — the embedded numbers at the
// start of each line are the original file's line numbers, and gaps in that
// numbering mean lines (includes, #else/#endif, braces) are missing here.
25 #ifndef _STLP_INTERNAL_THREADS_H
26 #define _STLP_INTERNAL_THREADS_H
28 // Supported threading models are native SGI, pthreads, uithreads
29 // (similar to pthreads, but based on an earlier draft of the Posix
30 // threads standard), and Win32 threads. Uithread support by Jochen
31 // Schlick, 1999, and Solaris threads generalized to them.
33 #ifndef _STLP_CONFIG_H
34 #include <stl/_config.h>
// The two guards below presumably wrap <cstddef>/<cstdlib> includes that are
// missing from this excerpt — TODO confirm against the full header.
37 # if ! defined (_STLP_CSTDDEF)
41 # if ! defined (_STLP_CSTDLIB)
45 // On SUN and Mac OS X gcc, zero-initialization works just fine...
46 # if defined (__sun) || ( defined(__GNUC__) && defined(__APPLE__) )
47 # define _STLP_MUTEX_INITIALIZER
// __stl_atomic_t is the integer type all atomic macros below operate on:
// `long` on Win32/SGI/SPARC-Solaris, `size_t` otherwise.
50 # if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
51 typedef long __stl_atomic_t;
53 # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
54 // using _STLP_VENDOR_CSTD::size_t;
55 using namespace _STLP_VENDOR_CSTD;
57 typedef size_t __stl_atomic_t;
// SGI IRIX branch: map the portable _STLP_ATOMIC_* macros onto the IRIX
// intrinsics (__add_and_fetch / __test_and_set), with fallbacks for o32.
60 # if defined(_STLP_SGI_THREADS)
62 // Hack for SGI o32 compilers.
63 #if !defined(__add_and_fetch) && \
64 (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
65 # define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
66 # define __test_and_set(__l,__v) test_and_set(__l,__v)
// Old-ABI/MIPS-II builds use the library test_and_set; newer ABIs use the
// compiler intrinsic with explicit unsigned long casts.
69 # if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
70 # define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
72 # define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
75 # define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
76 # define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
78 # elif defined (__GNUC__) && defined (__i386__) && defined (__unix__) && defined (_STLP_USE_INLINE_X86_SPINLOCK)
80 // gcc on i386 linux, freebsd, etc.
82 // This enables the memory caching on x86 linux. It is critical for SMP
83 // without it the performance is DISMAL!
// Atomically exchanges *target with source via the x86 `xchgl` instruction
// (implicitly locked on x86). NOTE(review): the function's braces and return
// statement are missing from this excerpt.
84 static inline unsigned long __xchg(volatile __stl_atomic_t* target, int source)
87 // The target is referenced in memory rather than the register
88 // because making a copy of it from memory to the register and
89 // back again would ruin the atomic nature of the call.
90 // the source does not need to be dealt with atomically so it can
91 // be copied about as needed.
93 // The casting of the source is used to prevent gcc from optimizing
94 // in such a way that breaks the atomic nature of this call.
96 __asm__ __volatile__("xchgl %1,%0"
97 :"=m" (*(volatile long *) target), "=r" (source)
98 :"m" (*(volatile long *) target), "r" (source) );
101 // The assembly above does the following atomically:
103 // source=(int)(*target);
104 // (int)(*target)=temp;
// Atomic increment of *__x. NOTE(review): the asm template operands and the
// function's braces are missing from this excerpt (presumably `lock; incl`).
108 static inline void __inc_and_fetch(volatile __stl_atomic_t* __x)
110 // Referenced in memory rather than register to preserve the atomic nature.
112 __asm__ __volatile__(
117 // The assembly above does the following atomically:
// Atomic decrement of *__x. NOTE(review): the asm template operands and the
// function's braces are missing from this excerpt (presumably `lock; decl`).
121 static inline void __dec_and_fetch(volatile __stl_atomic_t* __x)
123 // Referenced in memory rather than register to preserve the atomic nature.
125 __asm__ __volatile__(
130 // The assembly above does the following atomically:
// Wire the portable macros to the inline-asm helpers above.
134 # define _STLP_ATOMIC_EXCHANGE(target, newValue) ((__xchg(target, newValue)))
135 # define _STLP_ATOMIC_INCREMENT(__x) __inc_and_fetch(__x)
136 # define _STLP_ATOMIC_DECREMENT(__x) __dec_and_fetch(__x)
// pthreads branch: pick a mutex static initializer and a default mutex
// attribute (non-NULL only for the non-standard "DCE" pthreads on HP-UX/DEC).
138 # elif defined(_STLP_PTHREADS)
140 # include <pthread.h>
141 # ifndef _STLP_USE_PTHREAD_SPINLOCK
142 # if defined(PTHREAD_MUTEX_INITIALIZER) && !defined(_STLP_MUTEX_INITIALIZER)
143 # define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
146 //HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
147 # if defined(_DECTHREADS_) && (defined(_PTHREAD_USE_D4) || defined(__hpux)) && !defined(_CMA_SUPPRESS_EXTERNALS_)
148 # define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
150 # define _STLP_PTHREAD_ATTR_DEFAULT 0
152 # endif // !_STLP_USE_PTHREAD_SPINLOCK
// Win32 branch: either include <windows.h>, or (for compilers listed below)
// declare just the handful of Win32 APIs STLport needs, to avoid pulling in
// the whole windows.h header.
154 # elif defined(_STLP_WIN32THREADS)
155 # if !defined (_STLP_WINDOWS_H_INCLUDED) && ! defined (_WINDOWS_H)
156 # if ! (defined ( _STLP_MSVC ) || defined (__BORLANDC__) || defined (__ICL) || defined (__WATCOMC__) || defined (__MINGW32__) || defined (__DMC__))
157 # ifdef _STLP_USE_MFC
160 # include <windows.h>
162 # define _STLP_WINDOWS_H_INCLUDED
164 // This section serves as a replacement for windows.h header for Visual C++
// On MIPS/Alpha/PPC targets the Interlocked* names are compiler intrinsics
// spelled with a leading underscore, so alias them here.
166 # if (defined(_M_MRX000) || defined(_M_ALPHA) \
167 || (defined(_M_PPC) && (_MSC_VER >= 1000))) && !defined(RC_INVOKED)
168 # define InterlockedIncrement _InterlockedIncrement
169 # define InterlockedDecrement _InterlockedDecrement
170 # define InterlockedExchange _InterlockedExchange
171 # define _STLP_STDCALL
174 # define _STLP_STDCALL _cdecl
176 # define _STLP_STDCALL __stdcall
// Newer SDKs (VC7+/new Platform SDK) declare the Interlocked* family with
// `long volatile *`; older SDKs use plain `long *`.
180 #if (_MSC_VER >= 1300) || defined (_STLP_NEW_PLATFORM_SDK)
181 _STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long volatile *);
182 _STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long volatile *);
183 _STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long volatile *, long);
185 // boris : for the latest SDK, you may actually need the other version of the declaration (above)
186 // even for earlier VC++ versions. There is no way to tell SDK versions apart, sorry ...
187 _STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long*);
188 _STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long*);
189 _STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long*, long);
192 _STLP_IMPORT_DECLSPEC void _STLP_STDCALL Sleep(unsigned long);
193 _STLP_IMPORT_DECLSPEC void _STLP_STDCALL OutputDebugStringA( const char* lpOutputString );
// GetCurrentThreadId is presumably only declared under _STLP_DEBUG — the
// surrounding #if is missing from this excerpt (see the #endif comment below).
196 typedef unsigned long DWORD;
197 _STLP_IMPORT_DECLSPEC DWORD _STLP_STDCALL GetCurrentThreadId();
198 #endif /* _STLP_DEBUG */
// If Interlocked* were aliased to intrinsics above, ask the compiler to
// generate them inline rather than as calls.
200 # if defined (InterlockedIncrement)
201 # pragma intrinsic(_InterlockedIncrement)
202 # pragma intrinsic(_InterlockedDecrement)
203 # pragma intrinsic(_InterlockedExchange)
207 # endif /* STL_MSVC */
209 # define _STLP_WINDOWS_H_INCLUDED
211 # endif /* _STLP_WIN32 */
// Default Win32 mapping of the portable atomics onto the Interlocked* API
// (unless a platform branch above already defined them).
213 # ifndef _STLP_ATOMIC_INCREMENT
214 # define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement((long*)__x)
215 # define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement((long*)__x)
216 # define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange((long*)__x, (long)__y)
// DEC C/C++ (Tru64/VMS): use the compiler's atomic builtins.
218 # elif defined(__DECC) || defined(__DECCXX)
219 # include <machine/builtins.h>
220 # define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
221 # define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
222 # define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
223 # elif defined(_STLP_SPARC_SOLARIS_THREADS)
224 # include <stl/_sparc_atomic.h>
225 # elif defined (_STLP_UITHREADS)
226 // this inclusion is potential hazard to bring up all sorts
227 // of old-style headers. Let's assume the vendor already knows how
228 // to deal with that.
230 # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
231 using _STLP_VENDOR_CSTD::time_t;
235 # include <stl/_cwchar.h>
237 // BeOS: the semaphore handle is zero-initializable, so a { 0 } aggregate
// initializer suffices as the static mutex initializer.
236 # elif defined (_STLP_BETHREADS)
240 # define _STLP_MUTEX_INITIALIZER = { 0 }
241 #elif defined(_STLP_OS2THREADS)
243 # define INCL_DOSSEMAPHORES
246 // This section serves to replace os2.h for VisualAge C++
247 typedef unsigned long ULONG;
248 #ifndef __HEV__ /* INCL_SEMAPHORE may also define HEV */
253 typedef ULONG APIRET;
256 typedef const char* PCSZ;
257 typedef ULONG BOOL32;
258 APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
259 APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
260 APIRET _System DosReleaseMutexSem(HMTX hmtx);
261 APIRET _System DosCloseMutexSem(HMTX hmtx);
262 # define _STLP_MUTEX_INITIALIZER = { 0 };
264 # elif defined(_STLP_VXWORKS_THREADS)
// Finally: choose a fallback _STLP_MUTEX_INITIALIZER if no branch above set
// one. Without any static initializer, mutexes must be initialized on demand.
268 # ifndef _STLP_MUTEX_INITIALIZER
269 # if defined(_STLP_ATOMIC_EXCHANGE)
270 // we are using our own spinlock.
271 # define _STLP_MUTEX_INITIALIZER = { 0 }
272 # elif defined(_STLP_UITHREADS)
274 # define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
276 // we do not have a static initializer available. Therefore, on-demand synchronization is needed.
277 # define _STLP_MUTEX_INITIALIZER
278 # define _STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION
282 _STLP_BEGIN_NAMESPACE
284 #ifndef _STLP_USE_PTHREAD_SPINLOCK
285 // Helper struct. This is a workaround for various compilers that don't
286 // handle static variables in inline functions properly.
// Spin-then-sleep lock helper: _M_do_lock spins on *__lock (bounded by
// __max), _S_nsec_sleep backs off. Definitions live in <stl/_threads.c>.
// NOTE(review): the closing `};` of the struct is missing from this excerpt.
287 template <int __inst>
288 struct _STLP_mutex_spin {
289 enum { __low_max = 30, __high_max = 1000 };
290 // Low if we suspect uniprocessor, high for multiprocessor.
291 static unsigned __max;
292 static unsigned __last;
293 static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
294 static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
296 #endif // !_STLP_USE_PTHREAD_SPINLOCK
299 // Locking class. Note that this class *does not have a constructor*.
300 // It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
301 // or dynamically, by explicitly calling the _M_initialize member function.
302 // (This is similar to the ways that a pthreads mutex can be initialized.)
303 // There are explicit member functions for acquiring and releasing the lock.
305 // There is no constructor because static initialization is essential for
306 // some uses, and only a class aggregate (see section 8.5.1 of the C++
307 // standard) can be initialized that way. That means we must have no
308 // constructors, no base classes, no virtual functions, and no private or
309 // protected members.
311 // For non-static cases, clients should use _STLP_mutex.
// NOTE(review): this excerpt drops many original lines (closing braces,
// #else/#endif arms, member declarations for the BeOS/VxWorks branches),
// so each platform branch below is incomplete as shown.
313 struct _STLP_CLASS_DECLSPEC _STLP_mutex_base
// --- spinlock branch: _M_lock is a word spun on via _STLP_mutex_spin ---
315 #if defined(_STLP_ATOMIC_EXCHANGE) || defined(_STLP_SGI_THREADS)
316 // It should be relatively easy to get this to work on any modern Unix.
317 volatile __stl_atomic_t _M_lock;
322 # ifdef _STLP_ATOMIC_EXCHANGE
323 inline void _M_initialize() { _M_lock=0; }
324 inline void _M_destroy() {}
326 void _M_acquire_lock() {
327 _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
// Release: plain store on most targets, with platform-specific memory
// barriers / __lock_release on SGI n32/64 and SPARC V9.
330 inline void _M_release_lock() {
331 volatile __stl_atomic_t* __lock = &_M_lock;
332 # if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
335 # elif defined(_STLP_SGI_THREADS) && __mips >= 3 \
336 && (defined (_ABIN32) || defined(_ABI64))
337 __lock_release(__lock);
338 # elif defined (_STLP_SPARC_SOLARIS_THREADS)
339 # if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
340 asm("membar #StoreStore ; membar #LoadStore");
347 // This is not sufficient on many multiprocessors, since
348 // writes to protected variables and the lock may be reordered.
// --- pthreads branch: either a pthread spinlock or a pthread mutex ---
351 # elif defined(_STLP_PTHREADS)
352 # ifdef _STLP_USE_PTHREAD_SPINLOCK
353 pthread_spinlock_t _M_lock;
354 inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
355 inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
357 inline void _M_acquire_lock() {
358 // we do not care about race conditions here : there is only one thread at this point
359 if(!_M_lock) pthread_spin_init( &_M_lock, 0 );
361 // fbp: here, initialization on demand should happen before the lock
362 // we use simple strategy as we are sure this only happens on initialization
363 pthread_spin_lock( &_M_lock );
// _nodemand variant skips the on-demand init check (lock known initialized).
366 inline void _M_acquire_lock_nodemand() {
367 pthread_spin_lock( &_M_lock );
369 inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
370 # else // !_STLP_USE_PTHREAD_SPINLOCK
371 pthread_mutex_t _M_lock;
373 inline void _M_initialize() {
374 pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT);
376 inline void _M_destroy() {
377 pthread_mutex_destroy(&_M_lock);
379 inline void _M_acquire_lock_nodemand() {
380 pthread_mutex_lock(&_M_lock);
383 inline void _M_acquire_lock() {
// HP-UX DCE pthreads without a static initializer: detect an uninitialized
// mutex by its zeroed field1 and initialize on demand.
384 # if defined (__hpux) && !defined (PTHREAD_MUTEX_INITIALIZER)
385 if (!_M_lock.field1) _M_initialize();
387 pthread_mutex_lock(&_M_lock);
389 inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
390 # endif // !_STLP_USE_PTHREAD_SPINLOCK
// --- UI (Solaris) threads branch ---
392 # elif defined (_STLP_UITHREADS)
394 inline void _M_initialize() {
395 mutex_init(&_M_lock,0,NULL);
397 inline void _M_destroy() {
398 mutex_destroy(&_M_lock);
400 inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
401 inline void _M_release_lock() { mutex_unlock(&_M_lock); }
// --- OS/2 branch: Dos*MutexSem API; a zero handle means "not yet created" ---
403 # elif defined(_STLP_OS2THREADS)
405 inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
406 inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
407 inline void _M_acquire_lock_nodemand() {
408 DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
410 inline void _M_acquire_lock() {
411 if(!_M_lock) _M_initialize();
412 DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
414 inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
// --- BeOS branch: counting semaphore; _M_acquire_lock is defined out of
// line near the end of this header (needs the global init-ordering mutex) ---
415 # elif defined(_STLP_BETHREADS)
417 inline void _M_initialize()
419 sem = create_sem(1, "STLPort");
422 inline void _M_destroy()
424 int t = delete_sem(sem);
425 assert(t == B_NO_ERROR);
427 inline void _M_acquire_lock_nodemand()
430 t = acquire_sem(sem);
431 assert(t == B_NO_ERROR);
433 inline void _M_acquire_lock();
434 inline void _M_release_lock()
436 status_t t = release_sem(sem);
437 assert(t == B_NO_ERROR);
// --- VxWorks branch: mutual-exclusion semaphore (semMCreate/semTake/semGive) ---
439 # elif defined(_STLP_VXWORKS_THREADS)
441 inline void _M_initialize()
443 _M_sem = semMCreate(SEM_Q_FIFO);
446 inline void _M_destroy()
452 inline void _M_acquire_lock_nodemand()
455 semTake (_M_sem, WAIT_FOREVER);
458 inline void _M_acquire_lock()
462 _M_acquire_lock_nodemand();
464 inline void _M_release_lock()
467 semGive (_M_sem, WAIT_FOREVER);
470 # else //*ty 11/24/2001 - added configuration check
471 # error "Unknown thread facility configuration"
// --- single-threaded build: all operations are no-ops ---
473 #else /* No threads */
474 inline void _M_initialize() {}
475 inline void _M_destroy() {}
476 inline void _M_acquire_lock() {}
477 inline void _M_release_lock() {}
478 #endif // _STLP_PTHREADS
// _STLP_mutex_nodemand: on platforms needing on-demand mutex initialization,
// a derived class whose _M_acquire_lock skips the on-demand check (safe for
// _STLP_mutex, which always initializes in its constructor). Elsewhere it is
// simply an alias for _STLP_mutex_base.
// NOTE(review): the class's closing brace and the #else/#endif around the
// typedef are missing from this excerpt.
482 #if defined (_STLP_THREADS) && defined (_STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION)
483 // for use in _STLP_mutex, our purposes do not require ondemand initialization
484 // also, mutex_base may use some hacks to determine uninitialized state by zero data, which only works for globals.
485 class _STLP_CLASS_DECLSPEC _STLP_mutex_nodemand : public _STLP_mutex_base {
486 inline void _M_acquire_lock() {
487 _M_acquire_lock_nodemand();
491 typedef _STLP_mutex_base _STLP_mutex_nodemand;
495 // Locking class. The constructor initializes the lock, the destructor destroys it.
496 // Well-behaved class; does not need a static initializer.
497 class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_nodemand {
499 inline _STLP_mutex () { _M_initialize(); }
500 inline ~_STLP_mutex () { _M_destroy(); }
// Copy constructor and assignment declared but never defined — presumably in
// a private section (access specifiers are missing from this excerpt) to make
// the mutex non-copyable.
502 _STLP_mutex(const _STLP_mutex&);
503 void operator=(const _STLP_mutex&);
509 * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
510 * _M_ref_count, and member functions _M_incr and _M_decr, which perform
511 * atomic preincrement/predecrement. The constructor initializes
514 struct _STLP_CLASS_DECLSPEC _Refcount_Base
516 // The data member _M_ref_count
517 volatile __stl_atomic_t _M_ref_count;
// Without atomic primitives, fall back to guarding the count with a mutex.
519 # if !defined (_STLP_ATOMIC_EXCHANGE)
520 _STLP_mutex _M_mutex;
// Constructor: sets the reference count to __n.
524 _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
526 // _M_incr and _M_decr
527 # if defined (_STLP_THREADS) && defined (_STLP_ATOMIC_EXCHANGE)
528 void _M_incr() { _STLP_ATOMIC_INCREMENT((__stl_atomic_t*)&_M_ref_count); }
529 void _M_decr() { _STLP_ATOMIC_DECREMENT((__stl_atomic_t*)&_M_ref_count); }
// Mutex-protected variants: the ++/-- statements between acquire and release
// are missing from this excerpt.
530 # elif defined(_STLP_THREADS)
532 _M_mutex._M_acquire_lock();
534 _M_mutex._M_release_lock();
537 _M_mutex._M_acquire_lock();
539 _M_mutex._M_release_lock();
541 # else /* No threads */
542 void _M_incr() { ++_M_ref_count; }
543 void _M_decr() { --_M_ref_count; }
547 // Atomic swap on unsigned long
548 // This is guaranteed to behave as though it were atomic only if all
549 // possibly concurrent updates use _Atomic_swap.
550 // In some cases the operation is emulated with a lock.
551 # if defined (_STLP_THREADS)
// Preferred path: delegate to the platform's _STLP_ATOMIC_EXCHANGE macro.
552 # ifdef _STLP_ATOMIC_EXCHANGE
553 inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
554 return (__stl_atomic_t) _STLP_ATOMIC_EXCHANGE(__p,__q);
// Fallback: emulate the swap under a single global static mutex.
556 # elif defined(_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || defined(_STLP_USE_PTHREAD_SPINLOCK)
557 // We use a template here only to get a unique initialized instance.
558 template<int __dummy>
559 struct _Swap_lock_struct {
560 static _STLP_STATIC_MUTEX _S_swap_lock;
564 // This should be portable, but performance is expected
565 // to be quite awful. This really needs platform specific
// NOTE(review): the store of __q and the return of __result are missing from
// this excerpt (the embedded numbering skips the intervening lines).
567 inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
568 _Swap_lock_struct<0>::_S_swap_lock._M_acquire_lock();
569 __stl_atomic_t __result = *__p;
571 _Swap_lock_struct<0>::_S_swap_lock._M_release_lock();
574 # endif // _STLP_PTHREADS || _STLP_UITHREADS || _STLP_OS2THREADS || _STLP_USE_PTHREAD_SPINLOCK
// Single-threaded build: plain, unsynchronized swap.
575 # else // !_STLP_THREADS
577 static inline __stl_atomic_t _STLP_CALL
578 _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
579 __stl_atomic_t __result = *__p;
583 # endif // _STLP_THREADS
585 // A locking class that uses _STLP_STATIC_MUTEX. The constructor takes
586 // a reference to an _STLP_STATIC_MUTEX, and acquires a lock. The destructor
587 // releases the lock. (Scoped/RAII-style guard.)
589 struct _STLP_CLASS_DECLSPEC _STLP_auto_lock
591 _STLP_STATIC_MUTEX& _M_lock;
593 _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
594 { _M_lock._M_acquire_lock(); }
595 ~_STLP_auto_lock() { _M_lock._M_release_lock(); }
// Copy/assignment declared but never defined — presumably private (access
// specifiers and the closing `};` are missing from this excerpt) to make the
// guard non-copyable.
598 void operator=(const _STLP_auto_lock&);
599 _STLP_auto_lock(const _STLP_auto_lock&);
602 typedef _STLP_auto_lock _STLP_mutex_lock;
604 #ifdef _STLP_BETHREADS
// BeOS only: a global mutex (plus an is_init flag flipped by mutex_t's
// ctor/dtor, whose bodies are missing from this excerpt) used to serialize
// on-demand initialization of _STLP_mutex_base locks during startup.
606 template <int __inst>
607 struct _STLP_beos_static_lock_data
610 struct mutex_t : public _STLP_mutex
614 _STLP_beos_static_lock_data<0>::is_init = true;
618 _STLP_beos_static_lock_data<0>::is_init = false;
624 template <int __inst>
625 bool _STLP_beos_static_lock_data<__inst>::is_init = false;
626 template <int __inst>
627 typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;
// Out-of-line BeOS _M_acquire_lock: initialize the semaphore on demand under
// the global mutex when it is available; otherwise (startup, single thread)
// fall through. NOTE(review): braces and some statements are missing here.
630 inline void _STLP_mutex_base::_M_acquire_lock()
634 // we need to initialise on demand here
635 // to prevent race conditions use our global
636 // mutex if it's available:
637 if(_STLP_beos_static_lock_data<0>::is_init)
639 _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
640 if(sem == 0) _M_initialize();
644 // no lock available, we must still be
645 // in startup code, THERE MUST BE ONE THREAD
646 // ONLY active at this point.
650 _M_acquire_lock_nodemand();
657 # if !defined (_STLP_LINK_TIME_INSTANTIATION)
658 # include <stl/_threads.c>
661 #endif /* _STLP_INTERNAL_THREADS_H */