epoc32/include/tools/stlport/stl/_threads.h
author William Roberts <williamr@symbian.org>
Wed, 31 Mar 2010 12:33:34 +0100
branch Symbian3
changeset 4 837f303aceeb
permissions -rw-r--r--
Current Symbian^3 public API header files (from PDK 3.0.h)
This is the epoc32/include tree with the "platform" subtrees removed, and
all but a selected few mbg and rsg files removed.
     1 /*
     2  * Copyright (c) 1997-1999
     3  * Silicon Graphics Computer Systems, Inc.
     4  *
     5  * Copyright (c) 1999
     6  * Boris Fomitchev
     7  *
     8  * This material is provided "as is", with absolutely no warranty expressed
     9  * or implied. Any use is at your own risk.
    10  *
    11  * Permission to use or copy this software for any purpose is hereby granted
    12  * without fee, provided the above notices are retained on all copies.
    13  * Permission to modify the code and to distribute modified code is granted,
    14  * provided the above notices are retained, and a notice that the code was
    15  * modified is included with the above copyright notice.
    16  *
    17  */
    18 
    19 // WARNING: This is an internal header file, included by other C++
    20 // standard library headers.  You should not attempt to use this header
    21 // file directly.
    22 
    23 
    24 #ifndef _STLP_INTERNAL_THREADS_H
    25 #define _STLP_INTERNAL_THREADS_H
    26 
    27 // Supported threading models are native SGI, pthreads, uithreads
    28 // (similar to pthreads, but based on an earlier draft of the Posix
    29 // threads standard), and Win32 threads.  Uithread support by Jochen
    30 // Schlick, 1999, and Solaris threads generalized to them.
    31 
    32 #ifndef _STLP_INTERNAL_CSTDDEF
    33 #  include <stl/_cstddef.h>
    34 #endif
    35 
    36 #ifndef _STLP_INTERNAL_CSTDLIB
    37 #  include <stl/_cstdlib.h>
    38 #endif
    39 
    40 // On SUN and Mac OS X gcc, zero-initialization works just fine...
    41 #if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
    42 #  define _STLP_MUTEX_INITIALIZER
    43 #endif
    44 
    45 /* This header defines the following atomic operations, which platforms should
    46  * try to support as much as possible. Atomic operations are exposed as macros
    47  * in order to make it easy to test for their existence. They are:
    48  * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
    49  * increments *__ptr by 1 and returns the new value
    50  * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
    51  * decrements *__ptr by 1 and returns the new value
    52  * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
    53  * assigns __val to *__target and returns the former *__target value
    54  * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
    55  * assigns __ptr to *__target and returns the former *__target value
    56  * __stl_atomic_t _STLP_ATOMIC_ADD(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
    57  * performs *__target = *__target + __val and returns the old *__target value
    58  */
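/* Illustrative usage sketch (not part of the original header): a reference
 * count expressed with these macros, using the __stl_atomic_t type defined
 * just below.  __s_refs, __add_ref and __release_ref are hypothetical names.
 *
 *   static volatile __stl_atomic_t __s_refs = 1;
 *
 *   void __add_ref()     { _STLP_ATOMIC_INCREMENT(&__s_refs); }
 *   int  __release_ref() { return _STLP_ATOMIC_DECREMENT(&__s_refs) == 0; }
 */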
    59 
    60 #if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
    61 typedef long __stl_atomic_t;
    62 #else
    63 /* Don't import whole namespace!!!! - ptr */
    64 // # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
    65 // // using _STLP_VENDOR_CSTD::size_t;
    66 // using namespace _STLP_VENDOR_CSTD;
    67 // # endif
    68 typedef size_t __stl_atomic_t;
    69 #endif
    70 
    71 #if defined (_STLP_THREADS)
    72 
    73 #  if defined (_STLP_SGI_THREADS)
    74 
    75 #    include <mutex.h>
    76 // Hack for SGI o32 compilers.
    77 #    if !defined(__add_and_fetch) && \
    78         (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
    79 #      define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
    80 #      define __test_and_set(__l,__v)  test_and_set(__l,__v)
    81 #    endif /* o32 */
    82 
    83 #    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
    84 #      define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
    85 #    else
    86 #      define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
    87 #    endif
    88 
    89 #    define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
    90 #    define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
    91 
    92 #  elif defined (_STLP_PTHREADS)
    93 
    94 #    include <pthread.h>
    95 #    if !defined (_STLP_USE_PTHREAD_SPINLOCK)
    96 #      if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
    97 #        define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
    98 #      endif
    99 // HPUX variants have an (on some platforms optional) non-standard "DCE" pthreads implementation
   100 #      if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
   101 #        define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
   102 #      else
   103 #        define _STLP_PTHREAD_ATTR_DEFAULT 0
   104 #      endif
   105 #    else // _STLP_USE_PTHREAD_SPINLOCK
   106 #      if defined (__OpenBSD__)
   107 #        include <spinlock.h>
   108 #      endif
   109 #    endif // _STLP_USE_PTHREAD_SPINLOCK
   110 
   111 #    if defined (__GNUC__) && defined (__i386__)
   112 
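// The GCC/x86 helpers below rely on "lock; xaddl": the lock prefix makes the
// read-modify-write atomic across processors, and xaddl leaves the previous
// value of *p in the output register, so returning result +/- 1 (or
// result + addend) yields the newly stored value.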
   113 #      if !defined (_STLP_ATOMIC_INCREMENT)
   114 inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
   115   long result;
   116   __asm__ __volatile__
   117     ("lock; xaddl  %1, %0;"
   118     :"=m" (*p), "=r" (result)
   119     :"m" (*p),  "1"  (1)
   120     :"cc");
   121   return result + 1;
   122 }
   123 #        define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
   124 #      endif
   125 
   126 #      if !defined (_STLP_ATOMIC_DECREMENT)
   127 inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
   128   long result;
   129   __asm__ __volatile__
   130     ("lock; xaddl  %1, %0;"
   131     :"=m" (*p), "=r" (result)
   132     :"m" (*p),  "1"  (-1)
   133     :"cc");
   134   return result - 1;
   135 }
   136 #        define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
   137 #      endif
   138 
   139 #      if !defined (_STLP_ATOMIC_ADD)
   140 inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
   141   long result;
   142   __asm__ __volatile__
   143     ("lock; xaddl %1, %0;"
   144     :"=m" (*p), "=r" (result)
   145     :"m"  (*p), "1"  (addend)
   146     :"cc");
   147  return result + addend;
   148 }
   149 #        define _STLP_ATOMIC_ADD(__dst, __val)  (_STLP_atomic_add_gcc_x86((long volatile*)__dst, (long)__val))
   150 #      endif
   151 
   152 #    endif /* if defined(__GNUC__) && defined(__i386__) */
   153 
   154 #  elif defined (_STLP_WIN32THREADS)
   155 
   156 #    if !defined (_STLP_ATOMIC_INCREMENT)
   157 #      if !defined (_STLP_NEW_PLATFORM_SDK)
   158 #        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__CONST_CAST(long*, __x))
   159 #        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__CONST_CAST(long*, __x))
   160 #        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__CONST_CAST(long*, __x), __y)
   161 #      else
   162 #        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__x)
   163 #        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__x)
   164 #        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__x, __y)
   165 #      endif
   166 #      define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y)     STLPInterlockedExchangePointer(__x, __y)
   167 /*
   168  * The following functionality is only available since Windows 98; those targeting earlier OSes
   169  * should define _WIN32_WINDOWS to a value lower than that of Windows 98. See the Platform SDK
   170  * documentation for more information:
   171  */
   172 #      if defined (_STLP_NEW_PLATFORM_SDK) && (!defined (WINVER) || (WINVER >= 0x0410)) && \
   173                                               (!defined (_WIN32_WINDOWS) || (_WIN32_WINDOWS >= 0x0410))
   174 #        define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
   175 #      endif
   176 #    endif
   177 
   178 #  elif defined (__DECC) || defined (__DECCXX)
   179 
   180 #    include <machine/builtins.h>
   181 #    define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
   182 #    define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
   183 #    define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
   184 
   185 #  elif defined(_STLP_SPARC_SOLARIS_THREADS)
   186 
   187 #    include <stl/_sparc_atomic.h>
   188 
   189 #  elif defined (_STLP_UITHREADS)
   190 
   191 // This inclusion is a potential hazard: it may drag in all sorts
   192 // of old-style headers. Let's assume the vendor already knows how
   193 // to deal with that.
   194 #    ifndef _STLP_INTERNAL_CTIME
   195 #      include <stl/_ctime.h>
   196 #    endif
   197 #    if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
   198 using _STLP_VENDOR_CSTD::time_t;
   199 #    endif
   200 #    include <synch.h>
   201 #    include <cstdio>
   202 #    include <cwchar>
   203 
   204 #  elif defined (_STLP_BETHREADS)
   205 
   206 #    include <OS.h>
   207 #    include <cassert>
   208 #    include <stdio.h>
   209 #    define _STLP_MUTEX_INITIALIZER = { 0 }
   210 
   211 #  elif defined (_STLP_NWTHREADS)
   212 
   213 #    include <nwthread.h>
   214 #    include <nwsemaph.h>
   215 
   216 #  elif defined(_STLP_OS2THREADS)
   217 
   218 #    if defined (__GNUC__)
   219 #      define INCL_DOSSEMAPHORES
   220 #      include <os2.h>
   221 #    else
   222 // This section serves to replace os2.h for VisualAge C++
   223   typedef unsigned long ULONG;
   224 #      if !defined (__HEV__)  /* INCL_SEMAPHORE may also define HEV */
   225 #        define __HEV__
   226   typedef ULONG HEV;
   227   typedef HEV*  PHEV;
   228 #      endif
   229   typedef ULONG APIRET;
   230   typedef ULONG HMTX;
   231   typedef HMTX*  PHMTX;
   232   typedef const char*  PCSZ;
   233   typedef ULONG BOOL32;
   234   APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
   235   APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
   236   APIRET _System DosReleaseMutexSem(HMTX hmtx);
   237   APIRET _System DosCloseMutexSem(HMTX hmtx);
   238 #      define _STLP_MUTEX_INITIALIZER = { 0 }
   239 #    endif /* GNUC */
   240 
   241 #  endif
   242 
   243 #else
   244 /* no threads */
   245 #  define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
   246 #  define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
   247 /* We do not provide the other atomic operations, as they are useless when STLport does not
   248  * have to be thread safe.
   249  */
   250 #endif
   251 
   252 #if !defined (_STLP_MUTEX_INITIALIZER)
   253 #  if defined(_STLP_ATOMIC_EXCHANGE)
   254 #    define _STLP_MUTEX_INITIALIZER = { 0 }
   255 #  elif defined(_STLP_UITHREADS)
   256 #    define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
   257 #  else
   258 #    define _STLP_MUTEX_INITIALIZER
   259 #  endif
   260 #endif
   261 
   262 _STLP_BEGIN_NAMESPACE
   263 
   264 #if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
   265 // Helper struct.  This is a workaround for various compilers that don't
   266 // handle static variables in inline functions properly.
   267 template <int __inst>
   268 struct _STLP_mutex_spin {
   269   enum { __low_max = 30, __high_max = 1000 };
   270   // Low if we suspect uniprocessor, high for multiprocessor.
   271   static unsigned __max;
   272   static unsigned __last;
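  // _M_do_lock spins until __lock can be acquired, backing off with
  // _S_nsec_sleep between attempts; both are defined in <stl/_threads.c>,
  // which is included at the end of this header unless
  // _STLP_LINK_TIME_INSTANTIATION is defined.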
   273   static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
   274   static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
   275 };
   276 #endif // !_STLP_USE_PTHREAD_SPINLOCK
   277 
   278 // Locking class.  Note that this class *does not have a constructor*.
   279 // It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
   280 // or dynamically, by explicitly calling the _M_initialize member function.
   281 // (This is similar to the ways that a pthreads mutex can be initialized.)
   282 // There are explicit member functions for acquiring and releasing the lock.
   283 
   284 // There is no constructor because static initialization is essential for
   285 // some uses, and only a class aggregate (see section 8.5.1 of the C++
   286 // standard) can be initialized that way.  That means we must have no
   287 // constructors, no base classes, no virtual functions, and no private or
   288 // protected members.
   289 
   290 // For non-static cases, clients should use  _STLP_mutex.
   291 
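// Illustrative usage sketch (not part of the original header): a statically
// initialized lock protecting a shared counter.  _STLP_STATIC_MUTEX is the
// configuration-selected alias used elsewhere in this header; __s_lock,
// __s_count and __bump are hypothetical names.
//
//   static _STLP_STATIC_MUTEX __s_lock _STLP_MUTEX_INITIALIZER;
//   static long __s_count = 0;
//
//   void __bump() {
//     __s_lock._M_acquire_lock();
//     ++__s_count;
//     __s_lock._M_release_lock();
//   }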
   292 struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
   293 #if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
   294   // It should be relatively easy to get this to work on any modern Unix.
   295   volatile __stl_atomic_t _M_lock;
   296 #endif
   297 
   298 #if defined (_STLP_THREADS)
   299 #  if defined (_STLP_ATOMIC_EXCHANGE)
   300   inline void _M_initialize() { _M_lock = 0; }
   301   inline void _M_destroy() {}
   302 
   303   void _M_acquire_lock() {
   304     _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
   305   }
   306 
   307   inline void _M_release_lock() {
   308     volatile __stl_atomic_t* __lock = &_M_lock;
   309 #    if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
   310     asm("sync");
   311     *__lock = 0;
   312 #    elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
   313          (defined (_ABIN32) || defined(_ABI64))
   314     __lock_release(__lock);
   315 #    elif defined (_STLP_SPARC_SOLARIS_THREADS)
   316 #      if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
   317     asm("membar #StoreStore ; membar #LoadStore");
   318 #      else
   319     asm(" stbar ");
   320 #      endif
   321     *__lock = 0;
   322 #    else
   323     *__lock = 0;
   324     // This is not sufficient on many multiprocessors, since
   325     // writes to protected variables and the lock may be reordered.
   326 #    endif
   327   }
   328 #  elif defined (_STLP_PTHREADS)
   329 #    if defined (_STLP_USE_PTHREAD_SPINLOCK)
   330 #      if !defined (__OpenBSD__)
   331   pthread_spinlock_t _M_lock;
   332   inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
   333   inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
   334 
   335   // Sorry, but there is no static initializer for pthread_spinlock_t;
   336   // this will not work for compilers that have problems calling
   337   // constructors of static objects...
   338 
   339   // _STLP_mutex_base()
   340   //   { pthread_spin_init( &_M_lock, 0 ); }
   341 
   342   // ~_STLP_mutex_base()
   343   //   { pthread_spin_destroy( &_M_lock ); }
   344 
   345   inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
   346   inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
   347 #      else // __OpenBSD__
   348   spinlock_t _M_lock;
   349   inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
   350   inline void _M_destroy() { }
   351   inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
   352   inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
   353 #      endif // __OpenBSD__
   354 #    else // !_STLP_USE_PTHREAD_SPINLOCK
   355   pthread_mutex_t _M_lock;
   356   inline void _M_initialize()
   357   { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
   358   inline void _M_destroy()
   359   { pthread_mutex_destroy(&_M_lock); }
   360   inline void _M_acquire_lock() {
   361 #      if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
   362     if (!_M_lock.field1)  _M_initialize();
   363 #      endif
   364     pthread_mutex_lock(&_M_lock);
   365   }
   366   inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
   367 #    endif // !_STLP_USE_PTHREAD_SPINLOCK
   368 
   369 #  elif defined (_STLP_UITHREADS)
   370   mutex_t _M_lock;
   371   inline void _M_initialize()
   372   { mutex_init(&_M_lock, 0, NULL); }
   373   inline void _M_destroy()
   374   { mutex_destroy(&_M_lock); }
   375   inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
   376   inline void _M_release_lock() { mutex_unlock(&_M_lock); }
   377 
   378 #  elif defined (_STLP_OS2THREADS)
   379   HMTX _M_lock;
   380   inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
   381   inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
   382   inline void _M_acquire_lock() {
   383     if (!_M_lock) _M_initialize();
   384     DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
   385   }
   386   inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
   387 #  elif defined (_STLP_BETHREADS)
   388   sem_id sem;
   389   inline void _M_initialize() {
   390     sem = create_sem(1, "STLPort");
   391     assert(sem > 0);
   392   }
   393   inline void _M_destroy() {
   394     int t = delete_sem(sem);
   395     assert(t == B_NO_ERROR);
   396   }
   397   inline void _M_acquire_lock();
   398   inline void _M_release_lock() {
   399     status_t t = release_sem(sem);
   400     assert(t == B_NO_ERROR);
   401   }
   402 #  elif defined (_STLP_NWTHREADS)
   403   LONG _M_lock;
   404   inline void _M_initialize()
   405   { _M_lock = OpenLocalSemaphore(1); }
   406   inline void _M_destroy()
   407   { CloseLocalSemaphore(_M_lock); }
   408   inline void _M_acquire_lock()
   409   { WaitOnLocalSemaphore(_M_lock); }
   410   inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
   411 #  else      //*ty 11/24/2001 - added configuration check
   412 #    error "Unknown thread facility configuration"
   413 #  endif
   414 #else /* No threads */
   415   inline void _M_initialize() {}
   416   inline void _M_destroy() {}
   417   inline void _M_acquire_lock() {}
   418   inline void _M_release_lock() {}
   419 #endif // _STLP_PTHREADS
   420 };
   421 
   422 // Locking class.  The constructor initializes the lock, the destructor destroys it.
   423 // A well-behaved class; it does not need a static initializer.
   424 
   425 class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
   426   public:
   427     inline _STLP_mutex () { _M_initialize(); }
   428     inline ~_STLP_mutex () { _M_destroy(); }
   429   private:
   430     _STLP_mutex(const _STLP_mutex&);
   431     void operator=(const _STLP_mutex&);
   432 };
   433 
   434 // A locking class that uses _STLP_STATIC_MUTEX.  The constructor takes
   435 // a reference to an _STLP_STATIC_MUTEX, and acquires a lock.  The destructor
   436 // releases the lock.
   437 // It's not clear that this is exactly the right functionality.
   438 // It will probably change in the future.
   439 
   440 struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
   441   _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
   442   { _M_lock._M_acquire_lock(); }
   443   ~_STLP_auto_lock()
   444   { _M_lock._M_release_lock(); }
   445 
   446 private:
   447   _STLP_STATIC_MUTEX& _M_lock;
   448   void operator=(const _STLP_auto_lock&);
   449   _STLP_auto_lock(const _STLP_auto_lock&);
   450 };
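// Illustrative usage sketch (not part of the original header): guarding a
// critical section with the RAII helper above.  __s_lock is the hypothetical
// statically initialized mutex from the earlier sketch.
//
//   void __update_shared_state() {
//     _STLP_auto_lock __guard(__s_lock);   // acquires the lock here
//     // ... mutate the shared state ...
//   }                                      // lock released when __guard is destroyed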
   451 
   452 /*
   453  * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
   454  * _M_ref_count, and member functions _M_incr and _M_decr, which perform
   455  * atomic preincrement/predecrement.  The constructor initializes
   456  * _M_ref_count.
   457  */
   458 class _STLP_CLASS_DECLSPEC _Refcount_Base {
   459   // The data member _M_ref_count
   460 #if defined (__DMC__)
   461 public:
   462 #endif
   463   _STLP_VOLATILE __stl_atomic_t _M_ref_count;
   464 
   465 #if !defined (_STLP_ATOMIC_EXCHANGE)
   466   _STLP_mutex _M_mutex;
   467 #endif
   468 
   469   public:
   470   // Constructor
   471   _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
   472 
   473   // _M_incr and _M_decr
   474 #if defined (_STLP_THREADS)
   475 #  if defined (_STLP_ATOMIC_EXCHANGE)
   476    int _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
   477    int _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
   478 #  else
   479   int _M_incr() {
   480     _STLP_auto_lock l(_M_mutex);
   481     return ++_M_ref_count;
   482   }
   483   int _M_decr() {
   484     _STLP_auto_lock l(_M_mutex);
   485     return --_M_ref_count;
   486   }
   487 #  endif
   488 #else  /* No threads */
   489   int _M_incr() { return ++_M_ref_count; }
   490   int _M_decr() { return --_M_ref_count; }
   491 #endif
   492 };
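// Illustrative usage sketch (not part of the original header): a minimal
// reference-counted payload built on _Refcount_Base.  __Shared_payload,
// __add_owner and __drop_owner are hypothetical names.
//
//   struct __Shared_payload : public _Refcount_Base {
//     __Shared_payload() : _Refcount_Base(1) {}    // one owner to start with
//   };
//
//   void __add_owner(__Shared_payload* __p)  { __p->_M_incr(); }
//   void __drop_owner(__Shared_payload* __p) {
//     if (__p->_M_decr() == 0) delete __p;         // last owner destroys the payload
//   }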
   493 
   494 /* Atomic swap on __stl_atomic_t.
   495  * This is guaranteed to behave as though it were atomic only if all
   496  * possibly concurrent updates use _Atomic_swap.
   497  * In some cases the operation is emulated with a lock.
   498  * The same applies to _Atomic_swap_ptr.
   499  */
   500 /* Helper struct to handle the following cases:
   501  * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*), the atomic
   502  *   exchange can be done on pointers;
   503  * - on platforms without atomic operations, the swap is done in a critical
   504  *   section, which is portable but inefficient.
   505  */
   506 template <int __use_ptr_atomic_swap>
   507 class _Atomic_swap_struct {
   508 public:
   509 #if defined (_STLP_THREADS) && \
   510     !defined (_STLP_ATOMIC_EXCHANGE) && \
   511     (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
   512      defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
   513 #  define _STLP_USE_ATOMIC_SWAP_MUTEX
   514   static _STLP_STATIC_MUTEX _S_swap_lock;
   515 #endif
   516 
   517   static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
   518 #if defined (_STLP_THREADS)
   519 #  if defined (_STLP_ATOMIC_EXCHANGE)
   520   return _STLP_ATOMIC_EXCHANGE(__p, __q);
   521 #  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   522   _S_swap_lock._M_acquire_lock();
   523   __stl_atomic_t __result = *__p;
   524   *__p = __q;
   525   _S_swap_lock._M_release_lock();
   526   return __result;
   527 #  else
   528 #    error Missing atomic swap implementation
   529 #  endif
   530 #else
   531   /* no threads */
   532   __stl_atomic_t __result = *__p;
   533   *__p = __q;
   534   return __result;
   535 #endif // _STLP_THREADS
   536   }
   537 
   538   static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
   539 #if defined (_STLP_THREADS)
   540 #  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
   541   return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
   542 #  elif defined (_STLP_ATOMIC_EXCHANGE)
   543   _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
   544   return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
   545                                                          __REINTERPRET_CAST(__stl_atomic_t, __q)));
   546 #  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   547   _S_swap_lock._M_acquire_lock();
   548   void *__result = *__p;
   549   *__p = __q;
   550   _S_swap_lock._M_release_lock();
   551   return __result;
   552 #  else
   553 #    error Missing pointer atomic swap implementation
   554 #  endif
   555 #else
   556   /* no thread */
   557   void *__result = *__p;
   558   *__p = __q;
   559   return __result;
   560 #endif
   561   }
   562 };
   563 
   564 _STLP_TEMPLATE_NULL
   565 class _Atomic_swap_struct<0> {
   566 public:
   567 #if defined (_STLP_THREADS) && \
   568     (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
   569     (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
   570      defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
   571 #  define _STLP_USE_ATOMIC_SWAP_MUTEX
   572   static _STLP_STATIC_MUTEX _S_swap_lock;
   573 #endif
   574 
   575   static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
   576 #if defined (_STLP_THREADS)
   577 #  if defined (_STLP_ATOMIC_EXCHANGE)
   578   return _STLP_ATOMIC_EXCHANGE(__p, __q);
   579 #  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   580   /* This should be portable, but performance is expected
   581    * to be quite awful.  This really needs platform specific
   582    * code.
   583    */
   584   _S_swap_lock._M_acquire_lock();
   585   __stl_atomic_t __result = *__p;
   586   *__p = __q;
   587   _S_swap_lock._M_release_lock();
   588   return __result;
   589 #  else
   590 #    error Missing atomic swap implementation
   591 #  endif
   592 #else
   593   /* no threads */
   594   __stl_atomic_t __result = *__p;
   595   *__p = __q;
   596   return __result;
   597 #endif // _STLP_THREADS
   598   }
   599 
   600   static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
   601 #if defined (_STLP_THREADS)
   602 #  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
   603   return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
   604 #  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   605   _S_swap_lock._M_acquire_lock();
   606   void *__result = *__p;
   607   *__p = __q;
   608   _S_swap_lock._M_release_lock();
   609   return __result;
   610 #  else
   611 #    error Missing pointer atomic swap implementation
   612 #  endif
   613 #else
   614   /* no thread */
   615   void *__result = *__p;
   616   *__p = __q;
   617   return __result;
   618 #endif
   619   }
   620 };
   621 
   622 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
   623 #  pragma warning (push)
   624 #  pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
   625 #endif
   626 
   627 inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
   628   const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
   629   return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
   630 }
   631 
   632 inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
   633   const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
   634   return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
   635 }
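// Illustrative usage sketch (not part of the original header): claiming a
// one-shot flag with _Atomic_swap.  __s_flag and __run_once are hypothetical
// names.
//
//   static _STLP_VOLATILE __stl_atomic_t __s_flag = 0;
//
//   void __run_once() {
//     if (_Atomic_swap(&__s_flag, 1) == 0) {
//       // only the first caller observes the old value 0 and does the work
//     }
//   }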
   636 
   637 #if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
   638 #  pragma warning (pop)
   639 #endif
   640 
   641 #if defined (_STLP_BETHREADS)
   642 template <int __inst>
   643 struct _STLP_beos_static_lock_data {
   644   static bool is_init;
   645   struct mutex_t : public _STLP_mutex {
   646     mutex_t()
   647     { _STLP_beos_static_lock_data<0>::is_init = true; }
   648     ~mutex_t()
   649     { _STLP_beos_static_lock_data<0>::is_init = false; }
   650   };
   651   static mutex_t mut;
   652 };
   653 
   654 template <int __inst>
   655 bool _STLP_beos_static_lock_data<__inst>::is_init = false;
   656 template <int __inst>
   657 typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;
   658 
   659 inline void _STLP_mutex_base::_M_acquire_lock() {
   660   if (sem == 0) {
   661     // We need to initialise on demand here;
   662     // to prevent race conditions, use our global
   663     // mutex if it's available:
   664     if (_STLP_beos_static_lock_data<0>::is_init) {
   665       _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
   666       if (sem == 0) _M_initialize();
   667     }
   668     else {
   669       // No lock available; we must still be
   670       // in startup code.  THERE MUST BE ONLY ONE
   671       // THREAD active at this point.
   672       _M_initialize();
   673     }
   674   }
   675   status_t t;
   676   t = acquire_sem(sem);
   677   assert(t == B_NO_ERROR);
   678 }
   679 #endif
   680 
   681 _STLP_END_NAMESPACE
   682 
   683 #if !defined (_STLP_LINK_TIME_INSTANTIATION)
   684 #  include <stl/_threads.c>
   685 #endif
   686 
   687 #endif /* _STLP_INTERNAL_THREADS_H */
   688 
   689 // Local Variables:
   690 // mode:C++
   691 // End: