epoc32/include/tools/stlport/stl/_threads.h
branch Symbian3
changeset 4 837f303aceeb
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/epoc32/include/tools/stlport/stl/_threads.h	Wed Mar 31 12:33:34 2010 +0100
     1.3 @@ -0,0 +1,691 @@
     1.4 +/*
     1.5 + * Copyright (c) 1997-1999
     1.6 + * Silicon Graphics Computer Systems, Inc.
     1.7 + *
     1.8 + * Copyright (c) 1999
     1.9 + * Boris Fomitchev
    1.10 + *
    1.11 + * This material is provided "as is", with absolutely no warranty expressed
    1.12 + * or implied. Any use is at your own risk.
    1.13 + *
    1.14 + * Permission to use or copy this software for any purpose is hereby granted
    1.15 + * without fee, provided the above notices are retained on all copies.
    1.16 + * Permission to modify the code and to distribute modified code is granted,
    1.17 + * provided the above notices are retained, and a notice that the code was
    1.18 + * modified is included with the above copyright notice.
    1.19 + *
    1.20 + */
    1.21 +
    1.22 +// WARNING: This is an internal header file, included by other C++
    1.23 +// standard library headers.  You should not attempt to use this header
    1.24 +// file directly.
    1.25 +
    1.26 +
    1.27 +#ifndef _STLP_INTERNAL_THREADS_H
    1.28 +#define _STLP_INTERNAL_THREADS_H
    1.29 +
    1.30 +// Supported threading models are native SGI, pthreads, uithreads
    1.31 +// (similar to pthreads, but based on an earlier draft of the Posix
    1.32 +// threads standard), and Win32 threads.  Uithread support by Jochen
     1.33 +// Schlick, 1999; Solaris threads support was generalized to them.
    1.34 +
    1.35 +#ifndef _STLP_INTERNAL_CSTDDEF
    1.36 +#  include <stl/_cstddef.h>
    1.37 +#endif
    1.38 +
    1.39 +#ifndef _STLP_INTERNAL_CSTDLIB
    1.40 +#  include <stl/_cstdlib.h>
    1.41 +#endif
    1.42 +
    1.43 +// On SUN and Mac OS X gcc, zero-initialization works just fine...
    1.44 +#if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
    1.45 +#  define _STLP_MUTEX_INITIALIZER
    1.46 +#endif
    1.47 +
     1.48 +/* This header defines the following atomic operations, which platforms should
     1.49 + * try to support as much as possible. Atomic operations are exposed as macros
     1.50 + * in order to easily test for their existence. They are:
     1.51 + * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
     1.52 + * increments *__ptr by 1 and returns the new value
     1.53 + * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
     1.54 + * decrements *__ptr by 1 and returns the new value
     1.55 + * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
     1.56 + * assigns __val to *__target and returns the former *__target value
     1.57 + * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
     1.58 + * assigns __ptr to *__target and returns the former *__target value
    1.59 + * __stl_atomic_t _STLP_ATOMIC_ADD(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
    1.60 + * does *__target = *__target + __val and returns the old *__target value
    1.61 + */
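          +/* Usage sketch (illustrative only; __bump is a hypothetical helper, not part
          + * of STLport): because these operations are exposed as plain macros, client
          + * code can test for them with the preprocessor and fall back to a locked or
          + * single-threaded path where a platform does not provide them, e.g.:
          + *
          + *   void __bump(volatile __stl_atomic_t* __p) {
          + * #if defined (_STLP_ATOMIC_INCREMENT)
          + *     _STLP_ATOMIC_INCREMENT(__p);
          + * #else
          + *     ++(*__p);   // caller must provide its own locking here
          + * #endif
          + *   }
          + */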
    1.62 +
    1.63 +#if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
    1.64 +typedef long __stl_atomic_t;
    1.65 +#else
    1.66 +/* Don't import whole namespace!!!! - ptr */
    1.67 +// # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
    1.68 +// // using _STLP_VENDOR_CSTD::size_t;
    1.69 +// using namespace _STLP_VENDOR_CSTD;
    1.70 +// # endif
    1.71 +typedef size_t __stl_atomic_t;
    1.72 +#endif
    1.73 +
    1.74 +#if defined (_STLP_THREADS)
    1.75 +
    1.76 +#  if defined (_STLP_SGI_THREADS)
    1.77 +
    1.78 +#    include <mutex.h>
    1.79 +// Hack for SGI o32 compilers.
    1.80 +#    if !defined(__add_and_fetch) && \
    1.81 +        (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
    1.82 +#      define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
    1.83 +#      define __test_and_set(__l,__v)  test_and_set(__l,__v)
    1.84 +#    endif /* o32 */
    1.85 +
    1.86 +#    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
    1.87 +#      define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
    1.88 +#    else
    1.89 +#      define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
    1.90 +#    endif
    1.91 +
    1.92 +#    define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
    1.93 +#    define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
    1.94 +
    1.95 +#  elif defined (_STLP_PTHREADS)
    1.96 +
    1.97 +#    include <pthread.h>
    1.98 +#    if !defined (_STLP_USE_PTHREAD_SPINLOCK)
    1.99 +#      if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
   1.100 +#        define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
   1.101 +#      endif
    1.102 +// HP-UX variants have a non-standard "DCE" pthreads implementation (optional on some platforms)
   1.103 +#      if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
   1.104 +#        define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
   1.105 +#      else
   1.106 +#        define _STLP_PTHREAD_ATTR_DEFAULT 0
   1.107 +#      endif
   1.108 +#    else // _STLP_USE_PTHREAD_SPINLOCK
   1.109 +#      if defined (__OpenBSD__)
   1.110 +#        include <spinlock.h>
   1.111 +#      endif
   1.112 +#    endif // _STLP_USE_PTHREAD_SPINLOCK
   1.113 +
   1.114 +#    if defined (__GNUC__) && defined (__i386__)
   1.115 +
   1.116 +#      if !defined (_STLP_ATOMIC_INCREMENT)
   1.117 +inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
   1.118 +  long result;
   1.119 +  __asm__ __volatile__
   1.120 +    ("lock; xaddl  %1, %0;"
   1.121 +    :"=m" (*p), "=r" (result)
   1.122 +    :"m" (*p),  "1"  (1)
   1.123 +    :"cc");
   1.124 +  return result + 1;
   1.125 +}
   1.126 +#        define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
   1.127 +#      endif
   1.128 +
   1.129 +#      if !defined (_STLP_ATOMIC_DECREMENT)
   1.130 +inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
   1.131 +  long result;
   1.132 +  __asm__ __volatile__
   1.133 +    ("lock; xaddl  %1, %0;"
   1.134 +    :"=m" (*p), "=r" (result)
   1.135 +    :"m" (*p),  "1"  (-1)
   1.136 +    :"cc");
   1.137 +  return result - 1;
   1.138 +}
   1.139 +#        define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
   1.140 +#      endif
   1.141 +
   1.142 +#      if !defined (_STLP_ATOMIC_ADD)
   1.143 +inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
   1.144 +  long result;
   1.145 +  __asm__ __volatile__
   1.146 +    ("lock; xaddl %1, %0;"
   1.147 +    :"=m" (*p), "=r" (result)
   1.148 +    :"m"  (*p), "1"  (addend)
   1.149 +    :"cc");
   1.150 + return result + addend;
   1.151 +}
   1.152 +#        define _STLP_ATOMIC_ADD(__dst, __val)  (_STLP_atomic_add_gcc_x86((long volatile*)__dst, (long)__val))
   1.153 +#      endif
   1.154 +
   1.155 +#    endif /* if defined(__GNUC__) && defined(__i386__) */
   1.156 +
   1.157 +#  elif defined (_STLP_WIN32THREADS)
   1.158 +
   1.159 +#    if !defined (_STLP_ATOMIC_INCREMENT)
   1.160 +#      if !defined (_STLP_NEW_PLATFORM_SDK)
   1.161 +#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__CONST_CAST(long*, __x))
   1.162 +#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__CONST_CAST(long*, __x))
   1.163 +#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__CONST_CAST(long*, __x), __y)
   1.164 +#      else
   1.165 +#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__x)
   1.166 +#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__x)
   1.167 +#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__x, __y)
   1.168 +#      endif
   1.169 +#      define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y)     STLPInterlockedExchangePointer(__x, __y)
   1.170 +/*
    1.171 + * The following functionality is only available since Windows 98. Those targeting previous OSes
    1.172 + * should define _WIN32_WINDOWS to a value lower than the one of Windows 98; see the Platform SDK
    1.173 + * documentation for more information:
   1.174 + */
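          +/* For example (illustrative only), a build that must keep running on Windows 95
          + * could disable the InterlockedExchangeAdd based _STLP_ATOMIC_ADD with:
          + *   #define _WIN32_WINDOWS 0x0400
          + */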
   1.175 +#      if defined (_STLP_NEW_PLATFORM_SDK) && (!defined (WINVER) || (WINVER >= 0x0410)) && \
   1.176 +                                              (!defined (_WIN32_WINDOWS) || (_WIN32_WINDOWS >= 0x0410))
   1.177 +#        define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
   1.178 +#      endif
   1.179 +#    endif
   1.180 +
   1.181 +#  elif defined (__DECC) || defined (__DECCXX)
   1.182 +
   1.183 +#    include <machine/builtins.h>
   1.184 +#    define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
   1.185 +#    define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
   1.186 +#    define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
   1.187 +
   1.188 +#  elif defined(_STLP_SPARC_SOLARIS_THREADS)
   1.189 +
   1.190 +#    include <stl/_sparc_atomic.h>
   1.191 +
   1.192 +#  elif defined (_STLP_UITHREADS)
   1.193 +
    1.194 +// This inclusion is a potential hazard as it may bring in all sorts
    1.195 +// of old-style headers. Let's assume the vendor already knows how
    1.196 +// to deal with that.
   1.197 +#    ifndef _STLP_INTERNAL_CTIME
   1.198 +#      include <stl/_ctime.h>
   1.199 +#    endif
   1.200 +#    if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
   1.201 +using _STLP_VENDOR_CSTD::time_t;
   1.202 +#    endif
   1.203 +#    include <synch.h>
   1.204 +#    include <cstdio>
   1.205 +#    include <cwchar>
   1.206 +
   1.207 +#  elif defined (_STLP_BETHREADS)
   1.208 +
   1.209 +#    include <OS.h>
   1.210 +#    include <cassert>
   1.211 +#    include <stdio.h>
   1.212 +#    define _STLP_MUTEX_INITIALIZER = { 0 }
   1.213 +
   1.214 +#  elif defined (_STLP_NWTHREADS)
   1.215 +
   1.216 +#    include <nwthread.h>
   1.217 +#    include <nwsemaph.h>
   1.218 +
   1.219 +#  elif defined(_STLP_OS2THREADS)
   1.220 +
   1.221 +#    if defined (__GNUC__)
   1.222 +#      define INCL_DOSSEMAPHORES
   1.223 +#      include <os2.h>
   1.224 +#    else
   1.225 +// This section serves to replace os2.h for VisualAge C++
   1.226 +  typedef unsigned long ULONG;
   1.227 +#      if !defined (__HEV__)  /* INCL_SEMAPHORE may also define HEV */
   1.228 +#        define __HEV__
   1.229 +  typedef ULONG HEV;
   1.230 +  typedef HEV*  PHEV;
   1.231 +#      endif
   1.232 +  typedef ULONG APIRET;
   1.233 +  typedef ULONG HMTX;
   1.234 +  typedef HMTX*  PHMTX;
   1.235 +  typedef const char*  PCSZ;
   1.236 +  typedef ULONG BOOL32;
   1.237 +  APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
   1.238 +  APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
   1.239 +  APIRET _System DosReleaseMutexSem(HMTX hmtx);
   1.240 +  APIRET _System DosCloseMutexSem(HMTX hmtx);
   1.241 +#      define _STLP_MUTEX_INITIALIZER = { 0 }
   1.242 +#    endif /* GNUC */
   1.243 +
   1.244 +#  endif
   1.245 +
   1.246 +#else
   1.247 +/* no threads */
   1.248 +#  define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
   1.249 +#  define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
    1.250 +/* We do not provide the other atomic operations, as they are useless if STLport
    1.251 + * does not have to be thread safe.
    1.252 + */
   1.253 +#endif
   1.254 +
   1.255 +#if !defined (_STLP_MUTEX_INITIALIZER)
   1.256 +#  if defined(_STLP_ATOMIC_EXCHANGE)
   1.257 +#    define _STLP_MUTEX_INITIALIZER = { 0 }
   1.258 +#  elif defined(_STLP_UITHREADS)
   1.259 +#    define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
   1.260 +#  else
   1.261 +#    define _STLP_MUTEX_INITIALIZER
   1.262 +#  endif
   1.263 +#endif
   1.264 +
   1.265 +_STLP_BEGIN_NAMESPACE
   1.266 +
   1.267 +#if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
   1.268 +// Helper struct.  This is a workaround for various compilers that don't
   1.269 +// handle static variables in inline functions properly.
   1.270 +template <int __inst>
   1.271 +struct _STLP_mutex_spin {
   1.272 +  enum { __low_max = 30, __high_max = 1000 };
   1.273 +  // Low if we suspect uniprocessor, high for multiprocessor.
   1.274 +  static unsigned __max;
   1.275 +  static unsigned __last;
   1.276 +  static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
   1.277 +  static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
   1.278 +};
   1.279 +#endif // !_STLP_USE_PTHREAD_SPINLOCK
   1.280 +
   1.281 +// Locking class.  Note that this class *does not have a constructor*.
   1.282 +// It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
   1.283 +// or dynamically, by explicitly calling the _M_initialize member function.
   1.284 +// (This is similar to the ways that a pthreads mutex can be initialized.)
   1.285 +// There are explicit member functions for acquiring and releasing the lock.
   1.286 +
   1.287 +// There is no constructor because static initialization is essential for
   1.288 +// some uses, and only a class aggregate (see section 8.5.1 of the C++
   1.289 +// standard) can be initialized that way.  That means we must have no
   1.290 +// constructors, no base classes, no virtual functions, and no private or
   1.291 +// protected members.
   1.292 +
    1.293 +// For non-static cases, clients should use _STLP_mutex.
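          +
          +// Usage sketch (illustrative only; __my_lock and __f are hypothetical, and
          +// _STLP_STATIC_MUTEX is assumed to map onto this class, as it does in the
          +// default STLport configuration):
          +//   static _STLP_STATIC_MUTEX __my_lock _STLP_MUTEX_INITIALIZER;
          +//   void __f() {
          +//     __my_lock._M_acquire_lock();
          +//     // ... critical section ...
          +//     __my_lock._M_release_lock();
          +//   }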
   1.294 +
   1.295 +struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
   1.296 +#if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
   1.297 +  // It should be relatively easy to get this to work on any modern Unix.
   1.298 +  volatile __stl_atomic_t _M_lock;
   1.299 +#endif
   1.300 +
   1.301 +#if defined (_STLP_THREADS)
   1.302 +#  if defined (_STLP_ATOMIC_EXCHANGE)
   1.303 +  inline void _M_initialize() { _M_lock = 0; }
   1.304 +  inline void _M_destroy() {}
   1.305 +
   1.306 +  void _M_acquire_lock() {
   1.307 +    _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
   1.308 +  }
   1.309 +
   1.310 +  inline void _M_release_lock() {
   1.311 +    volatile __stl_atomic_t* __lock = &_M_lock;
   1.312 +#    if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
   1.313 +    asm("sync");
   1.314 +    *__lock = 0;
   1.315 +#    elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
   1.316 +         (defined (_ABIN32) || defined(_ABI64))
   1.317 +    __lock_release(__lock);
   1.318 +#    elif defined (_STLP_SPARC_SOLARIS_THREADS)
   1.319 +#      if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
   1.320 +    asm("membar #StoreStore ; membar #LoadStore");
   1.321 +#      else
   1.322 +    asm(" stbar ");
   1.323 +#      endif
   1.324 +    *__lock = 0;
   1.325 +#    else
   1.326 +    *__lock = 0;
   1.327 +    // This is not sufficient on many multiprocessors, since
   1.328 +    // writes to protected variables and the lock may be reordered.
   1.329 +#    endif
   1.330 +  }
   1.331 +#  elif defined (_STLP_PTHREADS)
   1.332 +#    if defined (_STLP_USE_PTHREAD_SPINLOCK)
   1.333 +#      if !defined (__OpenBSD__)
   1.334 +  pthread_spinlock_t _M_lock;
   1.335 +  inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
   1.336 +  inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
   1.337 +
    1.338 +  // Sorry, but there is no static initializer for pthread_spinlock_t;
    1.339 +  // this will not work for compilers that have problems calling
    1.340 +  // constructors of static objects...
   1.341 +
   1.342 +  // _STLP_mutex_base()
   1.343 +  //   { pthread_spin_init( &_M_lock, 0 ); }
   1.344 +
   1.345 +  // ~_STLP_mutex_base()
   1.346 +  //   { pthread_spin_destroy( &_M_lock ); }
   1.347 +
   1.348 +  inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
   1.349 +  inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
   1.350 +#      else // __OpenBSD__
   1.351 +  spinlock_t _M_lock;
   1.352 +  inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
   1.353 +  inline void _M_destroy() { }
   1.354 +  inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
   1.355 +  inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
   1.356 +#      endif // __OpenBSD__
   1.357 +#    else // !_STLP_USE_PTHREAD_SPINLOCK
   1.358 +  pthread_mutex_t _M_lock;
   1.359 +  inline void _M_initialize()
   1.360 +  { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
   1.361 +  inline void _M_destroy()
   1.362 +  { pthread_mutex_destroy(&_M_lock); }
   1.363 +  inline void _M_acquire_lock() {
   1.364 +#      if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
   1.365 +    if (!_M_lock.field1)  _M_initialize();
   1.366 +#      endif
   1.367 +    pthread_mutex_lock(&_M_lock);
   1.368 +  }
   1.369 +  inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
   1.370 +#    endif // !_STLP_USE_PTHREAD_SPINLOCK
   1.371 +
   1.372 +#  elif defined (_STLP_UITHREADS)
   1.373 +  mutex_t _M_lock;
   1.374 +  inline void _M_initialize()
   1.375 +  { mutex_init(&_M_lock, 0, NULL); }
   1.376 +  inline void _M_destroy()
   1.377 +  { mutex_destroy(&_M_lock); }
   1.378 +  inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
   1.379 +  inline void _M_release_lock() { mutex_unlock(&_M_lock); }
   1.380 +
   1.381 +#  elif defined (_STLP_OS2THREADS)
   1.382 +  HMTX _M_lock;
   1.383 +  inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
   1.384 +  inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
   1.385 +  inline void _M_acquire_lock() {
   1.386 +    if (!_M_lock) _M_initialize();
   1.387 +    DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
   1.388 +  }
   1.389 +  inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
   1.390 +#  elif defined (_STLP_BETHREADS)
   1.391 +  sem_id sem;
   1.392 +  inline void _M_initialize() {
   1.393 +    sem = create_sem(1, "STLPort");
   1.394 +    assert(sem > 0);
   1.395 +  }
   1.396 +  inline void _M_destroy() {
   1.397 +    int t = delete_sem(sem);
   1.398 +    assert(t == B_NO_ERROR);
   1.399 +  }
   1.400 +  inline void _M_acquire_lock();
   1.401 +  inline void _M_release_lock() {
   1.402 +    status_t t = release_sem(sem);
   1.403 +    assert(t == B_NO_ERROR);
   1.404 +  }
   1.405 +#  elif defined (_STLP_NWTHREADS)
   1.406 +  LONG _M_lock;
   1.407 +  inline void _M_initialize()
   1.408 +  { _M_lock = OpenLocalSemaphore(1); }
   1.409 +  inline void _M_destroy()
   1.410 +  { CloseLocalSemaphore(_M_lock); }
   1.411 +  inline void _M_acquire_lock()
   1.412 +  { WaitOnLocalSemaphore(_M_lock); }
   1.413 +  inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
   1.414 +#  else      //*ty 11/24/2001 - added configuration check
   1.415 +#    error "Unknown thread facility configuration"
   1.416 +#  endif
   1.417 +#else /* No threads */
   1.418 +  inline void _M_initialize() {}
   1.419 +  inline void _M_destroy() {}
   1.420 +  inline void _M_acquire_lock() {}
   1.421 +  inline void _M_release_lock() {}
   1.422 +#endif // _STLP_PTHREADS
   1.423 +};
   1.424 +
   1.425 +// Locking class.  The constructor initializes the lock, the destructor destroys it.
    1.426 +// Well-behaved class; it does not need a static initializer.
   1.427 +
   1.428 +class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
   1.429 +  public:
   1.430 +    inline _STLP_mutex () { _M_initialize(); }
   1.431 +    inline ~_STLP_mutex () { _M_destroy(); }
   1.432 +  private:
   1.433 +    _STLP_mutex(const _STLP_mutex&);
   1.434 +    void operator=(const _STLP_mutex&);
   1.435 +};
   1.436 +
   1.437 +// A locking class that uses _STLP_STATIC_MUTEX.  The constructor takes
   1.438 +// a reference to an _STLP_STATIC_MUTEX, and acquires a lock.  The destructor
   1.439 +// releases the lock.
   1.440 +// It's not clear that this is exactly the right functionality.
   1.441 +// It will probably change in the future.
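          +
          +// Usage sketch (illustrative only; __g_lock and __f are hypothetical):
          +//   static _STLP_STATIC_MUTEX __g_lock _STLP_MUTEX_INITIALIZER;
          +//   void __f() {
          +//     _STLP_auto_lock __guard(__g_lock);   // lock acquired by the constructor
          +//     // ... critical section ...
          +//   }                                      // lock released by the destructor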
   1.442 +
   1.443 +struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
   1.444 +  _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
   1.445 +  { _M_lock._M_acquire_lock(); }
   1.446 +  ~_STLP_auto_lock()
   1.447 +  { _M_lock._M_release_lock(); }
   1.448 +
   1.449 +private:
   1.450 +  _STLP_STATIC_MUTEX& _M_lock;
   1.451 +  void operator=(const _STLP_auto_lock&);
   1.452 +  _STLP_auto_lock(const _STLP_auto_lock&);
   1.453 +};
   1.454 +
   1.455 +/*
   1.456 + * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
   1.457 + * _M_ref_count, and member functions _M_incr and _M_decr, which perform
   1.458 + * atomic preincrement/predecrement.  The constructor initializes
   1.459 + * _M_ref_count.
   1.460 + */
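          +/* Usage sketch (illustrative only; _My_shared is a hypothetical client class):
          + *   class _My_shared : public _Refcount_Base {
          + *   public:
          + *     _My_shared() : _Refcount_Base(1) {}
          + *     void _M_add_ref() { _M_incr(); }
          + *     void _M_release() { if (_M_decr() == 0) delete this; }
          + *   };
          + */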
   1.461 +class _STLP_CLASS_DECLSPEC _Refcount_Base {
   1.462 +  // The data member _M_ref_count
   1.463 +#if defined (__DMC__)
   1.464 +public:
   1.465 +#endif
   1.466 +  _STLP_VOLATILE __stl_atomic_t _M_ref_count;
   1.467 +
   1.468 +#if !defined (_STLP_ATOMIC_EXCHANGE)
   1.469 +  _STLP_mutex _M_mutex;
   1.470 +#endif
   1.471 +
   1.472 +  public:
   1.473 +  // Constructor
   1.474 +  _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
   1.475 +
   1.476 +  // _M_incr and _M_decr
   1.477 +#if defined (_STLP_THREADS)
   1.478 +#  if defined (_STLP_ATOMIC_EXCHANGE)
   1.479 +   int _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
   1.480 +   int _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
   1.481 +#  else
   1.482 +  int _M_incr() {
   1.483 +    _STLP_auto_lock l(_M_mutex);
   1.484 +    return ++_M_ref_count;
   1.485 +  }
   1.486 +  int _M_decr() {
   1.487 +    _STLP_auto_lock l(_M_mutex);
   1.488 +    return --_M_ref_count;
   1.489 +  }
   1.490 +#  endif
   1.491 +#else  /* No threads */
   1.492 +  int _M_incr() { return ++_M_ref_count; }
   1.493 +  int _M_decr() { return --_M_ref_count; }
   1.494 +#endif
   1.495 +};
   1.496 +
   1.497 +/* Atomic swap on __stl_atomic_t
   1.498 + * This is guaranteed to behave as though it were atomic only if all
   1.499 + * possibly concurrent updates use _Atomic_swap.
   1.500 + * In some cases the operation is emulated with a lock.
   1.501 + * Idem for _Atomic_swap_ptr
   1.502 + */
    1.503 +/* Helper struct to handle the following cases:
    1.504 + * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*), the atomic
    1.505 + *   exchange can be done on pointers
    1.506 + * - on platforms without atomic operations, the swap is done in a critical
    1.507 + *   section; this is portable but inefficient.
   1.508 + */
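          +// Usage sketch (illustrative only; __flag is a hypothetical lock word):
          +// _Atomic_swap returns the previous value, so a naive test-and-set style
          +// acquire/release could look like:
          +//   static volatile __stl_atomic_t __flag = 0;
          +//   while (_Atomic_swap(&__flag, 1) != 0) {}   // spin until the old value is 0
          +//   // ... critical section ...
          +//   _Atomic_swap(&__flag, 0);                  // release via the same primitive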
   1.509 +template <int __use_ptr_atomic_swap>
   1.510 +class _Atomic_swap_struct {
   1.511 +public:
   1.512 +#if defined (_STLP_THREADS) && \
   1.513 +    !defined (_STLP_ATOMIC_EXCHANGE) && \
   1.514 +    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
   1.515 +     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
   1.516 +#  define _STLP_USE_ATOMIC_SWAP_MUTEX
   1.517 +  static _STLP_STATIC_MUTEX _S_swap_lock;
   1.518 +#endif
   1.519 +
   1.520 +  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
   1.521 +#if defined (_STLP_THREADS)
   1.522 +#  if defined (_STLP_ATOMIC_EXCHANGE)
   1.523 +  return _STLP_ATOMIC_EXCHANGE(__p, __q);
   1.524 +#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   1.525 +  _S_swap_lock._M_acquire_lock();
   1.526 +  __stl_atomic_t __result = *__p;
   1.527 +  *__p = __q;
   1.528 +  _S_swap_lock._M_release_lock();
   1.529 +  return __result;
   1.530 +#  else
   1.531 +#    error Missing atomic swap implementation
   1.532 +#  endif
   1.533 +#else
   1.534 +  /* no threads */
   1.535 +  __stl_atomic_t __result = *__p;
   1.536 +  *__p = __q;
   1.537 +  return __result;
   1.538 +#endif // _STLP_THREADS
   1.539 +  }
   1.540 +
   1.541 +  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
   1.542 +#if defined (_STLP_THREADS)
   1.543 +#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
   1.544 +  return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
   1.545 +#  elif defined (_STLP_ATOMIC_EXCHANGE)
   1.546 +  _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
   1.547 +  return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
    1.548 +                                                         __REINTERPRET_CAST(__stl_atomic_t, __q)));
   1.549 +#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   1.550 +  _S_swap_lock._M_acquire_lock();
   1.551 +  void *__result = *__p;
   1.552 +  *__p = __q;
   1.553 +  _S_swap_lock._M_release_lock();
   1.554 +  return __result;
   1.555 +#  else
   1.556 +#    error Missing pointer atomic swap implementation
   1.557 +#  endif
   1.558 +#else
   1.559 +  /* no thread */
   1.560 +  void *__result = *__p;
   1.561 +  *__p = __q;
   1.562 +  return __result;
   1.563 +#endif
   1.564 +  }
   1.565 +};
   1.566 +
   1.567 +_STLP_TEMPLATE_NULL
   1.568 +class _Atomic_swap_struct<0> {
   1.569 +public:
   1.570 +#if defined (_STLP_THREADS) && \
   1.571 +    (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
   1.572 +    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
   1.573 +     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
   1.574 +#  define _STLP_USE_ATOMIC_SWAP_MUTEX
   1.575 +  static _STLP_STATIC_MUTEX _S_swap_lock;
   1.576 +#endif
   1.577 +
   1.578 +  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
   1.579 +#if defined (_STLP_THREADS)
   1.580 +#  if defined (_STLP_ATOMIC_EXCHANGE)
   1.581 +  return _STLP_ATOMIC_EXCHANGE(__p, __q);
   1.582 +#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   1.583 +  /* This should be portable, but performance is expected
   1.584 +   * to be quite awful.  This really needs platform specific
   1.585 +   * code.
   1.586 +   */
   1.587 +  _S_swap_lock._M_acquire_lock();
   1.588 +  __stl_atomic_t __result = *__p;
   1.589 +  *__p = __q;
   1.590 +  _S_swap_lock._M_release_lock();
   1.591 +  return __result;
   1.592 +#  else
   1.593 +#    error Missing atomic swap implementation
   1.594 +#  endif
   1.595 +#else
   1.596 +  /* no threads */
   1.597 +  __stl_atomic_t __result = *__p;
   1.598 +  *__p = __q;
   1.599 +  return __result;
   1.600 +#endif // _STLP_THREADS
   1.601 +  }
   1.602 +
   1.603 +  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
   1.604 +#if defined (_STLP_THREADS)
   1.605 +#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
   1.606 +  return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
   1.607 +#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   1.608 +  _S_swap_lock._M_acquire_lock();
   1.609 +  void *__result = *__p;
   1.610 +  *__p = __q;
   1.611 +  _S_swap_lock._M_release_lock();
   1.612 +  return __result;
   1.613 +#  else
   1.614 +#    error Missing pointer atomic swap implementation
   1.615 +#  endif
   1.616 +#else
   1.617 +  /* no thread */
   1.618 +  void *__result = *__p;
   1.619 +  *__p = __q;
   1.620 +  return __result;
   1.621 +#endif
   1.622 +  }
   1.623 +};
   1.624 +
   1.625 +#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
   1.626 +#  pragma warning (push)
   1.627 +#  pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
   1.628 +#endif
   1.629 +
   1.630 +inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
   1.631 +  const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
   1.632 +  return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
   1.633 +}
   1.634 +
   1.635 +inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
   1.636 +  const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
   1.637 +  return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
   1.638 +}
   1.639 +
   1.640 +#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
   1.641 +#  pragma warning (pop)
   1.642 +#endif
   1.643 +
   1.644 +#if defined (_STLP_BETHREADS)
   1.645 +template <int __inst>
   1.646 +struct _STLP_beos_static_lock_data {
   1.647 +  static bool is_init;
   1.648 +  struct mutex_t : public _STLP_mutex {
   1.649 +    mutex_t()
   1.650 +    { _STLP_beos_static_lock_data<0>::is_init = true; }
   1.651 +    ~mutex_t()
   1.652 +    { _STLP_beos_static_lock_data<0>::is_init = false; }
   1.653 +  };
   1.654 +  static mutex_t mut;
   1.655 +};
   1.656 +
   1.657 +template <int __inst>
   1.658 +bool _STLP_beos_static_lock_data<__inst>::is_init = false;
   1.659 +template <int __inst>
   1.660 +typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;
   1.661 +
   1.662 +inline void _STLP_mutex_base::_M_acquire_lock() {
   1.663 +  if (sem == 0) {
    1.664 +    // We need to initialise on demand here.
    1.665 +    // To prevent race conditions, use our global
    1.666 +    // mutex if it's available:
   1.667 +    if (_STLP_beos_static_lock_data<0>::is_init) {
   1.668 +      _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
   1.669 +      if (sem == 0) _M_initialize();
   1.670 +    }
   1.671 +    else {
    1.672 +      // No lock available; we must still be
    1.673 +      // in startup code. THERE MUST BE ONLY ONE
    1.674 +      // THREAD active at this point.
   1.675 +      _M_initialize();
   1.676 +    }
   1.677 +  }
   1.678 +  status_t t;
   1.679 +  t = acquire_sem(sem);
   1.680 +  assert(t == B_NO_ERROR);
   1.681 +}
   1.682 +#endif
   1.683 +
   1.684 +_STLP_END_NAMESPACE
   1.685 +
   1.686 +#if !defined (_STLP_LINK_TIME_INSTANTIATION)
   1.687 +#  include <stl/_threads.c>
   1.688 +#endif
   1.689 +
   1.690 +#endif /* _STLP_INTERNAL_THREADS_H */
   1.691 +
   1.692 +// Local Variables:
   1.693 +// mode:C++
   1.694 +// End: