epoc32/include/stdapis/stlport/stl/_threads.h
branch Symbian2
changeset 2 2fe1408b6811
parent 0 061f57f2323e
     1.1 --- a/epoc32/include/stdapis/stlport/stl/_threads.h	Tue Nov 24 13:55:44 2009 +0000
     1.2 +++ b/epoc32/include/stdapis/stlport/stl/_threads.h	Tue Mar 16 16:12:26 2010 +0000
     1.3 @@ -1,1 +1,666 @@
     1.4 -_threads.h
     1.5 +/*
     1.6 + * Copyright (c) 1997-1999
     1.7 + * Silicon Graphics Computer Systems, Inc.
     1.8 + *
     1.9 + * Copyright (c) 1999 
    1.10 + * Boris Fomitchev
    1.11 + *
    1.12 + * This material is provided "as is", with absolutely no warranty expressed
    1.13 + * or implied. Any use is at your own risk.
    1.14 + *
    1.15 + * Permission to use or copy this software for any purpose is hereby granted 
    1.16 + * without fee, provided the above notices are retained on all copies.
    1.17 + * Permission to modify the code and to distribute modified code is granted,
    1.18 + * provided the above notices are retained, and a notice that the code was
    1.19 + * modified is included with the above copyright notice.
    1.20 + *
    1.21 + */
    1.22 +
    1.23 +// WARNING: This is an internal header file, included by other C++
    1.24 +// standard library headers.  You should not attempt to use this header
    1.25 +// file directly.
     1.26 +// stl/_config.h should be included before this file.
    1.27 +
    1.28 +
    1.29 +#ifndef _STLP_INTERNAL_THREADS_H
    1.30 +#define _STLP_INTERNAL_THREADS_H
    1.31 +
    1.32 +// Supported threading models are native SGI, pthreads, uithreads
    1.33 +// (similar to pthreads, but based on an earlier draft of the Posix
    1.34 +// threads standard), and Win32 threads.  Uithread support by Jochen
     1.35 +// Schlick, 1999; the existing Solaris threads support was generalized to them.
    1.36 +
    1.37 +#ifndef _STLP_CONFIG_H
    1.38 +#include <stl/_config.h>
    1.39 +#endif
    1.40 +
    1.41 +# if ! defined (_STLP_CSTDDEF)
    1.42 +#  include <cstddef>
    1.43 +# endif
    1.44 +
    1.45 +# if ! defined (_STLP_CSTDLIB)
    1.46 +#  include <cstdlib>
    1.47 +# endif
    1.48 +
    1.49 +// On SUN and Mac OS X gcc, zero-initialization works just fine...
    1.50 +# if defined (__sun) || ( defined(__GNUC__) && defined(__APPLE__) )
    1.51 +# define _STLP_MUTEX_INITIALIZER
    1.52 +# endif
    1.53 +
    1.54 +# if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
    1.55 +  typedef long __stl_atomic_t;
    1.56 +# else 
    1.57 +# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
    1.58 +// using _STLP_VENDOR_CSTD::size_t;
    1.59 +using namespace _STLP_VENDOR_CSTD;
    1.60 +# endif
    1.61 +  typedef size_t __stl_atomic_t;
    1.62 +#endif  
    1.63 +
    1.64 +# if defined(_STLP_SGI_THREADS)
    1.65 +#  include <mutex.h>
    1.66 +// Hack for SGI o32 compilers.
    1.67 +#if !defined(__add_and_fetch) && \
    1.68 +    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
    1.69 +#  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)  
    1.70 +#  define __test_and_set(__l,__v)  test_and_set(__l,__v)
    1.71 +#endif /* o32 */
    1.72 +
    1.73 +# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
    1.74 +#  define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
    1.75 +# else
    1.76 +#  define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
    1.77 +# endif
    1.78 +
    1.79 +#  define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
    1.80 +#  define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
    1.81 +
    1.82 +# elif defined (__GNUC__) && defined (__i386__) && defined (__unix__) && defined (_STLP_USE_INLINE_X86_SPINLOCK) 
    1.83 +
    1.84 +// gcc on i386 linux, freebsd, etc. 
    1.85 +
     1.86 +// This enables the memory caching on x86 linux.  It is critical for SMP;
     1.87 +// without it the performance is DISMAL!
    1.88 +static inline unsigned long __xchg(volatile __stl_atomic_t* target, int source)
    1.89 +{
    1.90 +
     1.91 +  // The target is referenced in memory rather than in a register,
     1.92 +  // because copying it from memory to a register and back again
     1.93 +  // would ruin the atomic nature of the call.
     1.94 +  // The source does not need to be dealt with atomically, so it can
     1.95 +  // be copied about as needed.
    1.96 +  //
    1.97 +  // The casting of the source is used to prevent gcc from optimizing 
    1.98 +  // in such a way that breaks the atomic nature of this call.
    1.99 +  //
   1.100 +  __asm__ __volatile__("xchgl %1,%0"
   1.101 +		       :"=m" (*(volatile long *) target), "=r" (source)
   1.102 +		       :"m" (*(volatile long *) target), "r" (source) );
   1.103 +  return source;
   1.104 +
    1.105 +  //  The assembly above does the following atomically:
   1.106 +  //   int temp=source;
   1.107 +  //   source=(int)(*target);
   1.108 +  //   (int)(*target)=temp;
   1.109 +  // return source
   1.110 +}
   1.111 +
   1.112 +static inline void __inc_and_fetch(volatile __stl_atomic_t* __x)
   1.113 +{
   1.114 +  // Referenced in memory rather than register to preserve the atomic nature.
   1.115 +  //
   1.116 +  __asm__ __volatile__(
   1.117 +      "lock; incl %0"
   1.118 +      :"=m" (*__x)
   1.119 +      :"m" (*__x) );
   1.120 +
    1.121 +  //  The assembly above does the following atomically:
   1.122 +  //   ++(int)(*__x);
   1.123 +
   1.124 +}
   1.125 +static inline void __dec_and_fetch(volatile __stl_atomic_t* __x)
   1.126 +{
   1.127 +  // Referenced in memory rather than register to preserve the atomic nature.
   1.128 +  //
   1.129 +  __asm__ __volatile__(
   1.130 +      "lock; decl %0"
   1.131 +      :"=m" (*__x)
   1.132 +      :"m" (*__x) );
   1.133 +
    1.134 +  //  The assembly above does the following atomically:
   1.135 +  //   --(int)(*__x);
   1.136 +}
   1.137 +
   1.138 +#  define _STLP_ATOMIC_EXCHANGE(target, newValue) ((__xchg(target, newValue)))
   1.139 +#  define _STLP_ATOMIC_INCREMENT(__x) __inc_and_fetch(__x)
   1.140 +#  define _STLP_ATOMIC_DECREMENT(__x) __dec_and_fetch(__x)
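          +
          +// Usage sketch (illustrative only; the names prefixed __example_ are
          +// hypothetical and not part of this header): a simple test-and-set spin
          +// lock can be built from _STLP_ATOMIC_EXCHANGE alone.  The real spin loop,
          +// with backoff, is _STLP_mutex_spin<>::_M_do_lock, declared below and
          +// defined in <stl/_threads.c>.
          +//
          +//   static volatile __stl_atomic_t __example_spin = 0;
          +//
          +//   void __example_spin_lock() {
          +//     // _STLP_ATOMIC_EXCHANGE returns the previous value; 0 means we won.
          +//     while (_STLP_ATOMIC_EXCHANGE(&__example_spin, 1) != 0)
          +//       { /* spin; a real lock would back off or yield here */ }
          +//   }
          +//
          +//   void __example_spin_unlock() {
          +//     __example_spin = 0;   // see _M_release_lock below for barrier issues
          +//   }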
   1.141 +
   1.142 +# elif defined(_STLP_PTHREADS)
   1.143 +
   1.144 +#  include <pthread.h>
   1.145 +#  ifndef _STLP_USE_PTHREAD_SPINLOCK
   1.146 +#   if defined(PTHREAD_MUTEX_INITIALIZER) && !defined(_STLP_MUTEX_INITIALIZER)
   1.147 +#    define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
   1.148 +#   endif
   1.149 +
    1.150 +// HP-UX variants have an (on some platforms optional) non-standard "DCE" pthreads implementation
   1.151 +#   if defined(_DECTHREADS_) && (defined(_PTHREAD_USE_D4) || defined(__hpux)) && !defined(_CMA_SUPPRESS_EXTERNALS_)
   1.152 +#    define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
   1.153 +#   else
   1.154 +#    define _STLP_PTHREAD_ATTR_DEFAULT 0
   1.155 +#   endif
   1.156 +#  endif // !_STLP_USE_PTHREAD_SPINLOCK 
   1.157 +
   1.158 +# elif defined(_STLP_WIN32THREADS)
   1.159 +#  if !defined (_STLP_WINDOWS_H_INCLUDED) && ! defined (_WINDOWS_H)
   1.160 +#   if ! (defined ( _STLP_MSVC ) || defined (__BORLANDC__) || defined (__ICL) || defined (__WATCOMC__) || defined (__MINGW32__) || defined (__DMC__))
   1.161 +#    ifdef _STLP_USE_MFC
   1.162 +#     include <afx.h>
   1.163 +#    else
   1.164 +#     include <windows.h>
   1.165 +#    endif
   1.166 +#    define _STLP_WINDOWS_H_INCLUDED
   1.167 +#   else 
    1.168 +// This section serves as a replacement for the windows.h header for Visual C++
   1.169 +extern "C" {
   1.170 +#   if (defined(_M_MRX000) || defined(_M_ALPHA) \
   1.171 +       || (defined(_M_PPC) && (_MSC_VER >= 1000))) && !defined(RC_INVOKED)
   1.172 +#    define InterlockedIncrement       _InterlockedIncrement
   1.173 +#    define InterlockedDecrement       _InterlockedDecrement
   1.174 +#    define InterlockedExchange        _InterlockedExchange
   1.175 +#    define _STLP_STDCALL
   1.176 +#   else
   1.177 +#    ifdef _MAC
   1.178 +#     define _STLP_STDCALL _cdecl
   1.179 +#    else
   1.180 +#     define _STLP_STDCALL __stdcall
   1.181 +#    endif
   1.182 +#   endif
   1.183 +
   1.184 +#if (_MSC_VER >= 1300) || defined (_STLP_NEW_PLATFORM_SDK)
   1.185 +_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long volatile *);
   1.186 +_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long volatile *);
   1.187 +_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long volatile *, long);
   1.188 +#else
   1.189 +  // boris : for the latest SDK, you may actually need the other version of the declaration (above)
   1.190 +  // even for earlier VC++ versions. There is no way to tell SDK versions apart, sorry ...
   1.191 +_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long*);
   1.192 +_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long*);
   1.193 +_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long*, long);
   1.194 +#endif
   1.195 +
   1.196 +_STLP_IMPORT_DECLSPEC void _STLP_STDCALL Sleep(unsigned long);
   1.197 +_STLP_IMPORT_DECLSPEC void _STLP_STDCALL OutputDebugStringA( const char* lpOutputString );
   1.198 +
   1.199 +#ifdef _STLP_DEBUG
   1.200 +typedef unsigned long DWORD;
   1.201 +_STLP_IMPORT_DECLSPEC DWORD _STLP_STDCALL GetCurrentThreadId();
   1.202 +#endif /* _STLP_DEBUG */
   1.203 +
   1.204 +#    if defined (InterlockedIncrement)
   1.205 +#     pragma intrinsic(_InterlockedIncrement)
   1.206 +#     pragma intrinsic(_InterlockedDecrement)
   1.207 +#     pragma intrinsic(_InterlockedExchange)
   1.208 +#    endif
   1.209 +} /* extern "C" */
   1.210 +
    1.211 +#   endif /* _STLP_MSVC */
   1.212 +
   1.213 +#   define _STLP_WINDOWS_H_INCLUDED
   1.214 +
    1.215 +#  endif /* !_STLP_WINDOWS_H_INCLUDED */
   1.216 +
   1.217 +#  ifndef _STLP_ATOMIC_INCREMENT
   1.218 +#   define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement((long*)__x)
   1.219 +#   define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement((long*)__x)
   1.220 +#   define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange((long*)__x, (long)__y)
   1.221 +#  endif
   1.222 +# elif defined(__DECC) || defined(__DECCXX)
   1.223 +#  include <machine/builtins.h>
   1.224 +#  define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
   1.225 +#  define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
   1.226 +#  define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
   1.227 +# elif defined(_STLP_SPARC_SOLARIS_THREADS)
   1.228 +#  include <stl/_sparc_atomic.h>
   1.229 +# elif defined (_STLP_UITHREADS)
    1.230 +// This inclusion is a potential hazard: it may drag in all sorts
    1.231 +// of old-style headers. Let's assume the vendor already knows how
    1.232 +// to deal with that.
   1.233 +#  include <ctime>
   1.234 +# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
   1.235 +using _STLP_VENDOR_CSTD::time_t;
   1.236 +# endif
   1.237 +#  include <synch.h>
   1.238 +#  include <cstdio>
   1.239 +#  include <stl/_cwchar.h>
   1.240 +# elif defined (_STLP_BETHREADS)
   1.241 +#  include <OS.h>
   1.242 +#include <cassert>
   1.243 +#include <stdio.h>
   1.244 +#  define _STLP_MUTEX_INITIALIZER = { 0 }
   1.245 +#elif defined(_STLP_OS2THREADS)
   1.246 +# ifdef __GNUC__
   1.247 +#  define INCL_DOSSEMAPHORES
   1.248 +#  include <os2.h>
   1.249 +# else
   1.250 +  // This section serves to replace os2.h for VisualAge C++
   1.251 +  typedef unsigned long ULONG;
   1.252 +  #ifndef __HEV__  /* INCL_SEMAPHORE may also define HEV */
   1.253 +    #define __HEV__
   1.254 +    typedef ULONG HEV;
   1.255 +    typedef HEV*  PHEV;
   1.256 +  #endif
   1.257 +  typedef ULONG APIRET;
   1.258 +  typedef ULONG HMTX;
   1.259 +  typedef HMTX*  PHMTX;
   1.260 +  typedef const char*  PCSZ;
   1.261 +  typedef ULONG BOOL32;
   1.262 +  APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
   1.263 +  APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
   1.264 +  APIRET _System DosReleaseMutexSem(HMTX hmtx);
   1.265 +  APIRET _System DosCloseMutexSem(HMTX hmtx);
    1.266 +# define _STLP_MUTEX_INITIALIZER = { 0 }
   1.267 +#  endif /* GNUC */
   1.268 +# elif defined(_STLP_VXWORKS_THREADS)
   1.269 +#  include "semLib.h"
   1.270 +# endif
   1.271 +
   1.272 +# ifndef _STLP_MUTEX_INITIALIZER
   1.273 +#   if defined(_STLP_ATOMIC_EXCHANGE)
   1.274 +// we are using our own spinlock. 
   1.275 +#     define _STLP_MUTEX_INITIALIZER = { 0 }
   1.276 +#   elif defined(_STLP_UITHREADS)
   1.277 +// known case
   1.278 +#     define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
   1.279 +#   else
    1.280 +// We do not have a static initializer available; therefore, on-demand initialization is needed.
   1.281 +#     define _STLP_MUTEX_INITIALIZER
   1.282 +#     define _STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION
   1.283 +#   endif
   1.284 +# endif
   1.285 +
   1.286 +_STLP_BEGIN_NAMESPACE
   1.287 +
   1.288 +#ifndef _STLP_USE_PTHREAD_SPINLOCK
   1.289 +// Helper struct.  This is a workaround for various compilers that don't
   1.290 +// handle static variables in inline functions properly.
   1.291 +template <int __inst>
   1.292 +struct _STLP_mutex_spin {
   1.293 +  enum { __low_max = 30, __high_max = 1000 };
   1.294 +  // Low if we suspect uniprocessor, high for multiprocessor.
   1.295 +  static unsigned __max;
   1.296 +  static unsigned __last;
   1.297 +  static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
   1.298 +  static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
   1.299 +};
   1.300 +#endif // !_STLP_USE_PTHREAD_SPINLOCK
   1.301 +
   1.302 +
   1.303 +// Locking class.  Note that this class *does not have a constructor*.
   1.304 +// It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
   1.305 +// or dynamically, by explicitly calling the _M_initialize member function.
   1.306 +// (This is similar to the ways that a pthreads mutex can be initialized.)
   1.307 +// There are explicit member functions for acquiring and releasing the lock.
   1.308 +
   1.309 +// There is no constructor because static initialization is essential for
   1.310 +// some uses, and only a class aggregate (see section 8.5.1 of the C++
   1.311 +// standard) can be initialized that way.  That means we must have no
   1.312 +// constructors, no base classes, no virtual functions, and no private or
   1.313 +// protected members.
   1.314 +
   1.315 +// For non-static cases, clients should use  _STLP_mutex.
   1.316 +
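          +// Usage sketch (illustrative only; __example_static_lock is hypothetical):
          +//
          +//   // Static initialization -- the only form that is safe for objects with
          +//   // static storage duration (the macro may expand to nothing, to "= { 0 }",
          +//   // or to a platform-specific initializer, as selected above):
          +//   static _STLP_mutex_base __example_static_lock _STLP_MUTEX_INITIALIZER;
          +//
          +//   // Dynamic initialization, e.g. for a lock living inside a heap object:
          +//   //   __obj->_M_member_lock._M_initialize();   ... use the lock ...
          +//   //   __obj->_M_member_lock._M_destroy();
          +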
   1.317 +struct _STLP_CLASS_DECLSPEC _STLP_mutex_base
   1.318 +{
   1.319 +#if defined(_STLP_ATOMIC_EXCHANGE) || defined(_STLP_SGI_THREADS)
   1.320 +  // It should be relatively easy to get this to work on any modern Unix.
   1.321 +  volatile __stl_atomic_t _M_lock;
   1.322 +#endif
   1.323 +
   1.324 +#ifdef _STLP_THREADS
   1.325 +
   1.326 +# ifdef _STLP_ATOMIC_EXCHANGE
   1.327 +  inline void _M_initialize() { _M_lock=0; }
   1.328 +  inline void _M_destroy() {}
   1.329 +
   1.330 +  void _M_acquire_lock() {
   1.331 +    _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
   1.332 +  }
   1.333 +
   1.334 +  inline void _M_release_lock() {
   1.335 +    volatile __stl_atomic_t* __lock = &_M_lock;
   1.336 +#  if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
   1.337 +        asm("sync");
   1.338 +        *__lock = 0;
   1.339 +#  elif defined(_STLP_SGI_THREADS) && __mips >= 3 \
   1.340 +	 && (defined (_ABIN32) || defined(_ABI64))
   1.341 +        __lock_release(__lock);
   1.342 +#  elif defined (_STLP_SPARC_SOLARIS_THREADS)
   1.343 +#   if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
   1.344 +	asm("membar #StoreStore ; membar #LoadStore");
   1.345 +#   else
   1.346 +	asm(" stbar ");
   1.347 +#   endif
   1.348 +        *__lock = 0;	
   1.349 +#  else
   1.350 +        *__lock = 0;
   1.351 +        // This is not sufficient on many multiprocessors, since
   1.352 +        // writes to protected variables and the lock may be reordered.
   1.353 +#  endif
   1.354 +  }
   1.355 +# elif defined(_STLP_PTHREADS)
   1.356 +#  ifdef _STLP_USE_PTHREAD_SPINLOCK
   1.357 +  pthread_spinlock_t _M_lock;
   1.358 +  inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
   1.359 +  inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
   1.360 +
   1.361 +  inline void _M_acquire_lock() { 
    1.362 +    // We do not care about race conditions here: there is only one thread at this point.
   1.363 +    if(!_M_lock) pthread_spin_init( &_M_lock, 0 );
   1.364 +
    1.365 +    // fbp: here, on-demand initialization has to happen before the lock is taken;
    1.366 +    // we use this simple strategy as we are sure it only happens during initialization.
   1.367 +    pthread_spin_lock( &_M_lock );
   1.368 +  }
   1.369 +
   1.370 +  inline void _M_acquire_lock_nodemand() { 
   1.371 +    pthread_spin_lock( &_M_lock ); 
   1.372 +  }
   1.373 +  inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
   1.374 +#  else // !_STLP_USE_PTHREAD_SPINLOCK
   1.375 +  pthread_mutex_t _M_lock;
   1.376 +
   1.377 +  inline void _M_initialize() {
   1.378 +    pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT);
   1.379 +  }
   1.380 +  inline void _M_destroy() {
   1.381 +    pthread_mutex_destroy(&_M_lock);
   1.382 +  }
   1.383 +  inline void _M_acquire_lock_nodemand() { 
   1.384 +    pthread_mutex_lock(&_M_lock);
   1.385 +  }
   1.386 +
   1.387 +  inline void _M_acquire_lock() { 
   1.388 +#    if defined (__hpux) && !defined (PTHREAD_MUTEX_INITIALIZER)
   1.389 +      if (!_M_lock.field1)  _M_initialize();
   1.390 +#    endif
   1.391 +     pthread_mutex_lock(&_M_lock);
   1.392 +  }
   1.393 +  inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
   1.394 +#  endif // !_STLP_USE_PTHREAD_SPINLOCK
   1.395 +  
   1.396 +# elif defined (_STLP_UITHREADS)
   1.397 +  mutex_t _M_lock;
   1.398 +  inline void _M_initialize() {
   1.399 +    mutex_init(&_M_lock,0,NULL);	
   1.400 +  }
   1.401 +  inline void _M_destroy() {
   1.402 +    mutex_destroy(&_M_lock);
   1.403 +  }
   1.404 +  inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
   1.405 +  inline void _M_release_lock() { mutex_unlock(&_M_lock); }
   1.406 +
   1.407 +# elif defined(_STLP_OS2THREADS)
   1.408 +  HMTX _M_lock;
   1.409 +  inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
   1.410 +  inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
   1.411 +  inline void _M_acquire_lock_nodemand() {
   1.412 +    DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
   1.413 +  }  
   1.414 +  inline void _M_acquire_lock() {
   1.415 +    if(!_M_lock) _M_initialize();
   1.416 +    DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
   1.417 +  }
   1.418 +  inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
   1.419 +# elif defined(_STLP_BETHREADS)
   1.420 +  sem_id sem;
   1.421 +  inline void _M_initialize() 
   1.422 +  {
   1.423 +     sem = create_sem(1, "STLPort");
   1.424 +     assert(sem > 0);
   1.425 +  }
   1.426 +  inline void _M_destroy() 
   1.427 +  {
   1.428 +     int t = delete_sem(sem);
   1.429 +     assert(t == B_NO_ERROR);
   1.430 +  }
   1.431 +  inline void _M_acquire_lock_nodemand()
   1.432 +  {
   1.433 +    status_t t;
   1.434 +    t = acquire_sem(sem);
   1.435 +    assert(t == B_NO_ERROR);
   1.436 +  }
   1.437 +  inline void _M_acquire_lock();
   1.438 +  inline void _M_release_lock() 
   1.439 +  {
   1.440 +     status_t t = release_sem(sem);
   1.441 +     assert(t == B_NO_ERROR);
   1.442 +  }
   1.443 +# elif defined(_STLP_VXWORKS_THREADS)
   1.444 +  SEM_ID _M_sem;
   1.445 +  inline void _M_initialize() 
   1.446 +  {
   1.447 +     _M_sem = semMCreate(SEM_Q_FIFO);
   1.448 +     assert(_M_sem > 0);
   1.449 +  }
   1.450 +  inline void _M_destroy() 
   1.451 +  {
    1.452 +    STATUS __s = semDelete (_M_sem);
   1.454 +    assert(__s == OK);
   1.455 +  }
   1.456 +  inline void _M_acquire_lock_nodemand()
   1.457 +  {
    1.458 +    STATUS __s = semTake (_M_sem, WAIT_FOREVER);
   1.460 +    assert(__s == OK);
   1.461 +  }
   1.462 +  inline void _M_acquire_lock()
   1.463 +  {
   1.464 +    if (!_M_sem)
   1.465 +      _M_initialize();
   1.466 +    _M_acquire_lock_nodemand();
   1.467 +  }
   1.468 +  inline void _M_release_lock() 
   1.469 +  {
    1.470 +    STATUS __s = semGive (_M_sem);
   1.472 +    assert(__s == OK);
   1.473 +  }
   1.474 +# else		//*ty 11/24/2001 - added configuration check
   1.475 +#  error "Unknown thread facility configuration"
   1.476 +# endif
   1.477 +#else /* No threads */
   1.478 +  inline void _M_initialize() {}
   1.479 +  inline void _M_destroy() {}
   1.480 +  inline void _M_acquire_lock() {}
   1.481 +  inline void _M_release_lock() {}
    1.482 +#endif // _STLP_THREADS
   1.483 +};
   1.484 +
   1.485 +
   1.486 +#if defined (_STLP_THREADS) && defined (_STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION)
    1.487 +// For use in _STLP_mutex: our purposes there do not require on-demand initialization.
    1.488 +// Also, _STLP_mutex_base may detect the uninitialized state by zero data, which only works for globals.
   1.489 +class _STLP_CLASS_DECLSPEC _STLP_mutex_nodemand : public _STLP_mutex_base {
   1.490 +  inline void _M_acquire_lock() { 
   1.491 +    _M_acquire_lock_nodemand();
   1.492 +  }
   1.493 +};
   1.494 +#else
   1.495 +typedef _STLP_mutex_base _STLP_mutex_nodemand;
   1.496 +#endif
   1.497 +
   1.498 +
   1.499 +// Locking class.  The constructor initializes the lock, the destructor destroys it.
    1.500 +// A well-behaved class; it does not need a static initializer.
   1.501 +class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_nodemand {
   1.502 +  public:
   1.503 +    inline _STLP_mutex () { _M_initialize(); }
   1.504 +    inline ~_STLP_mutex () { _M_destroy(); }
   1.505 +  private:
   1.506 +    _STLP_mutex(const _STLP_mutex&);
   1.507 +    void operator=(const _STLP_mutex&);
   1.508 +};
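          +
          +// Usage sketch (illustrative only; _Example_counter is hypothetical): since
          +// _STLP_mutex initializes and destroys itself, it can be used as an ordinary
          +// non-static data member:
          +//
          +//   class _Example_counter {
          +//     _STLP_mutex _M_guard;
          +//     long        _M_count;
          +//   public:
          +//     _Example_counter() : _M_count(0) {}
          +//     long _M_next() {
          +//       _M_guard._M_acquire_lock();
          +//       long __tmp = ++_M_count;
          +//       _M_guard._M_release_lock();
          +//       return __tmp;
          +//     }
          +//   };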
   1.509 +
   1.510 +
   1.511 +
   1.512 +/*
    1.513 + * Class _Refcount_Base provides a reference-count data member, _M_ref_count,
    1.514 + * of type __stl_atomic_t, and member functions _M_incr and _M_decr, which
    1.515 + * perform atomic pre-increment/pre-decrement.  The constructor initializes
    1.516 + * _M_ref_count.
   1.517 + */
   1.518 +struct _STLP_CLASS_DECLSPEC _Refcount_Base
   1.519 +{
   1.520 +  // The data member _M_ref_count
   1.521 +  volatile __stl_atomic_t _M_ref_count;
   1.522 +
   1.523 +# if !defined (_STLP_ATOMIC_EXCHANGE)
   1.524 +  _STLP_mutex _M_mutex;
   1.525 +# endif
   1.526 +
   1.527 +  // Constructor
   1.528 +  _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
   1.529 +
   1.530 +  // _M_incr and _M_decr
   1.531 +# if defined (_STLP_THREADS) && defined (_STLP_ATOMIC_EXCHANGE)
   1.532 +   void _M_incr() { _STLP_ATOMIC_INCREMENT((__stl_atomic_t*)&_M_ref_count); }
   1.533 +   void _M_decr() { _STLP_ATOMIC_DECREMENT((__stl_atomic_t*)&_M_ref_count); }
   1.534 +# elif defined(_STLP_THREADS)
   1.535 +  void _M_incr() {
   1.536 +    _M_mutex._M_acquire_lock();
   1.537 +    ++_M_ref_count;
   1.538 +    _M_mutex._M_release_lock();
   1.539 +  }
   1.540 +  void _M_decr() {
   1.541 +    _M_mutex._M_acquire_lock();
   1.542 +    --_M_ref_count;
   1.543 +    _M_mutex._M_release_lock();
   1.544 +  }
   1.545 +# else  /* No threads */
   1.546 +  void _M_incr() { ++_M_ref_count; }
   1.547 +  void _M_decr() { --_M_ref_count; }
   1.548 +# endif
   1.549 +};
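          +
          +// Usage sketch (illustrative only; _Example_shared_rep is hypothetical): a
          +// reference-counted representation derives from _Refcount_Base and starts
          +// its count at 1 for the first owner:
          +//
          +//   struct _Example_shared_rep : public _Refcount_Base {
          +//     _Example_shared_rep() : _Refcount_Base(1) {}
          +//   };
          +//
          +//   _Example_shared_rep* __rep = new _Example_shared_rep;
          +//   __rep->_M_incr();   // a second handle now shares __rep
          +//   __rep->_M_decr();   // ...and gives it up again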
   1.550 +
    1.551 +// Atomic swap on __stl_atomic_t
   1.552 +// This is guaranteed to behave as though it were atomic only if all
   1.553 +// possibly concurrent updates use _Atomic_swap.
   1.554 +// In some cases the operation is emulated with a lock.
   1.555 +# if defined (_STLP_THREADS)
   1.556 +#  ifdef _STLP_ATOMIC_EXCHANGE
   1.557 +inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
   1.558 +  return (__stl_atomic_t) _STLP_ATOMIC_EXCHANGE(__p,__q);
   1.559 +}
   1.560 +#  elif defined(_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || defined(_STLP_USE_PTHREAD_SPINLOCK)
   1.561 +// We use a template here only to get a unique initialized instance.
   1.562 +template<int __dummy>
   1.563 +struct _Swap_lock_struct {
   1.564 +  static _STLP_STATIC_MUTEX _S_swap_lock;
   1.565 +};
   1.566 +
   1.567 +
   1.568 +// This should be portable, but performance is expected
   1.569 +// to be quite awful.  This really needs platform specific
   1.570 +// code.
   1.571 +inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
   1.572 +  _Swap_lock_struct<0>::_S_swap_lock._M_acquire_lock();
   1.573 +  __stl_atomic_t __result = *__p;
   1.574 +  *__p = __q;
   1.575 +  _Swap_lock_struct<0>::_S_swap_lock._M_release_lock();
   1.576 +  return __result;
   1.577 +}
   1.578 +#  endif // _STLP_PTHREADS || _STLP_UITHREADS || _STLP_OS2THREADS || _STLP_USE_PTHREAD_SPINLOCK
   1.579 +# else // !_STLP_THREADS
   1.580 +/* no threads */
   1.581 +static inline __stl_atomic_t  _STLP_CALL
   1.582 +_Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
   1.583 +  __stl_atomic_t __result = *__p;
   1.584 +  *__p = __q;
   1.585 +  return __result;
   1.586 +}
   1.587 +# endif // _STLP_THREADS
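          +
          +// Usage sketch (illustrative only; __example_claimed is hypothetical):
          +// _Atomic_swap can implement a one-shot "claim" flag, provided every
          +// concurrent update of the flag also goes through _Atomic_swap:
          +//
          +//   static volatile __stl_atomic_t __example_claimed = 0;
          +//
          +//   bool __example_try_claim() {
          +//     return _Atomic_swap(&__example_claimed, 1) == 0;  // true only for the first caller
          +//   }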
   1.588 +
   1.589 +// A locking class that uses _STLP_STATIC_MUTEX.  The constructor takes
   1.590 +// a reference to an _STLP_STATIC_MUTEX, and acquires a lock.  The destructor
   1.591 +// releases the lock.
   1.592 +
   1.593 +struct _STLP_CLASS_DECLSPEC _STLP_auto_lock
   1.594 +{
   1.595 +  _STLP_STATIC_MUTEX& _M_lock;
   1.596 +  
   1.597 +  _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
   1.598 +    { _M_lock._M_acquire_lock(); }
   1.599 +  ~_STLP_auto_lock() { _M_lock._M_release_lock(); }
   1.600 +
   1.601 +private:
   1.602 +  void operator=(const _STLP_auto_lock&);
   1.603 +  _STLP_auto_lock(const _STLP_auto_lock&);
   1.604 +};
   1.605 +
   1.606 +typedef _STLP_auto_lock _STLP_mutex_lock;
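          +
          +// Usage sketch (illustrative only; __example_lock and __example_total are
          +// hypothetical): the guard acquires the mutex in its constructor and releases
          +// it when the scope is left, even on an early return:
          +//
          +//   static _STLP_STATIC_MUTEX __example_lock _STLP_MUTEX_INITIALIZER;
          +//   static long __example_total = 0;
          +//
          +//   long __example_add(long __n) {
          +//     _STLP_auto_lock __guard(__example_lock);
          +//     return __example_total += __n;
          +//   }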
   1.607 +
   1.608 +#ifdef _STLP_BETHREADS
   1.609 +
   1.610 +template <int __inst>
   1.611 +struct _STLP_beos_static_lock_data
   1.612 +{
   1.613 +	static bool is_init;
   1.614 +	struct mutex_t : public _STLP_mutex
   1.615 +	{
   1.616 +		mutex_t()
   1.617 +		{
   1.618 +			_STLP_beos_static_lock_data<0>::is_init = true;
   1.619 +		}
   1.620 +		~mutex_t()
   1.621 +		{
   1.622 +			_STLP_beos_static_lock_data<0>::is_init = false;
   1.623 +		}
   1.624 +	};
   1.625 +	static mutex_t mut;
   1.626 +};
   1.627 +
   1.628 +template <int __inst>
   1.629 +bool _STLP_beos_static_lock_data<__inst>::is_init = false;
   1.630 +template <int __inst>
   1.631 +typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;
   1.632 +
   1.633 +
   1.634 +inline void _STLP_mutex_base::_M_acquire_lock() 
   1.635 +{
   1.636 +	if(sem == 0)
   1.637 +	{
    1.638 +		// We need to initialise on demand here.
    1.639 +		// To prevent race conditions, use our global
    1.640 +		// mutex if it's available:
   1.641 +		if(_STLP_beos_static_lock_data<0>::is_init)
   1.642 +		{
   1.643 +			_STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
   1.644 +			if(sem == 0) _M_initialize();
   1.645 +		}
   1.646 +		else
   1.647 +		{
    1.648 +			// No lock available; we must still be in
    1.649 +			// startup code, so there must be exactly one
    1.650 +			// thread active at this point.
   1.651 +			_M_initialize();
   1.652 +		}
   1.653 +	}
   1.654 +	_M_acquire_lock_nodemand();
   1.655 +}
   1.656 +
   1.657 +#endif
   1.658 +
   1.659 +_STLP_END_NAMESPACE
   1.660 +
   1.661 +# if !defined (_STLP_LINK_TIME_INSTANTIATION)
   1.662 +#  include <stl/_threads.c>
   1.663 +# endif
   1.664 +
   1.665 +#endif /* _STLP_INTERNAL_THREADS_H */
   1.666 +
   1.667 +// Local Variables:
   1.668 +// mode:C++
   1.669 +// End:
   1.670 +