epoc32/include/stdapis/stlportv5/stl/_threads.h
author William Roberts <williamr@symbian.org>
Wed, 31 Mar 2010 12:33:34 +0100
branch Symbian3
changeset 4 837f303aceeb
parent 3 e1b950c65cb4
permissions -rw-r--r--
Current Symbian^3 public API header files (from PDK 3.0.h)
This is the epoc32/include tree with the "platform" subtrees removed, and
all but a selected few mbg and rsg files removed.
/*
 * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
 *
 * Copyright (c) 1997-1999
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1999
 * Boris Fomitchev
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */

// WARNING: This is an internal header file, included by other C++
// standard library headers.  You should not attempt to use this header
// file directly.


#ifndef _STLP_INTERNAL_THREADS_H
#define _STLP_INTERNAL_THREADS_H

// Supported threading models are native SGI, pthreads, uithreads
// (similar to pthreads, but based on an earlier draft of the POSIX
// threads standard), and Win32 threads.  Uithread support by Jochen
// Schlick, 1999; Solaris threads are generalized to uithreads.

#ifndef _STLP_INTERNAL_CSTDDEF
#  include <stl/_cstddef.h>
#endif

#ifndef _STLP_INTERNAL_CSTDLIB
#  include <stl/_cstdlib.h>
#endif

// On SUN and Mac OS X gcc, zero-initialization works just fine...
#if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
#  define _STLP_MUTEX_INITIALIZER
#endif

/* This header defines the following atomic operations, which platforms should
 * try to support as far as possible. Atomic operations are exposed as macros
 * so that their availability can easily be tested for. They are:
 * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
 * increments *__ptr by 1 and returns the new value
 * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
 * decrements *__ptr by 1 and returns the new value
 * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
 * assigns __val to *__target and returns the former *__target value
 * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
 * assigns __ptr to *__target and returns the former *__target value
 * __stl_atomic_t _STLP_ATOMIC_ADD(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
 * does *__target = *__target + __val and returns the old *__target value
 */
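
// Illustrative sketch (not part of the original header): how code might use
// these macros on a platform that defines them. The names __counter and
// __old are hypothetical.
//
//   static volatile __stl_atomic_t __counter = 0;
//   _STLP_ATOMIC_INCREMENT(&__counter);   // __counter == 1, returns 1
//   _STLP_ATOMIC_ADD(&__counter, 5);      // __counter == 6
//   _STLP_ATOMIC_DECREMENT(&__counter);   // __counter == 5, returns 5
//   __stl_atomic_t __old = _STLP_ATOMIC_EXCHANGE(&__counter, 0);  // __old == 5, __counter == 0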

#if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
typedef long __stl_atomic_t;
#else
/* Don't import whole namespace!!!! - ptr */
// # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
// // using _STLP_VENDOR_CSTD::size_t;
// using namespace _STLP_VENDOR_CSTD;
// # endif
typedef size_t __stl_atomic_t;
#endif

#if defined (_STLP_THREADS)

#  if defined (_STLP_SGI_THREADS)

#    include <mutex.h>
// Hack for SGI o32 compilers.
#    if !defined(__add_and_fetch) && \
        (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
#      define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
#      define __test_and_set(__l,__v)  test_and_set(__l,__v)
#    endif /* o32 */

#    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
#      define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
#    else
#      define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
#    endif

#    define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
#    define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)

#  elif defined (_STLP_PTHREADS)

#    include <pthread.h>
#    if !defined (_STLP_USE_PTHREAD_SPINLOCK)
#      if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
#        define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
#      endif
// HP-UX variants have a (on some platforms optional) non-standard "DCE" pthreads implementation
#      if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
#        define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
#      else
#        define _STLP_PTHREAD_ATTR_DEFAULT 0
#      endif
#    else // _STLP_USE_PTHREAD_SPINLOCK
#      if defined (__OpenBSD__)
#        include <spinlock.h>
#      endif
#    endif // _STLP_USE_PTHREAD_SPINLOCK

#    if defined (__GNUC__) && defined (__i386__)

#      if !defined (_STLP_ATOMIC_INCREMENT)
inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl  %1, %0;"
    :"=m" (*p), "=r" (result)
    :"m" (*p),  "1"  (1)
    :"cc");
  return result + 1;
}
#        define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
#      endif

#      if !defined (_STLP_ATOMIC_DECREMENT)
inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl  %1, %0;"
    :"=m" (*p), "=r" (result)
    :"m" (*p),  "1"  (-1)
    :"cc");
  return result - 1;
}
#        define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
#      endif

#      if !defined (_STLP_ATOMIC_ADD)
inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
  long result;
  __asm__ __volatile__
    ("lock; xaddl %1, %0;"
    :"=m" (*p), "=r" (result)
    :"m"  (*p), "1"  (addend)
    :"cc");
  return result + addend;
}
#        define _STLP_ATOMIC_ADD(__dst, __val)  (_STLP_atomic_add_gcc_x86((long volatile*)__dst, (long)__val))
#      endif

#    endif /* if defined(__GNUC__) && defined(__i386__) */

#  elif defined (_STLP_WIN32THREADS)

#    if !defined (_STLP_ATOMIC_INCREMENT)
#      if !defined (_STLP_NEW_PLATFORM_SDK)
#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__CONST_CAST(long*, __x))
#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__CONST_CAST(long*, __x))
#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__CONST_CAST(long*, __x), __y)
#      else
#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__x)
#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__x)
#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__x, __y)
#      endif
#      define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y)     STLPInterlockedExchangePointer(__x, __y)
/*
 * The following functionality is only available since Windows 98. Those targeting
 * earlier OSes should define _WIN32_WINDOWS to a value lower than that of Windows 98;
 * see the Platform SDK documentation for more information:
 */
#      if defined (_STLP_NEW_PLATFORM_SDK) && (!defined (_STLP_WIN32_VERSION) || (_STLP_WIN32_VERSION >= 0x0410))
#        define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
#      endif
#    endif

#  elif defined (__DECC) || defined (__DECCXX)

#    include <machine/builtins.h>
#    define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
#    define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
#    define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)

#  elif defined(_STLP_SPARC_SOLARIS_THREADS)

#    include <stl/_sparc_atomic.h>

#  elif defined (_STLP_UITHREADS)

// This inclusion is a potential hazard, as it may pull in all sorts of
// old-style headers. Let's assume the vendor already knows how to deal
// with that.
#    ifndef _STLP_INTERNAL_CTIME
#      include <stl/_ctime.h>
#    endif
#    if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
using _STLP_VENDOR_CSTD::time_t;
#    endif
#    include <synch.h>
#    include <cstdio>
#    include <cwchar>

#  elif defined (_STLP_BETHREADS)

#    include <OS.h>
#    include <cassert>
#    include <stdio.h>
#    define _STLP_MUTEX_INITIALIZER = { 0 }

#  elif defined (_STLP_NWTHREADS)

#    include <nwthread.h>
#    include <nwsemaph.h>

#  elif defined(_STLP_OS2THREADS)

#    if defined (__GNUC__)
#      define INCL_DOSSEMAPHORES
#      include <os2.h>
#    else
// This section serves to replace os2.h for VisualAge C++
  typedef unsigned long ULONG;
#      if !defined (__HEV__)  /* INCL_SEMAPHORE may also define HEV */
#        define __HEV__
  typedef ULONG HEV;
  typedef HEV*  PHEV;
#      endif
  typedef ULONG APIRET;
  typedef ULONG HMTX;
  typedef HMTX*  PHMTX;
  typedef const char*  PCSZ;
  typedef ULONG BOOL32;
  APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
  APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
  APIRET _System DosReleaseMutexSem(HMTX hmtx);
  APIRET _System DosCloseMutexSem(HMTX hmtx);
#      define _STLP_MUTEX_INITIALIZER = { 0 }
#    endif /* GNUC */

#  endif

#else
/* no threads */
#  define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
#  define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
/* We do not provide the other atomic operations, as they are useless when
 * STLport does not have to be thread safe.
 */
#endif

#if !defined (_STLP_MUTEX_INITIALIZER)
#  if defined(_STLP_ATOMIC_EXCHANGE)
#    define _STLP_MUTEX_INITIALIZER = { 0 }
#  elif defined(_STLP_UITHREADS)
#    define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
#  else
#    define _STLP_MUTEX_INITIALIZER
#  endif
#endif


_STLP_BEGIN_NAMESPACE

#if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
// Helper struct.  This is a workaround for various compilers that don't
// handle static variables in inline functions properly.
template <int __inst>
struct _STLP_mutex_spin {
  enum { __low_max = 30, __high_max = 1000 };
  // Low if we suspect uniprocessor, high for multiprocessor.
  // Note: for the SYMBIAN emulator, these entries are to be considered WSD.
  // Still, the EWSD solution can't be applied since the struct is templated.
  static unsigned __max;
  static unsigned __last;
  static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
  static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
};
#endif // !_STLP_USE_PTHREAD_SPINLOCK

// Locking class.  Note that this class *does not have a constructor*.
// It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
// or dynamically, by explicitly calling the _M_initialize member function.
// (This is similar to the ways that a pthreads mutex can be initialized.)
// There are explicit member functions for acquiring and releasing the lock.

// There is no constructor because static initialization is essential for
// some uses, and only a class aggregate (see section 8.5.1 of the C++
// standard) can be initialized that way.  That means we must have no
// constructors, no base classes, no virtual functions, and no private or
// protected members.

// For non-static cases, clients should use _STLP_mutex.

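// Illustrative sketch (not part of the original header): the two initialization
// paths described above. The names __static_lock and __lock are hypothetical.
//
//   static _STLP_mutex_base __static_lock _STLP_MUTEX_INITIALIZER;  // static initialization
//
//   _STLP_mutex_base __lock;
//   __lock._M_initialize();      // dynamic initialization
//   __lock._M_acquire_lock();
//   /* ...critical section... */
//   __lock._M_release_lock();
//   __lock._M_destroy();
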
struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
#if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
  // It should be relatively easy to get this to work on any modern Unix.
  volatile __stl_atomic_t _M_lock;
#endif

#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
  inline void _M_initialize() { _M_lock = 0; }
  inline void _M_destroy() {}

  void _M_acquire_lock() {
    _STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
  }

  inline void _M_release_lock() {
    volatile __stl_atomic_t* __lock = &_M_lock;
#    if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
    asm("sync");
    *__lock = 0;
#    elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
         (defined (_ABIN32) || defined(_ABI64))
    __lock_release(__lock);
#    elif defined (_STLP_SPARC_SOLARIS_THREADS)
#      if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
    asm("membar #StoreStore ; membar #LoadStore");
#      else
    asm(" stbar ");
#      endif
    *__lock = 0;
#    else
    *__lock = 0;
    // This is not sufficient on many multiprocessors, since
    // writes to protected variables and the lock may be reordered.
#    endif
  }
#  elif defined (_STLP_PTHREADS)
#    if defined (_STLP_USE_PTHREAD_SPINLOCK)
#      if !defined (__OpenBSD__)
  pthread_spinlock_t _M_lock;
  inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
  inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }

  // Sorry, but there is no static initializer for pthread_spinlock_t;
  // this will not work for compilers that have problems calling
  // constructors of static objects...

  // _STLP_mutex_base()
  //   { pthread_spin_init( &_M_lock, 0 ); }

  // ~_STLP_mutex_base()
  //   { pthread_spin_destroy( &_M_lock ); }

  inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
  inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
#      else // __OpenBSD__
  spinlock_t _M_lock;
  inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
  inline void _M_destroy() { }
  inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
  inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
#      endif // __OpenBSD__
#    else // !_STLP_USE_PTHREAD_SPINLOCK
  pthread_mutex_t _M_lock;
  inline void _M_initialize()
  { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
  inline void _M_destroy()
  { pthread_mutex_destroy(&_M_lock); }
  inline void _M_acquire_lock() {
#      if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
    if (!_M_lock.field1)  _M_initialize();
#      endif
    pthread_mutex_lock(&_M_lock);
  }
  inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
#    endif // !_STLP_USE_PTHREAD_SPINLOCK

#  elif defined (_STLP_UITHREADS)
  mutex_t _M_lock;
  inline void _M_initialize()
  { mutex_init(&_M_lock, 0, NULL); }
  inline void _M_destroy()
  { mutex_destroy(&_M_lock); }
  inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
  inline void _M_release_lock() { mutex_unlock(&_M_lock); }

#  elif defined (_STLP_OS2THREADS)
  HMTX _M_lock;
  inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
  inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
  inline void _M_acquire_lock() {
    if (!_M_lock) _M_initialize();
    DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
  }
  inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
#  elif defined (_STLP_BETHREADS)
  sem_id sem;
  inline void _M_initialize() {
    sem = create_sem(1, "STLPort");
    assert(sem > 0);
  }
  inline void _M_destroy() {
    int t = delete_sem(sem);
    assert(t == B_NO_ERROR);
  }
  inline void _M_acquire_lock();
  inline void _M_release_lock() {
    status_t t = release_sem(sem);
    assert(t == B_NO_ERROR);
  }
#  elif defined (_STLP_NWTHREADS)
  LONG _M_lock;
  inline void _M_initialize()
  { _M_lock = OpenLocalSemaphore(1); }
  inline void _M_destroy()
  { CloseLocalSemaphore(_M_lock); }
  inline void _M_acquire_lock()
  { WaitOnLocalSemaphore(_M_lock); }
  inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
#  else      //*ty 11/24/2001 - added configuration check
#    error "Unknown thread facility configuration"
#  endif
#else /* No threads */
  inline void _M_initialize() {}
  inline void _M_destroy() {}
  inline void _M_acquire_lock() {}
  inline void _M_release_lock() {}
#endif // _STLP_THREADS
};

// Locking class.  The constructor initializes the lock, the destructor destroys it.
// A well-behaved class; it does not need a static initializer.

class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
  public:
    inline _STLP_mutex () { _M_initialize(); }
    inline ~_STLP_mutex () { _M_destroy(); }
  private:
    _STLP_mutex(const _STLP_mutex&);
    void operator=(const _STLP_mutex&);
};

// A locking class that uses _STLP_STATIC_MUTEX.  The constructor takes
// a reference to an _STLP_STATIC_MUTEX, and acquires a lock.  The destructor
// releases the lock.
// It's not clear that this is exactly the right functionality.
// It will probably change in the future.

struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
  _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
  { _M_lock._M_acquire_lock(); }
  ~_STLP_auto_lock()
  { _M_lock._M_release_lock(); }

private:
  _STLP_STATIC_MUTEX& _M_lock;
  void operator=(const _STLP_auto_lock&);
  _STLP_auto_lock(const _STLP_auto_lock&);
};

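// Illustrative usage sketch (not part of the original header); __file_lock and
// __dump are hypothetical names, and _STLP_STATIC_MUTEX is assumed to expand to
// the aggregate mutex type defined above:
//
//   static _STLP_STATIC_MUTEX __file_lock _STLP_MUTEX_INITIALIZER;
//
//   void __dump() {
//     _STLP_auto_lock __guard(__file_lock);   // lock acquired here
//     /* ...critical section... */
//   }                                         // lock released when __guard is destroyed
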
/*
 * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
 * _M_ref_count, and member functions _M_incr and _M_decr, which perform
 * atomic preincrement/predecrement.  The constructor initializes
 * _M_ref_count.
 */
class _STLP_CLASS_DECLSPEC _Refcount_Base {
  // The data member _M_ref_count
#if defined (__DMC__)
public:
#endif
  _STLP_VOLATILE __stl_atomic_t _M_ref_count;

#if defined (_STLP_THREADS) && \
   (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \
    (defined (_STLP_WIN32_VERSION) && (_STLP_WIN32_VERSION <= 0x0400)))
#  define _STLP_USE_MUTEX
  _STLP_mutex _M_mutex;
#endif

  public:
  // Constructor
  _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}

  // _M_incr and _M_decr
#if defined (_STLP_THREADS)
#  if !defined (_STLP_USE_MUTEX)
   __stl_atomic_t _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
   __stl_atomic_t _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
#  else
#    undef _STLP_USE_MUTEX
  __stl_atomic_t _M_incr() {
    _STLP_auto_lock l(_M_mutex);
    return ++_M_ref_count;
  }
  __stl_atomic_t _M_decr() {
    _STLP_auto_lock l(_M_mutex);
    return --_M_ref_count;
  }
#  endif
#else  /* No threads */
  __stl_atomic_t _M_incr() { return ++_M_ref_count; }
  __stl_atomic_t _M_decr() { return --_M_ref_count; }
#endif
};
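
// Illustrative sketch (not part of the original header): how a reference-counted
// body might build on _Refcount_Base. The type __shared_body and the helpers
// __retain/__release are hypothetical.
//
//   struct __shared_body : public _Refcount_Base {
//     __shared_body() : _Refcount_Base(1) {}           // start with one owner
//   };
//
//   void __retain(__shared_body* __p)  { __p->_M_incr(); }
//   void __release(__shared_body* __p) {
//     if (__p->_M_decr() == 0) delete __p;             // last owner destroys the body
//   }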

_STLP_END_NAMESPACE

#ifdef __SYMBIAN32__WSD__
_STLP_DECLSPEC std::_STLP_STATIC_MUTEX& exp_get_threads_S_swap_lock();
_STLP_DECLSPEC std::_STLP_STATIC_MUTEX& exp_get_threads_0_S_swap_lock();
#endif

_STLP_BEGIN_NAMESPACE

/* Atomic swap on __stl_atomic_t
 * This is guaranteed to behave as though it were atomic only if all
 * possibly concurrent updates use _Atomic_swap.
 * In some cases the operation is emulated with a lock.
 * The same applies to _Atomic_swap_ptr.
 */
/* Helper struct to handle the following cases:
 * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*), the atomic
 *   exchange can be done directly on pointers
 * - on platforms without atomic operations, the swap is done in a critical
 *   section; portable but inefficient.
 */
template <int __use_ptr_atomic_swap>
class _Atomic_swap_struct {
public:
#if defined (_STLP_THREADS) && \
    !defined (_STLP_ATOMIC_EXCHANGE) && \
    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
#  define _STLP_USE_ATOMIC_SWAP_MUTEX
#if !defined(__SYMBIAN32__WSD__)
  static _STLP_STATIC_MUTEX _S_swap_lock;
#else
  static _STLP_STATIC_MUTEX& get_threads_S_swap_lock()
  { return ::exp_get_threads_S_swap_lock(); }
# define _S_swap_lock get_threads_S_swap_lock()
#endif
#endif

  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
  return _STLP_ATOMIC_EXCHANGE(__p, __q);
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  _S_swap_lock._M_acquire_lock();
  __stl_atomic_t __result = *__p;
  *__p = __q;
  _S_swap_lock._M_release_lock();
  return __result;
#  else
#    error Missing atomic swap implementation
#  endif
#else
  /* no threads */
  __stl_atomic_t __result = *__p;
  *__p = __q;
  return __result;
#endif // _STLP_THREADS
  }

  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
  return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
#  elif defined (_STLP_ATOMIC_EXCHANGE)
  _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
  return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
                                                         __REINTERPRET_CAST(__stl_atomic_t, __q))
                            );
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  _S_swap_lock._M_acquire_lock();
  void *__result = *__p;
  *__p = __q;
  _S_swap_lock._M_release_lock();
  return __result;
#  else
#    error Missing pointer atomic swap implementation
#  endif
#else
  /* no threads */
  void *__result = *__p;
  *__p = __q;
  return __result;
#endif
  }
};
#if defined(__SYMBIAN32__WSD__)
# undef _S_swap_lock
#endif

_STLP_TEMPLATE_NULL
class _Atomic_swap_struct<0> {
public:
#if defined (_STLP_THREADS) && \
    (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
#  define _STLP_USE_ATOMIC_SWAP_MUTEX
#if !defined(__SYMBIAN32__WSD__)
  static _STLP_STATIC_MUTEX _S_swap_lock;
#else
  static _STLP_STATIC_MUTEX& get_threads_0_S_swap_lock()
  { return ::exp_get_threads_0_S_swap_lock(); }
# define _S_swap_lock get_threads_0_S_swap_lock()
#endif
#endif

  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE)
  return _STLP_ATOMIC_EXCHANGE(__p, __q);
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  /* This should be portable, but performance is expected
   * to be quite awful.  This really needs platform specific
   * code.
   */
  _S_swap_lock._M_acquire_lock();
  __stl_atomic_t __result = *__p;
  *__p = __q;
  _S_swap_lock._M_release_lock();
  return __result;
#  else
#    error Missing atomic swap implementation
#  endif
#else
  /* no threads */
  __stl_atomic_t __result = *__p;
  *__p = __q;
  return __result;
#endif // _STLP_THREADS
  }

  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
#if defined (_STLP_THREADS)
#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
  return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
#  elif defined (_STLP_ATOMIC_EXCHANGE)
  _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
  return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
                                                         __REINTERPRET_CAST(__stl_atomic_t, __q))
                            );
#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  _S_swap_lock._M_acquire_lock();
  void *__result = *__p;
  *__p = __q;
  _S_swap_lock._M_release_lock();
  return __result;
#  else
#    error Missing pointer atomic swap implementation
#  endif
#else
  /* no threads */
  void *__result = *__p;
  *__p = __q;
  return __result;
#endif
  }
};
#if defined(__SYMBIAN32__WSD__)
# undef _S_swap_lock
#endif

#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
#  pragma warning (push)
#  pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
#endif

inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
  const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
  return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
}

inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
  const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
  return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
}

#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
#  pragma warning (pop)
#endif
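
// Illustrative sketch (not part of the original header): _Atomic_swap publishes a
// new value and retrieves the previous one in a single step. The names __flag,
// __head and __new_head are hypothetical.
//
//   static _STLP_VOLATILE __stl_atomic_t __flag = 0;
//   __stl_atomic_t __was_set = _Atomic_swap(&__flag, 1);   // __flag == 1, __was_set holds the old value
//
//   static void* _STLP_VOLATILE __head = 0;
//   void* __new_head = /* some pointer */;
//   void* __old_head = _Atomic_swap_ptr(&__head, __new_head);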

#if defined (_STLP_BETHREADS)
template <int __inst>
struct _STLP_beos_static_lock_data {
  static bool is_init;
  struct mutex_t : public _STLP_mutex {
    mutex_t()
    { _STLP_beos_static_lock_data<0>::is_init = true; }
    ~mutex_t()
    { _STLP_beos_static_lock_data<0>::is_init = false; }
  };
  static mutex_t mut;
};

template <int __inst>
bool _STLP_beos_static_lock_data<__inst>::is_init = false;
template <int __inst>
typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;

inline void _STLP_mutex_base::_M_acquire_lock() {
  if (sem == 0) {
    // We need to initialize on demand here; to prevent race
    // conditions, use our global mutex if it's available:
    if (_STLP_beos_static_lock_data<0>::is_init) {
      _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
      if (sem == 0) _M_initialize();
    }
    else {
      // No lock available; we must still be in startup code.
      // THERE MUST BE ONLY ONE THREAD active at this point.
      _M_initialize();
    }
  }
  status_t t;
  t = acquire_sem(sem);
  assert(t == B_NO_ERROR);
}
#endif

_STLP_END_NAMESPACE

#if !defined (_STLP_LINK_TIME_INSTANTIATION)
#  include <stl/_threads.c>
#endif

#endif /* _STLP_INTERNAL_THREADS_H */

// Local Variables:
// mode:C++
// End: