epoc32/include/stdapis/stlportv5/stl/_threads.h
branch Symbian3
changeset 4 837f303aceeb
parent 3 e1b950c65cb4
     1.1 --- a/epoc32/include/stdapis/stlportv5/stl/_threads.h	Wed Mar 31 12:27:01 2010 +0100
     1.2 +++ b/epoc32/include/stdapis/stlportv5/stl/_threads.h	Wed Mar 31 12:33:34 2010 +0100
     1.3 @@ -1,14 +1,16 @@
     1.4  /*
     1.5 + * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
     1.6 + *
     1.7   * Copyright (c) 1997-1999
     1.8   * Silicon Graphics Computer Systems, Inc.
     1.9   *
    1.10 - * Copyright (c) 1999 
    1.11 + * Copyright (c) 1999
    1.12   * Boris Fomitchev
    1.13   *
    1.14   * This material is provided "as is", with absolutely no warranty expressed
    1.15   * or implied. Any use is at your own risk.
    1.16   *
    1.17 - * Permission to use or copy this software for any purpose is hereby granted 
    1.18 + * Permission to use or copy this software for any purpose is hereby granted
    1.19   * without fee, provided the above notices are retained on all copies.
    1.20   * Permission to modify the code and to distribute modified code is granted,
    1.21   * provided the above notices are retained, and a notice that the code was
    1.22 @@ -19,7 +21,6 @@
    1.23  // WARNING: This is an internal header file, included by other C++
    1.24  // standard library headers.  You should not attempt to use this header
    1.25  // file directly.
    1.26 -// Stl_config.h should be included before this file.
    1.27  
    1.28  
    1.29  #ifndef _STLP_INTERNAL_THREADS_H
    1.30 @@ -30,226 +31,202 @@
    1.31  // threads standard), and Win32 threads.  Uithread support by Jochen
    1.32  // Schlick, 1999, and Solaris threads generalized to them.
    1.33  
    1.34 -#ifndef _STLP_CONFIG_H
    1.35 -#include <stl/_config.h>
    1.36 +#ifndef _STLP_INTERNAL_CSTDDEF
    1.37 +#  include <stl/_cstddef.h>
    1.38  #endif
    1.39  
    1.40 -# if ! defined (_STLP_CSTDDEF)
    1.41 -#  include <cstddef>
    1.42 -# endif
    1.43 -
    1.44 -# if ! defined (_STLP_CSTDLIB)
    1.45 -#  include <cstdlib>
    1.46 -# endif
    1.47 +#ifndef _STLP_INTERNAL_CSTDLIB
    1.48 +#  include <stl/_cstdlib.h>
    1.49 +#endif
    1.50  
    1.51  // On SUN and Mac OS X gcc, zero-initialization works just fine...
    1.52 -# if defined (__sun) || ( defined(__GNUC__) && defined(__APPLE__) )
    1.53 -# define _STLP_MUTEX_INITIALIZER
    1.54 -# endif
    1.55 -
    1.56 -# if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
    1.57 -  typedef long __stl_atomic_t;
    1.58 -# else 
    1.59 -# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
    1.60 -// using _STLP_VENDOR_CSTD::size_t;
    1.61 -using namespace _STLP_VENDOR_CSTD;
    1.62 -# endif
    1.63 -  typedef size_t __stl_atomic_t;
    1.64 -#endif  
    1.65 -
    1.66 -# if defined(_STLP_SGI_THREADS)
    1.67 -#  include <mutex.h>
    1.68 -// Hack for SGI o32 compilers.
    1.69 -#if !defined(__add_and_fetch) && \
    1.70 -    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
    1.71 -#  define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)  
    1.72 -#  define __test_and_set(__l,__v)  test_and_set(__l,__v)
    1.73 -#endif /* o32 */
    1.74 -
    1.75 -# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
    1.76 -#  define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
    1.77 -# else
    1.78 -#  define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
    1.79 -# endif
    1.80 -
    1.81 -#  define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
    1.82 -#  define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
    1.83 -
    1.84 -# elif defined (__GNUC__) && defined (__i386__) && defined (__unix__) && defined (_STLP_USE_INLINE_X86_SPINLOCK) 
    1.85 -
    1.86 -// gcc on i386 linux, freebsd, etc. 
    1.87 -
    1.88 -// This enables the memory caching on x86 linux.  It is critical for SMP
    1.89 -// without it the performace is DISMAL!
    1.90 -static inline unsigned long __xchg(volatile __stl_atomic_t* target, int source)
    1.91 -{
    1.92 -
    1.93 -  // The target is refernce in memory rather than the register
    1.94 -  // because making a copy of it from memory to the register and
    1.95 -  // back again would ruin the atomic nature of the call.
    1.96 -  // the source does not need to be delt with atomicly so it can
    1.97 -  // be copied about as needed.
    1.98 -  //
    1.99 -  // The casting of the source is used to prevent gcc from optimizing 
   1.100 -  // in such a way that breaks the atomic nature of this call.
   1.101 -  //
   1.102 -  __asm__ __volatile__("xchgl %1,%0"
   1.103 -		       :"=m" (*(volatile long *) target), "=r" (source)
   1.104 -		       :"m" (*(volatile long *) target), "r" (source) );
   1.105 -  return source;
   1.106 -
   1.107 -  //  The assembly above does the following atomicly:
   1.108 -  //   int temp=source;
   1.109 -  //   source=(int)(*target);
   1.110 -  //   (int)(*target)=temp;
   1.111 -  // return source
   1.112 -}
   1.113 -
   1.114 -static inline void __inc_and_fetch(volatile __stl_atomic_t* __x)
   1.115 -{
   1.116 -  // Referenced in memory rather than register to preserve the atomic nature.
   1.117 -  //
   1.118 -  __asm__ __volatile__(
   1.119 -      "lock; incl %0"
   1.120 -      :"=m" (*__x)
   1.121 -      :"m" (*__x) );
   1.122 -
   1.123 -  //  The assembly above does the following atomicly:
   1.124 -  //   ++(int)(*__x);
   1.125 -
   1.126 -}
   1.127 -static inline void __dec_and_fetch(volatile __stl_atomic_t* __x)
   1.128 -{
   1.129 -  // Referenced in memory rather than register to preserve the atomic nature.
   1.130 -  //
   1.131 -  __asm__ __volatile__(
   1.132 -      "lock; decl %0"
   1.133 -      :"=m" (*__x)
   1.134 -      :"m" (*__x) );
   1.135 -
   1.136 -  //  The assembly above does the following atomicly:
   1.137 -  //   --(int)(*__x);
   1.138 -}
   1.139 -
   1.140 -#  define _STLP_ATOMIC_EXCHANGE(target, newValue) ((__xchg(target, newValue)))
   1.141 -#  define _STLP_ATOMIC_INCREMENT(__x) __inc_and_fetch(__x)
   1.142 -#  define _STLP_ATOMIC_DECREMENT(__x) __dec_and_fetch(__x)
   1.143 -
   1.144 -# elif defined(_STLP_PTHREADS)
   1.145 -
   1.146 -#  include <pthread.h>
   1.147 -#  ifndef _STLP_USE_PTHREAD_SPINLOCK
   1.148 -#   if defined(PTHREAD_MUTEX_INITIALIZER) && !defined(_STLP_MUTEX_INITIALIZER)
   1.149 -#    define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
   1.150 -#   endif
   1.151 -
   1.152 -//HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
   1.153 -#   if defined(_DECTHREADS_) && (defined(_PTHREAD_USE_D4) || defined(__hpux)) && !defined(_CMA_SUPPRESS_EXTERNALS_)
   1.154 -#    define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
   1.155 -#   else
   1.156 -#    define _STLP_PTHREAD_ATTR_DEFAULT 0
   1.157 -#   endif
   1.158 -#  endif // !_STLP_USE_PTHREAD_SPINLOCK 
   1.159 -
   1.160 -# elif defined(_STLP_WIN32THREADS)
   1.161 -#  if !defined (_STLP_WINDOWS_H_INCLUDED) && ! defined (_WINDOWS_H)
   1.162 -#   if ! (defined ( _STLP_MSVC ) || defined (__BORLANDC__) || defined (__ICL) || defined (__WATCOMC__) || defined (__MINGW32__) || defined (__DMC__))
   1.163 -#    ifdef _STLP_USE_MFC
   1.164 -#     include <afx.h>
   1.165 -#    else
   1.166 -#     include <windows.h>
   1.167 -#    endif
   1.168 -#    define _STLP_WINDOWS_H_INCLUDED
   1.169 -#   else 
   1.170 -// This section serves as a replacement for windows.h header for Visual C++
   1.171 -extern "C" {
   1.172 -#   if (defined(_M_MRX000) || defined(_M_ALPHA) \
   1.173 -       || (defined(_M_PPC) && (_MSC_VER >= 1000))) && !defined(RC_INVOKED)
   1.174 -#    define InterlockedIncrement       _InterlockedIncrement
   1.175 -#    define InterlockedDecrement       _InterlockedDecrement
   1.176 -#    define InterlockedExchange        _InterlockedExchange
   1.177 -#    define _STLP_STDCALL
   1.178 -#   else
   1.179 -#    ifdef _MAC
   1.180 -#     define _STLP_STDCALL _cdecl
   1.181 -#    else
   1.182 -#     define _STLP_STDCALL __stdcall
   1.183 -#    endif
   1.184 -#   endif
   1.185 -
   1.186 -#if (_MSC_VER >= 1300) || defined (_STLP_NEW_PLATFORM_SDK)
   1.187 -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long volatile *);
   1.188 -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long volatile *);
   1.189 -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long volatile *, long);
   1.190 -#else
   1.191 -  // boris : for the latest SDK, you may actually need the other version of the declaration (above)
   1.192 -  // even for earlier VC++ versions. There is no way to tell SDK versions apart, sorry ...
   1.193 -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long*);
   1.194 -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long*);
   1.195 -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long*, long);
   1.196 +#if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
   1.197 +#  define _STLP_MUTEX_INITIALIZER
   1.198  #endif
   1.199  
   1.200 -_STLP_IMPORT_DECLSPEC void _STLP_STDCALL Sleep(unsigned long);
   1.201 -_STLP_IMPORT_DECLSPEC void _STLP_STDCALL OutputDebugStringA( const char* lpOutputString );
    1.202 + * This header defines the following atomic operations, which platforms
    1.203 + * should try to support as much as possible. Atomic operations are exposed
    1.204 + * as macros in order to easily test for their existence. They are:
    1.205 + * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
    1.206 + * increments *__ptr by 1 and returns the new value
    1.207 + * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
    1.208 + * decrements *__ptr by 1 and returns the new value
    1.209 + * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
    1.210 + * assigns __val to *__target and returns the former *__target value
    1.211 + * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
    1.212 + * assigns __ptr to *__target and returns the former *__target value
    1.213 + * __stl_atomic_t _STLP_ATOMIC_ADD(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
    1.214 + * performs *__target = *__target + __val and returns the old *__target value
   1.215 + */
   1.216  
   1.217 -#ifdef _STLP_DEBUG
   1.218 -typedef unsigned long DWORD;
   1.219 -_STLP_IMPORT_DECLSPEC DWORD _STLP_STDCALL GetCurrentThreadId();
   1.220 -#endif /* _STLP_DEBUG */
   1.221 +#if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
   1.222 +typedef long __stl_atomic_t;
   1.223 +#else
   1.224 +/* Don't import whole namespace!!!! - ptr */
   1.225 +// # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
   1.226 +// // using _STLP_VENDOR_CSTD::size_t;
   1.227 +// using namespace _STLP_VENDOR_CSTD;
   1.228 +// # endif
   1.229 +typedef size_t __stl_atomic_t;
   1.230 +#endif
   1.231  
   1.232 -#    if defined (InterlockedIncrement)
   1.233 -#     pragma intrinsic(_InterlockedIncrement)
   1.234 -#     pragma intrinsic(_InterlockedDecrement)
   1.235 -#     pragma intrinsic(_InterlockedExchange)
   1.236 +#if defined (_STLP_THREADS)
   1.237 +
   1.238 +#  if defined (_STLP_SGI_THREADS)
   1.239 +
   1.240 +#    include <mutex.h>
   1.241 +// Hack for SGI o32 compilers.
   1.242 +#    if !defined(__add_and_fetch) && \
   1.243 +        (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
   1.244 +#      define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
   1.245 +#      define __test_and_set(__l,__v)  test_and_set(__l,__v)
   1.246 +#    endif /* o32 */
   1.247 +
   1.248 +#    if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
   1.249 +#      define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
   1.250 +#    else
   1.251 +#      define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
   1.252  #    endif
   1.253 -} /* extern "C" */
   1.254  
   1.255 -#   endif /* STL_MSVC */
   1.256 +#    define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
   1.257 +#    define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
   1.258  
   1.259 -#   define _STLP_WINDOWS_H_INCLUDED
   1.260 +#  elif defined (_STLP_PTHREADS)
   1.261  
   1.262 -#  endif /* _STLP_WIN32 */
   1.263 +#    include <pthread.h>
   1.264 +#    if !defined (_STLP_USE_PTHREAD_SPINLOCK)
   1.265 +#      if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
   1.266 +#        define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
   1.267 +#      endif
   1.268 +//HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
   1.269 +#      if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
   1.270 +#        define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
   1.271 +#      else
   1.272 +#        define _STLP_PTHREAD_ATTR_DEFAULT 0
   1.273 +#      endif
   1.274 +#    else // _STLP_USE_PTHREAD_SPINLOCK
   1.275 +#      if defined (__OpenBSD__)
   1.276 +#        include <spinlock.h>
   1.277 +#      endif
   1.278 +#    endif // _STLP_USE_PTHREAD_SPINLOCK
   1.279  
   1.280 -#  ifndef _STLP_ATOMIC_INCREMENT
   1.281 -#   define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement((long*)__x)
   1.282 -#   define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement((long*)__x)
   1.283 -#   define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange((long*)__x, (long)__y)
   1.284 -#  endif
   1.285 -# elif defined(__DECC) || defined(__DECCXX)
   1.286 -#  include <machine/builtins.h>
   1.287 -#  define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
   1.288 -#  define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
   1.289 -#  define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
   1.290 -# elif defined(_STLP_SPARC_SOLARIS_THREADS)
   1.291 -#  include <stl/_sparc_atomic.h>
   1.292 -# elif defined (_STLP_UITHREADS)
   1.293 +#    if defined (__GNUC__) && defined (__i386__)
   1.294 +
   1.295 +#      if !defined (_STLP_ATOMIC_INCREMENT)
   1.296 +inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
   1.297 +  long result;
   1.298 +  __asm__ __volatile__
   1.299 +    ("lock; xaddl  %1, %0;"
   1.300 +    :"=m" (*p), "=r" (result)
   1.301 +    :"m" (*p),  "1"  (1)
   1.302 +    :"cc");
   1.303 +  return result + 1;
   1.304 +}
   1.305 +#        define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
   1.306 +#      endif
   1.307 +
   1.308 +#      if !defined (_STLP_ATOMIC_DECREMENT)
   1.309 +inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
   1.310 +  long result;
   1.311 +  __asm__ __volatile__
   1.312 +    ("lock; xaddl  %1, %0;"
   1.313 +    :"=m" (*p), "=r" (result)
   1.314 +    :"m" (*p),  "1"  (-1)
   1.315 +    :"cc");
   1.316 +  return result - 1;
   1.317 +}
   1.318 +#        define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
   1.319 +#      endif
   1.320 +
   1.321 +#      if !defined (_STLP_ATOMIC_ADD)
   1.322 +inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
   1.323 +  long result;
   1.324 +  __asm__ __volatile__
   1.325 +    ("lock; xaddl %1, %0;"
   1.326 +    :"=m" (*p), "=r" (result)
   1.327 +    :"m"  (*p), "1"  (addend)
   1.328 +    :"cc");
   1.329 + return result + addend;
   1.330 +}
   1.331 +#        define _STLP_ATOMIC_ADD(__dst, __val)  (_STLP_atomic_add_gcc_x86((long volatile*)__dst, (long)__val))
   1.332 +#      endif
   1.333 +
   1.334 +#    endif /* if defined(__GNUC__) && defined(__i386__) */
   1.335 +
   1.336 +#  elif defined (_STLP_WIN32THREADS)
   1.337 +
   1.338 +#    if !defined (_STLP_ATOMIC_INCREMENT)
   1.339 +#      if !defined (_STLP_NEW_PLATFORM_SDK)
   1.340 +#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__CONST_CAST(long*, __x))
   1.341 +#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__CONST_CAST(long*, __x))
   1.342 +#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__CONST_CAST(long*, __x), __y)
   1.343 +#      else
   1.344 +#        define _STLP_ATOMIC_INCREMENT(__x)           InterlockedIncrement(__x)
   1.345 +#        define _STLP_ATOMIC_DECREMENT(__x)           InterlockedDecrement(__x)
   1.346 +#        define _STLP_ATOMIC_EXCHANGE(__x, __y)       InterlockedExchange(__x, __y)
   1.347 +#      endif
   1.348 +#      define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y)     STLPInterlockedExchangePointer(__x, __y)
   1.349 +/*
    1.350 + * The following functionality is only available since Windows 98; those targeting earlier OSes
    1.351 + * should define _WIN32_WINDOWS to a value lower than the one for Windows 98. See the Platform SDK
    1.352 + * documentation for more information:
   1.353 + */
   1.354 +#      if defined (_STLP_NEW_PLATFORM_SDK) && (!defined (_STLP_WIN32_VERSION) || (_STLP_WIN32_VERSION >= 0x0410))
   1.355 +#        define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
   1.356 +#      endif
   1.357 +#    endif
   1.358 +
   1.359 +#  elif defined (__DECC) || defined (__DECCXX)
   1.360 +
   1.361 +#    include <machine/builtins.h>
   1.362 +#    define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
   1.363 +#    define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
   1.364 +#    define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
   1.365 +
   1.366 +#  elif defined(_STLP_SPARC_SOLARIS_THREADS)
   1.367 +
   1.368 +#    include <stl/_sparc_atomic.h>
   1.369 +
   1.370 +#  elif defined (_STLP_UITHREADS)
   1.371 +
    1.372  // this inclusion is a potential hazard that may bring in all sorts
    1.373  // of old-style headers. Let's assume the vendor already knows how
    1.374  // to deal with that.
   1.375 -#  include <ctime>
   1.376 -# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
   1.377 +#    ifndef _STLP_INTERNAL_CTIME
   1.378 +#      include <stl/_ctime.h>
   1.379 +#    endif
   1.380 +#    if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
   1.381  using _STLP_VENDOR_CSTD::time_t;
   1.382 -# endif
   1.383 -#  include <synch.h>
   1.384 -#  include <cstdio>
   1.385 -#  include <stl/_cwchar.h>
   1.386 -# elif defined (_STLP_BETHREADS)
   1.387 -#  include <OS.h>
   1.388 -#include <cassert>
   1.389 -#include <stdio.h>
   1.390 -#  define _STLP_MUTEX_INITIALIZER = { 0 }
   1.391 -#elif defined(_STLP_OS2THREADS)
   1.392 -# ifdef __GNUC__
   1.393 -#  define INCL_DOSSEMAPHORES
   1.394 -#  include <os2.h>
   1.395 -# else
   1.396 -  // This section serves to replace os2.h for VisualAge C++
   1.397 +#    endif
   1.398 +#    include <synch.h>
   1.399 +#    include <cstdio>
   1.400 +#    include <cwchar>
   1.401 +
   1.402 +#  elif defined (_STLP_BETHREADS)
   1.403 +
   1.404 +#    include <OS.h>
   1.405 +#    include <cassert>
   1.406 +#    include <stdio.h>
   1.407 +#    define _STLP_MUTEX_INITIALIZER = { 0 }
   1.408 +
   1.409 +#  elif defined (_STLP_NWTHREADS)
   1.410 +
   1.411 +#    include <nwthread.h>
   1.412 +#    include <nwsemaph.h>
   1.413 +
   1.414 +#  elif defined(_STLP_OS2THREADS)
   1.415 +
   1.416 +#    if defined (__GNUC__)
   1.417 +#      define INCL_DOSSEMAPHORES
   1.418 +#      include <os2.h>
   1.419 +#    else
   1.420 +// This section serves to replace os2.h for VisualAge C++
   1.421    typedef unsigned long ULONG;
   1.422 -  #ifndef __HEV__  /* INCL_SEMAPHORE may also define HEV */
   1.423 -    #define __HEV__
   1.424 -    typedef ULONG HEV;
   1.425 -    typedef HEV*  PHEV;
   1.426 -  #endif
   1.427 +#      if !defined (__HEV__)  /* INCL_SEMAPHORE may also define HEV */
   1.428 +#        define __HEV__
   1.429 +  typedef ULONG HEV;
   1.430 +  typedef HEV*  PHEV;
   1.431 +#      endif
   1.432    typedef ULONG APIRET;
   1.433    typedef ULONG HMTX;
   1.434    typedef HMTX*  PHMTX;
   1.435 @@ -259,35 +236,42 @@
   1.436    APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
   1.437    APIRET _System DosReleaseMutexSem(HMTX hmtx);
   1.438    APIRET _System DosCloseMutexSem(HMTX hmtx);
   1.439 -# define _STLP_MUTEX_INITIALIZER = { 0 };
   1.440 -#  endif /* GNUC */
   1.441 -# elif defined(_STLP_VXWORKS_THREADS)
   1.442 -#  include "semLib.h"
   1.443 -# endif
   1.444 +#      define _STLP_MUTEX_INITIALIZER = { 0 }
   1.445 +#    endif /* GNUC */
   1.446  
   1.447 -# ifndef _STLP_MUTEX_INITIALIZER
   1.448 -#   if defined(_STLP_ATOMIC_EXCHANGE)
   1.449 -// we are using our own spinlock. 
   1.450 -#     define _STLP_MUTEX_INITIALIZER = { 0 }
   1.451 -#   elif defined(_STLP_UITHREADS)
   1.452 -// known case
   1.453 -#     define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
   1.454 -#   else
   1.455 -// we do not have static initializer available. therefore, on-demand synchronization is needed.
   1.456 -#     define _STLP_MUTEX_INITIALIZER
   1.457 -#     define _STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION
   1.458 -#   endif
   1.459 -# endif
   1.460 +#  endif
   1.461 +
   1.462 +#else
   1.463 +/* no threads */
   1.464 +#  define _STLP_ATOMIC_INCREMENT(__x) ++(*__x)
   1.465 +#  define _STLP_ATOMIC_DECREMENT(__x) --(*__x)
    1.466 +/* We do not provide the other atomic operations, as they are useless if STLport does not
    1.467 + * have to be thread safe
   1.468 + */
   1.469 +#endif
   1.470 +
   1.471 +#if !defined (_STLP_MUTEX_INITIALIZER)
   1.472 +#  if defined(_STLP_ATOMIC_EXCHANGE)
   1.473 +#    define _STLP_MUTEX_INITIALIZER = { 0 }
   1.474 +#  elif defined(_STLP_UITHREADS)
   1.475 +#    define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
   1.476 +#  else
   1.477 +#    define _STLP_MUTEX_INITIALIZER
   1.478 +#  endif
   1.479 +#endif
   1.480 +  
   1.481  
   1.482  _STLP_BEGIN_NAMESPACE
   1.483  
   1.484 -#ifndef _STLP_USE_PTHREAD_SPINLOCK
   1.485 +#if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
   1.486  // Helper struct.  This is a workaround for various compilers that don't
   1.487  // handle static variables in inline functions properly.
   1.488  template <int __inst>
   1.489  struct _STLP_mutex_spin {
   1.490    enum { __low_max = 30, __high_max = 1000 };
   1.491    // Low if we suspect uniprocessor, high for multiprocessor.
    1.492 +  // Note: For the SYMBIAN Emulator, these entries are to be considered WSD.
    1.493 +  // Still, the EWSD solution can't be applied here since this struct is templated.
   1.494    static unsigned __max;
   1.495    static unsigned __last;
   1.496    static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
   1.497 @@ -295,7 +279,6 @@
   1.498  };
   1.499  #endif // !_STLP_USE_PTHREAD_SPINLOCK
   1.500  
   1.501 -
   1.502  // Locking class.  Note that this class *does not have a constructor*.
   1.503  // It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
   1.504  // or dynamically, by explicitly calling the _M_initialize member function.
   1.505 @@ -310,17 +293,15 @@
   1.506  
   1.507  // For non-static cases, clients should use  _STLP_mutex.
   1.508  
   1.509 -struct _STLP_CLASS_DECLSPEC _STLP_mutex_base
   1.510 -{
   1.511 -#if defined(_STLP_ATOMIC_EXCHANGE) || defined(_STLP_SGI_THREADS)
   1.512 +struct _STLP_CLASS_DECLSPEC _STLP_mutex_base {
   1.513 +#if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS)
   1.514    // It should be relatively easy to get this to work on any modern Unix.
   1.515    volatile __stl_atomic_t _M_lock;
   1.516  #endif
   1.517  
   1.518 -#ifdef _STLP_THREADS
   1.519 -
   1.520 -# ifdef _STLP_ATOMIC_EXCHANGE
   1.521 -  inline void _M_initialize() { _M_lock=0; }
   1.522 +#if defined (_STLP_THREADS)
   1.523 +#  if defined (_STLP_ATOMIC_EXCHANGE)
   1.524 +  inline void _M_initialize() { _M_lock = 0; }
   1.525    inline void _M_destroy() {}
   1.526  
   1.527    void _M_acquire_lock() {
   1.528 @@ -329,147 +310,111 @@
   1.529  
   1.530    inline void _M_release_lock() {
   1.531      volatile __stl_atomic_t* __lock = &_M_lock;
   1.532 -#  if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
   1.533 -        asm("sync");
   1.534 -        *__lock = 0;
   1.535 -#  elif defined(_STLP_SGI_THREADS) && __mips >= 3 \
   1.536 -	 && (defined (_ABIN32) || defined(_ABI64))
   1.537 -        __lock_release(__lock);
   1.538 -#  elif defined (_STLP_SPARC_SOLARIS_THREADS)
   1.539 -#   if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
   1.540 -	asm("membar #StoreStore ; membar #LoadStore");
   1.541 -#   else
   1.542 -	asm(" stbar ");
   1.543 -#   endif
   1.544 -        *__lock = 0;	
   1.545 -#  else
   1.546 -        *__lock = 0;
   1.547 -        // This is not sufficient on many multiprocessors, since
   1.548 -        // writes to protected variables and the lock may be reordered.
   1.549 -#  endif
   1.550 +#    if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
   1.551 +    asm("sync");
   1.552 +    *__lock = 0;
   1.553 +#    elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \
   1.554 +         (defined (_ABIN32) || defined(_ABI64))
   1.555 +    __lock_release(__lock);
   1.556 +#    elif defined (_STLP_SPARC_SOLARIS_THREADS)
   1.557 +#      if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
   1.558 +    asm("membar #StoreStore ; membar #LoadStore");
   1.559 +#      else
   1.560 +    asm(" stbar ");
   1.561 +#      endif
   1.562 +    *__lock = 0;
   1.563 +#    else
   1.564 +    *__lock = 0;
   1.565 +    // This is not sufficient on many multiprocessors, since
   1.566 +    // writes to protected variables and the lock may be reordered.
   1.567 +#    endif
   1.568    }
   1.569 -# elif defined(_STLP_PTHREADS)
   1.570 -#  ifdef _STLP_USE_PTHREAD_SPINLOCK
   1.571 +#  elif defined (_STLP_PTHREADS)
   1.572 +#    if defined (_STLP_USE_PTHREAD_SPINLOCK)
   1.573 +#      if !defined (__OpenBSD__)
   1.574    pthread_spinlock_t _M_lock;
   1.575    inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
   1.576    inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
   1.577  
   1.578 -  inline void _M_acquire_lock() { 
   1.579 -    // we do not care about race conditions here : there is only one thread at this point 
   1.580 -    if(!_M_lock) pthread_spin_init( &_M_lock, 0 );
    1.581 +  // sorry, but there is no static initializer for pthread_spinlock_t;
    1.582 +  // this will not work for compilers that have problems with calling
    1.583 +  // constructors of static objects...
   1.584  
   1.585 -    // fbp: here, initialization on demand should happen before the lock
   1.586 -    // we use simple strategy as we are sure this only happens on initialization
   1.587 -    pthread_spin_lock( &_M_lock );
   1.588 -  }
   1.589 +  // _STLP_mutex_base()
   1.590 +  //   { pthread_spin_init( &_M_lock, 0 ); }
   1.591  
   1.592 -  inline void _M_acquire_lock_nodemand() { 
   1.593 -    pthread_spin_lock( &_M_lock ); 
   1.594 -  }
   1.595 +  // ~_STLP_mutex_base()
   1.596 +  //   { pthread_spin_destroy( &_M_lock ); }
   1.597 +
   1.598 +  inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); }
   1.599    inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
   1.600 -#  else // !_STLP_USE_PTHREAD_SPINLOCK
   1.601 +#      else // __OpenBSD__
   1.602 +  spinlock_t _M_lock;
   1.603 +  inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); }
   1.604 +  inline void _M_destroy() { }
   1.605 +  inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); }
   1.606 +  inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); }
   1.607 +#      endif // __OpenBSD__
   1.608 +#    else // !_STLP_USE_PTHREAD_SPINLOCK
   1.609    pthread_mutex_t _M_lock;
   1.610 -
   1.611 -  inline void _M_initialize() {
   1.612 -    pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT);
   1.613 -  }
   1.614 -  inline void _M_destroy() {
   1.615 -    pthread_mutex_destroy(&_M_lock);
   1.616 -  }
   1.617 -  inline void _M_acquire_lock_nodemand() { 
   1.618 +  inline void _M_initialize()
   1.619 +  { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); }
   1.620 +  inline void _M_destroy()
   1.621 +  { pthread_mutex_destroy(&_M_lock); }
   1.622 +  inline void _M_acquire_lock() {
   1.623 +#      if defined ( __hpux ) && ! defined (PTHREAD_MUTEX_INITIALIZER)
   1.624 +    if (!_M_lock.field1)  _M_initialize();
   1.625 +#      endif
   1.626      pthread_mutex_lock(&_M_lock);
   1.627    }
   1.628 +  inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
   1.629 +#    endif // !_STLP_USE_PTHREAD_SPINLOCK
   1.630  
   1.631 -  inline void _M_acquire_lock() { 
   1.632 -#    if defined (__hpux) && !defined (PTHREAD_MUTEX_INITIALIZER)
   1.633 -      if (!_M_lock.field1)  _M_initialize();
   1.634 -#    endif
   1.635 -     pthread_mutex_lock(&_M_lock);
   1.636 -  }
   1.637 -  inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
   1.638 -#  endif // !_STLP_USE_PTHREAD_SPINLOCK
   1.639 -  
   1.640 -# elif defined (_STLP_UITHREADS)
   1.641 +#  elif defined (_STLP_UITHREADS)
   1.642    mutex_t _M_lock;
   1.643 -  inline void _M_initialize() {
   1.644 -    mutex_init(&_M_lock,0,NULL);	
   1.645 -  }
   1.646 -  inline void _M_destroy() {
   1.647 -    mutex_destroy(&_M_lock);
   1.648 -  }
   1.649 +  inline void _M_initialize()
   1.650 +  { mutex_init(&_M_lock, 0, NULL); }
   1.651 +  inline void _M_destroy()
   1.652 +  { mutex_destroy(&_M_lock); }
   1.653    inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
   1.654    inline void _M_release_lock() { mutex_unlock(&_M_lock); }
   1.655  
   1.656 -# elif defined(_STLP_OS2THREADS)
   1.657 +#  elif defined (_STLP_OS2THREADS)
   1.658    HMTX _M_lock;
   1.659    inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
   1.660    inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
   1.661 -  inline void _M_acquire_lock_nodemand() {
   1.662 -    DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
   1.663 -  }  
   1.664    inline void _M_acquire_lock() {
   1.665 -    if(!_M_lock) _M_initialize();
   1.666 +    if (!_M_lock) _M_initialize();
   1.667      DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
   1.668    }
   1.669    inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
   1.670 -# elif defined(_STLP_BETHREADS)
   1.671 +#  elif defined (_STLP_BETHREADS)
   1.672    sem_id sem;
   1.673 -  inline void _M_initialize() 
   1.674 -  {
   1.675 -     sem = create_sem(1, "STLPort");
   1.676 -     assert(sem > 0);
   1.677 +  inline void _M_initialize() {
   1.678 +    sem = create_sem(1, "STLPort");
   1.679 +    assert(sem > 0);
   1.680    }
   1.681 -  inline void _M_destroy() 
   1.682 -  {
   1.683 -     int t = delete_sem(sem);
   1.684 -     assert(t == B_NO_ERROR);
   1.685 -  }
   1.686 -  inline void _M_acquire_lock_nodemand()
   1.687 -  {
   1.688 -    status_t t;
   1.689 -    t = acquire_sem(sem);
   1.690 +  inline void _M_destroy() {
   1.691 +    int t = delete_sem(sem);
   1.692      assert(t == B_NO_ERROR);
   1.693    }
   1.694    inline void _M_acquire_lock();
   1.695 -  inline void _M_release_lock() 
   1.696 -  {
   1.697 -     status_t t = release_sem(sem);
   1.698 -     assert(t == B_NO_ERROR);
   1.699 +  inline void _M_release_lock() {
   1.700 +    status_t t = release_sem(sem);
   1.701 +    assert(t == B_NO_ERROR);
   1.702    }
   1.703 -# elif defined(_STLP_VXWORKS_THREADS)
   1.704 -  SEM_ID _M_sem;
   1.705 -  inline void _M_initialize() 
   1.706 -  {
   1.707 -     _M_sem = semMCreate(SEM_Q_FIFO);
   1.708 -     assert(_M_sem > 0);
   1.709 -  }
   1.710 -  inline void _M_destroy() 
   1.711 -  {
   1.712 -    STATUS __s;
   1.713 -    semDelete (_M_sem);
   1.714 -    assert(__s == OK);
   1.715 -  }
   1.716 -  inline void _M_acquire_lock_nodemand()
   1.717 -  {
   1.718 -    STATUS __s;
   1.719 -    semTake (_M_sem, WAIT_FOREVER);
   1.720 -    assert(__s == OK);
   1.721 -  }
   1.722 +#  elif defined (_STLP_NWTHREADS)
   1.723 +  LONG _M_lock;
   1.724 +  inline void _M_initialize()
   1.725 +  { _M_lock = OpenLocalSemaphore(1); }
   1.726 +  inline void _M_destroy()
   1.727 +  { CloseLocalSemaphore(_M_lock); }
   1.728    inline void _M_acquire_lock()
   1.729 -  {
   1.730 -    if (!_M_sem)
   1.731 -      _M_initialize();
   1.732 -    _M_acquire_lock_nodemand();
   1.733 -  }
   1.734 -  inline void _M_release_lock() 
   1.735 -  {
   1.736 -    STATUS __s;
   1.737 -    semGive (_M_sem, WAIT_FOREVER);
   1.738 -    assert(__s == OK);
   1.739 -  }
   1.740 -# else		//*ty 11/24/2001 - added configuration check
   1.741 -#  error "Unknown thread facility configuration"
   1.742 -# endif
   1.743 +  { WaitOnLocalSemaphore(_M_lock); }
   1.744 +  inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); }
   1.745 +#  else      //*ty 11/24/2001 - added configuration check
   1.746 +#    error "Unknown thread facility configuration"
   1.747 +#  endif
   1.748  #else /* No threads */
   1.749    inline void _M_initialize() {}
   1.750    inline void _M_destroy() {}
   1.751 @@ -478,23 +423,10 @@
   1.752  #endif // _STLP_PTHREADS
   1.753  };
   1.754  
   1.755 -
   1.756 -#if defined (_STLP_THREADS) && defined (_STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION)
   1.757 -// for use in _STLP_mutex, our purposes do not require ondemand initialization
   1.758 -// also, mutex_base may use some hacks to determine uninitialized state by zero data, which only works for globals.
   1.759 -class _STLP_CLASS_DECLSPEC _STLP_mutex_nodemand : public _STLP_mutex_base {
   1.760 -  inline void _M_acquire_lock() { 
   1.761 -    _M_acquire_lock_nodemand();
   1.762 -  }
   1.763 -};
   1.764 -#else
   1.765 -typedef _STLP_mutex_base _STLP_mutex_nodemand;
   1.766 -#endif
   1.767 -
   1.768 -
   1.769  // Locking class.  The constructor initializes the lock, the destructor destroys it.
    1.770  // Well-behaved class, does not need a static initializer
   1.771 -class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_nodemand {
   1.772 +
   1.773 +class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base {
   1.774    public:
   1.775      inline _STLP_mutex () { _M_initialize(); }
   1.776      inline ~_STLP_mutex () { _M_destroy(); }
   1.777 @@ -503,122 +435,261 @@
   1.778      void operator=(const _STLP_mutex&);
   1.779  };
   1.780  
   1.781 +// A locking class that uses _STLP_STATIC_MUTEX.  The constructor takes
   1.782 +// a reference to an _STLP_STATIC_MUTEX, and acquires a lock.  The destructor
   1.783 +// releases the lock.
   1.784 +// It's not clear that this is exactly the right functionality.
   1.785 +// It will probably change in the future.
   1.786  
   1.787 +struct _STLP_CLASS_DECLSPEC _STLP_auto_lock {
   1.788 +  _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
   1.789 +  { _M_lock._M_acquire_lock(); }
   1.790 +  ~_STLP_auto_lock()
   1.791 +  { _M_lock._M_release_lock(); }
   1.792 +
   1.793 +private:
   1.794 +  _STLP_STATIC_MUTEX& _M_lock;
   1.795 +  void operator=(const _STLP_auto_lock&);
   1.796 +  _STLP_auto_lock(const _STLP_auto_lock&);
   1.797 +};
   1.798  
   1.799  /*
   1.800   * Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
   1.801   * _M_ref_count, and member functions _M_incr and _M_decr, which perform
   1.802 - * atomic preincrement/predecrement.  The constructor initializes 
   1.803 + * atomic preincrement/predecrement.  The constructor initializes
   1.804   * _M_ref_count.
   1.805   */
   1.806 -struct _STLP_CLASS_DECLSPEC _Refcount_Base
   1.807 -{
   1.808 +class _STLP_CLASS_DECLSPEC _Refcount_Base {
   1.809    // The data member _M_ref_count
   1.810 -  volatile __stl_atomic_t _M_ref_count;
   1.811 +#if defined (__DMC__)
   1.812 +public:
   1.813 +#endif
   1.814 +  _STLP_VOLATILE __stl_atomic_t _M_ref_count;
   1.815  
   1.816 -# if !defined (_STLP_ATOMIC_EXCHANGE)
   1.817 +#if defined (_STLP_THREADS) && \
   1.818 +   (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \
   1.819 +    (defined (_STLP_WIN32_VERSION) && (_STLP_WIN32_VERSION <= 0x0400)))
   1.820 +#  define _STLP_USE_MUTEX
   1.821    _STLP_mutex _M_mutex;
   1.822 -# endif
   1.823 +#endif
   1.824  
   1.825 +  public:
   1.826    // Constructor
   1.827    _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
   1.828  
   1.829    // _M_incr and _M_decr
   1.830 -# if defined (_STLP_THREADS) && defined (_STLP_ATOMIC_EXCHANGE)
   1.831 -   void _M_incr() { _STLP_ATOMIC_INCREMENT((__stl_atomic_t*)&_M_ref_count); }
   1.832 -   void _M_decr() { _STLP_ATOMIC_DECREMENT((__stl_atomic_t*)&_M_ref_count); }
   1.833 -# elif defined(_STLP_THREADS)
   1.834 -  void _M_incr() {
   1.835 -    _M_mutex._M_acquire_lock();
   1.836 -    ++_M_ref_count;
   1.837 -    _M_mutex._M_release_lock();
   1.838 +#if defined (_STLP_THREADS)
   1.839 +#  if !defined (_STLP_USE_MUTEX)
   1.840 +   __stl_atomic_t _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); }
   1.841 +   __stl_atomic_t _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); }
   1.842 +#  else
   1.843 +#    undef _STLP_USE_MUTEX
   1.844 +  __stl_atomic_t _M_incr() {
   1.845 +    _STLP_auto_lock l(_M_mutex);
   1.846 +    return ++_M_ref_count;
   1.847    }
   1.848 -  void _M_decr() {
   1.849 -    _M_mutex._M_acquire_lock();
   1.850 -    --_M_ref_count;
   1.851 -    _M_mutex._M_release_lock();
   1.852 +  __stl_atomic_t _M_decr() {
   1.853 +    _STLP_auto_lock l(_M_mutex);
   1.854 +    return --_M_ref_count;
   1.855    }
   1.856 -# else  /* No threads */
   1.857 -  void _M_incr() { ++_M_ref_count; }
   1.858 -  void _M_decr() { --_M_ref_count; }
   1.859 -# endif
   1.860 +#  endif
   1.861 +#else  /* No threads */
   1.862 +  __stl_atomic_t _M_incr() { return ++_M_ref_count; }
   1.863 +  __stl_atomic_t _M_decr() { return --_M_ref_count; }
   1.864 +#endif
   1.865  };
   1.866  
   1.867 -// Atomic swap on unsigned long
   1.868 -// This is guaranteed to behave as though it were atomic only if all
   1.869 -// possibly concurrent updates use _Atomic_swap.
   1.870 -// In some cases the operation is emulated with a lock.
   1.871 -# if defined (_STLP_THREADS)
   1.872 -#  ifdef _STLP_ATOMIC_EXCHANGE
   1.873 -inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
   1.874 -  return (__stl_atomic_t) _STLP_ATOMIC_EXCHANGE(__p,__q);
   1.875 -}
   1.876 -#  elif defined(_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || defined(_STLP_USE_PTHREAD_SPINLOCK)
   1.877 -// We use a template here only to get a unique initialized instance.
   1.878 -template<int __dummy>
   1.879 -struct _Swap_lock_struct {
   1.880 -  static _STLP_STATIC_MUTEX _S_swap_lock;
   1.881 -};
   1.882 +_STLP_END_NAMESPACE
   1.883  
   1.884 +#ifdef __SYMBIAN32__WSD__   
   1.885 +_STLP_DECLSPEC std::_STLP_STATIC_MUTEX& exp_get_threads_S_swap_lock();
   1.886 +_STLP_DECLSPEC std::_STLP_STATIC_MUTEX& exp_get_threads_0_S_swap_lock();
   1.887 +#endif
   1.888  
   1.889 -// This should be portable, but performance is expected
   1.890 -// to be quite awful.  This really needs platform specific
   1.891 -// code.
   1.892 -inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
   1.893 -  _Swap_lock_struct<0>::_S_swap_lock._M_acquire_lock();
   1.894 +_STLP_BEGIN_NAMESPACE
   1.895 +
   1.896 +/* Atomic swap on __stl_atomic_t
   1.897 + * This is guaranteed to behave as though it were atomic only if all
   1.898 + * possibly concurrent updates use _Atomic_swap.
   1.899 + * In some cases the operation is emulated with a lock.
   1.900 + * Idem for _Atomic_swap_ptr
   1.901 + */
    1.902 +/* Helper struct to handle the following cases:
    1.903 + * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*), the atomic
    1.904 + *   exchange can be done on pointers
    1.905 + * - on platforms without atomic operations, the swap is done in a critical
    1.906 + *   section, portable but inefficient.
   1.907 + */
   1.908 +template <int __use_ptr_atomic_swap>
   1.909 +class _Atomic_swap_struct {
   1.910 +public:
   1.911 +#if defined (_STLP_THREADS) && \
   1.912 +    !defined (_STLP_ATOMIC_EXCHANGE) && \
   1.913 +    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
   1.914 +     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
   1.915 +#  define _STLP_USE_ATOMIC_SWAP_MUTEX
   1.916 +#if !defined(__SYMBIAN32__WSD__)
   1.917 +  static _STLP_STATIC_MUTEX _S_swap_lock;  
   1.918 +#else
   1.919 +  static _STLP_STATIC_MUTEX& get_threads_S_swap_lock()
   1.920 +	  { return ::exp_get_threads_S_swap_lock(); }
   1.921 +# define _S_swap_lock get_threads_S_swap_lock()
   1.922 +#endif
   1.923 +#endif
   1.924 +
   1.925 +  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
   1.926 +#if defined (_STLP_THREADS)
   1.927 +#  if defined (_STLP_ATOMIC_EXCHANGE)
   1.928 +  return _STLP_ATOMIC_EXCHANGE(__p, __q);
   1.929 +#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   1.930 +  _S_swap_lock._M_acquire_lock();
   1.931    __stl_atomic_t __result = *__p;
   1.932    *__p = __q;
   1.933 -  _Swap_lock_struct<0>::_S_swap_lock._M_release_lock();
   1.934 +  _S_swap_lock._M_release_lock();
   1.935    return __result;
   1.936 -}
   1.937 -#  endif // _STLP_PTHREADS || _STLP_UITHREADS || _STLP_OS2THREADS || _STLP_USE_PTHREAD_SPINLOCK
   1.938 -# else // !_STLP_THREADS
   1.939 -/* no threads */
   1.940 -static inline __stl_atomic_t  _STLP_CALL
   1.941 -_Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
   1.942 +#  else
   1.943 +#    error Missing atomic swap implementation
   1.944 +#  endif
   1.945 +#else
   1.946 +  /* no threads */
   1.947    __stl_atomic_t __result = *__p;
   1.948    *__p = __q;
   1.949    return __result;
   1.950 +#endif // _STLP_THREADS
   1.951 +  }
   1.952 +
   1.953 +  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
   1.954 +#if defined (_STLP_THREADS)
   1.955 +#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
   1.956 +  return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
   1.957 +#  elif defined (_STLP_ATOMIC_EXCHANGE)
   1.958 +  _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
   1.959 +  return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
   1.960 +                                                         __REINTERPRET_CAST(__stl_atomic_t, __q))
   1.961 +                            );
   1.962 +#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
   1.963 +  _S_swap_lock._M_acquire_lock();
   1.964 +  void *__result = *__p;
   1.965 +  *__p = __q;
   1.966 +  _S_swap_lock._M_release_lock();
   1.967 +  return __result;
   1.968 +#  else
   1.969 +#    error Missing pointer atomic swap implementation
   1.970 +#  endif
   1.971 +#else
   1.972 +  /* no thread */
   1.973 +  void *__result = *__p;
   1.974 +  *__p = __q;
   1.975 +  return __result;
   1.976 +#endif
   1.977 +  }
   1.978 +};
   1.979 +#if defined(__SYMBIAN32__WSD__)
   1.980 +# undef _S_swap_lock
   1.981 +#endif  
   1.982 +
   1.983 +_STLP_TEMPLATE_NULL
   1.984 +class _Atomic_swap_struct<0> {
   1.985 +public:
   1.986 +#if defined (_STLP_THREADS) && \
   1.987 +    (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \
   1.988 +    (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \
   1.989 +     defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS))
   1.990 +#  define _STLP_USE_ATOMIC_SWAP_MUTEX
   1.991 +#if !defined(__SYMBIAN32__WSD__)
   1.992 +  static _STLP_STATIC_MUTEX _S_swap_lock;
   1.993 +#else
   1.994 +  static _STLP_STATIC_MUTEX& get_threads_0_S_swap_lock()
   1.995 +	  { return ::exp_get_threads_0_S_swap_lock(); }
   1.996 +# define _S_swap_lock get_threads_0_S_swap_lock()
   1.997 +#endif  
   1.998 +#endif
   1.999 +
  1.1000 +  static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) {
  1.1001 +#if defined (_STLP_THREADS)
  1.1002 +#  if defined (_STLP_ATOMIC_EXCHANGE)
  1.1003 +  return _STLP_ATOMIC_EXCHANGE(__p, __q);
  1.1004 +#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  1.1005 +  /* This should be portable, but performance is expected
  1.1006 +   * to be quite awful.  This really needs platform specific
  1.1007 +   * code.
  1.1008 +   */
  1.1009 +  _S_swap_lock._M_acquire_lock();
  1.1010 +  __stl_atomic_t __result = *__p;
  1.1011 +  *__p = __q;
  1.1012 +  _S_swap_lock._M_release_lock();
  1.1013 +  return __result;
  1.1014 +#  else
  1.1015 +#    error Missing atomic swap implementation
  1.1016 +#  endif
  1.1017 +#else
  1.1018 +  /* no threads */
  1.1019 +  __stl_atomic_t __result = *__p;
  1.1020 +  *__p = __q;
  1.1021 +  return __result;
  1.1022 +#endif // _STLP_THREADS
  1.1023 +  }
  1.1024 +
  1.1025 +  static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
  1.1026 +#if defined (_STLP_THREADS)
  1.1027 +#  if defined (_STLP_ATOMIC_EXCHANGE_PTR)
  1.1028 +  return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q);
  1.1029 +#  elif defined (_STLP_ATOMIC_EXCHANGE)
  1.1030 +  _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*))
  1.1031 +  return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p),
  1.1032 +                                                         __REINTERPRET_CAST(__stl_atomic_t, __q))
  1.1033 +                            );
  1.1034 +#  elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
  1.1035 +  _S_swap_lock._M_acquire_lock();
  1.1036 +  void *__result = *__p;
  1.1037 +  *__p = __q;
  1.1038 +  _S_swap_lock._M_release_lock();
  1.1039 +  return __result;
  1.1040 +#  else
  1.1041 +#    error Missing pointer atomic swap implementation
  1.1042 +#  endif
  1.1043 +#else
  1.1044 +  /* no thread */
  1.1045 +  void *__result = *__p;
  1.1046 +  *__p = __q;
  1.1047 +  return __result;
  1.1048 +#endif
  1.1049 +  }
  1.1050 +};
  1.1051 +#if defined(__SYMBIAN32__WSD__)
  1.1052 +# undef _S_swap_lock
  1.1053 +#endif  
  1.1054 +
  1.1055 +#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
  1.1056 +#  pragma warning (push)
  1.1057 +#  pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used
  1.1058 +#endif
  1.1059 +
  1.1060 +inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) {
  1.1061 +  const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
  1.1062 +  return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q);
  1.1063  }
  1.1064 -# endif // _STLP_THREADS
  1.1065  
  1.1066 -// A locking class that uses _STLP_STATIC_MUTEX.  The constructor takes
  1.1067 -// a reference to an _STLP_STATIC_MUTEX, and acquires a lock.  The destructor
  1.1068 -// releases the lock.
  1.1069 +inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) {
  1.1070 +  const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*);
  1.1071 +  return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q);
  1.1072 +}
  1.1073  
  1.1074 -struct _STLP_CLASS_DECLSPEC _STLP_auto_lock
  1.1075 -{
  1.1076 -  _STLP_STATIC_MUTEX& _M_lock;
  1.1077 -  
  1.1078 -  _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
  1.1079 -    { _M_lock._M_acquire_lock(); }
  1.1080 -  ~_STLP_auto_lock() { _M_lock._M_release_lock(); }
  1.1081 +#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300)
  1.1082 +#  pragma warning (pop)
  1.1083 +#endif
  1.1084  
  1.1085 -private:
  1.1086 -  void operator=(const _STLP_auto_lock&);
  1.1087 -  _STLP_auto_lock(const _STLP_auto_lock&);
  1.1088 -};
  1.1089 -
  1.1090 -typedef _STLP_auto_lock _STLP_mutex_lock;
  1.1091 -
  1.1092 -#ifdef _STLP_BETHREADS
  1.1093 -
  1.1094 +#if defined (_STLP_BETHREADS)
  1.1095  template <int __inst>
  1.1096 -struct _STLP_beos_static_lock_data
  1.1097 -{
  1.1098 -	static bool is_init;
  1.1099 -	struct mutex_t : public _STLP_mutex
  1.1100 -	{
  1.1101 -		mutex_t()
  1.1102 -		{
  1.1103 -			_STLP_beos_static_lock_data<0>::is_init = true;
  1.1104 -		}
  1.1105 -		~mutex_t()
  1.1106 -		{
  1.1107 -			_STLP_beos_static_lock_data<0>::is_init = false;
  1.1108 -		}
  1.1109 -	};
  1.1110 -	static mutex_t mut;
  1.1111 +struct _STLP_beos_static_lock_data {
  1.1112 +  static bool is_init;
  1.1113 +  struct mutex_t : public _STLP_mutex {
  1.1114 +    mutex_t()
  1.1115 +    { _STLP_beos_static_lock_data<0>::is_init = true; }
  1.1116 +    ~mutex_t()
  1.1117 +    { _STLP_beos_static_lock_data<0>::is_init = false; }
  1.1118 +  };
  1.1119 +  static mutex_t mut;
  1.1120  };
  1.1121  
  1.1122  template <int __inst>
  1.1123 @@ -626,41 +697,36 @@
  1.1124  template <int __inst>
  1.1125  typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;
  1.1126  
  1.1127 -
  1.1128 -inline void _STLP_mutex_base::_M_acquire_lock() 
  1.1129 -{
  1.1130 -	if(sem == 0)
  1.1131 -	{
  1.1132 -		// we need to initialise on demand here
  1.1133 -		// to prevent race conditions use our global
  1.1134 -		// mutex if it's available:
  1.1135 -		if(_STLP_beos_static_lock_data<0>::is_init)
  1.1136 -		{
  1.1137 -			_STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
  1.1138 -			if(sem == 0) _M_initialize();
  1.1139 -		}
  1.1140 -		else
  1.1141 -		{
  1.1142 -			// no lock available, we must still be
  1.1143 -			// in startup code, THERE MUST BE ONE THREAD
  1.1144 -			// ONLY active at this point.
  1.1145 -			_M_initialize();
  1.1146 -		}
  1.1147 -	}
  1.1148 -	_M_acquire_lock_nodemand();
  1.1149 +inline void _STLP_mutex_base::_M_acquire_lock() {
  1.1150 +  if (sem == 0) {
  1.1151 +    // we need to initialise on demand here
  1.1152 +    // to prevent race conditions use our global
  1.1153 +    // mutex if it's available:
  1.1154 +    if (_STLP_beos_static_lock_data<0>::is_init) {
  1.1155 +      _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
  1.1156 +      if (sem == 0) _M_initialize();
  1.1157 +    }
  1.1158 +    else {
  1.1159 +      // no lock available, we must still be
  1.1160 +      // in startup code, THERE MUST BE ONE THREAD
  1.1161 +      // ONLY active at this point.
  1.1162 +      _M_initialize();
  1.1163 +    }
  1.1164 +  }
  1.1165 +  status_t t;
  1.1166 +  t = acquire_sem(sem);
  1.1167 +  assert(t == B_NO_ERROR);
  1.1168  }
  1.1169 -
  1.1170  #endif
  1.1171  
  1.1172  _STLP_END_NAMESPACE
  1.1173  
  1.1174 -# if !defined (_STLP_LINK_TIME_INSTANTIATION)
  1.1175 +#if !defined (_STLP_LINK_TIME_INSTANTIATION)
  1.1176  #  include <stl/_threads.c>
  1.1177 -# endif
  1.1178 +#endif
  1.1179  
  1.1180  #endif /* _STLP_INTERNAL_THREADS_H */
  1.1181  
  1.1182  // Local Variables:
  1.1183  // mode:C++
  1.1184  // End:
  1.1185 -
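
A minimal usage sketch, not part of the changeset above: it assumes an STLport build where this internal header is reachable through the library's own include chain, and every _Example_* name is hypothetical. It exercises the pieces the diff touches: the _STLP_ATOMIC_INCREMENT macro documented in the new comment block, a statically initialized _STLP_STATIC_MUTEX guarded by the RAII helper _STLP_auto_lock, and _Refcount_Base, whose _M_incr()/_M_decr() now return the updated count.

// Usage sketch only -- _Example_* names are hypothetical.
#include <stl/_threads.h>   // internal header, normally pulled in indirectly

_STLP_BEGIN_NAMESPACE

// Static mutex: _STLP_MUTEX_INITIALIZER expands to an aggregate
// initializer where one is available, and to nothing otherwise.
static _STLP_STATIC_MUTEX _S_example_lock _STLP_MUTEX_INITIALIZER;
static __stl_atomic_t _S_example_count = 0;

inline __stl_atomic_t _Example_bump() {
#if defined (_STLP_ATOMIC_INCREMENT)
  // Preferred path: the platform macro documented in the header.
  return _STLP_ATOMIC_INCREMENT(&_S_example_count);
#else
  // Portable fallback: serialize with the RAII guard defined in the header
  // (constructor acquires the mutex, destructor releases it).
  _STLP_auto_lock __guard(_S_example_lock);
  return ++_S_example_count;
#endif
}

// _Refcount_Base uses _STLP_ATOMIC_INCREMENT/_STLP_ATOMIC_DECREMENT when the
// platform provides them and falls back to an embedded _STLP_mutex otherwise.
struct _Example_shared : public _Refcount_Base {
  _Example_shared() : _Refcount_Base(1) {}
};

inline void _Example_release(_Example_shared* __p) {
  if (__p->_M_decr() == 0)   // _M_decr now returns the new count
    delete __p;
}

_STLP_END_NAMESPACE

The counter above could likewise be reset with _Atomic_swap(&_S_example_count, 0), which maps to _STLP_ATOMIC_EXCHANGE where the platform provides it and otherwise falls back to the _S_swap_lock critical section shown in the diff.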