diff -r e1b950c65cb4 -r 837f303aceeb epoc32/include/stdapis/stlportv5/stl/_threads.h
--- a/epoc32/include/stdapis/stlportv5/stl/_threads.h	Wed Mar 31 12:27:01 2010 +0100
+++ b/epoc32/include/stdapis/stlportv5/stl/_threads.h	Wed Mar 31 12:33:34 2010 +0100
@@ -1,14 +1,16 @@
 /*
+ * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
+ *
  * Copyright (c) 1997-1999
  * Silicon Graphics Computer Systems, Inc.
  *
- * Copyright (c) 1999 
+ * Copyright (c) 1999
  * Boris Fomitchev
  *
  * This material is provided "as is", with absolutely no warranty expressed
  * or implied. Any use is at your own risk.
  *
- * Permission to use or copy this software for any purpose is hereby granted 
+ * Permission to use or copy this software for any purpose is hereby granted
  * without fee, provided the above notices are retained on all copies.
  * Permission to modify the code and to distribute modified code is granted,
  * provided the above notices are retained, and a notice that the code was
@@ -19,7 +21,6 @@
 // WARNING: This is an internal header file, included by other C++
 // standard library headers. You should not attempt to use this header
 // file directly.
-// Stl_config.h should be included before this file.
 
 
 #ifndef _STLP_INTERNAL_THREADS_H
@@ -30,226 +31,202 @@
 // threads standard), and Win32 threads. Uithread support by Jochen
 // Schlick, 1999, and Solaris threads generalized to them.
 
-#ifndef _STLP_CONFIG_H
-#include
+#ifndef _STLP_INTERNAL_CSTDDEF
+# include <stl/_cstddef.h>
 #endif
 
-# if ! defined (_STLP_CSTDDEF)
-# include <cstddef>
-# endif
-
-# if ! defined (_STLP_CSTDLIB)
-# include <cstdlib>
-# endif
+#ifndef _STLP_INTERNAL_CSTDLIB
+# include <stl/_cstdlib.h>
+#endif
 
 // On SUN and Mac OS X gcc, zero-initialization works just fine...
-# if defined (__sun) || ( defined(__GNUC__) && defined(__APPLE__) )
-# define _STLP_MUTEX_INITIALIZER
-# endif
-
-# if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
-  typedef long __stl_atomic_t;
-# else
-# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
-// using _STLP_VENDOR_CSTD::size_t;
-using namespace _STLP_VENDOR_CSTD;
-# endif
-  typedef size_t __stl_atomic_t;
-#endif
-
-# if defined(_STLP_SGI_THREADS)
-# include <mutex.h>
-// Hack for SGI o32 compilers.
-#if !defined(__add_and_fetch) && \
-    (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
-# define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
-# define __test_and_set(__l,__v) test_and_set(__l,__v)
-#endif /* o32 */
-
-# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
-# define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
-# else
-# define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
-# endif
-
-# define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
-# define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
-
-# elif defined (__GNUC__) && defined (__i386__) && defined (__unix__) && defined (_STLP_USE_INLINE_X86_SPINLOCK)
-
-// gcc on i386 linux, freebsd, etc.
-
-// This enables the memory caching on x86 linux. It is critical for SMP
-// without it the performace is DISMAL!
-static inline unsigned long __xchg(volatile __stl_atomic_t* target, int source)
-{
-
-  // The target is refernce in memory rather than the register
-  // because making a copy of it from memory to the register and
-  // back again would ruin the atomic nature of the call.
-  // the source does not need to be delt with atomicly so it can
-  // be copied about as needed.
- // - // The casting of the source is used to prevent gcc from optimizing - // in such a way that breaks the atomic nature of this call. - // - __asm__ __volatile__("xchgl %1,%0" - :"=m" (*(volatile long *) target), "=r" (source) - :"m" (*(volatile long *) target), "r" (source) ); - return source; - - // The assembly above does the following atomicly: - // int temp=source; - // source=(int)(*target); - // (int)(*target)=temp; - // return source -} - -static inline void __inc_and_fetch(volatile __stl_atomic_t* __x) -{ - // Referenced in memory rather than register to preserve the atomic nature. - // - __asm__ __volatile__( - "lock; incl %0" - :"=m" (*__x) - :"m" (*__x) ); - - // The assembly above does the following atomicly: - // ++(int)(*__x); - -} -static inline void __dec_and_fetch(volatile __stl_atomic_t* __x) -{ - // Referenced in memory rather than register to preserve the atomic nature. - // - __asm__ __volatile__( - "lock; decl %0" - :"=m" (*__x) - :"m" (*__x) ); - - // The assembly above does the following atomicly: - // --(int)(*__x); -} - -# define _STLP_ATOMIC_EXCHANGE(target, newValue) ((__xchg(target, newValue))) -# define _STLP_ATOMIC_INCREMENT(__x) __inc_and_fetch(__x) -# define _STLP_ATOMIC_DECREMENT(__x) __dec_and_fetch(__x) - -# elif defined(_STLP_PTHREADS) - -# include -# ifndef _STLP_USE_PTHREAD_SPINLOCK -# if defined(PTHREAD_MUTEX_INITIALIZER) && !defined(_STLP_MUTEX_INITIALIZER) -# define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER } -# endif - -//HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl -# if defined(_DECTHREADS_) && (defined(_PTHREAD_USE_D4) || defined(__hpux)) && !defined(_CMA_SUPPRESS_EXTERNALS_) -# define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default -# else -# define _STLP_PTHREAD_ATTR_DEFAULT 0 -# endif -# endif // !_STLP_USE_PTHREAD_SPINLOCK - -# elif defined(_STLP_WIN32THREADS) -# if !defined (_STLP_WINDOWS_H_INCLUDED) && ! defined (_WINDOWS_H) -# if ! (defined ( _STLP_MSVC ) || defined (__BORLANDC__) || defined (__ICL) || defined (__WATCOMC__) || defined (__MINGW32__) || defined (__DMC__)) -# ifdef _STLP_USE_MFC -# include -# else -# include -# endif -# define _STLP_WINDOWS_H_INCLUDED -# else -// This section serves as a replacement for windows.h header for Visual C++ -extern "C" { -# if (defined(_M_MRX000) || defined(_M_ALPHA) \ - || (defined(_M_PPC) && (_MSC_VER >= 1000))) && !defined(RC_INVOKED) -# define InterlockedIncrement _InterlockedIncrement -# define InterlockedDecrement _InterlockedDecrement -# define InterlockedExchange _InterlockedExchange -# define _STLP_STDCALL -# else -# ifdef _MAC -# define _STLP_STDCALL _cdecl -# else -# define _STLP_STDCALL __stdcall -# endif -# endif - -#if (_MSC_VER >= 1300) || defined (_STLP_NEW_PLATFORM_SDK) -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long volatile *); -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long volatile *); -_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long volatile *, long); -#else - // boris : for the latest SDK, you may actually need the other version of the declaration (above) - // even for earlier VC++ versions. There is no way to tell SDK versions apart, sorry ... 
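The hand-written x86 assembly being deleted above is easier to follow beside its modern equivalent. A minimal sketch of the same three primitives using GCC's `__atomic` builtins — illustration only, assuming a current GCC or Clang; the helper names are hypothetical and nothing below is part of this patch or of STLport:

#include <cstdio>

// Hypothetical modern stand-ins for the removed __xchg / __inc_and_fetch /
// __dec_and_fetch helpers; the builtins emit the same kind of lock-prefixed
// instructions the removed inline assembly wrote by hand.
static inline long xchg_builtin(volatile long* target, long source) {
  // Atomic swap: store source into *target, return the previous *target.
  return __atomic_exchange_n(target, source, __ATOMIC_SEQ_CST);
}
static inline void inc_builtin(volatile long* x) {
  __atomic_add_fetch(x, 1, __ATOMIC_SEQ_CST);   // atomic ++(*x)
}
static inline void dec_builtin(volatile long* x) {
  __atomic_sub_fetch(x, 1, __ATOMIC_SEQ_CST);   // atomic --(*x)
}

int main() {
  volatile long v = 1;
  long old = xchg_builtin(&v, 5);   // old == 1, v == 5
  inc_builtin(&v);                  // v == 6
  dec_builtin(&v);                  // v == 5
  std::printf("%ld %ld\n", old, static_cast<long>(v));
  return 0;
}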
-_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long*);
-_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long*);
-_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long*, long);
+#if defined (__sun) || (defined (__GNUC__) && defined(__APPLE__))
+# define _STLP_MUTEX_INITIALIZER
 #endif
 
-_STLP_IMPORT_DECLSPEC void _STLP_STDCALL Sleep(unsigned long);
-_STLP_IMPORT_DECLSPEC void _STLP_STDCALL OutputDebugStringA( const char* lpOutputString );
+/* This header defines the following atomic operations that platforms should
+ * try to support as much as possible. Atomic operations are exposed as macros
+ * in order to easily test for their existence. They are:
+ * __stl_atomic_t _STLP_ATOMIC_INCREMENT(volatile __stl_atomic_t* __ptr) :
+ * increment *__ptr by 1 and returns the new value
+ * __stl_atomic_t _STLP_ATOMIC_DECREMENT(volatile __stl_atomic_t* __ptr) :
+ * decrement *__ptr by 1 and returns the new value
+ * __stl_atomic_t _STLP_ATOMIC_EXCHANGE(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
+ * assign __val to *__target and returns the former *__target value
+ * void* _STLP_ATOMIC_EXCHANGE_PTR(void* volatile* __target, void* __ptr) :
+ * assign __ptr to *__target and returns the former *__target value
+ * __stl_atomic_t _STLP_ATOMIC_ADD(volatile __stl_atomic_t* __target, __stl_atomic_t __val) :
+ * does *__target = *__target + __val and returns the old *__target value
+ */
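The contract above maps one-for-one onto C++11 `<atomic>`. A compilable sketch of the documented return conventions — illustration only, assuming a C++11 compiler; STLport itself predates `<atomic>` and implements these macros per platform below:

#include <atomic>
#include <cassert>

typedef long atomic_word;              // hypothetical stand-in for __stl_atomic_t

int main() {
  std::atomic<atomic_word> target(0);
  std::atomic<void*> slot(0);
  int object = 0;

  // _STLP_ATOMIC_INCREMENT / _STLP_ATOMIC_DECREMENT return the new value:
  assert(target.fetch_add(1) + 1 == 1);
  assert(target.fetch_sub(1) - 1 == 0);

  // _STLP_ATOMIC_EXCHANGE and _STLP_ATOMIC_EXCHANGE_PTR return the former value:
  assert(target.exchange(42) == 0);
  assert(slot.exchange(&object) == 0);

  // _STLP_ATOMIC_ADD: *__target += __val, returning the old *__target
  // (as documented above; InterlockedExchangeAdd follows the same convention):
  assert(target.fetch_add(8) == 42 && target.load() == 50);
  return 0;
}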
-#ifdef _STLP_DEBUG
-typedef unsigned long DWORD;
-_STLP_IMPORT_DECLSPEC DWORD _STLP_STDCALL GetCurrentThreadId();
-#endif /* _STLP_DEBUG */
+#if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
+typedef long __stl_atomic_t;
+#else
+/* Don't import whole namespace!!!! - ptr */
+// # if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
+// // using _STLP_VENDOR_CSTD::size_t;
+// using namespace _STLP_VENDOR_CSTD;
+// # endif
+typedef size_t __stl_atomic_t;
+#endif
 
-# if defined (InterlockedIncrement)
-# pragma intrinsic(_InterlockedIncrement)
-# pragma intrinsic(_InterlockedDecrement)
-# pragma intrinsic(_InterlockedExchange)
-# endif
-} /* extern "C" */
-# endif /* STL_MSVC */
-
-# define _STLP_WINDOWS_H_INCLUDED
-
-# endif /* _STLP_WIN32 */
+#if defined (_STLP_THREADS)
+
+# if defined (_STLP_SGI_THREADS)
+
+# include <mutex.h>
+// Hack for SGI o32 compilers.
+# if !defined(__add_and_fetch) && \
+ (__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
+# define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
+# define __test_and_set(__l,__v) test_and_set(__l,__v)
+# endif /* o32 */
+
+# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
+# define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
+# else
+# define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
+# endif
 
-# ifndef _STLP_ATOMIC_INCREMENT
-# define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement((long*)__x)
-# define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement((long*)__x)
-# define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange((long*)__x, (long)__y)
-# endif
+# define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
+# define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
+
+# elif defined (_STLP_PTHREADS)
+
+# include <pthread.h>
+# if !defined (_STLP_USE_PTHREAD_SPINLOCK)
+# if defined (PTHREAD_MUTEX_INITIALIZER) && !defined (_STLP_MUTEX_INITIALIZER) && defined (_REENTRANT)
+# define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
+# endif
 //HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
-# elif defined(__DECC) || defined(__DECCXX)
-# include <machine/builtins.h>
-# define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
-# define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
-# define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
-# elif defined(_STLP_SPARC_SOLARIS_THREADS)
-# include
-# elif defined (_STLP_UITHREADS)
+# if defined (_DECTHREADS_) && (defined (_PTHREAD_USE_D4) || defined (__hpux)) && !defined (_CMA_SUPPRESS_EXTERNALS_)
+# define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
+# else
+# define _STLP_PTHREAD_ATTR_DEFAULT 0
+# endif
+# else // _STLP_USE_PTHREAD_SPINLOCK
+# if defined (__OpenBSD__)
+# include <spinlock.h>
+# endif
+# endif // _STLP_USE_PTHREAD_SPINLOCK
 
+# if defined (__GNUC__) && defined (__i386__)
+
+# if !defined (_STLP_ATOMIC_INCREMENT)
+inline long _STLP_atomic_increment_gcc_x86(long volatile* p) {
+  long result;
+  __asm__ __volatile__
+    ("lock; xaddl %1, %0;"
+     :"=m" (*p), "=r" (result)
+     :"m" (*p), "1" (1)
+     :"cc");
+  return result + 1;
+}
+# define _STLP_ATOMIC_INCREMENT(__x) (_STLP_atomic_increment_gcc_x86((long volatile*)__x))
+# endif
+
+# if !defined (_STLP_ATOMIC_DECREMENT)
+inline long _STLP_atomic_decrement_gcc_x86(long volatile* p) {
+  long result;
+  __asm__ __volatile__
+    ("lock; xaddl %1, %0;"
+     :"=m" (*p), "=r" (result)
+     :"m" (*p), "1" (-1)
+     :"cc");
+  return result - 1;
+}
+# define _STLP_ATOMIC_DECREMENT(__x) (_STLP_atomic_decrement_gcc_x86((long volatile*)__x))
+# endif
+
+# if !defined (_STLP_ATOMIC_ADD)
+inline long _STLP_atomic_add_gcc_x86(long volatile* p, long addend) {
+  long result;
+  __asm__ __volatile__
+    ("lock; xaddl %1, %0;"
+     :"=m" (*p), "=r" (result)
+     :"m" (*p), "1" (addend)
+     :"cc");
+  return result + addend;
+}
+# define _STLP_ATOMIC_ADD(__dst, __val) (_STLP_atomic_add_gcc_x86((long volatile*)__dst, (long)__val))
+# endif
+
+# endif /* if defined(__GNUC__) && defined(__i386__) */
+
+# elif defined (_STLP_WIN32THREADS)
+
+# if !defined (_STLP_ATOMIC_INCREMENT)
+# if !defined (_STLP_NEW_PLATFORM_SDK)
+# define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__CONST_CAST(long*, __x))
+# define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__CONST_CAST(long*, __x))
+# define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__CONST_CAST(long*, __x), __y)
+# else
+# define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement(__x)
+# define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement(__x)
+# define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange(__x, __y)
+# endif
+# define _STLP_ATOMIC_EXCHANGE_PTR(__x, __y) STLPInterlockedExchangePointer(__x, __y)
+/*
+ * The following functionality is only available since Windows 98; those that are targeting previous OSes
+ * should define _WIN32_WINDOWS to a value lower than the one of Win 98, see the Platform SDK documentation for
+ * more information:
+ */
+# if defined (_STLP_NEW_PLATFORM_SDK) && (!defined (_STLP_WIN32_VERSION) || (_STLP_WIN32_VERSION >= 0x0410))
+# define _STLP_ATOMIC_ADD(__dst, __val) InterlockedExchangeAdd(__dst, __val)
+# endif
+# endif
+
+# elif defined (__DECC) || defined (__DECCXX)
+
+# include <machine/builtins.h>
+# define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
+# define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
+# define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
+
+# elif defined(_STLP_SPARC_SOLARIS_THREADS)
+
+# include <stl/_sparc_atomic.h>
+
+# elif defined (_STLP_UITHREADS)
+
 // this inclusion is potential hazard to bring up all sorts
 // of old-style headers. Let's assume vendor already know how
 // to deal with that.
-# include <ctime>
+# ifndef _STLP_INTERNAL_CTIME
+# include <stl/_ctime.h>
+# endif
-# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
+# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
 using _STLP_VENDOR_CSTD::time_t;
-# endif
-# include <synch.h>
-# include
-# include
-# elif defined (_STLP_BETHREADS)
-# include <OS.h>
-#include <assert.h>
-#include <stdio.h>
-# define _STLP_MUTEX_INITIALIZER = { 0 }
-#elif defined(_STLP_OS2THREADS)
-# ifdef __GNUC__
-# define INCL_DOSSEMAPHORES
-# include <os2.h>
-# else
- // This section serves to replace os2.h for VisualAge C++
+# endif
+# include <synch.h>
+# include
+# include
+
+# elif defined (_STLP_BETHREADS)
+
+# include <OS.h>
+# include <assert.h>
+# include <stdio.h>
+# define _STLP_MUTEX_INITIALIZER = { 0 }
+
+# elif defined (_STLP_NWTHREADS)
+
+# include <nwthread.h>
+# include <nwsemaph.h>
+
+# elif defined(_STLP_OS2THREADS)
+
+# if defined (__GNUC__)
+# define INCL_DOSSEMAPHORES
+# include <os2.h>
+# else
+// This section serves to replace os2.h for VisualAge C++
 typedef unsigned long ULONG;
- #ifndef __HEV__ /* INCL_SEMAPHORE may also define HEV */
- #define __HEV__
-  typedef ULONG HEV;
-  typedef HEV* PHEV;
- #endif
+# if !defined (__HEV__) /* INCL_SEMAPHORE may also define HEV */
+# define __HEV__
+  typedef ULONG HEV;
+  typedef HEV* PHEV;
+# endif
 typedef ULONG APIRET;
 typedef ULONG HMTX;
 typedef HMTX* PHMTX;
@@ -259,35 +236,42 @@
 APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
 APIRET _System DosReleaseMutexSem(HMTX hmtx);
 APIRET _System DosCloseMutexSem(HMTX hmtx);
-# define _STLP_MUTEX_INITIALIZER = { 0 };
-# endif /* GNUC */
-# elif defined(_STLP_VXWORKS_THREADS)
-# include "semLib.h"
-# endif
+# define _STLP_MUTEX_INITIALIZER = { 0 }
+# endif /* GNUC */
 
-# ifndef _STLP_MUTEX_INITIALIZER
-# if defined(_STLP_ATOMIC_EXCHANGE)
-// we are using our own spinlock.
-# define _STLP_MUTEX_INITIALIZER = { 0 }
-# elif defined(_STLP_UITHREADS)
-// known case
-# define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
-# else
-// we do not have static initializer available. therefore, on-demand synchronization is needed.
-# define _STLP_MUTEX_INITIALIZER -# define _STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION -# endif -# endif +# endif + +#else +/* no threads */ +# define _STLP_ATOMIC_INCREMENT(__x) ++(*__x) +# define _STLP_ATOMIC_DECREMENT(__x) --(*__x) +/* We do not grant other atomic operations as they are useless if STLport do not have + * to be thread safe + */ +#endif + +#if !defined (_STLP_MUTEX_INITIALIZER) +# if defined(_STLP_ATOMIC_EXCHANGE) +# define _STLP_MUTEX_INITIALIZER = { 0 } +# elif defined(_STLP_UITHREADS) +# define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX } +# else +# define _STLP_MUTEX_INITIALIZER +# endif +#endif + _STLP_BEGIN_NAMESPACE -#ifndef _STLP_USE_PTHREAD_SPINLOCK +#if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK) // Helper struct. This is a workaround for various compilers that don't // handle static variables in inline functions properly. template struct _STLP_mutex_spin { enum { __low_max = 30, __high_max = 1000 }; // Low if we suspect uniprocessor, high for multiprocessor. + //Note: For SYMBIAN Emulator, these entries are to be considered WSD. + //Still, EWSD solution can't be applied since it's templated. static unsigned __max; static unsigned __last; static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock); @@ -295,7 +279,6 @@ }; #endif // !_STLP_USE_PTHREAD_SPINLOCK - // Locking class. Note that this class *does not have a constructor*. // It must be initialized either statically, with _STLP_MUTEX_INITIALIZER, // or dynamically, by explicitly calling the _M_initialize member function. @@ -310,17 +293,15 @@ // For non-static cases, clients should use _STLP_mutex. -struct _STLP_CLASS_DECLSPEC _STLP_mutex_base -{ -#if defined(_STLP_ATOMIC_EXCHANGE) || defined(_STLP_SGI_THREADS) +struct _STLP_CLASS_DECLSPEC _STLP_mutex_base { +#if defined (_STLP_ATOMIC_EXCHANGE) || defined (_STLP_SGI_THREADS) // It should be relatively easy to get this to work on any modern Unix. volatile __stl_atomic_t _M_lock; #endif -#ifdef _STLP_THREADS - -# ifdef _STLP_ATOMIC_EXCHANGE - inline void _M_initialize() { _M_lock=0; } +#if defined (_STLP_THREADS) +# if defined (_STLP_ATOMIC_EXCHANGE) + inline void _M_initialize() { _M_lock = 0; } inline void _M_destroy() {} void _M_acquire_lock() { @@ -329,147 +310,111 @@ inline void _M_release_lock() { volatile __stl_atomic_t* __lock = &_M_lock; -# if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3 - asm("sync"); - *__lock = 0; -# elif defined(_STLP_SGI_THREADS) && __mips >= 3 \ - && (defined (_ABIN32) || defined(_ABI64)) - __lock_release(__lock); -# elif defined (_STLP_SPARC_SOLARIS_THREADS) -# if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus) - asm("membar #StoreStore ; membar #LoadStore"); -# else - asm(" stbar "); -# endif - *__lock = 0; -# else - *__lock = 0; - // This is not sufficient on many multiprocessors, since - // writes to protected variables and the lock may be reordered. 
-# endif +# if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3 + asm("sync"); + *__lock = 0; +# elif defined(_STLP_SGI_THREADS) && __mips >= 3 && \ + (defined (_ABIN32) || defined(_ABI64)) + __lock_release(__lock); +# elif defined (_STLP_SPARC_SOLARIS_THREADS) +# if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus) + asm("membar #StoreStore ; membar #LoadStore"); +# else + asm(" stbar "); +# endif + *__lock = 0; +# else + *__lock = 0; + // This is not sufficient on many multiprocessors, since + // writes to protected variables and the lock may be reordered. +# endif } -# elif defined(_STLP_PTHREADS) -# ifdef _STLP_USE_PTHREAD_SPINLOCK +# elif defined (_STLP_PTHREADS) +# if defined (_STLP_USE_PTHREAD_SPINLOCK) +# if !defined (__OpenBSD__) pthread_spinlock_t _M_lock; inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); } inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); } - inline void _M_acquire_lock() { - // we do not care about race conditions here : there is only one thread at this point - if(!_M_lock) pthread_spin_init( &_M_lock, 0 ); + // sorry, but no static initializer for pthread_spinlock_t; + // this will not work for compilers that has problems with call + // constructor of static object... - // fbp: here, initialization on demand should happen before the lock - // we use simple strategy as we are sure this only happens on initialization - pthread_spin_lock( &_M_lock ); - } + // _STLP_mutex_base() + // { pthread_spin_init( &_M_lock, 0 ); } - inline void _M_acquire_lock_nodemand() { - pthread_spin_lock( &_M_lock ); - } + // ~_STLP_mutex_base() + // { pthread_spin_destroy( &_M_lock ); } + + inline void _M_acquire_lock() { pthread_spin_lock( &_M_lock ); } inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); } -# else // !_STLP_USE_PTHREAD_SPINLOCK +# else // __OpenBSD__ + spinlock_t _M_lock; + inline void _M_initialize() { _SPINLOCK_INIT( &_M_lock ); } + inline void _M_destroy() { } + inline void _M_acquire_lock() { _SPINLOCK( &_M_lock ); } + inline void _M_release_lock() { _SPINUNLOCK( &_M_lock ); } +# endif // __OpenBSD__ +# else // !_STLP_USE_PTHREAD_SPINLOCK pthread_mutex_t _M_lock; - - inline void _M_initialize() { - pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); - } - inline void _M_destroy() { - pthread_mutex_destroy(&_M_lock); - } - inline void _M_acquire_lock_nodemand() { + inline void _M_initialize() + { pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT); } + inline void _M_destroy() + { pthread_mutex_destroy(&_M_lock); } + inline void _M_acquire_lock() { +# if defined ( __hpux ) && ! 
defined (PTHREAD_MUTEX_INITIALIZER) + if (!_M_lock.field1) _M_initialize(); +# endif pthread_mutex_lock(&_M_lock); } + inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); } +# endif // !_STLP_USE_PTHREAD_SPINLOCK - inline void _M_acquire_lock() { -# if defined (__hpux) && !defined (PTHREAD_MUTEX_INITIALIZER) - if (!_M_lock.field1) _M_initialize(); -# endif - pthread_mutex_lock(&_M_lock); - } - inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); } -# endif // !_STLP_USE_PTHREAD_SPINLOCK - -# elif defined (_STLP_UITHREADS) +# elif defined (_STLP_UITHREADS) mutex_t _M_lock; - inline void _M_initialize() { - mutex_init(&_M_lock,0,NULL); - } - inline void _M_destroy() { - mutex_destroy(&_M_lock); - } + inline void _M_initialize() + { mutex_init(&_M_lock, 0, NULL); } + inline void _M_destroy() + { mutex_destroy(&_M_lock); } inline void _M_acquire_lock() { mutex_lock(&_M_lock); } inline void _M_release_lock() { mutex_unlock(&_M_lock); } -# elif defined(_STLP_OS2THREADS) +# elif defined (_STLP_OS2THREADS) HMTX _M_lock; inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); } inline void _M_destroy() { DosCloseMutexSem(_M_lock); } - inline void _M_acquire_lock_nodemand() { - DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT); - } inline void _M_acquire_lock() { - if(!_M_lock) _M_initialize(); + if (!_M_lock) _M_initialize(); DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT); } inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); } -# elif defined(_STLP_BETHREADS) +# elif defined (_STLP_BETHREADS) sem_id sem; - inline void _M_initialize() - { - sem = create_sem(1, "STLPort"); - assert(sem > 0); + inline void _M_initialize() { + sem = create_sem(1, "STLPort"); + assert(sem > 0); } - inline void _M_destroy() - { - int t = delete_sem(sem); - assert(t == B_NO_ERROR); - } - inline void _M_acquire_lock_nodemand() - { - status_t t; - t = acquire_sem(sem); + inline void _M_destroy() { + int t = delete_sem(sem); assert(t == B_NO_ERROR); } inline void _M_acquire_lock(); - inline void _M_release_lock() - { - status_t t = release_sem(sem); - assert(t == B_NO_ERROR); + inline void _M_release_lock() { + status_t t = release_sem(sem); + assert(t == B_NO_ERROR); } -# elif defined(_STLP_VXWORKS_THREADS) - SEM_ID _M_sem; - inline void _M_initialize() - { - _M_sem = semMCreate(SEM_Q_FIFO); - assert(_M_sem > 0); - } - inline void _M_destroy() - { - STATUS __s; - semDelete (_M_sem); - assert(__s == OK); - } - inline void _M_acquire_lock_nodemand() - { - STATUS __s; - semTake (_M_sem, WAIT_FOREVER); - assert(__s == OK); - } +# elif defined (_STLP_NWTHREADS) + LONG _M_lock; + inline void _M_initialize() + { _M_lock = OpenLocalSemaphore(1); } + inline void _M_destroy() + { CloseLocalSemaphore(_M_lock); } inline void _M_acquire_lock() - { - if (!_M_sem) - _M_initialize(); - _M_acquire_lock_nodemand(); - } - inline void _M_release_lock() - { - STATUS __s; - semGive (_M_sem, WAIT_FOREVER); - assert(__s == OK); - } -# else //*ty 11/24/2001 - added configuration check -# error "Unknown thread facility configuration" -# endif + { WaitOnLocalSemaphore(_M_lock); } + inline void _M_release_lock() { SignalLocalSemaphore(_M_lock); } +# else //*ty 11/24/2001 - added configuration check +# error "Unknown thread facility configuration" +# endif #else /* No threads */ inline void _M_initialize() {} inline void _M_destroy() {} @@ -478,23 +423,10 @@ #endif // _STLP_PTHREADS }; - -#if defined (_STLP_THREADS) && defined (_STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION) -// for use in _STLP_mutex, 
our purposes do not require ondemand initialization -// also, mutex_base may use some hacks to determine uninitialized state by zero data, which only works for globals. -class _STLP_CLASS_DECLSPEC _STLP_mutex_nodemand : public _STLP_mutex_base { - inline void _M_acquire_lock() { - _M_acquire_lock_nodemand(); - } -}; -#else -typedef _STLP_mutex_base _STLP_mutex_nodemand; -#endif - - // Locking class. The constructor initializes the lock, the destructor destroys it. // Well - behaving class, does not need static initializer -class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_nodemand { + +class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_base { public: inline _STLP_mutex () { _M_initialize(); } inline ~_STLP_mutex () { _M_destroy(); } @@ -503,122 +435,261 @@ void operator=(const _STLP_mutex&); }; +// A locking class that uses _STLP_STATIC_MUTEX. The constructor takes +// a reference to an _STLP_STATIC_MUTEX, and acquires a lock. The destructor +// releases the lock. +// It's not clear that this is exactly the right functionality. +// It will probably change in the future. +struct _STLP_CLASS_DECLSPEC _STLP_auto_lock { + _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock) + { _M_lock._M_acquire_lock(); } + ~_STLP_auto_lock() + { _M_lock._M_release_lock(); } + +private: + _STLP_STATIC_MUTEX& _M_lock; + void operator=(const _STLP_auto_lock&); + _STLP_auto_lock(const _STLP_auto_lock&); +}; /* * Class _Refcount_Base provides a type, __stl_atomic_t, a data member, * _M_ref_count, and member functions _M_incr and _M_decr, which perform - * atomic preincrement/predecrement. The constructor initializes + * atomic preincrement/predecrement. The constructor initializes * _M_ref_count. */ -struct _STLP_CLASS_DECLSPEC _Refcount_Base -{ +class _STLP_CLASS_DECLSPEC _Refcount_Base { // The data member _M_ref_count - volatile __stl_atomic_t _M_ref_count; +#if defined (__DMC__) +public: +#endif + _STLP_VOLATILE __stl_atomic_t _M_ref_count; -# if !defined (_STLP_ATOMIC_EXCHANGE) +#if defined (_STLP_THREADS) && \ + (!defined (_STLP_ATOMIC_INCREMENT) || !defined (_STLP_ATOMIC_DECREMENT) || \ + (defined (_STLP_WIN32_VERSION) && (_STLP_WIN32_VERSION <= 0x0400))) +# define _STLP_USE_MUTEX _STLP_mutex _M_mutex; -# endif +#endif + public: // Constructor _Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {} // _M_incr and _M_decr -# if defined (_STLP_THREADS) && defined (_STLP_ATOMIC_EXCHANGE) - void _M_incr() { _STLP_ATOMIC_INCREMENT((__stl_atomic_t*)&_M_ref_count); } - void _M_decr() { _STLP_ATOMIC_DECREMENT((__stl_atomic_t*)&_M_ref_count); } -# elif defined(_STLP_THREADS) - void _M_incr() { - _M_mutex._M_acquire_lock(); - ++_M_ref_count; - _M_mutex._M_release_lock(); +#if defined (_STLP_THREADS) +# if !defined (_STLP_USE_MUTEX) + __stl_atomic_t _M_incr() { return _STLP_ATOMIC_INCREMENT(&_M_ref_count); } + __stl_atomic_t _M_decr() { return _STLP_ATOMIC_DECREMENT(&_M_ref_count); } +# else +# undef _STLP_USE_MUTEX + __stl_atomic_t _M_incr() { + _STLP_auto_lock l(_M_mutex); + return ++_M_ref_count; } - void _M_decr() { - _M_mutex._M_acquire_lock(); - --_M_ref_count; - _M_mutex._M_release_lock(); + __stl_atomic_t _M_decr() { + _STLP_auto_lock l(_M_mutex); + return --_M_ref_count; } -# else /* No threads */ - void _M_incr() { ++_M_ref_count; } - void _M_decr() { --_M_ref_count; } -# endif +# endif +#else /* No threads */ + __stl_atomic_t _M_incr() { return ++_M_ref_count; } + __stl_atomic_t _M_decr() { return --_M_ref_count; } +#endif }; -// Atomic swap on unsigned long -// This is 
guaranteed to behave as though it were atomic only if all -// possibly concurrent updates use _Atomic_swap. -// In some cases the operation is emulated with a lock. -# if defined (_STLP_THREADS) -# ifdef _STLP_ATOMIC_EXCHANGE -inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) { - return (__stl_atomic_t) _STLP_ATOMIC_EXCHANGE(__p,__q); -} -# elif defined(_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || defined(_STLP_USE_PTHREAD_SPINLOCK) -// We use a template here only to get a unique initialized instance. -template -struct _Swap_lock_struct { - static _STLP_STATIC_MUTEX _S_swap_lock; -}; +_STLP_END_NAMESPACE +#ifdef __SYMBIAN32__WSD__ +_STLP_DECLSPEC std::_STLP_STATIC_MUTEX& exp_get_threads_S_swap_lock(); +_STLP_DECLSPEC std::_STLP_STATIC_MUTEX& exp_get_threads_0_S_swap_lock(); +#endif -// This should be portable, but performance is expected -// to be quite awful. This really needs platform specific -// code. -inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) { - _Swap_lock_struct<0>::_S_swap_lock._M_acquire_lock(); +_STLP_BEGIN_NAMESPACE + +/* Atomic swap on __stl_atomic_t + * This is guaranteed to behave as though it were atomic only if all + * possibly concurrent updates use _Atomic_swap. + * In some cases the operation is emulated with a lock. + * Idem for _Atomic_swap_ptr + */ +/* Helper struct to handle following cases: + * - on platforms where sizeof(__stl_atomic_t) == sizeof(void*) atomic + * exchange can be done on pointers + * - on platform without atomic operation swap is done in a critical section, + * portable but inefficient. + */ +template +class _Atomic_swap_struct { +public: +#if defined (_STLP_THREADS) && \ + !defined (_STLP_ATOMIC_EXCHANGE) && \ + (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \ + defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS)) +# define _STLP_USE_ATOMIC_SWAP_MUTEX +#if !defined(__SYMBIAN32__WSD__) + static _STLP_STATIC_MUTEX _S_swap_lock; +#else + static _STLP_STATIC_MUTEX& get_threads_S_swap_lock() + { return ::exp_get_threads_S_swap_lock(); } +# define _S_swap_lock get_threads_S_swap_lock() +#endif +#endif + + static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) { +#if defined (_STLP_THREADS) +# if defined (_STLP_ATOMIC_EXCHANGE) + return _STLP_ATOMIC_EXCHANGE(__p, __q); +# elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX) + _S_swap_lock._M_acquire_lock(); __stl_atomic_t __result = *__p; *__p = __q; - _Swap_lock_struct<0>::_S_swap_lock._M_release_lock(); + _S_swap_lock._M_release_lock(); return __result; -} -# endif // _STLP_PTHREADS || _STLP_UITHREADS || _STLP_OS2THREADS || _STLP_USE_PTHREAD_SPINLOCK -# else // !_STLP_THREADS -/* no threads */ -static inline __stl_atomic_t _STLP_CALL -_Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) { +# else +# error Missing atomic swap implementation +# endif +#else + /* no threads */ __stl_atomic_t __result = *__p; *__p = __q; return __result; +#endif // _STLP_THREADS + } + + static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) { +#if defined (_STLP_THREADS) +# if defined (_STLP_ATOMIC_EXCHANGE_PTR) + return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q); +# elif defined (_STLP_ATOMIC_EXCHANGE) + _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*)) + return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p), + __REINTERPRET_CAST(__stl_atomic_t, __q)) + ); +# elif 
defined (_STLP_USE_ATOMIC_SWAP_MUTEX) + _S_swap_lock._M_acquire_lock(); + void *__result = *__p; + *__p = __q; + _S_swap_lock._M_release_lock(); + return __result; +# else +# error Missing pointer atomic swap implementation +# endif +#else + /* no thread */ + void *__result = *__p; + *__p = __q; + return __result; +#endif + } +}; +#if defined(__SYMBIAN32__WSD__) +# undef _S_swap_lock +#endif + +_STLP_TEMPLATE_NULL +class _Atomic_swap_struct<0> { +public: +#if defined (_STLP_THREADS) && \ + (!defined (_STLP_ATOMIC_EXCHANGE) || !defined (_STLP_ATOMIC_EXCHANGE_PTR)) && \ + (defined (_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || \ + defined (_STLP_USE_PTHREAD_SPINLOCK) || defined (_STLP_NWTHREADS)) +# define _STLP_USE_ATOMIC_SWAP_MUTEX +#if !defined(__SYMBIAN32__WSD__) + static _STLP_STATIC_MUTEX _S_swap_lock; +#else + static _STLP_STATIC_MUTEX& get_threads_0_S_swap_lock() + { return ::exp_get_threads_0_S_swap_lock(); } +# define _S_swap_lock get_threads_0_S_swap_lock() +#endif +#endif + + static __stl_atomic_t _S_swap(_STLP_VOLATILE __stl_atomic_t* __p, __stl_atomic_t __q) { +#if defined (_STLP_THREADS) +# if defined (_STLP_ATOMIC_EXCHANGE) + return _STLP_ATOMIC_EXCHANGE(__p, __q); +# elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX) + /* This should be portable, but performance is expected + * to be quite awful. This really needs platform specific + * code. + */ + _S_swap_lock._M_acquire_lock(); + __stl_atomic_t __result = *__p; + *__p = __q; + _S_swap_lock._M_release_lock(); + return __result; +# else +# error Missing atomic swap implementation +# endif +#else + /* no threads */ + __stl_atomic_t __result = *__p; + *__p = __q; + return __result; +#endif // _STLP_THREADS + } + + static void* _S_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) { +#if defined (_STLP_THREADS) +# if defined (_STLP_ATOMIC_EXCHANGE_PTR) + return _STLP_ATOMIC_EXCHANGE_PTR(__p, __q); +# elif defined (_STLP_ATOMIC_EXCHANGE) + _STLP_STATIC_ASSERT(sizeof(__stl_atomic_t) == sizeof(void*)) + return __REINTERPRET_CAST(void*, _STLP_ATOMIC_EXCHANGE(__REINTERPRET_CAST(volatile __stl_atomic_t*, __p), + __REINTERPRET_CAST(__stl_atomic_t, __q)) + ); +# elif defined (_STLP_USE_ATOMIC_SWAP_MUTEX) + _S_swap_lock._M_acquire_lock(); + void *__result = *__p; + *__p = __q; + _S_swap_lock._M_release_lock(); + return __result; +# else +# error Missing pointer atomic swap implementation +# endif +#else + /* no thread */ + void *__result = *__p; + *__p = __q; + return __result; +#endif + } +}; +#if defined(__SYMBIAN32__WSD__) +# undef _S_swap_lock +#endif + +#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300) +# pragma warning (push) +# pragma warning (disable : 4189) //__use_ptr_atomic_swap initialized but not used +#endif + +inline __stl_atomic_t _STLP_CALL _Atomic_swap(_STLP_VOLATILE __stl_atomic_t * __p, __stl_atomic_t __q) { + const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*); + return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap(__p, __q); } -# endif // _STLP_THREADS -// A locking class that uses _STLP_STATIC_MUTEX. The constructor takes -// a reference to an _STLP_STATIC_MUTEX, and acquires a lock. The destructor -// releases the lock. 
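When neither _STLP_ATOMIC_EXCHANGE nor _STLP_ATOMIC_EXCHANGE_PTR exists, the specializations above serialize the swap through one shared mutex. A self-contained sketch of that fallback with std::mutex — illustration of the technique only; the names are hypothetical, and STLport's real code uses _STLP_STATIC_MUTEX as shown above:

#include <mutex>

// One lock guards every emulated swap, mirroring the single _S_swap_lock
// shared by _S_swap and _S_swap_ptr above.
static std::mutex swap_lock;

long emulated_swap(volatile long* p, long q) {
  std::lock_guard<std::mutex> guard(swap_lock);
  long former = *p;   // read the former value under the lock
  *p = q;             // store the new value
  return former;      // appears atomic to every caller that takes the lock
}

As the header comment notes, this is only as atomic as its callers are disciplined: every concurrent update of *p must go through the same lock, and the locked round trip is far slower than a native exchange instruction.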
+inline void* _STLP_CALL _Atomic_swap_ptr(void* _STLP_VOLATILE* __p, void* __q) { + const int __use_ptr_atomic_swap = sizeof(__stl_atomic_t) == sizeof(void*); + return _Atomic_swap_struct<__use_ptr_atomic_swap>::_S_swap_ptr(__p, __q); +} -struct _STLP_CLASS_DECLSPEC _STLP_auto_lock -{ - _STLP_STATIC_MUTEX& _M_lock; - - _STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock) - { _M_lock._M_acquire_lock(); } - ~_STLP_auto_lock() { _M_lock._M_release_lock(); } +#if defined (_STLP_MSVC) && (_STLP_MSVC == 1300) +# pragma warning (pop) +#endif -private: - void operator=(const _STLP_auto_lock&); - _STLP_auto_lock(const _STLP_auto_lock&); -}; - -typedef _STLP_auto_lock _STLP_mutex_lock; - -#ifdef _STLP_BETHREADS - +#if defined (_STLP_BETHREADS) template -struct _STLP_beos_static_lock_data -{ - static bool is_init; - struct mutex_t : public _STLP_mutex - { - mutex_t() - { - _STLP_beos_static_lock_data<0>::is_init = true; - } - ~mutex_t() - { - _STLP_beos_static_lock_data<0>::is_init = false; - } - }; - static mutex_t mut; +struct _STLP_beos_static_lock_data { + static bool is_init; + struct mutex_t : public _STLP_mutex { + mutex_t() + { _STLP_beos_static_lock_data<0>::is_init = true; } + ~mutex_t() + { _STLP_beos_static_lock_data<0>::is_init = false; } + }; + static mutex_t mut; }; template @@ -626,41 +697,36 @@ template typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut; - -inline void _STLP_mutex_base::_M_acquire_lock() -{ - if(sem == 0) - { - // we need to initialise on demand here - // to prevent race conditions use our global - // mutex if it's available: - if(_STLP_beos_static_lock_data<0>::is_init) - { - _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut); - if(sem == 0) _M_initialize(); - } - else - { - // no lock available, we must still be - // in startup code, THERE MUST BE ONE THREAD - // ONLY active at this point. - _M_initialize(); - } - } - _M_acquire_lock_nodemand(); +inline void _STLP_mutex_base::_M_acquire_lock() { + if (sem == 0) { + // we need to initialise on demand here + // to prevent race conditions use our global + // mutex if it's available: + if (_STLP_beos_static_lock_data<0>::is_init) { + _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut); + if (sem == 0) _M_initialize(); + } + else { + // no lock available, we must still be + // in startup code, THERE MUST BE ONE THREAD + // ONLY active at this point. + _M_initialize(); + } + } + status_t t; + t = acquire_sem(sem); + assert(t == B_NO_ERROR); } - #endif _STLP_END_NAMESPACE -# if !defined (_STLP_LINK_TIME_INSTANTIATION) +#if !defined (_STLP_LINK_TIME_INSTANTIATION) # include -# endif +#endif #endif /* _STLP_INTERNAL_THREADS_H */ // Local Variables: // mode:C++ // End: -
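A closing usage sketch: scoped locking and mutex-based reference counting in the style of the _STLP_auto_lock and _Refcount_Base mutex fallback defined in this header. std::mutex stands in for _STLP_STATIC_MUTEX; illustration only, not STLport source:

#include <mutex>

// RAII lock in the style of _STLP_auto_lock: acquire in the constructor,
// release in the destructor, non-copyable.
class auto_lock {
public:
  explicit auto_lock(std::mutex& m) : _m(m) { _m.lock(); }
  ~auto_lock() { _m.unlock(); }
private:
  std::mutex& _m;
  auto_lock(const auto_lock&);            // not copyable
  void operator=(const auto_lock&);
};

// Reference counter in the style of _Refcount_Base when no atomic
// increment/decrement macro is available: a mutex guards the counter,
// and _M_incr/_M_decr return the new count just like the atomic path.
class refcount_base {
public:
  explicit refcount_base(long n) : _ref_count(n) {}
  long incr() { auto_lock l(_lock); return ++_ref_count; }
  long decr() { auto_lock l(_lock); return --_ref_count; }
private:
  long _ref_count;
  std::mutex _lock;
};

int main() {
  refcount_base rc(1);
  rc.incr();                 // count: 2
  return rc.decr() == 1 ? 0 : 1;
}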