williamr@4
|
1 |
/*
|
williamr@4
|
2 |
* Copyright (c) 1997-1999
|
williamr@4
|
3 |
* Silicon Graphics Computer Systems, Inc.
|
williamr@4
|
4 |
*
|
williamr@4
|
5 |
* Copyright (c) 1999
|
williamr@4
|
6 |
* Boris Fomitchev
|
williamr@4
|
7 |
*
|
williamr@4
|
8 |
* This material is provided "as is", with absolutely no warranty expressed
|
williamr@4
|
9 |
* or implied. Any use is at your own risk.
|
williamr@4
|
10 |
*
|
williamr@4
|
11 |
* Permission to use or copy this software for any purpose is hereby granted
|
williamr@4
|
12 |
* without fee, provided the above notices are retained on all copies.
|
williamr@4
|
13 |
* Permission to modify the code and to distribute modified code is granted,
|
williamr@4
|
14 |
* provided the above notices are retained, and a notice that the code was
|
williamr@4
|
15 |
* modified is included with the above copyright notice.
|
williamr@4
|
16 |
*
|
williamr@4
|
17 |
*/
|
williamr@4
|
18 |
|
williamr@4
|
19 |
// WARNING: This is an internal header file, included by other C++
|
williamr@4
|
20 |
// standard library headers. You should not attempt to use this header
|
williamr@4
|
21 |
// file directly.
|
williamr@4
|
22 |
// Stl_config.h should be included before this file.
|
williamr@4
|
23 |
|
williamr@4
|
24 |
|
williamr@4
|
25 |
#ifndef _STLP_INTERNAL_THREADS_H
|
williamr@4
|
26 |
#define _STLP_INTERNAL_THREADS_H
|
williamr@4
|
27 |
|
williamr@4
|
28 |
// Supported threading models are native SGI, pthreads, uithreads
|
williamr@4
|
29 |
// (similar to pthreads, but based on an earlier draft of the Posix
|
williamr@4
|
30 |
// threads standard), and Win32 threads. Uithread support by Jochen
|
williamr@4
|
31 |
// Schlick, 1999, and Solaris threads generalized to them.
|
williamr@4
|
32 |
|
williamr@4
|
33 |
#ifndef _STLP_CONFIG_H
|
williamr@4
|
34 |
#include <stl/_config.h>
|
williamr@4
|
35 |
#endif
|
williamr@4
|
36 |
|
williamr@4
|
37 |
# if ! defined (_STLP_CSTDDEF)
|
williamr@4
|
38 |
# include <cstddef>
|
williamr@4
|
39 |
# endif
|
williamr@4
|
40 |
|
williamr@4
|
41 |
# if ! defined (_STLP_CSTDLIB)
|
williamr@4
|
42 |
# include <cstdlib>
|
williamr@4
|
43 |
# endif
|
williamr@4
|
44 |
|
williamr@4
|
45 |
// On SUN and Mac OS X gcc, zero-initialization works just fine...
|
williamr@4
|
46 |
# if defined (__sun) || ( defined(__GNUC__) && defined(__APPLE__) )
|
williamr@4
|
47 |
# define _STLP_MUTEX_INITIALIZER
|
williamr@4
|
48 |
# endif
|
williamr@4
|
49 |
|
williamr@4
|
50 |
# if defined (_STLP_WIN32) || defined (__sgi) || defined (_STLP_SPARC_SOLARIS_THREADS)
|
williamr@4
|
51 |
typedef long __stl_atomic_t;
|
williamr@4
|
52 |
# else
|
williamr@4
|
53 |
# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
|
williamr@4
|
54 |
// using _STLP_VENDOR_CSTD::size_t;
|
williamr@4
|
55 |
using namespace _STLP_VENDOR_CSTD;
|
williamr@4
|
56 |
# endif
|
williamr@4
|
57 |
typedef size_t __stl_atomic_t;
|
williamr@4
|
58 |
#endif
|
williamr@4
|
59 |
|
williamr@4
|
60 |
# if defined(_STLP_SGI_THREADS)
|
williamr@4
|
61 |
# include <mutex.h>
|
williamr@4
|
62 |
// Hack for SGI o32 compilers.
|
williamr@4
|
63 |
#if !defined(__add_and_fetch) && \
|
williamr@4
|
64 |
(__mips < 3 || !(defined (_ABIN32) || defined(_ABI64)))
|
williamr@4
|
65 |
# define __add_and_fetch(__l,__v) add_then_test((unsigned long*)__l,__v)
|
williamr@4
|
66 |
# define __test_and_set(__l,__v) test_and_set(__l,__v)
|
williamr@4
|
67 |
#endif /* o32 */
|
williamr@4
|
68 |
|
williamr@4
|
69 |
# if __mips < 3 || !(defined (_ABIN32) || defined(_ABI64))
|
williamr@4
|
70 |
# define _STLP_ATOMIC_EXCHANGE(__p, __q) test_and_set(__p, __q)
|
williamr@4
|
71 |
# else
|
williamr@4
|
72 |
# define _STLP_ATOMIC_EXCHANGE(__p, __q) __test_and_set((unsigned long*)__p, (unsigned long)__q)
|
williamr@4
|
73 |
# endif
|
williamr@4
|
74 |
|
williamr@4
|
75 |
# define _STLP_ATOMIC_INCREMENT(__x) __add_and_fetch(__x, 1)
|
williamr@4
|
76 |
# define _STLP_ATOMIC_DECREMENT(__x) __add_and_fetch(__x, (size_t) -1)
|
williamr@4
|
77 |
|
williamr@4
|
78 |
# elif defined (__GNUC__) && defined (__i386__) && defined (__unix__) && defined (_STLP_USE_INLINE_X86_SPINLOCK)
|
williamr@4
|
79 |
|
williamr@4
|
80 |
// gcc on i386 linux, freebsd, etc.
|
williamr@4
|
81 |
|
williamr@4
|
82 |
// This enables the memory caching on x86 linux. It is critical for SMP
|
williamr@4
|
83 |
// without it the performance is DISMAL!
|
williamr@4
|
84 |
// Atomically exchange the value at *target with source, returning the
// previous contents of *target.
//
// The operand is referenced in memory (not copied through a register),
// because a load/modify/store round-trip would destroy the atomicity of
// the exchange; on x86 the xchgl instruction carries an implicit LOCK.
// The "m" constraints on target keep gcc from optimizing the memory
// access away. Atomic effect, in C terms:
//     temp = source; source = (int)*target; (int)*target = temp;
//     return source;
static inline unsigned long __xchg(volatile __stl_atomic_t* target, int source)
{
  __asm__ __volatile__("xchgl %1,%0"
                       :"=m" (*(volatile long *) target), "=r" (source)
                       :"m" (*(volatile long *) target), "r" (source) );
  return source;
}
|
williamr@4
|
107 |
|
williamr@4
|
108 |
// Atomically increment *__x using a lock-prefixed incl.
// The operand stays in memory (the "m" constraints) to preserve the
// atomic nature of the operation. Equivalent atomic effect: ++(*__x);
static inline void __inc_and_fetch(volatile __stl_atomic_t* __x)
{
  __asm__ __volatile__(
      "lock; incl %0"
      :"=m" (*__x)
      :"m" (*__x) );
}
|
williamr@4
|
121 |
// Atomically decrement *__x using a lock-prefixed decl.
// The operand stays in memory (the "m" constraints) to preserve the
// atomic nature of the operation. Equivalent atomic effect: --(*__x);
static inline void __dec_and_fetch(volatile __stl_atomic_t* __x)
{
  __asm__ __volatile__(
      "lock; decl %0"
      :"=m" (*__x)
      :"m" (*__x) );
}
|
williamr@4
|
133 |
|
williamr@4
|
134 |
# define _STLP_ATOMIC_EXCHANGE(target, newValue) ((__xchg(target, newValue)))
|
williamr@4
|
135 |
# define _STLP_ATOMIC_INCREMENT(__x) __inc_and_fetch(__x)
|
williamr@4
|
136 |
# define _STLP_ATOMIC_DECREMENT(__x) __dec_and_fetch(__x)
|
williamr@4
|
137 |
|
williamr@4
|
138 |
# elif defined(_STLP_PTHREADS)
|
williamr@4
|
139 |
|
williamr@4
|
140 |
# include <pthread.h>
|
williamr@4
|
141 |
# ifndef _STLP_USE_PTHREAD_SPINLOCK
|
williamr@4
|
142 |
# if defined(PTHREAD_MUTEX_INITIALIZER) && !defined(_STLP_MUTEX_INITIALIZER)
|
williamr@4
|
143 |
# define _STLP_MUTEX_INITIALIZER = { PTHREAD_MUTEX_INITIALIZER }
|
williamr@4
|
144 |
# endif
|
williamr@4
|
145 |
|
williamr@4
|
146 |
//HPUX variants have (on some platforms optional) non-standard "DCE" pthreads impl
|
williamr@4
|
147 |
# if defined(_DECTHREADS_) && (defined(_PTHREAD_USE_D4) || defined(__hpux)) && !defined(_CMA_SUPPRESS_EXTERNALS_)
|
williamr@4
|
148 |
# define _STLP_PTHREAD_ATTR_DEFAULT pthread_mutexattr_default
|
williamr@4
|
149 |
# else
|
williamr@4
|
150 |
# define _STLP_PTHREAD_ATTR_DEFAULT 0
|
williamr@4
|
151 |
# endif
|
williamr@4
|
152 |
# endif // !_STLP_USE_PTHREAD_SPINLOCK
|
williamr@4
|
153 |
|
williamr@4
|
154 |
# elif defined(_STLP_WIN32THREADS)
|
williamr@4
|
155 |
# if !defined (_STLP_WINDOWS_H_INCLUDED) && ! defined (_WINDOWS_H)
|
williamr@4
|
156 |
# if ! (defined ( _STLP_MSVC ) || defined (__BORLANDC__) || defined (__ICL) || defined (__WATCOMC__) || defined (__MINGW32__) || defined (__DMC__))
|
williamr@4
|
157 |
# ifdef _STLP_USE_MFC
|
williamr@4
|
158 |
# include <afx.h>
|
williamr@4
|
159 |
# else
|
williamr@4
|
160 |
# include <windows.h>
|
williamr@4
|
161 |
# endif
|
williamr@4
|
162 |
# define _STLP_WINDOWS_H_INCLUDED
|
williamr@4
|
163 |
# else
|
williamr@4
|
164 |
// This section serves as a replacement for windows.h header for Visual C++
|
williamr@4
|
165 |
extern "C" {
|
williamr@4
|
166 |
# if (defined(_M_MRX000) || defined(_M_ALPHA) \
|
williamr@4
|
167 |
|| (defined(_M_PPC) && (_MSC_VER >= 1000))) && !defined(RC_INVOKED)
|
williamr@4
|
168 |
# define InterlockedIncrement _InterlockedIncrement
|
williamr@4
|
169 |
# define InterlockedDecrement _InterlockedDecrement
|
williamr@4
|
170 |
# define InterlockedExchange _InterlockedExchange
|
williamr@4
|
171 |
# define _STLP_STDCALL
|
williamr@4
|
172 |
# else
|
williamr@4
|
173 |
# ifdef _MAC
|
williamr@4
|
174 |
# define _STLP_STDCALL _cdecl
|
williamr@4
|
175 |
# else
|
williamr@4
|
176 |
# define _STLP_STDCALL __stdcall
|
williamr@4
|
177 |
# endif
|
williamr@4
|
178 |
# endif
|
williamr@4
|
179 |
|
williamr@4
|
180 |
#if (_MSC_VER >= 1300) || defined (_STLP_NEW_PLATFORM_SDK)
|
williamr@4
|
181 |
_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long volatile *);
|
williamr@4
|
182 |
_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long volatile *);
|
williamr@4
|
183 |
_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long volatile *, long);
|
williamr@4
|
184 |
#else
|
williamr@4
|
185 |
// boris : for the latest SDK, you may actually need the other version of the declaration (above)
|
williamr@4
|
186 |
// even for earlier VC++ versions. There is no way to tell SDK versions apart, sorry ...
|
williamr@4
|
187 |
_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedIncrement(long*);
|
williamr@4
|
188 |
_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedDecrement(long*);
|
williamr@4
|
189 |
_STLP_IMPORT_DECLSPEC long _STLP_STDCALL InterlockedExchange(long*, long);
|
williamr@4
|
190 |
#endif
|
williamr@4
|
191 |
|
williamr@4
|
192 |
_STLP_IMPORT_DECLSPEC void _STLP_STDCALL Sleep(unsigned long);
|
williamr@4
|
193 |
_STLP_IMPORT_DECLSPEC void _STLP_STDCALL OutputDebugStringA( const char* lpOutputString );
|
williamr@4
|
194 |
|
williamr@4
|
195 |
#ifdef _STLP_DEBUG
|
williamr@4
|
196 |
typedef unsigned long DWORD;
|
williamr@4
|
197 |
_STLP_IMPORT_DECLSPEC DWORD _STLP_STDCALL GetCurrentThreadId();
|
williamr@4
|
198 |
#endif /* _STLP_DEBUG */
|
williamr@4
|
199 |
|
williamr@4
|
200 |
# if defined (InterlockedIncrement)
|
williamr@4
|
201 |
# pragma intrinsic(_InterlockedIncrement)
|
williamr@4
|
202 |
# pragma intrinsic(_InterlockedDecrement)
|
williamr@4
|
203 |
# pragma intrinsic(_InterlockedExchange)
|
williamr@4
|
204 |
# endif
|
williamr@4
|
205 |
} /* extern "C" */
|
williamr@4
|
206 |
|
williamr@4
|
207 |
# endif /* STL_MSVC */
|
williamr@4
|
208 |
|
williamr@4
|
209 |
# define _STLP_WINDOWS_H_INCLUDED
|
williamr@4
|
210 |
|
williamr@4
|
211 |
# endif /* _STLP_WIN32 */
|
williamr@4
|
212 |
|
williamr@4
|
213 |
# ifndef _STLP_ATOMIC_INCREMENT
|
williamr@4
|
214 |
# define _STLP_ATOMIC_INCREMENT(__x) InterlockedIncrement((long*)__x)
|
williamr@4
|
215 |
# define _STLP_ATOMIC_DECREMENT(__x) InterlockedDecrement((long*)__x)
|
williamr@4
|
216 |
# define _STLP_ATOMIC_EXCHANGE(__x, __y) InterlockedExchange((long*)__x, (long)__y)
|
williamr@4
|
217 |
# endif
|
williamr@4
|
218 |
# elif defined(__DECC) || defined(__DECCXX)
|
williamr@4
|
219 |
# include <machine/builtins.h>
|
williamr@4
|
220 |
# define _STLP_ATOMIC_EXCHANGE __ATOMIC_EXCH_LONG
|
williamr@4
|
221 |
# define _STLP_ATOMIC_INCREMENT(__x) __ATOMIC_ADD_LONG(__x, 1)
|
williamr@4
|
222 |
# define _STLP_ATOMIC_DECREMENT(__x) __ATOMIC_ADD_LONG(__x, -1)
|
williamr@4
|
223 |
# elif defined(_STLP_SPARC_SOLARIS_THREADS)
|
williamr@4
|
224 |
# include <stl/_sparc_atomic.h>
|
williamr@4
|
225 |
# elif defined (_STLP_UITHREADS)
|
williamr@4
|
226 |
// this inclusion is a potential hazard: it may bring up all sorts
|
williamr@4
|
227 |
// of old-style headers. Let's assume vendor already know how
|
williamr@4
|
228 |
// to deal with that.
|
williamr@4
|
229 |
# include <ctime>
|
williamr@4
|
230 |
# if defined (_STLP_USE_NAMESPACES) && ! defined (_STLP_VENDOR_GLOBAL_CSTD)
|
williamr@4
|
231 |
using _STLP_VENDOR_CSTD::time_t;
|
williamr@4
|
232 |
# endif
|
williamr@4
|
233 |
# include <synch.h>
|
williamr@4
|
234 |
# include <cstdio>
|
williamr@4
|
235 |
# include <stl/_cwchar.h>
|
williamr@4
|
236 |
# elif defined (_STLP_BETHREADS)
|
williamr@4
|
237 |
# include <OS.h>
|
williamr@4
|
238 |
#include <cassert>
|
williamr@4
|
239 |
#include <stdio.h>
|
williamr@4
|
240 |
# define _STLP_MUTEX_INITIALIZER = { 0 }
|
williamr@4
|
241 |
#elif defined(_STLP_OS2THREADS)
# ifdef __GNUC__
#  define INCL_DOSSEMAPHORES
#  include <os2.h>
# else
// This section serves to replace os2.h for VisualAge C++
typedef unsigned long ULONG;
#ifndef __HEV__ /* INCL_SEMAPHORE may also define HEV */
#define __HEV__
typedef ULONG HEV;
typedef HEV* PHEV;
#endif
typedef ULONG APIRET;
typedef ULONG HMTX;
typedef HMTX* PHMTX;
typedef const char* PCSZ;
typedef ULONG BOOL32;
APIRET _System DosCreateMutexSem(PCSZ pszName, PHEV phev, ULONG flAttr, BOOL32 fState);
APIRET _System DosRequestMutexSem(HMTX hmtx, ULONG ulTimeout);
APIRET _System DosReleaseMutexSem(HMTX hmtx);
APIRET _System DosCloseMutexSem(HMTX hmtx);
// Fix: no trailing semicolon -- the expansion precedes the ';' of the
// declaration it initializes. Every other branch defines this macro
// without one; the stray ';' produced ";;" at the use sites.
# define _STLP_MUTEX_INITIALIZER = { 0 }
# endif /* GNUC */
|
williamr@4
|
264 |
# elif defined(_STLP_VXWORKS_THREADS)
|
williamr@4
|
265 |
# include "semLib.h"
|
williamr@4
|
266 |
# endif
|
williamr@4
|
267 |
|
williamr@4
|
268 |
# ifndef _STLP_MUTEX_INITIALIZER
|
williamr@4
|
269 |
# if defined(_STLP_ATOMIC_EXCHANGE)
|
williamr@4
|
270 |
// we are using our own spinlock.
|
williamr@4
|
271 |
# define _STLP_MUTEX_INITIALIZER = { 0 }
|
williamr@4
|
272 |
# elif defined(_STLP_UITHREADS)
|
williamr@4
|
273 |
// known case
|
williamr@4
|
274 |
# define _STLP_MUTEX_INITIALIZER = { DEFAULTMUTEX }
|
williamr@4
|
275 |
# else
|
williamr@4
|
276 |
// we do not have static initializer available. therefore, on-demand synchronization is needed.
|
williamr@4
|
277 |
# define _STLP_MUTEX_INITIALIZER
|
williamr@4
|
278 |
# define _STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION
|
williamr@4
|
279 |
# endif
|
williamr@4
|
280 |
# endif
|
williamr@4
|
281 |
|
williamr@4
|
282 |
_STLP_BEGIN_NAMESPACE
|
williamr@4
|
283 |
|
williamr@4
|
284 |
#ifndef _STLP_USE_PTHREAD_SPINLOCK
|
williamr@4
|
285 |
// Helper struct. This is a workaround for various compilers that don't
|
williamr@4
|
286 |
// handle static variables in inline functions properly.
|
williamr@4
|
287 |
template <int __inst>
|
williamr@4
|
288 |
struct _STLP_mutex_spin {
|
williamr@4
|
289 |
enum { __low_max = 30, __high_max = 1000 };
|
williamr@4
|
290 |
// Low if we suspect uniprocessor, high for multiprocessor.
|
williamr@4
|
291 |
static unsigned __max;
|
williamr@4
|
292 |
static unsigned __last;
|
williamr@4
|
293 |
static void _STLP_CALL _M_do_lock(volatile __stl_atomic_t* __lock);
|
williamr@4
|
294 |
static void _STLP_CALL _S_nsec_sleep(int __log_nsec);
|
williamr@4
|
295 |
};
|
williamr@4
|
296 |
#endif // !_STLP_USE_PTHREAD_SPINLOCK
|
williamr@4
|
297 |
|
williamr@4
|
298 |
|
williamr@4
|
299 |
// Locking class. Note that this class *does not have a constructor*.
|
williamr@4
|
300 |
// It must be initialized either statically, with _STLP_MUTEX_INITIALIZER,
|
williamr@4
|
301 |
// or dynamically, by explicitly calling the _M_initialize member function.
|
williamr@4
|
302 |
// (This is similar to the ways that a pthreads mutex can be initialized.)
|
williamr@4
|
303 |
// There are explicit member functions for acquiring and releasing the lock.
|
williamr@4
|
304 |
|
williamr@4
|
305 |
// There is no constructor because static initialization is essential for
|
williamr@4
|
306 |
// some uses, and only a class aggregate (see section 8.5.1 of the C++
|
williamr@4
|
307 |
// standard) can be initialized that way. That means we must have no
|
williamr@4
|
308 |
// constructors, no base classes, no virtual functions, and no private or
|
williamr@4
|
309 |
// protected members.
|
williamr@4
|
310 |
|
williamr@4
|
311 |
// For non-static cases, clients should use _STLP_mutex.
|
williamr@4
|
312 |
|
williamr@4
|
313 |
struct _STLP_CLASS_DECLSPEC _STLP_mutex_base
|
williamr@4
|
314 |
{
|
williamr@4
|
315 |
#if defined(_STLP_ATOMIC_EXCHANGE) || defined(_STLP_SGI_THREADS)
|
williamr@4
|
316 |
// It should be relatively easy to get this to work on any modern Unix.
|
williamr@4
|
317 |
volatile __stl_atomic_t _M_lock;
|
williamr@4
|
318 |
#endif
|
williamr@4
|
319 |
|
williamr@4
|
320 |
#ifdef _STLP_THREADS
|
williamr@4
|
321 |
|
williamr@4
|
322 |
# ifdef _STLP_ATOMIC_EXCHANGE
|
williamr@4
|
323 |
inline void _M_initialize() { _M_lock=0; }
|
williamr@4
|
324 |
inline void _M_destroy() {}
|
williamr@4
|
325 |
|
williamr@4
|
326 |
void _M_acquire_lock() {
|
williamr@4
|
327 |
_STLP_mutex_spin<0>::_M_do_lock(&_M_lock);
|
williamr@4
|
328 |
}
|
williamr@4
|
329 |
|
williamr@4
|
330 |
inline void _M_release_lock() {
|
williamr@4
|
331 |
volatile __stl_atomic_t* __lock = &_M_lock;
|
williamr@4
|
332 |
# if defined(_STLP_SGI_THREADS) && defined(__GNUC__) && __mips >= 3
|
williamr@4
|
333 |
asm("sync");
|
williamr@4
|
334 |
*__lock = 0;
|
williamr@4
|
335 |
# elif defined(_STLP_SGI_THREADS) && __mips >= 3 \
|
williamr@4
|
336 |
&& (defined (_ABIN32) || defined(_ABI64))
|
williamr@4
|
337 |
__lock_release(__lock);
|
williamr@4
|
338 |
# elif defined (_STLP_SPARC_SOLARIS_THREADS)
|
williamr@4
|
339 |
# if defined (__WORD64) || defined (__arch64__) || defined (__sparcv9) || defined (__sparcv8plus)
|
williamr@4
|
340 |
asm("membar #StoreStore ; membar #LoadStore");
|
williamr@4
|
341 |
# else
|
williamr@4
|
342 |
asm(" stbar ");
|
williamr@4
|
343 |
# endif
|
williamr@4
|
344 |
*__lock = 0;
|
williamr@4
|
345 |
# else
|
williamr@4
|
346 |
*__lock = 0;
|
williamr@4
|
347 |
// This is not sufficient on many multiprocessors, since
|
williamr@4
|
348 |
// writes to protected variables and the lock may be reordered.
|
williamr@4
|
349 |
# endif
|
williamr@4
|
350 |
}
|
williamr@4
|
351 |
# elif defined(_STLP_PTHREADS)
|
williamr@4
|
352 |
# ifdef _STLP_USE_PTHREAD_SPINLOCK
|
williamr@4
|
353 |
pthread_spinlock_t _M_lock;
|
williamr@4
|
354 |
inline void _M_initialize() { pthread_spin_init( &_M_lock, 0 ); }
|
williamr@4
|
355 |
inline void _M_destroy() { pthread_spin_destroy( &_M_lock ); }
|
williamr@4
|
356 |
|
williamr@4
|
357 |
inline void _M_acquire_lock() {
|
williamr@4
|
358 |
// we do not care about race conditions here : there is only one thread at this point
|
williamr@4
|
359 |
if(!_M_lock) pthread_spin_init( &_M_lock, 0 );
|
williamr@4
|
360 |
|
williamr@4
|
361 |
// fbp: here, initialization on demand should happen before the lock
|
williamr@4
|
362 |
// we use simple strategy as we are sure this only happens on initialization
|
williamr@4
|
363 |
pthread_spin_lock( &_M_lock );
|
williamr@4
|
364 |
}
|
williamr@4
|
365 |
|
williamr@4
|
366 |
inline void _M_acquire_lock_nodemand() {
|
williamr@4
|
367 |
pthread_spin_lock( &_M_lock );
|
williamr@4
|
368 |
}
|
williamr@4
|
369 |
inline void _M_release_lock() { pthread_spin_unlock( &_M_lock ); }
|
williamr@4
|
370 |
# else // !_STLP_USE_PTHREAD_SPINLOCK
|
williamr@4
|
371 |
pthread_mutex_t _M_lock;
|
williamr@4
|
372 |
|
williamr@4
|
373 |
inline void _M_initialize() {
|
williamr@4
|
374 |
pthread_mutex_init(&_M_lock,_STLP_PTHREAD_ATTR_DEFAULT);
|
williamr@4
|
375 |
}
|
williamr@4
|
376 |
inline void _M_destroy() {
|
williamr@4
|
377 |
pthread_mutex_destroy(&_M_lock);
|
williamr@4
|
378 |
}
|
williamr@4
|
379 |
inline void _M_acquire_lock_nodemand() {
|
williamr@4
|
380 |
pthread_mutex_lock(&_M_lock);
|
williamr@4
|
381 |
}
|
williamr@4
|
382 |
|
williamr@4
|
383 |
inline void _M_acquire_lock() {
|
williamr@4
|
384 |
# if defined (__hpux) && !defined (PTHREAD_MUTEX_INITIALIZER)
|
williamr@4
|
385 |
if (!_M_lock.field1) _M_initialize();
|
williamr@4
|
386 |
# endif
|
williamr@4
|
387 |
pthread_mutex_lock(&_M_lock);
|
williamr@4
|
388 |
}
|
williamr@4
|
389 |
inline void _M_release_lock() { pthread_mutex_unlock(&_M_lock); }
|
williamr@4
|
390 |
# endif // !_STLP_USE_PTHREAD_SPINLOCK
|
williamr@4
|
391 |
|
williamr@4
|
392 |
# elif defined (_STLP_UITHREADS)
|
williamr@4
|
393 |
mutex_t _M_lock;
|
williamr@4
|
394 |
inline void _M_initialize() {
|
williamr@4
|
395 |
mutex_init(&_M_lock,0,NULL);
|
williamr@4
|
396 |
}
|
williamr@4
|
397 |
inline void _M_destroy() {
|
williamr@4
|
398 |
mutex_destroy(&_M_lock);
|
williamr@4
|
399 |
}
|
williamr@4
|
400 |
inline void _M_acquire_lock() { mutex_lock(&_M_lock); }
|
williamr@4
|
401 |
inline void _M_release_lock() { mutex_unlock(&_M_lock); }
|
williamr@4
|
402 |
|
williamr@4
|
403 |
# elif defined(_STLP_OS2THREADS)
|
williamr@4
|
404 |
HMTX _M_lock;
|
williamr@4
|
405 |
inline void _M_initialize() { DosCreateMutexSem(NULL, &_M_lock, 0, false); }
|
williamr@4
|
406 |
inline void _M_destroy() { DosCloseMutexSem(_M_lock); }
|
williamr@4
|
407 |
inline void _M_acquire_lock_nodemand() {
|
williamr@4
|
408 |
DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
|
williamr@4
|
409 |
}
|
williamr@4
|
410 |
inline void _M_acquire_lock() {
|
williamr@4
|
411 |
if(!_M_lock) _M_initialize();
|
williamr@4
|
412 |
DosRequestMutexSem(_M_lock, SEM_INDEFINITE_WAIT);
|
williamr@4
|
413 |
}
|
williamr@4
|
414 |
inline void _M_release_lock() { DosReleaseMutexSem(_M_lock); }
|
williamr@4
|
415 |
# elif defined(_STLP_BETHREADS)
|
williamr@4
|
416 |
sem_id sem;
|
williamr@4
|
417 |
inline void _M_initialize()
|
williamr@4
|
418 |
{
|
williamr@4
|
419 |
sem = create_sem(1, "STLPort");
|
williamr@4
|
420 |
assert(sem > 0);
|
williamr@4
|
421 |
}
|
williamr@4
|
422 |
inline void _M_destroy()
|
williamr@4
|
423 |
{
|
williamr@4
|
424 |
int t = delete_sem(sem);
|
williamr@4
|
425 |
assert(t == B_NO_ERROR);
|
williamr@4
|
426 |
}
|
williamr@4
|
427 |
inline void _M_acquire_lock_nodemand()
|
williamr@4
|
428 |
{
|
williamr@4
|
429 |
status_t t;
|
williamr@4
|
430 |
t = acquire_sem(sem);
|
williamr@4
|
431 |
assert(t == B_NO_ERROR);
|
williamr@4
|
432 |
}
|
williamr@4
|
433 |
inline void _M_acquire_lock();
|
williamr@4
|
434 |
inline void _M_release_lock()
|
williamr@4
|
435 |
{
|
williamr@4
|
436 |
status_t t = release_sem(sem);
|
williamr@4
|
437 |
assert(t == B_NO_ERROR);
|
williamr@4
|
438 |
}
|
williamr@4
|
439 |
# elif defined(_STLP_VXWORKS_THREADS)
|
williamr@4
|
440 |
SEM_ID _M_sem;
|
williamr@4
|
441 |
inline void _M_initialize()
|
williamr@4
|
442 |
{
|
williamr@4
|
443 |
_M_sem = semMCreate(SEM_Q_FIFO);
|
williamr@4
|
444 |
assert(_M_sem > 0);
|
williamr@4
|
445 |
}
|
williamr@4
|
446 |
inline void _M_destroy()
|
williamr@4
|
447 |
{
|
williamr@4
|
448 |
STATUS __s;
|
williamr@4
|
449 |
semDelete (_M_sem);
|
williamr@4
|
450 |
assert(__s == OK);
|
williamr@4
|
451 |
}
|
williamr@4
|
452 |
inline void _M_acquire_lock_nodemand()
|
williamr@4
|
453 |
{
|
williamr@4
|
454 |
STATUS __s;
|
williamr@4
|
455 |
semTake (_M_sem, WAIT_FOREVER);
|
williamr@4
|
456 |
assert(__s == OK);
|
williamr@4
|
457 |
}
|
williamr@4
|
458 |
inline void _M_acquire_lock()
|
williamr@4
|
459 |
{
|
williamr@4
|
460 |
if (!_M_sem)
|
williamr@4
|
461 |
_M_initialize();
|
williamr@4
|
462 |
_M_acquire_lock_nodemand();
|
williamr@4
|
463 |
}
|
williamr@4
|
464 |
inline void _M_release_lock()
|
williamr@4
|
465 |
{
|
williamr@4
|
466 |
STATUS __s;
|
williamr@4
|
467 |
semGive (_M_sem, WAIT_FOREVER);
|
williamr@4
|
468 |
assert(__s == OK);
|
williamr@4
|
469 |
}
|
williamr@4
|
470 |
# else //*ty 11/24/2001 - added configuration check
|
williamr@4
|
471 |
# error "Unknown thread facility configuration"
|
williamr@4
|
472 |
# endif
|
williamr@4
|
473 |
#else /* No threads */
|
williamr@4
|
474 |
inline void _M_initialize() {}
|
williamr@4
|
475 |
inline void _M_destroy() {}
|
williamr@4
|
476 |
inline void _M_acquire_lock() {}
|
williamr@4
|
477 |
inline void _M_release_lock() {}
|
williamr@4
|
478 |
#endif // _STLP_PTHREADS
|
williamr@4
|
479 |
};
|
williamr@4
|
480 |
|
williamr@4
|
481 |
|
williamr@4
|
482 |
#if defined (_STLP_THREADS) && defined (_STLP_MUTEX_NEEDS_ONDEMAND_INITIALIZATION)
|
williamr@4
|
483 |
// for use in _STLP_mutex, our purposes do not require ondemand initialization
|
williamr@4
|
484 |
// also, mutex_base may use some hacks to determine uninitialized state by zero data, which only works for globals.
|
williamr@4
|
485 |
class _STLP_CLASS_DECLSPEC _STLP_mutex_nodemand : public _STLP_mutex_base {
|
williamr@4
|
486 |
inline void _M_acquire_lock() {
|
williamr@4
|
487 |
_M_acquire_lock_nodemand();
|
williamr@4
|
488 |
}
|
williamr@4
|
489 |
};
|
williamr@4
|
490 |
#else
|
williamr@4
|
491 |
typedef _STLP_mutex_base _STLP_mutex_nodemand;
|
williamr@4
|
492 |
#endif
|
williamr@4
|
493 |
|
williamr@4
|
494 |
|
williamr@4
|
495 |
// Locking class. The constructor initializes the lock, the destructor destroys it.
|
williamr@4
|
496 |
// Well - behaving class, does not need static initializer
|
williamr@4
|
497 |
class _STLP_CLASS_DECLSPEC _STLP_mutex : public _STLP_mutex_nodemand {
|
williamr@4
|
498 |
public:
|
williamr@4
|
499 |
inline _STLP_mutex () { _M_initialize(); }
|
williamr@4
|
500 |
inline ~_STLP_mutex () { _M_destroy(); }
|
williamr@4
|
501 |
private:
|
williamr@4
|
502 |
_STLP_mutex(const _STLP_mutex&);
|
williamr@4
|
503 |
void operator=(const _STLP_mutex&);
|
williamr@4
|
504 |
};
|
williamr@4
|
505 |
|
williamr@4
|
506 |
|
williamr@4
|
507 |
|
williamr@4
|
508 |
/*
|
williamr@4
|
509 |
* Class _Refcount_Base provides a type, __stl_atomic_t, a data member,
|
williamr@4
|
510 |
* _M_ref_count, and member functions _M_incr and _M_decr, which perform
|
williamr@4
|
511 |
* atomic preincrement/predecrement. The constructor initializes
|
williamr@4
|
512 |
* _M_ref_count.
|
williamr@4
|
513 |
*/
|
williamr@4
|
514 |
struct _STLP_CLASS_DECLSPEC _Refcount_Base
|
williamr@4
|
515 |
{
|
williamr@4
|
516 |
// The data member _M_ref_count
|
williamr@4
|
517 |
volatile __stl_atomic_t _M_ref_count;
|
williamr@4
|
518 |
|
williamr@4
|
519 |
# if !defined (_STLP_ATOMIC_EXCHANGE)
|
williamr@4
|
520 |
_STLP_mutex _M_mutex;
|
williamr@4
|
521 |
# endif
|
williamr@4
|
522 |
|
williamr@4
|
523 |
// Constructor
|
williamr@4
|
524 |
_Refcount_Base(__stl_atomic_t __n) : _M_ref_count(__n) {}
|
williamr@4
|
525 |
|
williamr@4
|
526 |
// _M_incr and _M_decr
|
williamr@4
|
527 |
# if defined (_STLP_THREADS) && defined (_STLP_ATOMIC_EXCHANGE)
|
williamr@4
|
528 |
void _M_incr() { _STLP_ATOMIC_INCREMENT((__stl_atomic_t*)&_M_ref_count); }
|
williamr@4
|
529 |
void _M_decr() { _STLP_ATOMIC_DECREMENT((__stl_atomic_t*)&_M_ref_count); }
|
williamr@4
|
530 |
# elif defined(_STLP_THREADS)
|
williamr@4
|
531 |
void _M_incr() {
|
williamr@4
|
532 |
_M_mutex._M_acquire_lock();
|
williamr@4
|
533 |
++_M_ref_count;
|
williamr@4
|
534 |
_M_mutex._M_release_lock();
|
williamr@4
|
535 |
}
|
williamr@4
|
536 |
void _M_decr() {
|
williamr@4
|
537 |
_M_mutex._M_acquire_lock();
|
williamr@4
|
538 |
--_M_ref_count;
|
williamr@4
|
539 |
_M_mutex._M_release_lock();
|
williamr@4
|
540 |
}
|
williamr@4
|
541 |
# else /* No threads */
|
williamr@4
|
542 |
void _M_incr() { ++_M_ref_count; }
|
williamr@4
|
543 |
void _M_decr() { --_M_ref_count; }
|
williamr@4
|
544 |
# endif
|
williamr@4
|
545 |
};
|
williamr@4
|
546 |
|
williamr@4
|
547 |
// Atomic swap on unsigned long
|
williamr@4
|
548 |
// This is guaranteed to behave as though it were atomic only if all
|
williamr@4
|
549 |
// possibly concurrent updates use _Atomic_swap.
|
williamr@4
|
550 |
// In some cases the operation is emulated with a lock.
|
williamr@4
|
551 |
# if defined (_STLP_THREADS)
|
williamr@4
|
552 |
# ifdef _STLP_ATOMIC_EXCHANGE
|
williamr@4
|
553 |
inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
|
williamr@4
|
554 |
return (__stl_atomic_t) _STLP_ATOMIC_EXCHANGE(__p,__q);
|
williamr@4
|
555 |
}
|
williamr@4
|
556 |
# elif defined(_STLP_PTHREADS) || defined (_STLP_UITHREADS) || defined (_STLP_OS2THREADS) || defined(_STLP_USE_PTHREAD_SPINLOCK)
|
williamr@4
|
557 |
// We use a template here only to get a unique initialized instance.
|
williamr@4
|
558 |
template<int __dummy>
|
williamr@4
|
559 |
struct _Swap_lock_struct {
|
williamr@4
|
560 |
static _STLP_STATIC_MUTEX _S_swap_lock;
|
williamr@4
|
561 |
};
|
williamr@4
|
562 |
|
williamr@4
|
563 |
|
williamr@4
|
564 |
// This should be portable, but performance is expected
|
williamr@4
|
565 |
// to be quite awful. This really needs platform specific
|
williamr@4
|
566 |
// code.
|
williamr@4
|
567 |
inline __stl_atomic_t _Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
|
williamr@4
|
568 |
_Swap_lock_struct<0>::_S_swap_lock._M_acquire_lock();
|
williamr@4
|
569 |
__stl_atomic_t __result = *__p;
|
williamr@4
|
570 |
*__p = __q;
|
williamr@4
|
571 |
_Swap_lock_struct<0>::_S_swap_lock._M_release_lock();
|
williamr@4
|
572 |
return __result;
|
williamr@4
|
573 |
}
|
williamr@4
|
574 |
# endif // _STLP_PTHREADS || _STLP_UITHREADS || _STLP_OS2THREADS || _STLP_USE_PTHREAD_SPINLOCK
|
williamr@4
|
575 |
# else // !_STLP_THREADS
|
williamr@4
|
576 |
/* no threads */
|
williamr@4
|
577 |
static inline __stl_atomic_t _STLP_CALL
|
williamr@4
|
578 |
_Atomic_swap(volatile __stl_atomic_t * __p, __stl_atomic_t __q) {
|
williamr@4
|
579 |
__stl_atomic_t __result = *__p;
|
williamr@4
|
580 |
*__p = __q;
|
williamr@4
|
581 |
return __result;
|
williamr@4
|
582 |
}
|
williamr@4
|
583 |
# endif // _STLP_THREADS
|
williamr@4
|
584 |
|
williamr@4
|
585 |
// A locking class that uses _STLP_STATIC_MUTEX. The constructor takes
|
williamr@4
|
586 |
// a reference to an _STLP_STATIC_MUTEX, and acquires a lock. The destructor
|
williamr@4
|
587 |
// releases the lock.
|
williamr@4
|
588 |
|
williamr@4
|
589 |
struct _STLP_CLASS_DECLSPEC _STLP_auto_lock
|
williamr@4
|
590 |
{
|
williamr@4
|
591 |
_STLP_STATIC_MUTEX& _M_lock;
|
williamr@4
|
592 |
|
williamr@4
|
593 |
_STLP_auto_lock(_STLP_STATIC_MUTEX& __lock) : _M_lock(__lock)
|
williamr@4
|
594 |
{ _M_lock._M_acquire_lock(); }
|
williamr@4
|
595 |
~_STLP_auto_lock() { _M_lock._M_release_lock(); }
|
williamr@4
|
596 |
|
williamr@4
|
597 |
private:
|
williamr@4
|
598 |
void operator=(const _STLP_auto_lock&);
|
williamr@4
|
599 |
_STLP_auto_lock(const _STLP_auto_lock&);
|
williamr@4
|
600 |
};
|
williamr@4
|
601 |
|
williamr@4
|
602 |
typedef _STLP_auto_lock _STLP_mutex_lock;
|
williamr@4
|
603 |
|
williamr@4
|
604 |
#ifdef _STLP_BETHREADS

// BeOS helper: one global constructed mutex plus a flag recording whether
// it has been constructed yet (static initialization order of other
// translation units is unknowable, so the flag is checked at runtime).
template <int __inst>
struct _STLP_beos_static_lock_data
{
  static bool is_init;
  struct mutex_t : public _STLP_mutex
  {
    mutex_t()
    {
      _STLP_beos_static_lock_data<0>::is_init = true;
    }
    ~mutex_t()
    {
      _STLP_beos_static_lock_data<0>::is_init = false;
    }
  };
  static mutex_t mut;
};

template <int __inst>
bool _STLP_beos_static_lock_data<__inst>::is_init = false;
template <int __inst>
typename _STLP_beos_static_lock_data<__inst>::mutex_t _STLP_beos_static_lock_data<__inst>::mut;

// Out-of-line BeOS acquire with on-demand initialization: the semaphore
// is created on first use.
inline void _STLP_mutex_base::_M_acquire_lock()
{
  if (sem == 0)
  {
    if (_STLP_beos_static_lock_data<0>::is_init)
    {
      // The global mutex is alive: use it to make the lazy init race-free
      // (double-checked under the lock).
      _STLP_auto_lock al(_STLP_beos_static_lock_data<0>::mut);
      if (sem == 0) _M_initialize();
    }
    else
    {
      // No lock available -- we must still be in startup code, where
      // exactly ONE thread is active.
      _M_initialize();
    }
  }
  _M_acquire_lock_nodemand();
}

#endif
|
williamr@4
|
654 |
|
williamr@4
|
655 |
_STLP_END_NAMESPACE
|
williamr@4
|
656 |
|
williamr@4
|
657 |
# if !defined (_STLP_LINK_TIME_INSTANTIATION)
|
williamr@4
|
658 |
# include <stl/_threads.c>
|
williamr@4
|
659 |
# endif
|
williamr@4
|
660 |
|
williamr@4
|
661 |
#endif /* _STLP_INTERNAL_THREADS_H */
|
williamr@4
|
662 |
|
williamr@4
|
663 |
// Local Variables:
|
williamr@4
|
664 |
// mode:C++
|
williamr@4
|
665 |
// End:
|
williamr@4
|
666 |
|