/*
 * Portions Copyright (c) 2008 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
 *
 * Copyright (c) 1994
 * Hewlett-Packard Company
 *
 * Copyright (c) 1996,1997
 * Silicon Graphics Computer Systems, Inc.
 *
 * Copyright (c) 1997
 * Moscow Center for SPARC Technology
 *
 * This material is provided "as is", with absolutely no warranty expressed
 * or implied. Any use is at your own risk.
 *
 * Permission to use or copy this software for any purpose is hereby granted
 * without fee, provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 *
 */
#ifndef _STLP_THREADS_C
#define _STLP_THREADS_C

#ifndef _STLP_INTERNAL_THREADS_H
#  include <stl/_threads.h>
#endif

#if defined (_STLP_EXPOSE_GLOBALS_IMPLEMENTATION)

#if defined (_STLP_SGI_THREADS)
#  include <time.h>
#elif defined (_STLP_UNIX)
#  ifndef _STLP_INTERNAL_CTIME
#    include <stl/_ctime.h>
#  endif
#  if defined (_STLP_USE_NAMESPACES) && !defined (_STLP_VENDOR_GLOBAL_CSTD)
using _STLP_VENDOR_CSTD::time_t;
#  endif
#  include <sys/time.h>
#endif

_STLP_BEGIN_NAMESPACE
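
// Definitions of the global synchronization data declared in stl/_threads.h.
// When the compiler supports static data members of class templates
// (_STLP_STATIC_TEMPLATE_DATA > 0) they are defined as template statics;
// otherwise explicit instances are provided via the __DECLARE_INSTANCE macro.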
#if (_STLP_STATIC_TEMPLATE_DATA > 0)

#  if defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
#if !defined(__SYMBIAN32__WSD__)
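// _S_swap_lock is the lock behind the mutex-based _Atomic_swap() fallback that
// is selected when _STLP_USE_ATOMIC_SWAP_MUTEX is defined (see stl/_threads.h).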
template<int __32bits>
_STLP_STATIC_MUTEX
_Atomic_swap_struct<__32bits>::_S_swap_lock _STLP_MUTEX_INITIALIZER;
#endif
#  undef _STLP_USE_ATOMIC_SWAP_MUTEX
#  endif

#  if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
// Note: For SYMBIAN Emulator, these entries are to be considered WSD.
// Still, EWSD solution can't be applied since it's templated.
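// __max is the current spin limit and __last the iteration count at which a
// lock was most recently acquired by spinning; _M_do_lock() below adjusts both
// to adapt the amount of busy-waiting to the observed contention. The code
// always reads and updates the <0> instantiation, so these are effectively
// process-wide tuning values.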
template <int __inst>
unsigned _STLP_mutex_spin<__inst>::__max = _STLP_mutex_spin<__inst>::__low_max;

template <int __inst>
unsigned _STLP_mutex_spin<__inst>::__last = 0;
#  endif // _STLP_USE_PTHREAD_SPINLOCK

#else /* ( _STLP_STATIC_TEMPLATE_DATA > 0 ) */

#  if defined (_STLP_USE_ATOMIC_SWAP_MUTEX)
__DECLARE_INSTANCE(_STLP_STATIC_MUTEX, _Atomic_swap_struct<sizeof(__stl_atomic_t) == sizeof(void*)>::_S_swap_lock,
                   _STLP_MUTEX_INITIALIZER);
#  undef _STLP_USE_ATOMIC_SWAP_MUTEX
#  endif /* _STLP_USE_ATOMIC_SWAP_MUTEX */

#  if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)
__DECLARE_INSTANCE(unsigned, _STLP_mutex_spin<0>::__max, =30);
__DECLARE_INSTANCE(unsigned, _STLP_mutex_spin<0>::__last, =0);
#  endif // _STLP_USE_PTHREAD_SPINLOCK

#endif /* ( _STLP_STATIC_TEMPLATE_DATA > 0 ) */

#if defined (_STLP_THREADS) && !defined (_STLP_USE_PTHREAD_SPINLOCK)

#  if defined (_STLP_SPARC_SOLARIS_THREADS)
// underground function in libc.so; we do not want a dependence on librt
extern "C" int __nanosleep(const struct timespec*, struct timespec*);
#    define _STLP_NANOSLEEP __nanosleep
#  else
#    define _STLP_NANOSLEEP nanosleep
#  endif
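
// _S_nsec_sleep() suspends the calling thread for roughly 2^__log_nsec
// nanoseconds. On Windows and OS/2 the exponent is converted to milliseconds
// by shifting it down by 20 (2^20 ns is about 1 ms); on UNIX the request is
// handed to nanosleep() unchanged.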
template <int __inst>
void _STLP_CALL
_STLP_mutex_spin<__inst>::_S_nsec_sleep(int __log_nsec) {
#  if defined (_STLP_WIN32THREADS)
  if (__log_nsec <= 20) {
    // Note from boost (www.boost.org):
    // Changed to Sleep(1) from Sleep(0).
    // According to MSDN, Sleep(0) will never yield
    // to a lower-priority thread, whereas Sleep(1)
    // will. Performance seems not to be affected.
    Sleep(1);
  } else {
    Sleep(1 << (__log_nsec - 20));
  }
#  elif defined (_STLP_OS2THREADS)
  if (__log_nsec <= 20) {
    DosSleep(0);
  } else {
    DosSleep(1 << (__log_nsec - 20));
  }
#  elif defined (_STLP_UNIX)
  timespec __ts;
  /* Max sleep is 2**27 nsec ~ 134 msec */
  __ts.tv_sec  = 0;
  __ts.tv_nsec = 1 << __log_nsec;
  _STLP_NANOSLEEP(&__ts, 0);
#  endif
}

template <int __inst>
void _STLP_CALL
_STLP_mutex_spin<__inst>::_M_do_lock(volatile __stl_atomic_t* __lock) {
#  if defined (_STLP_ATOMIC_EXCHANGE)
  if (_Atomic_swap(__lock, 1)) {
    unsigned __my_spin_max = _STLP_mutex_spin<0>::__max;
    unsigned __my_last_spins = _STLP_mutex_spin<0>::__last;
    volatile unsigned __junk = 17; // Value doesn't matter.
    unsigned __i;
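
    // Spin phase: for the first __my_last_spins/2 iterations, or while the
    // lock still reads as held, burn cycles on the dummy __junk multiplications
    // so that waiters mostly read the lock word rather than write it; only
    // otherwise is the atomic swap retried.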

    for (__i = 0; __i < __my_spin_max; ++__i) {
      if (__i < __my_last_spins/2 || *__lock) {
        __junk *= __junk; __junk *= __junk;
        __junk *= __junk; __junk *= __junk;
      } else {
        if (!_Atomic_swap(__lock, 1)) {
          // Spinning worked. Thus we're probably not being scheduled
          // against the other process with which we were contending.
          // Thus it makes sense to spin longer the next time.
          _STLP_mutex_spin<0>::__last = __i;
          _STLP_mutex_spin<0>::__max = _STLP_mutex_spin<0>::__high_max;
          return;
        }
      }
    }

    // We are probably being scheduled against the other process. Sleep.
    _STLP_mutex_spin<0>::__max = _STLP_mutex_spin<0>::__low_max;
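    // Back-off phase: sleep between lock attempts, starting at about 2^6 ns
    // and doubling the requested interval each iteration up to the 2^27 ns
    // cap enforced below, so a heavily contended lock is polled ever less often.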
    for (__i = 0 ;; ++__i) {
      int __log_nsec = __i + 6;

      if (__log_nsec > 27) __log_nsec = 27;
      if (!_Atomic_swap(__lock, 1)) {
        return;
      }
      _S_nsec_sleep(__log_nsec);
    }
  } /* first _Atomic_swap */
#  endif
}

#endif // _STLP_USE_PTHREAD_SPINLOCK

_STLP_END_NAMESPACE

#endif /* _STLP_EXPOSE_GLOBALS_IMPLEMENTATION */
#endif /* _STLP_THREADS_C */