// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\system\t_atomic_common.cpp
//
//

#ifdef __KERNEL_MODE__
#include <kernel/kernel.h>
#else
#define __E32TEST_EXTENSION__

#include <e32test.h>

extern RTest test;

#define __INCLUDE_FUNC_NAMES__
#endif

#define __INCLUDE_ATOMIC_FUNCTIONS__
#define __INCLUDE_CONTROL_FUNCTIONS__
#define __INCLUDE_FUNCTION_ATTRIBUTES__

#include "t_atomic.h"

#define DEBUGPRINTVAR(x) \
	{ \
	const TUint8* p = (const TUint8*)&(x); \
	DEBUGPRINT("Line %d: " #x "=%02x %02x %02x %02x %02x %02x %02x %02x", __LINE__, p[0], p[1], p[2], p[3], p[4], p[5], p[6], p[7]); \
	}
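
// DEBUGPRINTVAR always dumps eight bytes starting at the address of its argument,
// so for operands narrower than 64 bits the trailing bytes shown are simply whatever
// happens to follow the variable in memory.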

extern "C" {

// Simulated versions of atomic functions without the atomicity
#define __LOAD(T) return *(T*)a
#define __STORE(T) *(T*)a=v; return v
#define __SWP(T) T oldv=*(T*)a; *(T*)a=v; return oldv
#define __CAS(T) if (*(T*)a==*q) {*(T*)a=v; return 1;} *q=*(T*)a; return 0
#define __ADD(T) T oldv=*(T*)a; *(T*)a=(T)(oldv+v); return oldv
#define __AND(T) T oldv=*(T*)a; *(T*)a=(T)(oldv&v); return oldv
#define __IOR(T) T oldv=*(T*)a; *(T*)a=(T)(oldv|v); return oldv
#define __XOR(T) T oldv=*(T*)a; *(T*)a=(T)(oldv^v); return oldv
#define __AXO(T) T oldv=*(T*)a; *(T*)a=(T)((oldv&u)^v); return oldv
#define __TA(T) T oldv=*(T*)a; *(T*)a=(T)(oldv+((oldv>=t)?u:v)); return oldv
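
// The __nonatomic_* functions below expand these macros into plain, non-atomic
// reference implementations with the same shape as the real atomic operations;
// the harness runs each atomic function and its "control" counterpart on separate
// copies of the data and requires identical results. AXO stores (old & u) ^ v, and
// TAU/TAS ("threshold-add") add u when the old value is >= t and v otherwise.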

TUint8 __nonatomic_load8(const volatile TAny* a)
	{
	__LOAD(TUint8);
	}

TUint8 __nonatomic_store8(volatile TAny* a, TUint8 v)
	{
	__STORE(TUint8);
	}

TUint8 __nonatomic_swp8(volatile TAny* a, TUint8 v)
	{
	__SWP(TUint8);
	}

TBool __nonatomic_cas8(volatile TAny* a, TUint8* q, TUint8 v)
	{
	__CAS(TUint8);
	}

TUint8 __nonatomic_add8(volatile TAny* a, TUint8 v)
	{
	__ADD(TUint8);
	}

TUint8 __nonatomic_and8(volatile TAny* a, TUint8 v)
	{
	__AND(TUint8);
	}

TUint8 __nonatomic_ior8(volatile TAny* a, TUint8 v)
	{
	__IOR(TUint8);
	}

TUint8 __nonatomic_xor8(volatile TAny* a, TUint8 v)
	{
	__XOR(TUint8);
	}

TUint8 __nonatomic_axo8(volatile TAny* a, TUint8 u, TUint8 v)
	{
	__AXO(TUint8);
	}

TUint8 __nonatomic_tau8(volatile TAny* a, TUint8 t, TUint8 u, TUint8 v)
	{
	__TA(TUint8);
	}

TInt8 __nonatomic_tas8(volatile TAny* a, TInt8 t, TInt8 u, TInt8 v)
	{
	__TA(TInt8);
	}


TUint16 __nonatomic_load16(const volatile TAny* a)
	{
	__LOAD(TUint16);
	}

TUint16 __nonatomic_store16(volatile TAny* a, TUint16 v)
	{
	__STORE(TUint16);
	}

TUint16 __nonatomic_swp16(volatile TAny* a, TUint16 v)
	{
	__SWP(TUint16);
	}

TBool __nonatomic_cas16(volatile TAny* a, TUint16* q, TUint16 v)
	{
	__CAS(TUint16);
	}

TUint16 __nonatomic_add16(volatile TAny* a, TUint16 v)
	{
	__ADD(TUint16);
	}

TUint16 __nonatomic_and16(volatile TAny* a, TUint16 v)
	{
	__AND(TUint16);
	}

TUint16 __nonatomic_ior16(volatile TAny* a, TUint16 v)
	{
	__IOR(TUint16);
	}

TUint16 __nonatomic_xor16(volatile TAny* a, TUint16 v)
	{
	__XOR(TUint16);
	}

TUint16 __nonatomic_axo16(volatile TAny* a, TUint16 u, TUint16 v)
	{
	__AXO(TUint16);
	}

TUint16 __nonatomic_tau16(volatile TAny* a, TUint16 t, TUint16 u, TUint16 v)
	{
	__TA(TUint16);
	}

TInt16 __nonatomic_tas16(volatile TAny* a, TInt16 t, TInt16 u, TInt16 v)
	{
	__TA(TInt16);
	}


TUint32 __nonatomic_load32(const volatile TAny* a)
	{
	__LOAD(TUint32);
	}

TUint32 __nonatomic_store32(volatile TAny* a, TUint32 v)
	{
	__STORE(TUint32);
	}

TUint32 __nonatomic_swp32(volatile TAny* a, TUint32 v)
	{
	__SWP(TUint32);
	}

TBool __nonatomic_cas32(volatile TAny* a, TUint32* q, TUint32 v)
	{
	__CAS(TUint32);
	}

TUint32 __nonatomic_add32(volatile TAny* a, TUint32 v)
	{
	__ADD(TUint32);
	}

TUint32 __nonatomic_and32(volatile TAny* a, TUint32 v)
	{
	__AND(TUint32);
	}

TUint32 __nonatomic_ior32(volatile TAny* a, TUint32 v)
	{
	__IOR(TUint32);
	}

TUint32 __nonatomic_xor32(volatile TAny* a, TUint32 v)
	{
	__XOR(TUint32);
	}

TUint32 __nonatomic_axo32(volatile TAny* a, TUint32 u, TUint32 v)
	{
	__AXO(TUint32);
	}

TUint32 __nonatomic_tau32(volatile TAny* a, TUint32 t, TUint32 u, TUint32 v)
	{
	__TA(TUint32);
	}

TInt32 __nonatomic_tas32(volatile TAny* a, TInt32 t, TInt32 u, TInt32 v)
	{
	__TA(TInt32);
	}


TUint64 __nonatomic_load64(const volatile TAny* a)
	{
	__LOAD(TUint64);
	}

TUint64 __nonatomic_store64(volatile TAny* a, TUint64 v)
	{
	__STORE(TUint64);
	}

TUint64 __nonatomic_swp64(volatile TAny* a, TUint64 v)
	{
	__SWP(TUint64);
	}

TBool __nonatomic_cas64(volatile TAny* a, TUint64* q, TUint64 v)
	{
	__CAS(TUint64);
	}

TUint64 __nonatomic_add64(volatile TAny* a, TUint64 v)
	{
	__ADD(TUint64);
	}

TUint64 __nonatomic_and64(volatile TAny* a, TUint64 v)
	{
	__AND(TUint64);
	}

TUint64 __nonatomic_ior64(volatile TAny* a, TUint64 v)
	{
	__IOR(TUint64);
	}

TUint64 __nonatomic_xor64(volatile TAny* a, TUint64 v)
	{
	__XOR(TUint64);
	}

TUint64 __nonatomic_axo64(volatile TAny* a, TUint64 u, TUint64 v)
	{
	__AXO(TUint64);
	}

TUint64 __nonatomic_tau64(volatile TAny* a, TUint64 t, TUint64 u, TUint64 v)
	{
	__TA(TUint64);
	}

TInt64 __nonatomic_tas64(volatile TAny* a, TInt64 t, TInt64 u, TInt64 v)
	{
	__TA(TInt64);
	}

} // extern "C"


#define DEBUGPRINTxyrc() \
	DEBUGPRINTVAR(x); \
	DEBUGPRINTVAR(y); \
	DEBUGPRINTVAR(r); \
	DEBUGPRINTVAR(c)

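// Single-threaded checks: each Do*Test helper applies the atomic function to x and the
// corresponding control (non-atomic) function to y, starting from the same initial value,
// and fails (returning the offending line number) if either the returned values or the
// final stored values differ.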
template<class T> TInt DoLoadTest(TInt aIndex, TAny* aPtr, T aInitialValue)
	{
#ifdef __EXTRA_DEBUG__
	DEBUGPRINT("DoLoadTest %d %08x", aIndex, aPtr);
#endif
	typename TLoadFn<T>::F atomic = (typename TLoadFn<T>::F)AtomicFuncPtr[aIndex];
	typename TLoadFn<T>::F control = (typename TLoadFn<T>::F)ControlFuncPtr[aIndex];
	T& x = *(T*)aPtr;
	x = aInitialValue;
	T y = aInitialValue;
	T r = atomic(&x);
	T c = control(&y);
	if (r!=c || x!=y)
		{
		DEBUGPRINTxyrc();
		return __LINE__;
		}
	return 0;
	}

template<class T> TInt DoRmw1Test(TInt aIndex, TAny* aPtr, T aInitialValue, T a1)
	{
#ifdef __EXTRA_DEBUG__
	DEBUGPRINT("DoRmw1Test %d %08x", aIndex, aPtr);
#endif
	typename TRmw1Fn<T>::F atomic = (typename TRmw1Fn<T>::F)AtomicFuncPtr[aIndex];
	typename TRmw1Fn<T>::F control = (typename TRmw1Fn<T>::F)ControlFuncPtr[aIndex];
	T& x = *(T*)aPtr;
	x = aInitialValue;
	T y = aInitialValue;
	T r = atomic(&x,a1);
	T c = control(&y,a1);
	if (r!=c || x!=y)
		{
		DEBUGPRINTxyrc();
		return __LINE__;
		}
	return 0;
	}

template<class T> TInt DoRmw2Test(TInt aIndex, TAny* aPtr, T aInitialValue, T a1, T a2)
	{
#ifdef __EXTRA_DEBUG__
	DEBUGPRINT("DoRmw2Test %d %08x", aIndex, aPtr);
#endif
	typename TRmw2Fn<T>::F atomic = (typename TRmw2Fn<T>::F)AtomicFuncPtr[aIndex];
	typename TRmw2Fn<T>::F control = (typename TRmw2Fn<T>::F)ControlFuncPtr[aIndex];
	T& x = *(T*)aPtr;
	x = aInitialValue;
	T y = aInitialValue;
	T r = atomic(&x,a1,a2);
	T c = control(&y,a1,a2);
	if (r!=c || x!=y)
		{
		DEBUGPRINTxyrc();
		return __LINE__;
		}
	return 0;
	}

template<class T> TInt DoRmw3Test(TInt aIndex, TAny* aPtr, T aInitialValue, T a1, T a2, T a3)
	{
#ifdef __EXTRA_DEBUG__
	DEBUGPRINT("DoRmw3Test %d %08x", aIndex, aPtr);
#endif
	typename TRmw3Fn<T>::F atomic = (typename TRmw3Fn<T>::F)AtomicFuncPtr[aIndex];
	typename TRmw3Fn<T>::F control = (typename TRmw3Fn<T>::F)ControlFuncPtr[aIndex];
	T& x = *(T*)aPtr;
	x = aInitialValue;
	T y = aInitialValue;
	T r = atomic(&x,a1,a2,a3);
	T c = control(&y,a1,a2,a3);
	if (r!=c || x!=y)
		{
		DEBUGPRINTxyrc();
		return __LINE__;
		}
	return 0;
	}

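// DoCasTest additionally checks the compare-and-swap contract: atomic and control must
// agree on success/failure, on success the destination must hold aFinalValue, and on
// failure the "expected" output must have been rewritten to the value actually found
// (aInitialValue here).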
template<class T> TInt DoCasTest(TInt aIndex, TAny* aPtr, T aInitialValue, T aExpectedValue, T aFinalValue)
	{
#ifdef __EXTRA_DEBUG__
	DEBUGPRINT("DoCasTest %d %08x", aIndex, aPtr);
#endif
	typename TCasFn<T>::F atomic = (typename TCasFn<T>::F)AtomicFuncPtr[aIndex];
	typename TCasFn<T>::F control = (typename TCasFn<T>::F)ControlFuncPtr[aIndex];
	T& x = *(T*)aPtr;
	x = aInitialValue;
	T ex = aExpectedValue;
	T y = aInitialValue;
	T ey = aExpectedValue;
	TBool r = atomic(&x,&ex,aFinalValue);
	TBool c = control(&y,&ey,aFinalValue);
	TInt line = 0;
	if (r && !c)
		line = __LINE__;
	else if (!r && c)
		line = __LINE__;
	else if (x!=y)
		line = __LINE__;
	else if (ex!=ey)
		line = __LINE__;
	else if (r && x!=aFinalValue)
		line = __LINE__;
	else if (!r && ex!=aInitialValue)
		line = __LINE__;
	if (line)
		{
		DEBUGPRINT("r=%d",r);
		DEBUGPRINTVAR(x);
		DEBUGPRINTVAR(ex);
		DEBUGPRINT("c=%d",c);
		DEBUGPRINTVAR(y);
		DEBUGPRINTVAR(ey);
		}
	return line;
	}


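// TEnclosed places the operand under test at successive offsets inside an 8-byte aligned,
// 64-byte buffer that is pre-filled with pseudo-random bytes, keeping an identical backup
// copy. After each operation Verify() restores the operand bytes from the backup and then
// compares the whole 64-byte region, so any write outside the operand itself is detected.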
TEnclosed::TEnclosed(TInt aSize)
	{
	iOffset = -1;
	iSize = aSize;
	iData = (TUint64*)((T_UintPtr(i_Data) + 7) &~ 7);	// align up to next 8 byte boundary
	iBackup = iData + 8;
	}

TAny* TEnclosed::Ptr()
	{
	return ((TUint8*)iData + iOffset);
	}

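// Next() steps iOffset through every naturally spaced position for the current operand
// size (the KLimit entry at iSize-1 bounds the walk: offsets 0..7 for 1-byte operands,
// 0..14 in steps of 2 for 2-byte operands, and so on), verifying the surrounding buffer
// before each move and refilling it afterwards.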
TInt TEnclosed::Next()
	{
	const TInt KLimit[8] = {8, 16, 0, 32, 0, 0, 0, 32};
	if (iOffset<0)
		iOffset = 0;
	else
		{
		TInt r = Verify();
		if (r!=0)
			return r;
		iOffset += iSize;
		}
	if (iOffset >= KLimit[iSize-1])
		return KErrEof;
	Init();
	return KErrNone;
	}

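// Init() fills the data buffer and its backup with the same byte pattern, seeded from the
// current offset and stretched with a simple 69069*x+41 linear congruential generator.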
void TEnclosed::Init()
	{
	TUint32 x = iOffset+1;
	x |= (x<<8);
	x |= (x<<16);
	TUint32* d = (TUint32*)iData;
	TUint32* b = (TUint32*)iBackup;
	TInt i;
	for (i=0; i<16; ++i)
		{
		*d++ = x;
		*b++ = x;
		x = 69069*x + 41;
		}
	}

TInt TEnclosed::Verify()
	{
	TUint8* d = (TUint8*)iData;
	const TUint8* b = (const TUint8*)iBackup;
	TInt i;
	for (i=0; i<iSize; ++i)
		d[iOffset+i] = b[iOffset+i];
	if (memcompare(b,64,d,64))
		{
		DEBUGPRINT("FAIL! iOffset=%02x, sizeof(T)=%1d", iOffset, iSize);
		for (i=0; i<64; ++i)
			{
			if (d[i]!=b[i])
				{
				DEBUGPRINT("d[%02x]=%02x b[%02x]=%02x", i, d[i], i, b[i]);
				}
			}
		return __LINE__;
		}
	return 0;
	}


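// Execute() drives one test case: it decodes the attribute word for the selected function
// (operation type, operand size and specific operation), then walks the operand across
// every offset provided by TEnclosed, dispatching to the matching Do*Test template for
// each position. A non-zero return is the line number of the first failed check.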
TInt TDGBase::Execute()
	{
	PFV af0 = AtomicFuncPtr[iIndex];
	PFV cf0 = ControlFuncPtr[iIndex];
	if (!af0 || !cf0)
		return __LINE__;
	TUint attr = FuncAttr[iIndex];
	TInt type = ATTR_TO_TYPE(attr);
	TInt size = ATTR_TO_SIZE(attr);
	TInt func = ATTR_TO_FUNC(attr);
	if (type==EFuncTypeInvalid)
		return __LINE__;
#ifdef __EXTRA_DEBUG__
	TInt ord = ATTR_TO_ORD(attr);
	DEBUGPRINT("A=%08x T=%d O=%d S=%d F=%d", attr, type, ord, size, func);
#endif
	TEnclosed enc(size);
	TInt res = 0;
	while ( (res = enc.Next()) == KErrNone )
		{
#ifdef __EXTRA_DEBUG__
		DEBUGPRINT("Offset %02x", enc.Offset());
#endif
		TAny* ptr = enc.Ptr();
		switch (type)
			{
			case EFuncTypeLoad:
				{
				switch (size)
					{
					case 1: res = DoLoadTest<TUint8>(iIndex, ptr, (TUint8)i0); break;
					case 2: res = DoLoadTest<TUint16>(iIndex, ptr, (TUint16)i0); break;
					case 4: res = DoLoadTest<TUint32>(iIndex, ptr, (TUint32)i0); break;
					case 8: res = DoLoadTest<TUint64>(iIndex, ptr, i0); break;
					default: res = __LINE__; break;
					}
				break;
				}
			case EFuncTypeRmw1:
				{
				switch (size)
					{
					case 1: res = DoRmw1Test<TUint8>(iIndex, ptr, (TUint8)i0, (TUint8)i1); break;
					case 2: res = DoRmw1Test<TUint16>(iIndex, ptr, (TUint16)i0, (TUint16)i1); break;
					case 4: res = DoRmw1Test<TUint32>(iIndex, ptr, (TUint32)i0, (TUint32)i1); break;
					case 8: res = DoRmw1Test<TUint64>(iIndex, ptr, i0, i1); break;
					default: res = __LINE__; break;
					}
				break;
				}
			case EFuncTypeRmw2:
				{
				switch (size)
					{
					case 1: res = DoRmw2Test<TUint8>(iIndex, ptr, (TUint8)i0, (TUint8)i1, (TUint8)i2); break;
					case 2: res = DoRmw2Test<TUint16>(iIndex, ptr, (TUint16)i0, (TUint16)i1, (TUint16)i2); break;
					case 4: res = DoRmw2Test<TUint32>(iIndex, ptr, (TUint32)i0, (TUint32)i1, (TUint32)i2); break;
					case 8: res = DoRmw2Test<TUint64>(iIndex, ptr, i0, i1, i2); break;
					default: res = __LINE__; break;
					}
				break;
				}
			case EFuncTypeRmw3:
				{
				if (func==EAtomicFuncTAU)
					{
					switch (size)
						{
						case 1: res = DoRmw3Test<TUint8>(iIndex, ptr, (TUint8)i0, (TUint8)i1, (TUint8)i2, (TUint8)i3); break;
						case 2: res = DoRmw3Test<TUint16>(iIndex, ptr, (TUint16)i0, (TUint16)i1, (TUint16)i2, (TUint16)i3); break;
						case 4: res = DoRmw3Test<TUint32>(iIndex, ptr, (TUint32)i0, (TUint32)i1, (TUint32)i2, (TUint32)i3); break;
						case 8: res = DoRmw3Test<TUint64>(iIndex, ptr, i0, i1, i2, i3); break;
						default: res = __LINE__; break;
						}
					}
				else if (func==EAtomicFuncTAS)
					{
					switch (size)
						{
						case 1: res = DoRmw3Test<TInt8>(iIndex, ptr, (TInt8)i0, (TInt8)i1, (TInt8)i2, (TInt8)i3); break;
						case 2: res = DoRmw3Test<TInt16>(iIndex, ptr, (TInt16)i0, (TInt16)i1, (TInt16)i2, (TInt16)i3); break;
						case 4: res = DoRmw3Test<TInt32>(iIndex, ptr, (TInt32)i0, (TInt32)i1, (TInt32)i2, (TInt32)i3); break;
						case 8: res = DoRmw3Test<TInt64>(iIndex, ptr, i0, i1, i2, i3); break;
						default: res = __LINE__; break;
						}
					}
				else
					res = __LINE__;
				break;
				}
			case EFuncTypeCas:
				{
				switch (size)
					{
					case 1: res = DoCasTest<TUint8>(iIndex, ptr, (TUint8)i0, (TUint8)i1, (TUint8)i2); break;
					case 2: res = DoCasTest<TUint16>(iIndex, ptr, (TUint16)i0, (TUint16)i1, (TUint16)i2); break;
					case 4: res = DoCasTest<TUint32>(iIndex, ptr, (TUint32)i0, (TUint32)i1, (TUint32)i2); break;
					case 8: res = DoCasTest<TUint64>(iIndex, ptr, i0, i1, i2); break;
					default: res = __LINE__; break;
					}
				break;
				}
			default:
				res = __LINE__;
				break;
			}
		if (res)
			return res;
		}
	if (res == KErrEof)
		res = 0;
	return res;
	}

#ifndef __KERNEL_MODE__
void TDGBase::Dump(const char* aTitle)
	{
	TPtrC8 fname8((const TText8*)FuncName[iIndex]);
	TBuf<64> fname;
	fname.Copy(fname8);
	DEBUGPRINT(aTitle);
	DEBUGPRINT("iIndex=%d (%S)", iIndex, &fname);
	DEBUGPRINT("i0 = %08x %08x", I64HIGH(i0), I64LOW(i0));
	DEBUGPRINT("i1 = %08x %08x", I64HIGH(i1), I64LOW(i1));
	DEBUGPRINT("i2 = %08x %08x", I64HIGH(i2), I64LOW(i2));
	DEBUGPRINT("i3 = %08x %08x", I64HIGH(i3), I64LOW(i3));
	}
#endif

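// Multi-threaded checks: each helper below performs one atomic operation and records its
// observable effect in the per-thread accumulators (iDiff sums the arithmetic change made
// to the shared word, iXor accumulates the XOR of old and new values), presumably so that
// the harness can combine the totals from all threads and check them against the final
// contents of the shared variable.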
template<class T> TInt DoSwap(TAny* aPtr, TPerThread* aT, TAtomicAction& aA, T*)
	{
	typename TRmw1Fn<T>::F atomic = (typename TRmw1Fn<T>::F)AtomicFuncPtr[aA.iIndex];
	T newv = (T)aA.i0;
	T orig = atomic(aPtr, newv);
	T xr = (T)(newv ^ orig);
	aT->iXor ^= xr;
	T diff = (T)(newv - orig);
	aT->iDiff += diff;
	return 0;
	}

template<class T> TInt DoAdd(TAny* aPtr, TPerThread* aT, TAtomicAction& aA, T*)
	{
	typename TRmw1Fn<T>::F atomic = (typename TRmw1Fn<T>::F)AtomicFuncPtr[aA.iIndex];
	T arg = (T)aA.i0;
	T orig = atomic(aPtr, arg);
	T xr = (T)((arg+orig) ^ orig);
	aT->iXor ^= xr;
	aT->iDiff += arg;
	return 0;
	}

template<class T> TInt DoXor(TAny* aPtr, TPerThread* aT, TAtomicAction& aA, T*)
	{
	typename TRmw1Fn<T>::F atomic = (typename TRmw1Fn<T>::F)AtomicFuncPtr[aA.iIndex];
	T arg = (T)aA.i0;
	T orig = atomic(aPtr, arg);
	T diff = (T)((arg^orig) - orig);
	aT->iDiff += diff;
	aT->iXor ^= arg;
	return 0;
	}

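// AND and OR are exercised as a pair, presumably so that repeated operations keep
// perturbing the value rather than driving it towards all-zeros or all-ones; note that the
// or variant is fetched from AtomicFuncPtr[aA.iIndex+4], i.e. this relies on the ior entry
// of matching width and ordering sitting four slots after the and entry in the table.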
template<class T> TInt DoAndOr(TAny* aPtr, TPerThread* aT, TAtomicAction& aA, T*)
	{
	typename TRmw1Fn<T>::F atomic_and = (typename TRmw1Fn<T>::F)AtomicFuncPtr[aA.iIndex];
	typename TRmw1Fn<T>::F atomic_or = (typename TRmw1Fn<T>::F)AtomicFuncPtr[aA.iIndex+4];
	T aarg = (T)aA.i0;
	T oarg = (T)aA.i1;
	T aorig = atomic_and(aPtr, aarg);
	T oorig = atomic_or(aPtr, oarg);
	T adiff = (T)((aorig & aarg) - aorig);
	T odiff = (T)((oorig | oarg) - oorig);
	aT->iDiff += adiff + odiff;
	T axor = (T)((aorig & aarg) ^ aorig);
	T oxor = (T)((oorig | oarg) ^ oorig);
	aT->iXor ^= axor ^ oxor;
	return 0;
	}

template<class T> TInt DoAxo(TAny* aPtr, TPerThread* aT, TAtomicAction& aA, T*)
	{
	typename TRmw2Fn<T>::F atomic = (typename TRmw2Fn<T>::F)AtomicFuncPtr[aA.iIndex];
	T aarg = (T)aA.i0;
	T xarg = (T)aA.i1;
	T orig = atomic(aPtr, aarg, xarg);
	T newv = (T)((orig & aarg) ^ xarg);
	aT->iDiff += (newv - orig);
	aT->iXor ^= (newv ^ orig);
	return 0;
	}

template<class T> TInt DoThAdd(TAny* aPtr, TPerThread* aT, TAtomicAction& aA, T*)
	{
	typename TRmw3Fn<T>::F atomic = (typename TRmw3Fn<T>::F)AtomicFuncPtr[aA.iIndex];
	T thr = (T)aA.i0;
	T arg1 = (T)aA.i1;
	T arg2 = (T)aA.i2;
	T orig = atomic(aPtr, thr, arg1, arg2);
	T newv = (T)((orig >= thr) ? (orig + arg1) : (orig + arg2));
	aT->iDiff += (orig >= thr) ? arg1 : arg2;
	aT->iXor ^= (newv ^ orig);
	return 0;
	}

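// DoCas applies Transform<T>::F in a standard compare-and-swap retry loop: on failure the
// atomic rewrites orig with the value currently in memory, the new value is recomputed and
// the exchange retried. fails starts at 0xffffffff so the first pass does not count,
// leaving iFailCount as the number of genuine retries.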
template<class T> TInt DoCas(TAny* aPtr, TPerThread* aT, TAtomicAction& aA, T*)
	{
	typename TCasFn<T>::F atomic = (typename TCasFn<T>::F)AtomicFuncPtr[aA.iIndex];
	T orig = *(const volatile T*)aPtr;
	T newv;
	TBool done = FALSE;
	TUint32 fails = 0xffffffffu;
	do {
		++fails;
		newv = Transform<T>::F(orig);
		done = atomic(aPtr, &orig, newv);
		} while(!done);
	aT->iFailCount += fails;
	++aT->iDiff;
	aT->iXor ^= (newv ^ orig);
	return 0;
	}

volatile TUint Dummy;
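// Entry point used by the test threads: the multiply by the golden-ratio constant
// 0x9E3779B9 hashes the per-thread pointer into a short busy-wait on Dummy, presumably to
// stagger the threads before the operation; the attribute word then selects the operand
// width and the helper to run.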
extern "C" TInt DoAtomicAction(TAny* aPtr, TPerThread* aT, TAtomicAction& aA)
	{
	TUint x = TUint(aT)*0x9E3779B9u;
	x = (x>>8)&15;
	while(x--)
		++Dummy;
	TInt r = KErrNotSupported;
	TUint attr = FuncAttr[aA.iIndex];
	TUint func = ATTR_TO_FUNC(attr);
	TUint size = ATTR_TO_SIZE(attr);
	switch (size)
		{
		case 1:
			{
			TUint8 xx;
			TUint8* dummy = &xx;
			TInt8 yy;
			TInt8* sdummy = &yy;
			switch (func)
				{
				case EAtomicFuncSWP: r=DoSwap<TUint8>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncADD: r=DoAdd<TUint8>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncAND: r=DoAndOr<TUint8>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncXOR: r=DoXor<TUint8>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncAXO: r=DoAxo<TUint8>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncTAU: r=DoThAdd<TUint8>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncTAS: r=DoThAdd<TInt8>(aPtr, aT, aA, sdummy); break;
				case EAtomicFuncCAS: r=DoCas<TUint8>(aPtr, aT, aA, dummy); break;
				default: break;
				}
			break;
			}
		case 2:
			{
			TUint16 xx;
			TUint16* dummy = &xx;
			TInt16 yy;
			TInt16* sdummy = &yy;
			switch (func)
				{
				case EAtomicFuncSWP: r=DoSwap<TUint16>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncADD: r=DoAdd<TUint16>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncAND: r=DoAndOr<TUint16>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncXOR: r=DoXor<TUint16>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncAXO: r=DoAxo<TUint16>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncTAU: r=DoThAdd<TUint16>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncTAS: r=DoThAdd<TInt16>(aPtr, aT, aA, sdummy); break;
				case EAtomicFuncCAS: r=DoCas<TUint16>(aPtr, aT, aA, dummy); break;
				default: break;
				}
			break;
			}
		case 4:
			{
			TUint32 xx;
			TUint32* dummy = &xx;
			TInt32 yy;
			TInt32* sdummy = &yy;
			switch (func)
				{
				case EAtomicFuncSWP: r=DoSwap<TUint32>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncADD: r=DoAdd<TUint32>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncAND: r=DoAndOr<TUint32>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncXOR: r=DoXor<TUint32>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncAXO: r=DoAxo<TUint32>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncTAU: r=DoThAdd<TUint32>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncTAS: r=DoThAdd<TInt32>(aPtr, aT, aA, sdummy); break;
				case EAtomicFuncCAS: r=DoCas<TUint32>(aPtr, aT, aA, dummy); break;
				default: break;
				}
			break;
			}
		case 8:
			{
			TUint64A xx;
			TUint64* dummy = &xx;
			TInt64A yy;
			TInt64* sdummy = &yy;
			switch (func)
				{
				case EAtomicFuncSWP: r=DoSwap<TUint64>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncADD: r=DoAdd<TUint64>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncAND: r=DoAndOr<TUint64>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncXOR: r=DoXor<TUint64>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncAXO: r=DoAxo<TUint64>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncTAU: r=DoThAdd<TUint64>(aPtr, aT, aA, dummy); break;
				case EAtomicFuncTAS: r=DoThAdd<TInt64>(aPtr, aT, aA, sdummy); break;
				case EAtomicFuncCAS: r=DoCas<TUint64>(aPtr, aT, aA, dummy); break;
				default: break;
				}
			break;
			}
		default:
			break;
		}
	++aT->iCount;
	return r;
	}
