First public contribution.
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// e32test\nkernsa\rwspinlock.cpp
//
//---------------------------------------------------------------------------------------------------------------------
//! @SYMTestCaseID          KBASE-rwspinlock-2442
//! @SYMTestCaseDesc        Verifying the nkern R/W spinlock
//! @SYMTestPriority        High
//! @SYMTestActions
//!     1.  RWParallelTest: run a number of reader and writer threads accessing a
//!         common data block. Each writer completely rewrites the block over and
//!         over, and the readers verify that the data is consistent.
//!     2.  RWOrderingTest: run a number of reader and writer threads which spin-
//!         wait while holding the spinlock. Each works out the maximum time it
//!         had to wait to acquire the spinlock.
//! @SYMTestExpectedResults
//!     1.  Properties checked:
//!         1) readers never see a partial write transaction
//!         2) the number of writers active is never greater than 1
//!         3) the number of readers active while a writer is active is 0
//!         4) more than one reader ran concurrently
//!     2.  Properties checked:
//!         5) threads acquire the spinlock in the order in which they asked for it,
//!            i.e. neither reader nor writer priority but FIFO
//---------------------------------------------------------------------------------------------------------------------
#include <nktest/nkutils.h>

#ifdef __SMP__	// the R/W spinlock tests only apply to multiprocessor builds (see the uniprocessor stub at the end)

// cheap and cheerful, no side effects please
#define MIN(a, b) ((a)<(b)?(a):(b))
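// For instance, MIN(i++, j) expands to ((i++)<(j)?(i++):(j)), so i can be
// incremented twice in a single "call"; hence the request above for argument
// expressions without side effects.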

// The spinlock, used throughout
TRWSpinLock RW(TSpinLock::EOrderNone);

///////////////////////////////////////////////
// First test: RWParallelTest
//

// Number of words in the data block
#define BLOCK_SIZE 1024
// Number of write transactions to execute in total (across all writers)
#define WRITE_GOAL 100000

// The data block; the first entry is used as the seed value and is just
// incremented by one each time.
TUint32 Array[BLOCK_SIZE];
// The number of readers currently holding the lock
TUint32 Readers = 0;
// The number of writers currently holding the lock
TUint32 Writers = 0;
// The maximum number of readers that were seen holding the lock at once
TUint32 HighReaders = 0;
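
// Note: setup_block() and verify_block_no_trace() come from nkutils and are not
// defined in this file. The test only relies on the property that a block
// generated from the seed in word 0 either verifies completely or not at all,
// so a torn (partial) rewrite is always detected. The helpers below are an
// illustrative sketch of that idea only: hypothetical names, simplified fill
// pattern, not the nkutils implementation and not used by the test.
static void ExampleSetupBlock(TUint32* aBlock, TInt aSize)
	{
	TInt i;
	for (i = 1; i < aSize; ++i)
		aBlock[i] = aBlock[0] + i;			// every word is derived from the seed
	}

static TBool ExampleVerifyBlock(const TUint32* aBlock, TInt aSize)
	{
	TInt i;
	for (i = 1; i < aSize; ++i)
		{
		if (aBlock[i] != aBlock[0] + i)
			return EFalse;					// a half-rewritten block fails here
		}
	return ETrue;
	}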

void RWParallelReadThread(TAny*)
	{
	// high_r is the maximum number of readers seen by this particular reader
	TUint32 c, r, high_r = 0;
	TBool failed;
	do
		{
		failed = EFalse;
		// Take read lock and update reader count
		RW.LockIrqR();	// (acquire/release member names assumed; the original calls are elided)
		__e32_atomic_add_ord32(&Readers, 1);
		// Check property 1: note the seed, then verify the whole block against it
		c = Array[0];
		if (!verify_block_no_trace(Array, BLOCK_SIZE))
			failed = ETrue;
		// Update reader count and release read lock
		r = __e32_atomic_add_ord32(&Readers, (TUint32)-1);
		RW.UnlockIrqR();
		TEST_RESULT(!failed, "Array data inconsistent");
		// Update local high reader count
		if (r > high_r)
			high_r = r;
		}
	while (c < WRITE_GOAL);

	// Update HighReaders if our high reader count is greater
	TUint32 global_high = __e32_atomic_load_acq32(&HighReaders);
	do
		{
		if (global_high >= high_r)
			break;
		}
	while (!__e32_atomic_cas_ord32(&HighReaders, &global_high, high_r));
	}
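
// The loop above is the usual lock-free "update a global maximum" idiom:
// __e32_atomic_cas_ord32() only stores the new value if the location still
// holds the value we last read, and on failure it writes the value it found
// back into global_high so the comparison can simply be retried. A
// self-contained version of the same idiom (illustrative helper only, with a
// hypothetical name; the test uses the inline form above) looks like this:
static void ExampleUpdateMax(TUint32* aMax, TUint32 aCandidate)
	{
	TUint32 seen = __e32_atomic_load_acq32(aMax);
	do
		{
		if (seen >= aCandidate)
			return;		// someone else already recorded a value at least as big
		}
	while (!__e32_atomic_cas_ord32(aMax, &seen, aCandidate));
	}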

void RWParallelWriteThread(TAny*)
	{
	TUint32 c, r, w;
	do
		{
		// Take write lock and update writer count
		RW.LockIrqW();	// (acquire/release member names assumed; the original calls are elided)
		w = __e32_atomic_add_ord32(&Writers, 1);
		// Sample the reader count while the write lock is held
		r = __e32_atomic_load_acq32(&Readers);
		// Increment seed and recalculate array data
		c = ++Array[0];
		setup_block(Array, BLOCK_SIZE);
		// Update writer count and release write lock
		__e32_atomic_add_ord32(&Writers, (TUint32)-1);
		RW.UnlockIrqW();
		// Check properties 2 and 3
		TEST_RESULT(w == 0, "Multiple writers active");
		TEST_RESULT(r == 0, "Reader active while writing");
		}
	while (c < WRITE_GOAL);
	}

void RWParallelTest()
	{
	TEST_PRINT("Testing read consistency during parallel accesses");

	NFastSemaphore exitSem(0);

	// Set up the block for the initial seed of 0
	setup_block(Array, BLOCK_SIZE);

	// Spawn three readers and a writer for each processor, all equal priority
	// (the per-CPU loop and the thread count used for the final wait are reconstructed)
	TInt cpu;
	TInt threads = 0;
	for (cpu = 0; cpu < NKern::NumberOfCpus(); ++cpu)
		{
		CreateThreadSignalOnExit("RWParallelTestR1", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestR2", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestR3", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestW", &RWParallelWriteThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		threads += 4;
		}

	// Wait for all threads to terminate
	while (threads--)
		NKern::FSWait(&exitSem);

	// Check property 4
	TUint r = __e32_atomic_load_acq32(&HighReaders);
	TEST_RESULT(r > 1, "Didn't see concurrent reads");

	TEST_PRINT1("Done, max concurrent readers was %d", r);
	}

///////////////////////////////////////////////
// Second test: RWOrderingTest
//

// Number of times for each thread to try the lock
#define ORDERING_REPEATS 5000
// Time base for spinning
#define SPIN_BASE 100
// Time for read threads to spin (prime)
#define READ_SPIN 7		// NB: value assumed; the original definition is elided (any small prime different from WRITE_SPIN)
// Time for write threads to spin (different prime)
#define WRITE_SPIN 11
// Maximum write-thread wait seen
TUint32 MaxWriteWait;
// Maximum read-thread wait seen
TUint32 MaxReadWait;

void RWOrderingThread(TAny* aWrite)
	{
	NThreadBase* us = NKern::CurrentThread();
	TUint32 seed[2] = {(TUint32)us, 0};
	TUint32 c, maxdelay = 0;
	for (c = 0; c < ORDERING_REPEATS; ++c)
		{
		// Disable interrupts to stop preemption disrupting timing
		TInt irq = NKern::DisableAllInterrupts();

		// Time how long it takes to acquire the lock
		// (lock/unlock member names assumed; the original calls are elided)
		TUint32 before = norm_fast_counter();
		if (aWrite)
			RW.LockOnlyW();
		else
			RW.LockOnlyR();
		TUint32 after = norm_fast_counter();
		TUint32 delay = after - before;
		if (delay > maxdelay)
			maxdelay = delay;

		// Spin for a fixed amount of time while holding the lock
		nfcfspin(SPIN_BASE * (aWrite ? WRITE_SPIN : READ_SPIN));

		// Release the lock
		if (aWrite)
			RW.UnlockOnlyW();
		else
			RW.UnlockOnlyR();

		// Reenable interrupts
		NKern::RestoreInterrupts(irq);

		// Sleep for a tick ~50% of the time to shuffle ordering
		if (random(seed) & 0x4000)
			NKern::Sleep(1);
		}

	// Update Max{Read,Write}Wait if ours is higher
	TUint32 global_high = __e32_atomic_load_acq32(aWrite ? &MaxWriteWait : &MaxReadWait);
	do
		{
		if (global_high >= maxdelay)
			break;
		}
	while (!__e32_atomic_cas_ord32(aWrite ? &MaxWriteWait : &MaxReadWait, &global_high, maxdelay));

	if (aWrite)
		TEST_PRINT1("Write max delay: %d", maxdelay);
	else
		TEST_PRINT1("Read max delay: %d", maxdelay);
	}

void RWOrderingTest()
	{
	TEST_PRINT("Testing lock acquisition ordering");

	NFastSemaphore exitSem(0);

	TInt cpus = NKern::NumberOfCpus();
	TInt writers, cpu;
	for (writers = 0; writers <= cpus; ++writers)
		{
		TInt readers = cpus - writers;

		// Reset the maximum delays for this reader/writer combination
		__e32_atomic_store_rel32(&MaxWriteWait, 0);
		__e32_atomic_store_rel32(&MaxReadWait, 0);

		// start one thread on each cpu, according to readers/writers
		for (cpu = 0; cpu < writers; ++cpu)
			CreateThreadSignalOnExit("RWOrderingTestW", &RWOrderingThread, 10, (TAny*)ETrue, 0, KSmallTimeslice, &exitSem, cpu);
		for ( ; cpu < cpus; ++cpu)
			CreateThreadSignalOnExit("RWOrderingTestR", &RWOrderingThread, 10, (TAny*)EFalse, 0, KSmallTimeslice, &exitSem, cpu);

		// Wait for all threads to terminate (one signal per CPU)
		for (cpu = 0; cpu < cpus; ++cpu)
			NKern::FSWait(&exitSem);

		// Get the maximum delays, round them to the nearest spin unit, and print them
		TUint32 w = __e32_atomic_load_acq32(&MaxWriteWait);
		TUint32 r = __e32_atomic_load_acq32(&MaxReadWait);
		w += (SPIN_BASE/2) - 1;
		r += (SPIN_BASE/2) - 1;
		w /= SPIN_BASE;
		r /= SPIN_BASE;
		TEST_PRINT4("%d writers, %d readers, max delay: write %d read %d", writers, readers, w, r);

		// Work out expected delays
		// For writers, we might have every other writer ahead of us, with the readers interleaved
		TUint32 we = ((writers-1) * WRITE_SPIN) + (MIN(readers  , writers) * READ_SPIN);
		// For readers, we might have every writer ahead of us, with the other readers interleaved
		TUint32 re = ((writers  ) * WRITE_SPIN) + (MIN(readers-1, writers) * READ_SPIN);
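
		// Worked example (using the assumed READ_SPIN value of 7): with 4 CPUs
		// split as 2 writers and 2 readers, a writer can wait at most
		//   we = (2-1)*11 + min(2,2)*7 = 11 + 14 = 25 spin units,
		// and a reader at most
		//   re = 2*11 + min(1,2)*7 = 22 + 7 = 29 spin units,
		// because FIFO ordering means that, at worst, each thread that asked
		// before us holds the lock for exactly one full spin period ahead of us.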

		// Check property 5: with FIFO ordering the worst-case wait matches the expected value exactly
		if (writers)
			{
			TEST_PRINT1("Expected write %d", we);
			TEST_RESULT(w==we, "Write delay not expected time");
			}
		if (readers)
			{
			TEST_PRINT1("Expected read %d", re);
			TEST_RESULT(r==re, "Read delay not expected time");
			}
		}
	}

/////////////////////

void TestRWSpinLock()
	{
	TEST_PRINT("Testing R/W Spinlocks...");

	RWParallelTest();
	RWOrderingTest();
	}

#else	// !__SMP__

void TestRWSpinLock()
	{
	TEST_PRINT("Skipping R/W Spinlock tests on uniproc");
	}

#endif