--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kerneltest/e32test/nkernsa/rwspinlock.cpp Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,343 @@
+// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32test\nkernsa\rwspinlock.cpp
+
+//---------------------------------------------------------------------------------------------------------------------
+//! @SYMTestCaseID KBASE-rwspinlock-2442
+//! @SYMTestType UT
+//! @SYMTestCaseDesc Verifying the nkern R/W Spinlock
+//! @SYMPREQ PREQ2094
+//! @SYMTestPriority High
+//! @SYMTestActions
+//! 1. RWParallelTest: run a number of reader and writer threads accessing a
+//! common data block. Each writer completely rewrites the block over and
+//! over, and the readers verify that the data is consistent.
+//! 2. RWOrderingTest: run a number of reader and writer threads which spin-
+//! wait while holding the spinlock. Each works out the maximum time it
+//! had to wait to acquire the spinlock.
+//!
+//!
+//! @SYMTestExpectedResults
+//! 1. Properties checked:
+//! 1) readers never see a partial write transaction
+//! 2) the number of writers active is never greater than 1
+//! 3) the number of readers active while a writer is active is 0
+//! 4) more than one reader ran concurrently
+//!
+//! 2. Properties checked:
+//! 5) Threads acquire the spinlock in the order in which they asked for
+//! it, i.e. neither reader nor writer priority but FIFO
+//---------------------------------------------------------------------------------------------------------------------
+
+#include <nktest/nkutils.h>
+
+#ifdef __SMP__
+
+// cheap and cheerful, no side effects please
+#define MIN(a, b) ((a)<(b)?(a):(b))
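+// (each argument may be evaluated twice, hence "no side effects please")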
+
+// The spinlock, used throughout
+TRWSpinLock RW(TSpinLock::EOrderNone);
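+// A minimal sketch of the pattern the tests below exercise: LockIrqR()/
+// UnlockIrqR() bracket reader critical sections (any number of readers
+// may hold the lock at once), LockIrqW()/UnlockIrqW() bracket exclusive
+// writer ones:
+//
+//     RW.LockIrqR();
+//     /* ... read the shared block ... */
+//     RW.UnlockIrqR();
+//
+//     RW.LockIrqW();
+//     /* ... rewrite the shared block ... */
+//     RW.UnlockIrqW();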
+
+
+///////////////////////////////////////////////
+// First test: RWParallelTest
+//
+
+// Number of words in the data block
+#define BLOCK_SIZE 1024
+// Number of write transactions to execute in total (across all writers)
+#define WRITE_GOAL 100000
+
+// The data block; the first entry is used as the seed value and is
+// incremented by one for each write transaction.
+TUint32 Array[BLOCK_SIZE];
+// The number of readers currently holding the lock
+TUint32 Readers = 0;
+// The number of writers currently holding the lock
+TUint32 Writers = 0;
+// The maximum number of readers that were seen holding the lock at once
+TUint32 HighReaders = 0;
+
+void RWParallelReadThread(TAny*)
+	{
+	// high_r is the maximum number of readers seen by this particular reader
+	TUint32 c, r, high_r = 0;
+	TBool failed;
+	do
+		{
+		failed = EFalse;
+
+		// Take read lock and update reader count
+		RW.LockIrqR();
+		__e32_atomic_add_ord32(&Readers, 1);
+
+		// Check property 1
+		c = Array[0];
+		if (!verify_block_no_trace(Array, BLOCK_SIZE))
+			failed = ETrue;
+
+		// Update reader count and release read lock
+		r = __e32_atomic_add_ord32(&Readers, (TUint32)-1);
+		RW.UnlockIrqR();
+
+		TEST_RESULT(!failed, "Array data inconsistent");
+
+		// Update local high reader count
+		if (r > high_r)
+			high_r = r;
+		}
+	while (c < WRITE_GOAL);
+
+	// Update HighReaders if our high reader count is greater
+	TUint32 global_high = __e32_atomic_load_acq32(&HighReaders);
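+	// CAS retry loop: if the swap fails because another thread changed
+	// HighReaders meanwhile, __e32_atomic_cas_ord32() reloads the current
+	// value into global_high, so we re-check before trying again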
+	do
+		{
+		if (global_high >= high_r)
+			break;
+		}
+	while (!__e32_atomic_cas_ord32(&HighReaders, &global_high, high_r));
+	}
+
+void RWParallelWriteThread(TAny*)
+	{
+	TUint32 c, r, w;
+	do
+		{
+		// Take write lock and update writer count
+		RW.LockIrqW();
+		w = __e32_atomic_add_ord32(&Writers, 1);
+
+		// Get reader count
+		r = __e32_atomic_load_acq32(&Readers);
+
+		// Increment seed and recalculate array data
+		c = ++Array[0];
+		setup_block(Array, BLOCK_SIZE);
+
+		// Update writer count and release write lock
+		__e32_atomic_add_ord32(&Writers, (TUint32)-1);
+		RW.UnlockIrqW();
+
+		// Check properties 2 and 3
+		TEST_RESULT(w == 0, "Multiple writers active");
+		TEST_RESULT(r == 0, "Reader active while writing");
+		}
+	while (c < WRITE_GOAL);
+	}
+
+void RWParallelTest()
+	{
+	TEST_PRINT("Testing read consistency during parallel accesses");
+
+	NFastSemaphore exitSem(0);
+
+	// Set up the block for the initial seed of 0
+	setup_block(Array, BLOCK_SIZE);
+
+	// Spawn three readers and a writer for each processor, all equal priority
+	TInt cpu;
+	TInt threads = 0;
+	for_each_cpu(cpu)
+		{
+		CreateThreadSignalOnExit("RWParallelTestR1", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
+		CreateThreadSignalOnExit("RWParallelTestR2", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
+		CreateThreadSignalOnExit("RWParallelTestR3", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
+		CreateThreadSignalOnExit("RWParallelTestW", &RWParallelWriteThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
+		threads += 4;
+		}
+
+	// Wait for all threads to terminate
+	while (threads--)
+		NKern::FSWait(&exitSem);
+
+	// Check property 4
+	TUint r = __e32_atomic_load_acq32(&HighReaders);
+	TEST_RESULT(r > 1, "Didn't see concurrent reads");
+
+	TEST_PRINT1("Done, max concurrent readers was %d", r);
+	}
+
+
+///////////////////////////////////////////////
+// Second test: RWOrderingTest
+//
+
+// Number of times for each thread to try the lock
+#define ORDERING_REPEATS 5000
+// Time base for spinning
+#define SPIN_BASE 100
+// Time for read threads to spin (prime)
+#define READ_SPIN 7
+// Time for write threads to spin (different prime)
+#define WRITE_SPIN 11
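+// (READ_SPIN and WRITE_SPIN are distinct primes so that reader and writer
+// hold times contribute distinguishable amounts to the delays checked below)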
+// Maximum write-thread wait seen
+TUint32 MaxWriteWait;
+// Maximum read-thread wait seen
+TUint32 MaxReadWait;
+
+void RWOrderingThread(TAny* aWrite)
+	{
+	NThreadBase* us = NKern::CurrentThread();
+	TUint32 seed[2] = {(TUint32)us, 0};
+	TUint32 c, maxdelay = 0;
+	for (c = 0; c < ORDERING_REPEATS; ++c)
+		{
+		// Disable interrupts to stop preemption disrupting timing
+		TInt irq = NKern::DisableAllInterrupts();
+
+		// Time taking lock
+		TUint32 before = norm_fast_counter();
+		if (aWrite)
+			RW.LockOnlyW();
+		else
+			RW.LockOnlyR();
+		TUint32 after = norm_fast_counter();
+		TUint32 delay = after - before;
+		if (delay > maxdelay)
+			maxdelay = delay;
+
+		// Spin for a fixed amount of time
+		nfcfspin(SPIN_BASE * (aWrite ? WRITE_SPIN : READ_SPIN));
+
+		// Release lock
+		if (aWrite)
+			RW.UnlockOnlyW();
+		else
+			RW.UnlockOnlyR();
+
+		// Reenable interrupts
+		NKern::RestoreInterrupts(irq);
+
+		// Sleep for a tick ~50% of the time to shuffle ordering
+		if (random(seed) & 0x4000)
+			NKern::Sleep(1);
+		}
+
+	// Update Max{Read,Write}Wait if ours is higher
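+	// (same CAS retry pattern as in RWParallelReadThread above)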
+	TUint32 global_high = __e32_atomic_load_acq32(aWrite ? &MaxWriteWait : &MaxReadWait);
+	do
+		{
+		if (global_high >= maxdelay)
+			break;
+		}
+	while (!__e32_atomic_cas_ord32(aWrite ? &MaxWriteWait : &MaxReadWait, &global_high, maxdelay));
+
+	if (aWrite)
+		TEST_PRINT1("Write max delay: %d", maxdelay);
+	else
+		TEST_PRINT1("Read max delay: %d", maxdelay);
+	}
+
+void RWOrderingTest()
+	{
+	TEST_PRINT("Testing lock acquisition ordering");
+
+	NFastSemaphore exitSem(0);
+
+	TInt cpus = NKern::NumberOfCpus();
+	TInt writers, cpu;
+	for (writers = 0; writers <= cpus; ++writers)
+		{
+		TInt readers = cpus - writers;
+
+		// reset maximums
+		__e32_atomic_store_rel32(&MaxWriteWait, 0);
+		__e32_atomic_store_rel32(&MaxReadWait, 0);
+
+		// start one thread on each cpu, according to readers/writers
+		for (cpu = 0; cpu < writers; ++cpu)
+			CreateThreadSignalOnExit("RWOrderingTestW", &RWOrderingThread, 10, (TAny*)ETrue, 0, KSmallTimeslice, &exitSem, cpu);
+		for ( ; cpu < cpus; ++cpu)
+			CreateThreadSignalOnExit("RWOrderingTestR", &RWOrderingThread, 10, (TAny*)EFalse, 0, KSmallTimeslice, &exitSem, cpu);
+
+		// Wait for all threads to terminate
+		while (cpu--)
+			NKern::FSWait(&exitSem);
+
+		// Get, round, and print maximum delays
+		TUint32 w = __e32_atomic_load_acq32(&MaxWriteWait);
+		TUint32 r = __e32_atomic_load_acq32(&MaxReadWait);
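+		// Convert the raw fast-counter deltas into units of SPIN_BASE
+		// (rounding to the nearest unit) so they can be compared with the
+		// expected multiples of READ_SPIN and WRITE_SPIN computed below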
+		w += (SPIN_BASE/2) - 1;
+		r += (SPIN_BASE/2) - 1;
+		w /= SPIN_BASE;
+		r /= SPIN_BASE;
+		TEST_PRINT4("%d writers, %d readers, max delay: write %d read %d", writers, readers, w, r);
+
+		// Work out expected delays
+		// For writers, we might have all of the other writers ahead of us, with the readers interleaved
+		TUint32 we = ((writers-1) * WRITE_SPIN) + (MIN(readers  , writers) * READ_SPIN);
+		// For readers, we might have all of the writers ahead of us, with the other readers interleaved
+		TUint32 re = ((writers  ) * WRITE_SPIN) + (MIN(readers-1, writers) * READ_SPIN);
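+		// Worked example, assuming 4 CPUs with writers==2 and readers==2:
+		//   we = (1 * WRITE_SPIN) + (2 * READ_SPIN) = 11 + 14 = 25
+		//   re = (2 * WRITE_SPIN) + (1 * READ_SPIN) = 22 +  7 = 29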
+
+		// Compare
+		if (writers)
+			{
+			TEST_PRINT1("Expected write %d", we);
+			TEST_RESULT(w==we, "Write delay not expected time");
+			}
+		if (readers)
+			{
+			TEST_PRINT1("Expected read %d", re);
+			TEST_RESULT(r==re, "Read delay not expected time");
+			}
+		}
+
+	TEST_PRINT("Done");
+	}
+
+
+/////////////////////
+// Run all tests
+void TestRWSpinLock()
+	{
+	TEST_PRINT("Testing R/W Spinlocks...");
+
+	RWParallelTest();
+	RWOrderingTest();
+	}
+
+#else
+
+void TestRWSpinLock()
+	{
+	TEST_PRINT("Skipping R/W Spinlock tests on uniprocessor");
+	}
+
+#endif