// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\nkernsa\rwspinlock.cpp

//---------------------------------------------------------------------------------------------------------------------
//! @SYMTestCaseID			KBASE-rwspinlock-2442
//! @SYMTestType			UT
//! @SYMTestCaseDesc		Verifying the nkern read/write spinlock
//! @SYMPREQ				PREQ2094
//! @SYMTestPriority		High
//! @SYMTestActions
//!		1.	RWParallelTest: run a number of reader and writer threads accessing a
//!			common data block. Each writer completely rewrites the block over and
//!			over, and the readers verify that the data is consistent.
//!		2.	RWOrderingTest: run a number of reader and writer threads which spin-
//!			wait while holding the spinlock. Each works out the maximum time it
//!			had to wait to acquire the spinlock.
//!
//! @SYMTestExpectedResults
//!		1.	Properties checked:
//!			1) readers never see a partial write transaction
//!			2) the number of writers active is never greater than 1
//!			3) the number of readers active while a writer is active is 0
//!			4) more than one reader ran concurrently
//!
//!		2.	Properties checked:
//!			5) threads acquire the spinlock in the order in which they asked for it,
//!			   i.e. neither reader nor writer priority but FIFO
//---------------------------------------------------------------------------------------------------------------------

// Header name assumed: the test utilities used below (TEST_PRINT, TEST_RESULT,
// CreateThreadSignalOnExit, setup_block, verify_block_no_trace, norm_fast_counter, ...)
#include <nktest/nkutils.h>

#ifdef __SMP__

// cheap and cheerful, no side effects please
#define MIN(a, b)	((a)<(b)?(a):(b))

// The spinlock, used throughout
TRWSpinLock RW(TSpinLock::EOrderNone);


///////////////////////////////////////////////
// First test: RWParallelTest
//

// Number of words in the data block
#define BLOCK_SIZE	1024
// Number of write transactions to execute in total (across all writers)
#define WRITE_GOAL	100000

// The data block; the first entry is used as the seed value and is just
// incremented by one each time
TUint32 Array[BLOCK_SIZE];
// The number of readers currently holding the lock
TUint32 Readers = 0;
// The number of writers currently holding the lock
TUint32 Writers = 0;
// The maximum number of readers that were seen holding the lock at once
TUint32 HighReaders = 0;

void RWParallelReadThread(TAny*)
	{
	// high_r is the maximum number of readers seen by this particular reader
	TUint32 c, r, high_r = 0;
	TBool failed;
	do
		{
		failed = EFalse;

		// Take read lock and update reader count
		RW.LockIrqR();
		__e32_atomic_add_ord32(&Readers, 1);

		// Check property 1
		c = Array[0];
		if (!verify_block_no_trace(Array, BLOCK_SIZE))
			failed = ETrue;

		// Update reader count and release read lock
		r = __e32_atomic_add_ord32(&Readers, (TUint32)-1);
		RW.UnlockIrqR();

		TEST_RESULT(!failed, "Array data inconsistent");

		// Update local high reader count
		if (r > high_r)
			high_r = r;
		}
	while (c < WRITE_GOAL);

	// Update HighReaders if our high reader count is greater
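	// Lock-free maximum update: retry until either another thread has already
	// published a value at least as large as ours, or our compare-and-swap
	// succeeds. A failed CAS reloads global_high with the current value of
	// HighReaders, so each retry sees fresh data.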
	TUint32 global_high = __e32_atomic_load_acq32(&HighReaders);
	do
		{
		if (global_high >= high_r)
			break;
		}
	while (!__e32_atomic_cas_ord32(&HighReaders, &global_high, high_r));
	}

void RWParallelWriteThread(TAny*)
	{
	TUint32 c, r, w;
	do
		{
		// Take write lock and update writer count
		RW.LockIrqW();
		w = __e32_atomic_add_ord32(&Writers, 1);
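		// (__e32_atomic_add_ord32 returns the value held before the add, so w
		// counts the writers already inside the lock; property 2 requires
		// this to be 0)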
		// Get reader count
		r = __e32_atomic_load_acq32(&Readers);

		// Increment seed and recalculate array data
		c = ++Array[0];
		setup_block(Array, BLOCK_SIZE);

		// Update writer count and release write lock
		__e32_atomic_add_ord32(&Writers, (TUint32)-1);
		RW.UnlockIrqW();

		// Check properties 2 and 3
		TEST_RESULT(w == 0, "Multiple writers active");
		TEST_RESULT(r == 0, "Reader active while writing");
		}
	while (c < WRITE_GOAL);
	}

void RWParallelTest()
	{
	TEST_PRINT("Testing read consistency during parallel accesses");

	NFastSemaphore exitSem(0);

	// Set up the block for the initial seed of 0
	setup_block(Array, BLOCK_SIZE);

	// Spawn three readers and a writer for each processor, all equal priority
	TInt cpu;
	TInt threads = 0;
	for_each_cpu(cpu)
		{
		CreateThreadSignalOnExit("RWParallelTestR1", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestR2", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestR3", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestW", &RWParallelWriteThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		threads += 4;
		}

	// Wait for all threads to terminate
	while (threads--)
		NKern::FSWait(&exitSem);

	// Check property 4
	TUint r = __e32_atomic_load_acq32(&HighReaders);
	TEST_RESULT(r > 1, "Didn't see concurrent reads");

	TEST_PRINT1("Done, max concurrent readers was %d", r);
	}


///////////////////////////////////////////////
// Second test: RWOrderingTest
//

// Number of times for each thread to try the lock
#define ORDERING_REPEATS	5000
// Time base for spinning
#define SPIN_BASE			100
// Time for read threads to spin (prime)
#define READ_SPIN			7
// Time for write threads to spin (different prime)
#define WRITE_SPIN			11
// Maximum write-thread wait seen
TUint32 MaxWriteWait;
// Maximum read-thread wait seen
TUint32 MaxReadWait;

void RWOrderingThread(TAny* aWrite)
	{
	NThreadBase* us = NKern::CurrentThread();
	TUint32 seed[2] = {(TUint32)us, 0};
	TUint32 c, maxdelay = 0;
	for (c = 0; c < ORDERING_REPEATS; ++c)
		{
		// Disable interrupts to stop preemption disrupting timing
		TInt irq = NKern::DisableAllInterrupts();

		// Time taking the lock
		TUint32 before = norm_fast_counter();
		if (aWrite)
			RW.LockOnlyW();
		else
			RW.LockOnlyR();
		TUint32 after = norm_fast_counter();
		TUint32 delay = after - before;
		if (delay > maxdelay)
			maxdelay = delay;

		// Spin for a fixed amount of time while holding the lock
		nfcfspin(SPIN_BASE * (aWrite ? WRITE_SPIN : READ_SPIN));

		// Release lock
		if (aWrite)
			RW.UnlockOnlyW();
		else
			RW.UnlockOnlyR();

		// Re-enable interrupts
		NKern::RestoreInterrupts(irq);

		// Sleep for a tick ~50% of the time to shuffle the ordering
		if (random(seed) & 0x4000)
			NKern::Sleep(1);
		}

	// Update Max{Read,Write}Wait if ours is higher
	TUint32 global_high = __e32_atomic_load_acq32(aWrite ? &MaxWriteWait : &MaxReadWait);
	do
		{
		if (global_high >= maxdelay)
			break;
		}
	while (!__e32_atomic_cas_ord32(aWrite ? &MaxWriteWait : &MaxReadWait, &global_high, maxdelay));

	if (aWrite)
		TEST_PRINT1("Write max delay: %d", maxdelay);
	else
		TEST_PRINT1("Read max delay: %d", maxdelay);
	}

void RWOrderingTest()
	{
	TEST_PRINT("Testing lock acquisition ordering");

	NFastSemaphore exitSem(0);

	TInt cpus = NKern::NumberOfCpus();
	TInt writers, cpu;
	for (writers = 0; writers <= cpus; ++writers)
		{
		TInt readers = cpus - writers;

		// Reset maximums
		__e32_atomic_store_rel32(&MaxWriteWait, 0);
		__e32_atomic_store_rel32(&MaxReadWait, 0);

		// Start one thread on each CPU, according to readers/writers
		for (cpu = 0; cpu < writers; ++cpu)
			CreateThreadSignalOnExit("RWOrderingTestW", &RWOrderingThread, 10, (TAny*)ETrue, 0, KSmallTimeslice, &exitSem, cpu);
		for ( ; cpu < cpus; ++cpu)
			CreateThreadSignalOnExit("RWOrderingTestR", &RWOrderingThread, 10, (TAny*)EFalse, 0, KSmallTimeslice, &exitSem, cpu);

		// Wait for all threads to terminate
		while (cpu--)
			NKern::FSWait(&exitSem);

		// Get, round, and print the maximum delays (in units of SPIN_BASE)
		TUint32 w = __e32_atomic_load_acq32(&MaxWriteWait);
		TUint32 r = __e32_atomic_load_acq32(&MaxReadWait);
		w += (SPIN_BASE/2) - 1;
		r += (SPIN_BASE/2) - 1;
		w /= SPIN_BASE;
		r /= SPIN_BASE;
		TEST_PRINT4("%d writers, %d readers, max delay: write %d read %d", writers, readers, w, r);

		// Work out the expected delays
		// For writers, we might have all the other writers ahead of us, with readers interleaved
		TUint32 we = ((writers-1) * WRITE_SPIN) + (MIN(readers, writers) * READ_SPIN);
		// For readers, we might have every writer ahead of us, with the other readers interleaved
		TUint32 re = ((writers  ) * WRITE_SPIN) + (MIN(readers-1, writers) * READ_SPIN);
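		// Worked example on four CPUs with writers == readers == 2:
		//   we = (2-1)*WRITE_SPIN + MIN(2,2)*READ_SPIN = 11 + 14 = 25 spin units
		//   re = (2  )*WRITE_SPIN + MIN(1,2)*READ_SPIN = 22 +  7 = 29 spin units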

		// Compare
		if (writers)
			{
			TEST_PRINT1("Expected write %d", we);
			TEST_RESULT(w==we, "Write delay not expected time");
			}
		if (readers)
			{
			TEST_PRINT1("Expected read %d", re);
			TEST_RESULT(r==re, "Read delay not expected time");
			}
		}

	TEST_PRINT("Done");
	}


/////////////////////
// Run all tests
void TestRWSpinLock()
	{
	TEST_PRINT("Testing R/W Spinlocks...");

	RWParallelTest();
	RWOrderingTest();
	}

#else

void TestRWSpinLock()
	{
	TEST_PRINT("Skipping R/W Spinlock tests on uniproc");
	}

#endif