// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\nkernsa\rwspinlock.cpp

//---------------------------------------------------------------------------------------------------------------------
//! @SYMTestCaseID				KBASE-rwspinlock-2442
//! @SYMTestType				UT
//! @SYMTestCaseDesc			Verifying the nkern read/write spinlock
//! @SYMPREQ					PREQ2094
//! @SYMTestPriority			High
//! @SYMTestActions
//! 	1.	RWParallelTest: run a number of reader and writer threads accessing a
//! 		common data block. Each writer completely rewrites the block over and
//! 		over, and the readers verify that the data is consistent.
//! 	2.	RWOrderingTest: run a number of reader and writer threads which spin-
//! 		wait while holding the spinlock. Each works out the maximum time it
//! 		had to wait to acquire the spinlock.
//!
//! @SYMTestExpectedResults
//! 	1.	Properties checked:
//! 		1) readers never see a partial write transaction
//! 		2) the number of writers active is never greater than 1
//! 		3) the number of readers active while a writer is active is 0
//! 		4) more than one reader ran concurrently
//!
//! 	2.	Properties checked:
//! 		5) threads acquire the spinlock in the order in which they asked for it,
//! 		   i.e. neither reader nor writer priority but FIFO
//---------------------------------------------------------------------------------------------------------------------

#include <nktest/nkutils.h>

#ifdef __SMP__

// cheap and cheerful; arguments are evaluated twice, so no side effects please
#define MIN(a, b) ((a)<(b)?(a):(b))

// The spinlock, used throughout
TRWSpinLock RW(TSpinLock::EOrderNone);
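// (A read/write spinlock allows any number of concurrent readers but only
// one writer at a time; EOrderNone is assumed here to exempt the lock from
// NKern's lock-ordering checks, since this test takes no other spinlocks.)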


///////////////////////////////////////////////
// First test: RWParallelTest
//

// Number of words in the data block
#define BLOCK_SIZE 1024
// Number of write transactions to execute in total (across all writers)
#define WRITE_GOAL 100000

// The data block. The first entry is used as the seed value and is
// incremented by one for each write transaction.
TUint32 Array[BLOCK_SIZE];
// The number of readers currently holding the lock
TUint32 Readers = 0;
// The number of writers currently holding the lock
TUint32 Writers = 0;
// The maximum number of readers that were seen holding the lock at once
TUint32 HighReaders = 0;

void RWParallelReadThread(TAny*)
	{
	// high_r is the maximum number of readers seen by this particular reader
	TUint32 c, r, high_r = 0;
	TBool failed;
	do
		{
		failed = EFalse;

		// Take read lock and update reader count
		RW.LockIrqR();
		__e32_atomic_add_ord32(&Readers, 1);

		// Check property 1
		c = Array[0];
		if (!verify_block_no_trace(Array, BLOCK_SIZE))
			failed = ETrue;

		// Update reader count and release read lock
		r = __e32_atomic_add_ord32(&Readers, (TUint32)-1);
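		// (__e32_atomic_add_ord32 returns the value the counter held before
		// the add, so r still includes this reader)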
		RW.UnlockIrqR();

		TEST_RESULT(!failed, "Array data inconsistent");

		// Update local high reader count
		if (r > high_r)
			high_r = r;
		}
	while (c < WRITE_GOAL);

	// Update HighReaders if our high reader count is greater
	TUint32 global_high = __e32_atomic_load_acq32(&HighReaders);
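	// Lock-free maximum update: retry until either the global value is
	// already at least ours, or the CAS succeeds. On failure the CAS
	// reloads the current global value into global_high for the recheck.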
	do
		{
		if (global_high >= high_r)
			break;
		}
	while (!__e32_atomic_cas_ord32(&HighReaders, &global_high, high_r));
	}

void RWParallelWriteThread(TAny*)
	{
	TUint32 c, r, w;
	do
		{
		// Take write lock and update writer count
		RW.LockIrqW();
		w = __e32_atomic_add_ord32(&Writers, 1);

		// Get reader count
		r = __e32_atomic_load_acq32(&Readers);

		// Increment seed and recalculate array data
		c = ++Array[0];
		setup_block(Array, BLOCK_SIZE);
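		// (setup_block is assumed to fill the block with a pattern derived
		// deterministically from the seed word, which is what lets
		// verify_block_no_trace detect a torn write)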

		// Update writer count and release write lock
		__e32_atomic_add_ord32(&Writers, (TUint32)-1);
		RW.UnlockIrqW();

		// Check properties 2 and 3
		TEST_RESULT(w == 0, "Multiple writers active");
		TEST_RESULT(r == 0, "Reader active while writing");
		}
	while (c < WRITE_GOAL);
	}

void RWParallelTest()
	{
	TEST_PRINT("Testing read consistency during parallel accesses");

	NFastSemaphore exitSem(0);

	// Set up the block for the initial seed of 0
	setup_block(Array, BLOCK_SIZE);

	// Spawn three readers and a writer for each processor, all equal priority
	TInt cpu;
	TInt threads = 0;
	for_each_cpu(cpu)
		{
		CreateThreadSignalOnExit("RWParallelTestR1", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestR2", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestR3", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		CreateThreadSignalOnExit("RWParallelTestW", &RWParallelWriteThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
		threads += 4;
		}

	// Wait for all threads to terminate
	while (threads--)
		NKern::FSWait(&exitSem);

	// Check property 4
	TUint r = __e32_atomic_load_acq32(&HighReaders);
	TEST_RESULT(r > 1, "Didn't see concurrent reads");

	TEST_PRINT1("Done, max concurrent readers was %d", r);
	}


///////////////////////////////////////////////
// Second test: RWOrderingTest
//

// Number of times for each thread to try the lock
#define ORDERING_REPEATS 5000
// Time base for spinning
#define SPIN_BASE 100
// Time for read threads to spin (prime)
#define READ_SPIN 7
// Time for write threads to spin (different prime)
#define WRITE_SPIN 11
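// (The two spin times are presumably distinct primes so that any mix of
// readers and writers queued ahead of a thread yields a distinctive total
// delay, which the expected-delay check in RWOrderingTest can discriminate.)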
// Maximum write-thread wait seen
TUint32 MaxWriteWait;
// Maximum read-thread wait seen
TUint32 MaxReadWait;

void RWOrderingThread(TAny* aWrite)
	{
	NThreadBase* us = NKern::CurrentThread();
	TUint32 seed[2] = {(TUint32)us, 0};
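	// (seeding the PRNG from the thread pointer gives each thread its own
	// sleep pattern in the loop below)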
	TUint32 c, maxdelay = 0;
	for (c = 0; c < ORDERING_REPEATS; ++c)
		{
		// Disable interrupts to stop preemption disrupting timing
		TInt irq = NKern::DisableAllInterrupts();

		// Time taking lock
		TUint32 before = norm_fast_counter();
		if (aWrite)
			RW.LockOnlyW();
		else
			RW.LockOnlyR();
		TUint32 after = norm_fast_counter();
		TUint32 delay = after - before;
		if (delay > maxdelay)
			maxdelay = delay;

		// Spin for a fixed amount of time
		nfcfspin(SPIN_BASE * (aWrite ? WRITE_SPIN : READ_SPIN));

		// Release lock
		if (aWrite)
			RW.UnlockOnlyW();
		else
			RW.UnlockOnlyR();

		// Reenable interrupts
		NKern::RestoreInterrupts(irq);

		// Sleep for a tick ~50% of the time to shuffle ordering
		if (random(seed) & 0x4000)
			NKern::Sleep(1);
		}

	// Update Max{Read,Write}Wait if ours is higher
	TUint32 global_high = __e32_atomic_load_acq32(aWrite ? &MaxWriteWait : &MaxReadWait);
	do
		{
		if (global_high >= maxdelay)
			break;
		}
	while (!__e32_atomic_cas_ord32(aWrite ? &MaxWriteWait : &MaxReadWait, &global_high, maxdelay));

	if (aWrite)
		TEST_PRINT1("Write max delay: %d", maxdelay);
	else
		TEST_PRINT1("Read max delay: %d", maxdelay);
	}

void RWOrderingTest()
	{
	TEST_PRINT("Testing lock acquisition ordering");

	NFastSemaphore exitSem(0);

	TInt cpus = NKern::NumberOfCpus();
	TInt writers, cpu;
	for (writers = 0; writers <= cpus; ++writers)
		{
		TInt readers = cpus - writers;

		// reset maximums
		__e32_atomic_store_rel32(&MaxWriteWait, 0);
		__e32_atomic_store_rel32(&MaxReadWait, 0);

		// start one thread on each cpu, according to readers/writers
		for (cpu = 0; cpu < writers; ++cpu)
			CreateThreadSignalOnExit("RWOrderingTestW", &RWOrderingThread, 10, (TAny*)ETrue, 0, KSmallTimeslice, &exitSem, cpu);
		for (       ; cpu < cpus; ++cpu)
			CreateThreadSignalOnExit("RWOrderingTestR", &RWOrderingThread, 10, (TAny*)EFalse, 0, KSmallTimeslice, &exitSem, cpu);

		// Wait for all threads to terminate
		while (cpu--)
			NKern::FSWait(&exitSem);

		// Get, round, and print maximum delays
		TUint32 w = __e32_atomic_load_acq32(&MaxWriteWait);
		TUint32 r = __e32_atomic_load_acq32(&MaxReadWait);
		w += (SPIN_BASE/2) - 1;
		r += (SPIN_BASE/2) - 1;
		w /= SPIN_BASE;
		r /= SPIN_BASE;
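		// (adding SPIN_BASE/2 - 1 then dividing rounds to the nearest whole
		// number of SPIN_BASE-tick spin units, e.g. a raw delay of 1051
		// ticks becomes 11; this puts w and r in the same units as
		// READ_SPIN and WRITE_SPIN for the comparison below)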
		TEST_PRINT4("%d writers, %d readers, max delay: write %d read %d", writers, readers, w, r);

		// Work out expected delays
		// For writers, we might have all the other writers ahead of us, with readers interleaved
		TUint32 we = ((writers-1) * WRITE_SPIN) + (MIN(readers  , writers) * READ_SPIN);
		// For readers, we might have every writer ahead of us, with the other readers interleaved
		TUint32 re = ((writers  ) * WRITE_SPIN) + (MIN(readers-1, writers) * READ_SPIN);
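		// (e.g. on 4 CPUs with 2 writers and 2 readers:
		//  we = 1*11 + MIN(2,2)*7 = 25, re = 2*11 + MIN(1,2)*7 = 29)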

		// Compare
		if (writers)
			{
			TEST_PRINT1("Expected write %d", we);
			TEST_RESULT(w==we, "Write delay not expected time");
			}
		if (readers)
			{
			TEST_PRINT1("Expected read %d", re);
			TEST_RESULT(r==re, "Read delay not expected time");
			}
		}

	TEST_PRINT("Done");
	}


/////////////////////
// Run all tests
void TestRWSpinLock()
	{
	TEST_PRINT("Testing R/W Spinlocks...");

	RWParallelTest();
	RWOrderingTest();
	}

#else

void TestRWSpinLock()
	{
	TEST_PRINT("Skipping R/W Spinlock tests on uniproc");
	}

#endif