os/kernelhwsrv/kerneltest/e32test/nkernsa/rwspinlock.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32test\nkernsa\rwspinlock.cpp
//---------------------------------------------------------------------------------------------------------------------
//! @SYMTestCaseID				KBASE-rwspinlock-2442
//! @SYMTestType				UT
//! @SYMTestCaseDesc			Verifying the nkern SpinLock
//! @SYMPREQ					PREQ2094
//! @SYMTestPriority			High
//! @SYMTestActions
//! 	1. 	RWParallelTest: run a number of reader and writer threads accessing a
//! 		common data block. Each writer completely rewrites the block over and
//! 		over, and the readers verify that the data is consistent.
//! 	2. 	RWOrderingTest: run a number of reader and writer threads which spin-
//! 		wait while holding the spinlock. Each works out the maximum time it
//! 		had to wait to acquire the spinlock.
//!
//! @SYMTestExpectedResults
//! 	1.	Properties checked:
//! 		1) readers never see a partial write transaction
//! 		2) the number of writers active is never greater than 1
//! 		3) the number of readers active while a writer is active is 0
//! 		4) more than one reader ran concurrently
//!
//! 	2. Properties checked:
//! 		5) Threads acquire the spinlock in the order which they asked for it
//!     	   i.e. neither reader nor writer priority but FIFO
//---------------------------------------------------------------------------------------------------------------------
#include <nktest/nkutils.h>
sl@0
    44
sl@0
    45
#ifdef __SMP__
sl@0
    46
sl@0
    47
// cheap and cheerful, no side effects please -- arguments must not have
// side effects since each is evaluated twice
#define MIN(a, b) ((a)<(b)?(a):(b))

// The spinlock under test, used throughout both test scenarios
TRWSpinLock RW(TSpinLock::EOrderNone);


///////////////////////////////////////////////
// First test: RWParallelTest
//

// Number of words in the shared data block
#define BLOCK_SIZE 1024
// Number of write transactions to execute in total (across all writers)
#define WRITE_GOAL 100000

// The data block; the first entry is used as the seed value and is just
// incremented by one each time a writer rewrites the block.
TUint32 Array[BLOCK_SIZE];
// The number of readers currently holding the lock (updated atomically)
TUint32 Readers = 0;
// The number of writers currently holding the lock (updated atomically)
TUint32 Writers = 0;
// The maximum number of readers that were seen holding the lock at once
TUint32 HighReaders = 0;
sl@0
    72
sl@0
    73
// Reader thread for the parallel test. Repeatedly takes the read lock,
// checks that the data block is internally consistent (i.e. that no
// partial write transaction is ever visible -- property 1), and tracks
// the largest number of readers it ever saw inside the lock at once.
void RWParallelReadThread(TAny*)
	{
	// localHigh is the maximum reader count seen by this particular reader
	TUint32 seed, count, localHigh = 0;
	TBool corrupt;
	for (;;)
		{
		corrupt = EFalse;

		// Enter the read side and bump the active-reader count
		RW.LockIrqR();
		__e32_atomic_add_ord32(&Readers, 1);

		// Property 1: the block must always verify against its seed
		seed = Array[0];
		if (!verify_block_no_trace(Array, BLOCK_SIZE))
			corrupt = ETrue;

		// Note how many readers (including us) held the lock, then leave
		count = __e32_atomic_add_ord32(&Readers, (TUint32)-1);
		RW.UnlockIrqR();

		TEST_RESULT(!corrupt, "Array data inconsistent");

		// Track our local high-water mark of concurrent readers
		if (count > localHigh)
			localHigh = count;

		if (seed >= WRITE_GOAL)
			break;
		}

	// Fold our local maximum into the global HighReaders figure;
	// a failed CAS reloads 'seen', so we retry until we win or until
	// another thread has already published a value at least as large.
	TUint32 seen = __e32_atomic_load_acq32(&HighReaders);
	while (seen < localHigh && !__e32_atomic_cas_ord32(&HighReaders, &seen, localHigh))
		{
		}
	}
sl@0
   112
sl@0
   113
// Writer thread for the parallel test. Each pass rewrites the whole data
// block under the write lock, then checks that no other writer and no
// reader was inside the lock at the same time (properties 2 and 3).
void RWParallelWriteThread(TAny*)
	{
	TUint32 seed, readersSeen, writersSeen;
	for (;;)
		{
		// Enter the write side and bump the active-writer count
		RW.LockIrqW();
		writersSeen = __e32_atomic_add_ord32(&Writers, 1);

		// Sample how many readers are (incorrectly) inside the lock
		readersSeen = __e32_atomic_load_acq32(&Readers);

		// Advance the seed and regenerate the whole block from it
		seed = ++Array[0];
		setup_block(Array, BLOCK_SIZE);

		// Drop the writer count and release the write lock
		__e32_atomic_add_ord32(&Writers, (TUint32)-1);
		RW.UnlockIrqW();

		// Properties 2 and 3: we must have been the only active thread
		TEST_RESULT(writersSeen == 0, "Multiple writers active");
		TEST_RESULT(readersSeen == 0, "Reader active while writing");

		if (seed >= WRITE_GOAL)
			break;
		}
	}
sl@0
   139
sl@0
   140
void RWParallelTest()
sl@0
   141
	{
sl@0
   142
	TEST_PRINT("Testing read consistency during parallel accesses");
sl@0
   143
sl@0
   144
	NFastSemaphore exitSem(0);
sl@0
   145
sl@0
   146
	// Set up the block for the initial seed of 0
sl@0
   147
	setup_block(Array, BLOCK_SIZE);
sl@0
   148
sl@0
   149
	// Spawn three readers and a writer for each processor, all equal priority
sl@0
   150
	TInt cpu;
sl@0
   151
	TInt threads = 0;
sl@0
   152
	for_each_cpu(cpu)
sl@0
   153
		{
sl@0
   154
		CreateThreadSignalOnExit("RWParallelTestR1", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
sl@0
   155
		CreateThreadSignalOnExit("RWParallelTestR2", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
sl@0
   156
		CreateThreadSignalOnExit("RWParallelTestR3", &RWParallelReadThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
sl@0
   157
		CreateThreadSignalOnExit("RWParallelTestW", &RWParallelWriteThread, 10, NULL, 0, KSmallTimeslice, &exitSem, cpu);
sl@0
   158
		threads += 4;
sl@0
   159
		}
sl@0
   160
sl@0
   161
	// Wait for all threads to terminate
sl@0
   162
	while (threads--)
sl@0
   163
		NKern::FSWait(&exitSem);
sl@0
   164
sl@0
   165
	// Check property 4
sl@0
   166
	TUint r = __e32_atomic_load_acq32(&HighReaders);
sl@0
   167
	TEST_RESULT(r > 1, "Didn't see concurrent reads");
sl@0
   168
sl@0
   169
	TEST_PRINT1("Done, max concurrent readers was %d", r);
sl@0
   170
	}
sl@0
   171
sl@0
   172
sl@0
   173
///////////////////////////////////////////////
// Second test: RWOrderingTest
//

// Number of times for each thread to try the lock
#define ORDERING_REPEATS 5000
// Time base for spinning; delays are measured/rounded in these units
#define SPIN_BASE 100
// Time for read threads to spin while holding the lock (prime)
#define READ_SPIN 7
// Time for write threads to spin while holding the lock (different prime)
#define WRITE_SPIN 11
// Maximum write-thread wait seen (published via atomic CAS)
TUint32 MaxWriteWait;
// Maximum read-thread wait seen (published via atomic CAS)
TUint32 MaxReadWait;
sl@0
   189
sl@0
   190
// Worker thread for the ordering test. aWrite selects writer (non-null)
// or reader (null) behaviour. Each iteration times how long the lock took
// to acquire, spin-waits while holding it, and finally publishes the worst
// wait seen into MaxWriteWait/MaxReadWait.
void RWOrderingThread(TAny* aWrite)
	{
	NThreadBase* us = NKern::CurrentThread();
	// Per-thread PRNG state, seeded from the thread pointer for variety
	TUint32 seed[2] = {(TUint32)us, 0};
	TUint32 c, maxdelay = 0;
	for (c = 0; c < ORDERING_REPEATS; ++c)
		{
		// Disable interrupts to stop preemption disrupting timing
		TInt irq = NKern::DisableAllInterrupts();

		// Time taking the lock (in norm_fast_counter units)
		TUint32 before = norm_fast_counter();
		if (aWrite)
			RW.LockOnlyW();
		else
			RW.LockOnlyR();
		TUint32 after = norm_fast_counter();
		TUint32 delay = after - before;
		if (delay > maxdelay)
			maxdelay = delay;

		// Spin for a fixed amount of time (different for writers/readers)
		nfcfspin(SPIN_BASE * (aWrite ? WRITE_SPIN : READ_SPIN));

		// Release lock
		if (aWrite)
			RW.UnlockOnlyW();
		else
			RW.UnlockOnlyR();

		// Reenable interrupts
		NKern::RestoreInterrupts(irq);

		// Sleep for a tick ~50% of the time to shuffle ordering
		if (random(seed) & 0x4000)
			NKern::Sleep(1);
		}

	// Raise Max{Read,Write}Wait to our maximum with a CAS retry loop;
	// a failed CAS reloads global_high, so the loop re-checks each time
	TUint32 global_high = __e32_atomic_load_acq32(aWrite ? &MaxWriteWait : &MaxReadWait);
	do
		{
		if (global_high >= maxdelay)
			break;
		}
	while (!__e32_atomic_cas_ord32(aWrite ? &MaxWriteWait : &MaxReadWait, &global_high, maxdelay));

	if (aWrite)
		TEST_PRINT1("Write max delay: %d", maxdelay);
	else
		TEST_PRINT1("Read max delay: %d", maxdelay);
	}
sl@0
   242
sl@0
   243
void RWOrderingTest()
sl@0
   244
	{
sl@0
   245
	TEST_PRINT("Testing lock acquisition ordering");
sl@0
   246
sl@0
   247
	NFastSemaphore exitSem(0);
sl@0
   248
sl@0
   249
	TInt cpus = NKern::NumberOfCpus();
sl@0
   250
	TInt writers, cpu;
sl@0
   251
	for (writers = 0; writers <= cpus; ++writers)
sl@0
   252
		{
sl@0
   253
		TInt readers = cpus - writers;
sl@0
   254
sl@0
   255
		// reset maximums
sl@0
   256
		__e32_atomic_store_rel32(&MaxWriteWait, 0);
sl@0
   257
		__e32_atomic_store_rel32(&MaxReadWait, 0);
sl@0
   258
sl@0
   259
		// start one thread on each cpu, according to readers/writers
sl@0
   260
		for (cpu = 0; cpu < writers; ++cpu)
sl@0
   261
			CreateThreadSignalOnExit("RWOrderingTestW", &RWOrderingThread, 10, (TAny*)ETrue, 0, KSmallTimeslice, &exitSem, cpu);
sl@0
   262
		for (       ; cpu < cpus; ++cpu)
sl@0
   263
			CreateThreadSignalOnExit("RWOrderingTestR", &RWOrderingThread, 10, (TAny*)EFalse, 0, KSmallTimeslice, &exitSem, cpu);
sl@0
   264
sl@0
   265
		// Wait for all threads to terminate
sl@0
   266
		while (cpu--)
sl@0
   267
			NKern::FSWait(&exitSem);
sl@0
   268
sl@0
   269
		// Get, round, and print maximum delays
sl@0
   270
		TUint32 w = __e32_atomic_load_acq32(&MaxWriteWait);
sl@0
   271
		TUint32 r = __e32_atomic_load_acq32(&MaxReadWait);
sl@0
   272
		w += (SPIN_BASE/2) - 1;
sl@0
   273
		r += (SPIN_BASE/2) - 1;
sl@0
   274
		w /= SPIN_BASE;
sl@0
   275
		r /= SPIN_BASE;
sl@0
   276
		TEST_PRINT4("%d writers, %d readers, max delay: write %d read %d", writers, readers, w, r);
sl@0
   277
sl@0
   278
		// Work out expected delays
sl@0
   279
		// For writers, we might have every other writer ahead of us, with the readers interleaved
sl@0
   280
		TUint32 we = ((writers-1) * WRITE_SPIN) + (MIN(readers  , writers) * READ_SPIN);
sl@0
   281
		// For readers, we might have every writer ahead of us, with the other readers interleaved
sl@0
   282
		TUint32 re = ((writers  ) * WRITE_SPIN) + (MIN(readers-1, writers) * READ_SPIN);
sl@0
   283
sl@0
   284
		// Compare
sl@0
   285
		if (writers)
sl@0
   286
			{
sl@0
   287
			TEST_PRINT1("Expected write %d", we);
sl@0
   288
			TEST_RESULT(w==we, "Write delay not expected time");
sl@0
   289
			}
sl@0
   290
		if (readers)
sl@0
   291
			{
sl@0
   292
			TEST_PRINT1("Expected read %d", re);
sl@0
   293
			TEST_RESULT(r==re, "Read delay not expected time");
sl@0
   294
			}
sl@0
   295
		}
sl@0
   296
sl@0
   297
	TEST_PRINT("Done");
sl@0
   298
	}
sl@0
   299
sl@0
   300
sl@0
   301
/////////////////////
// Run all tests
// Entry point for the R/W spinlock test suite: runs the parallel
// consistency test followed by the FIFO-ordering test.
void TestRWSpinLock()
	{
	TEST_PRINT("Testing R/W Spinlocks...");

	RWParallelTest();
	RWOrderingTest();
	}
sl@0
   310
sl@0
   311
#else
sl@0
   312
sl@0
   313
// Uniprocessor build: the R/W spinlock tests require SMP, so this stub
// just reports that the tests are being skipped.
void TestRWSpinLock()
	{
	TEST_PRINT("Skipping R/W Spinlock tests on uniproc");
	}
sl@0
   317
sl@0
   318
#endif