os/kernelhwsrv/kernel/eka/nkernsmp/nk_irq.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
// e32\nkernsmp\nk_irq.cpp
sl@0
    15
// 
sl@0
    16
//
sl@0
    17
sl@0
    18
/**
sl@0
    19
 @file
sl@0
    20
 @internalTechnology
sl@0
    21
*/
sl@0
    22
sl@0
    23
#include <e32cmn.h>
sl@0
    24
#include <e32cmn_private.h>
sl@0
    25
#include "nk_priv.h"
sl@0
    26
#include <nk_irq.h>
sl@0
    27
sl@0
    28
NIrq		Irq[NK_MAX_IRQS];
sl@0
    29
NIrqHandler	Handlers[NK_MAX_IRQ_HANDLERS];
sl@0
    30
NIrqHandler* NIrqHandler::FirstFree;
sl@0
    31
sl@0
    32
extern "C" void send_irq_ipi(TSubScheduler*);
sl@0
    33
sl@0
    34
void StepCookie(volatile TUint16& p, TInt n)
sl@0
    35
	{
sl@0
    36
	TUint32 x = p<<17;
sl@0
    37
	while(n--)
sl@0
    38
		{
sl@0
    39
		TUint32 y = x;
sl@0
    40
		x<<=1;
sl@0
    41
		y^=x;
sl@0
    42
		x |= ((y>>31)<<17);
sl@0
    43
		}
sl@0
    44
	p = (TUint16)(x>>17);
sl@0
    45
	}
sl@0
    46
sl@0
    47
NIrq::NIrq()
	:	iNIrqLock(TSpinLock::EOrderNIrq)
	{
	// A freshly constructed interrupt starts in the EWait state;
	// NKern::InterruptInit() clears EWait once hardware setup completes.
	iHwId = 0;
	iX = 0;
	iIState = EWait;
	iEnabledEvents = 0;
	iEventsPending = 0;
	}
sl@0
    56
sl@0
    57
TInt NIrq::BindRaw(NIsr aIsr, TAny* aPtr)
	{
	// Call only from thread context.
	// Attach a raw ISR directly to this interrupt. The empty handler queue's
	// anchor links are reused to stash the ISR function and its argument.
	Wait();
	iNIrqLock.LockOnly();
	TInt r;
	if (iStaticFlags & EShared)
		r = KErrAccessDenied;	// raw binding not allowed on shared interrupts
	else if ((iIState & ERaw) || !iHandlers.IsEmpty())
		r = KErrInUse;			// already bound, either raw or via handler objects
	else
		{
		r = KErrNone;
		iHandlers.iA.iNext = (SDblQueLink*)aIsr;
		iHandlers.iA.iPrev = (SDblQueLink*)aPtr;
		__e32_atomic_ior_rel32(&iIState, ERaw);
		}
	iNIrqLock.UnlockOnly();
	Done();
	return r;
	}
sl@0
    81
sl@0
    82
TInt NIrq::UnbindRaw()
	{
	// Call only from thread context
	// Detach a raw-bound ISR. First disables the interrupt (waiting for any
	// in-flight ISR to finish), then clears the stashed ISR/arg and the
	// ERaw/EUnbind state bits.
	TInt r = DisableRaw(TRUE);
	if (r != KErrNone)
		return r;	// wasn't raw-bound
	Wait();
	iNIrqLock.LockOnly();
	if (iIState & ERaw)
		{
		// clear the ISR function/argument stashed in the queue anchor
		iHandlers.iA.iNext = 0;
		iHandlers.iA.iPrev = 0;
		++iGeneration;	// release anyone still waiting in Disable()
		__e32_atomic_and_rel32(&iIState, ~(ERaw|EUnbind));
		}
	iNIrqLock.UnlockOnly();
	Done();
	return r;
	}
sl@0
   101
sl@0
   102
TInt NIrq::DisableRaw(TBool aUnbind)
	{
	// Disable a raw-bound interrupt. If aUnbind is true, also flag the
	// interrupt as being unbound so EnableRaw() refuses to re-enable it.
	// When not called from interrupt context, waits until any currently
	// running instance of the ISR has completed.
	TBool wait = FALSE;
	TInt r = KErrNone;
	TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
	if (!(iIState & ERaw))
		r = KErrGeneral;	// not raw-bound
	else
		{
		wait = TRUE;
		if (aUnbind)
			__e32_atomic_ior_acq32(&iIState, EUnbind);
		if (!(iEnabledEvents & 1))
			{
			// bit 0 of iEnabledEvents records "hardware disabled"
			iEnabledEvents |= 1;
			HwDisable();
//			wait = TRUE;
			}
		}
	__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
	TInt c = NKern::CurrentContext();
	if (wait && c!=NKern::EInterrupt)
		{
		// wait for currently running handler to finish or interrupt to be reenabled
		if (c==NKern::EThread)
			NKern::ThreadEnterCS();
		HwWaitCpus();	// ensure other CPUs have had a chance to accept any outstanding interrupts
		TUint32 g = iGeneration;
		// Spin while the ISR run count (top 16 bits of iIState) is nonzero or
		// the interrupt is still pending in hardware; a change of iGeneration
		// (made by EnableRaw()/UnbindRaw()) also releases us.
		while ( ((iIState >> 16) || HwPending()) && (iGeneration == g))
			{
			__chill();
			}
		if (c==NKern::EThread)
			NKern::ThreadLeaveCS();
		}
	return r;
	}
sl@0
   139
sl@0
   140
TInt NIrq::EnableRaw()
	{
	// Re-enable a raw-bound interrupt previously disabled by DisableRaw().
	TInt rc = KErrNone;
	TInt irqs = __SPIN_LOCK_IRQSAVE(iNIrqLock);
	if (!(iIState & ERaw))
		rc = KErrGeneral;		// not raw-bound at all
	else if (iIState & EUnbind)
		rc = KErrNotReady;		// unbind in progress - refuse to re-enable
	else if (iEnabledEvents & 1)
		{
		// bit 0 of iEnabledEvents marks "hardware disabled": clear it,
		// re-enable the hardware and bump the generation to release any
		// waiters spinning in DisableRaw().
		iEnabledEvents = 0;
		HwEnable();
		++iGeneration;
		}
	__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irqs);
	return rc;
	}
sl@0
   157
sl@0
   158
TInt NIrq::Bind(NIrqHandler* aH)
	{
	// Call only from thread context
	// Attach handler aH to this interrupt's handler list. Fails if the
	// interrupt is raw-bound, or if sharing rules forbid another handler
	// (interrupt not shared, or an existing handler claimed exclusivity).
	TInt r = KErrInUse;
	Wait();
	if (!(iIState & ERaw))
		{
		r = KErrNone;
		TBool empty = iHandlers.IsEmpty();
		TBool shared = iStaticFlags & EShared;
		// NOTE(review): this tests NIrq::iIState against an NIrqHandler flag
		// value - presumably a deliberate bit overlay, but verify against the
		// flag definitions in nk_irq.h.
		TBool exclusive = iIState & NIrqHandler::EExclusive;
		if (!empty)
			{
			if (!shared || exclusive)
				{
				r = KErrAccessDenied;
				goto error;
				}
			NIrqHandler* h = _LOFF(iHandlers.First(), NIrqHandler, iIrqLink);
			if (h->iHState & NIrqHandler::EExclusive)
				{
				// first existing handler demands exclusive use
				r = KErrAccessDenied;
				goto error;
				}
			}
		aH->iIrq = this;
		iHandlers.Add(&aH->iIrqLink);
		}
error:
	Done();
	return r;
	}
sl@0
   190
sl@0
   191
void NIrq::HwIsr()
	{
	// Hardware interrupt dispatch for this interrupt line.
	// Raw-bound ISRs are called directly; otherwise every attached handler
	// is activated. EOI timing differs for edge vs level triggered lines.
	TRACE_IRQ12(16, this, iVector, iIState);
	TBool eoi_done = FALSE;
	TUint32 rcf0 = EnterIsr();		// for initial run count
	TUint32 rcf1 = iIState;			// might have changed while we were waiting in EnterIsr()
	if (rcf1 & ERaw)
		{
		// raw ISR path: function/arg are stashed in the handler queue anchor
		if (!(rcf1 & EUnbind))
			{
			NIsr f = (NIsr)iHandlers.iA.iNext;
			TAny* p = iHandlers.iA.iPrev;
			(*f)(p);
			}
		HwEoi();
		IsrDone();
		return;
		}
	if (rcf0 >> 16)
		{
		// run count (top 16 bits) was already nonzero - another CPU is
		// dispatching this interrupt; just acknowledge and leave
		HwEoi();
		return;
		}
	if (!(iStaticFlags & ELevel))
		{
		// edge triggered: safe to EOI before running handlers
		eoi_done = TRUE;
		HwEoi();
		}
	do	{
		// Handler list can't be touched now
		SDblQueLink* anchor = &iHandlers.iA;
		SDblQueLink* p = anchor->iNext;
		while (p != anchor)
			{
			NIrqHandler* h = _LOFF(p, NIrqHandler, iIrqLink);
			h->Activate(1);
			p = p->iNext;
			}
		if (!eoi_done)
			{
			eoi_done = TRUE;
			HwEoi();
			}
		if ((iStaticFlags & ELevel) && iEventsPending)
			{
			// For a level triggered interrupt make sure interrupt is disabled until
			// all pending event handlers have run, to avoid a continuous interrupt.
			TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
			if (iEventsPending)
				{
				iEnabledEvents |= 1;	// bit 0 = soft-disabled
				HwDisable();
				}
			__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
			}
		} while (IsrDone());	// repeat if the interrupt recurred while dispatching
	}
sl@0
   248
sl@0
   249
void NIrqHandler::Activate(TInt aCount)
	{
	// Run or queue this handler in response to interrupt occurrences.
	// aCount = number of new occurrences (0 = replay remembered ones
	// from Enable()). May be called from ISR or thread context.
	TUint32 orig = DoActivate(aCount);
	TRACE_IRQ12(17, this, orig, aCount);
	if (orig & (EDisable|EUnbind|EActive))
		return;	// disabled or already active
	if (iTied)
		{
		// we need to enforce mutual exclusion between the event handler
		// and the tied thread or thread group, so the event handler must
		// run on the CPU to which the thread or group is currently attached
		// once the event has been attached to that CPU, the thread/group
		// can't be migrated until the event handler completes.
		// need a pending event count for the tied thread/group
		// so we know when the thread/group can be migrated
		TInt tied_cpu = iTied->BeginTiedEvent();
		TInt this_cpu = NKern::CurrentCpu();
		if (tied_cpu != this_cpu)
			{
			// queue the event on the tied thread's CPU and IPI it
			__e32_atomic_add_acq32(&iIrq->iEventsPending, 1);
			TheSubSchedulers[tied_cpu].QueueEventAndKick(this);
			// FIXME: move IRQ over to tied CPU if this is the only handler for that IRQ
			//			what to do about shared IRQs?
			return;
			}
		}
	// event can run on this CPU so run it now
	if (aCount)
		{
		orig = EventBegin();
		TRACE_IRQ8(18, this, orig);
		(*iFn)(iPtr);
		orig = EventDone();
		TRACE_IRQ8(19, this, orig);
		if (!(orig & EActive))
			{
			if (iTied)
				iTied->EndTiedEvent();
			return;	// that was last occurrence or event now disabled
			}
		}
	// still occurrences outstanding - hand off to the event queue
	__e32_atomic_add_ord32(&iIrq->iEventsPending, 1);
//	add event to this cpu
	SubScheduler().QueueEventAndKick(this);
	}
sl@0
   294
sl@0
   295
sl@0
   296
NIrqHandler::NIrqHandler()
	{
	// New handlers start disabled, unbound and not yet ready;
	// NKern::InterruptBind() clears ENotReady once binding completes.
	iHState = EDisable|EBind|ENotReady|EEventHandlerIrq;
	iIrq = 0;
	iTied = 0;
	iFn = 0;
	iPtr = 0;
	iIrqLink.iNext = 0;
	memclr(iNIrqHandlerSpare, sizeof(iNIrqHandlerSpare));
	}
sl@0
   306
sl@0
   307
void NIrqHandler::Free()
	{
	// Return this handler to the free pool.
	// TiedLock must be held to sample iTied safely. If the handler is still
	// tied to a thread/group it is NOT freed here; the tied-event cleanup
	// path frees it once iTied has been cleared.
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	if (!iTied)	// Only free if iTied has been cleared
		{
		// push onto the singly-linked free list threaded through iIrqLink.iNext
		iIrqLink.iNext = FirstFree;
		FirstFree = this;
		}
	NEventHandler::TiedLock.UnlockOnly();
	NKern::Unlock();
	}
sl@0
   319
sl@0
   320
NIrqHandler* NIrqHandler::Alloc()
	{
	// Pop a handler off the free list (threaded through iIrqLink.iNext)
	// and re-run its constructor. Returns 0 if the pool is exhausted.
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	NIrqHandler* h = FirstFree;
	if (h)
		FirstFree = (NIrqHandler*)h->iIrqLink.iNext;
	NEventHandler::TiedLock.UnlockOnly();
	NKern::Unlock();
	if (h)
		new (h) NIrqHandler();	// placement-new: reset to pristine state
	return h;
	}
sl@0
   333
sl@0
   334
TInt NIrqHandler::Enable(TInt aHandle)
	{
	// call from any context
	// Enable this handler (validated against aHandle's cookie). If interrupts
	// were remembered while disabled, replays them via Activate(0).
	TBool reactivate = FALSE;
	TInt r = KErrNotReady;
	NIrq* pI = iIrq;
	if (!pI)
		return KErrNotReady;	// not bound to any interrupt
	TInt irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);	// OK since NIrq's are never deleted
	if (iIrq==pI && TUint(aHandle)==iHandle)	// check handler not unbound
		{
		TUint32 orig = DoSetEnabled();	// clear EDisable and EBind provided neither EUnbind nor ENotReady set
		if (!(orig & (EUnbind|ENotReady)))
			{
			r = KErrNone;
			if (orig & EDisable)	// check not already enabled
				{
				++iGeneration;	// releases waiters in Disable()
				TUint32 n = pI->iEnabledEvents;
				pI->iEnabledEvents += 2;	// handler count lives in bits 1..31
				if (n==0)
					pI->HwEnable();	// enable HW interrupt if this is first handler to be enabled
				if ((orig >> 16) && !(orig & EActive))
					// replay remembered interrupt(s)
					reactivate = TRUE;
				}
			}
		}
	if (reactivate)
		{
		// Activate() must not run under iNIrqLock - drop it across the call
		pI->iNIrqLock.UnlockOnly();
		Activate(0);
		pI->iNIrqLock.LockOnly();
		}
	__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
	return r;
	}
sl@0
   371
sl@0
   372
TInt NIrqHandler::Disable(TBool aUnbind, TInt aHandle)
	{
	// call from any context
	// Disable this handler (validated against aHandle's cookie). If aUnbind
	// is true, also mark it unbinding and invalidate the handle cookie so
	// stale handles are rejected. Outside interrupt context, waits for any
	// currently running activation to finish.
	NIrq* pI = iIrq;
	if (!pI)
		return KErrGeneral;	// not bound to any interrupt
	TInt irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);	// OK since NIrq's are never deleted
	if (iIrq != pI || TUint(aHandle)!=iHandle)	// check handler not unbound
		{
		__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
		return KErrGeneral;
		}
	TInt r = aUnbind ? KErrGeneral : KErrNone;
	TUint32 f = aUnbind ? EUnbind|EDisable : EDisable;
	TUint32 orig = __e32_atomic_ior_acq32(&iHState, f);
	TUint32 g = iGeneration;	// sampled before dropping the lock - see wait loop below
	if (!(orig & EDisable))	// check not already disabled
		{
		pI->iEnabledEvents -= 2;	// handler count lives in bits 1..31
		if (!pI->iEnabledEvents)
			pI->HwDisable();	// disable HW interrupt if no more enabled handlers
		}
	if (aUnbind && !(orig & EUnbind))
		{
		// step the cookie in the top 16 bits of iHandle so existing handles
		// become stale (assumes little-endian layout: cookie at offset +2 -
		// TODO confirm against supported targets)
		volatile TUint16& cookie = *(volatile TUint16*)(((TUint8*)&iHandle)+2);
		StepCookie(cookie, 1);
		r = KErrNone;	// we were the one to unbind it
		}
	__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
	if (NKern::CurrentContext() != NKern::EInterrupt)
		{
		// wait for currently running handler to finish or interrupt to be reenabled
		while ((iHState & EActive) && (iGeneration == g))
			{
			__chill();
			}
		}
	return r;
	}
sl@0
   411
sl@0
   412
TInt NIrqHandler::Unbind(TInt aId, NSchedulable* aTied)
	{
	// Unbind this handler. aTied is nonzero when called from tied
	// thread/group cleanup, in which case the tie is severed even if
	// another party already unbound the handler.
	TInt r = Disable(TRUE, aId);	// waits for any current activation of ISR to finish
	if (r==KErrNone || aTied)	// returns KErrGeneral if someone else already unbound this interrupt handler
		{
		// Possible race condition here between tied thread termination and interrupt unbind.
		// We need to be sure that the iTied field must be NULL before the tied thread/group
		// is destroyed.
		NKern::Lock();
		NEventHandler::TiedLock.LockOnly();	// this guarantees pH->iTied cannot change
		NSchedulable* t = iTied;
		if (t)
			{
			// We need to guarantee the object pointed to by t cannot be deleted until we
			// have finished with it.
			t->AcqSLock();
			if (iTiedLink.iNext)
				{
				// still on the thread/group's tied-event list - remove it
				iTiedLink.Deque();
				iTiedLink.iNext = 0;
				iTied = 0;
				}
			if (aTied && aTied==t)
				iTied = 0;	// sever the tie on behalf of the dying thread/group
			t->RelSLock();
			}
		NEventHandler::TiedLock.UnlockOnly();
		NKern::Unlock();
		}
	if (r==KErrNone)
		{
		// we performed the unbind - detach from the NIrq and free the handler
		DoUnbind();
		Free();
		}
	return r;
	}
sl@0
   448
sl@0
   449
void NIrqHandler::DoUnbind()
	{
	// Call only from thread context
	// Remove this handler from its NIrq's handler list. NIrq::Wait()/Done()
	// serialise against concurrent bind/unbind and interrupt dispatch.
	NIrq* pI = iIrq;
	pI->Wait();
	iIrqLink.Deque();
	iIrq = 0;
	pI->Done();
	}
sl@0
   458
sl@0
   459
TBool TSubScheduler::QueueEvent(NEventHandler* aEvent)
	{
	// Append aEvent to this CPU's event handler queue.
	// Returns TRUE if the queue was previously idle, i.e. the caller must
	// kick this CPU so run_event_handlers() gets invoked.
	TInt irqs = __SPIN_LOCK_IRQSAVE(iEventHandlerLock);
	TBool wasPending = iEventHandlersPending;
	iEventHandlersPending = TRUE;
	iEventHandlers.Add(aEvent);
	__SPIN_UNLOCK_IRQRESTORE(iEventHandlerLock,irqs);
	return !wasPending;
	}
sl@0
   468
sl@0
   469
void TSubScheduler::QueueEventAndKick(NEventHandler* aEvent)
	{
	// Queue aEvent and, if this CPU's queue was previously idle, send an
	// IPI so the target CPU drains it via run_event_handlers().
	TBool needKick = QueueEvent(aEvent);
	if (needKick)
		{
		// extra barrier ?
		send_irq_ipi(this);
		}
	}
sl@0
   477
sl@0
   478
extern "C" void run_event_handlers(TSubScheduler* aS)
	{
	// Drain this CPU's queued event handlers (invoked on the IRQ IPI path).
	while (aS->iEventHandlersPending)
		{
		TInt irq = __SPIN_LOCK_IRQSAVE(aS->iEventHandlerLock);
		if (aS->iEventHandlers.IsEmpty())
			{
			aS->iEventHandlersPending = FALSE;
			__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
			break;
			}
		NIrqHandler* h = (NIrqHandler*)aS->iEventHandlers.First()->Deque();
		if (aS->iEventHandlers.IsEmpty())
			aS->iEventHandlersPending = FALSE;
		TInt type = h->iHType;
		NSchedulable* tied = h->iTied;
		if (type == NEventHandler::EEventHandlerNTimer)
			{
			// Timer event: snapshot callback details, mark the timer idle,
			// then run the callback outside the lock.
			NEventFn f = h->iFn;
			TAny* p = h->iPtr;
			mb();	// make sure dequeue observed and iFn,iPtr,iTied sampled before state change observed
			h->i8888.iHState1 = NTimer::EIdle; // can't touch timer again after this
			__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
			(*f)(p);
			if (tied)
				tied->EndTiedEvent();
			continue;
			}
		__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
		TBool requeue = TRUE;
		switch (h->iHType)
			{
			case NEventHandler::EEventHandlerIrq:
				{
				TUint32 orig;
				// event can run on this CPU so run it now
				// if event tied, migration of tied thread/group will have been blocked
				orig = h->EventBegin();
				TRACE_IRQ8(20, h, orig);
				(*h->iFn)(h->iPtr);
				TRACE_IRQ4(21, h);
				if (!(h->iHState & NIrqHandler::ERunCountMask))	// if run count still nonzero, definitely still active
					{
					NIrq* pI = h->iIrq;
					irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);
					orig = h->EventDone();
					TRACE_IRQ8(22, h, orig);
					if (!(orig & NIrqHandler::EActive))
						{
						// handler is no longer active - can't touch it again
						// pI is OK since NIrq's are never deleted/reused
						requeue = FALSE;
						if (__e32_atomic_add_rel32(&pI->iEventsPending, TUint32(-1)) == 1)
							{
							// that was the last pending event; if the IRQ was
							// soft-disabled (bit 0 set) re-enable the hardware
							// provided some handler is still enabled
							if (pI->iEnabledEvents & 1)
								{
								pI->iEnabledEvents &= ~1;
								if (pI->iEnabledEvents)
									pI->HwEnable();
								}
							}
						}
					__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
					}
				break;
				}
			default:
				__KTRACE_OPT(KPANIC,DEBUGPRINT("h=%08x",h));
				__NK_ASSERT_ALWAYS(0);
			}
		if (tied && !requeue)
			{
			// If the tied thread/group has no more tied events outstanding
			// and has a migration pending, trigger the migration now.
			// Atomically change the tied_cpu to the target CPU here. An IDFC
			// can then effect the migration.
			// Note that the tied code can't run in parallel with us until
			// the tied_cpu is changed. However it could run as soon as the
			// tied_cpu is changed (e.g. if added to ready list after change)
			tied->EndTiedEvent();
			}
		if (requeue)
			{
			// still pending so put it back on the queue
			// leave interrupt disabled (if so) and migration of tied thread/group blocked
			aS->QueueEvent(h);
			}
		}
	}
sl@0
   567
sl@0
   568
/******************************************************************************
sl@0
   569
 * Public interrupt management functions
sl@0
   570
 ******************************************************************************/
sl@0
   571
sl@0
   572
void NKern::InterruptInit0()
sl@0
   573
	 {
sl@0
   574
	 TInt i;
sl@0
   575
	 TUint16 cookie = 1;
sl@0
   576
	 NIrqHandler::FirstFree = 0;
sl@0
   577
	 for (i=NK_MAX_IRQ_HANDLERS-1; i>=0; --i)
sl@0
   578
		 {
sl@0
   579
		 StepCookie(cookie, 61);
sl@0
   580
		 NIrqHandler* h = &::Handlers[i];
sl@0
   581
		__KTRACE_OPT(KBOOT,DEBUGPRINT("NIrqHandler[%d] at %08x", i, h));
sl@0
   582
		 h->iGeneration = 0;
sl@0
   583
		 h->iHandle = (cookie << 16) | i;
sl@0
   584
		 h->iIrqLink.iNext = NIrqHandler::FirstFree;
sl@0
   585
		 NIrqHandler::FirstFree = h;
sl@0
   586
		 }
sl@0
   587
	 NIrq::HwInit0();
sl@0
   588
	 }
sl@0
   589
sl@0
   590
EXPORT_C TInt NKern::InterruptInit(TInt aId, TUint32 aFlags, TInt aVector, TUint32 aHwId, TAny* aExt)
	{
	// Initialise interrupt aId with its static configuration (vector,
	// hardware id, flags, dispatcher extension block).
	__KTRACE_OPT(KBOOT,DEBUGPRINT("NKII: ID=%02x F=%08x V=%03x HWID=%08x X=%08x", aId, aFlags, aVector, aHwId, aExt));
	TRACE_IRQ12(0, (aId|(aVector<<16)), aFlags, aHwId);
	if (TUint(aId) >= TUint(NK_MAX_IRQS))
		return KErrArgument;
	NIrq* irq = &Irq[aId];
	__KTRACE_OPT(KBOOT,DEBUGPRINT("NIrq[%02x] at %08x", aId, irq));
	TRACE_IRQ8(1, aId, irq);
	new (irq) NIrq;	// placement-new: (re)construct in the static array
	irq->iX = (NIrqX*)aExt;
	irq->iIndex = (TUint16)aId;
	irq->iHwId = aHwId;
	irq->iVector = aVector;
	irq->iStaticFlags = (TUint16)(aFlags & 0x13);	// keep only the static flag bits
	if (aFlags & NKern::EIrqInit_Count)
		irq->iIState |= NIrq::ECount;
	irq->HwInit();
	// Clear EWait (set by the NIrq constructor) - the interrupt is now usable.
	__e32_atomic_and_rel32(&irq->iIState, ~NIrq::EWait);
	return KErrNone;
	}
sl@0
   611
sl@0
   612
EXPORT_C TInt NKern::InterruptBind(TInt aId, NIsr aIsr, TAny* aPtr, TUint32 aFlags, NSchedulable* aTied)
	{
	// Bind aIsr/aPtr to interrupt aId.
	// Returns a handler handle (>=0) on success for a normal bind, KErrNone
	// for a raw bind, or a negative error code.
	// FIX: the original cleared ENotReady through pH unconditionally when
	// r==KErrNone at 'out:'; on a successful RAW bind pH is still NULL, so
	// that dereferenced a null pointer. The flag-clear is now guarded by pH.
	// (Raw success still returns 0, i.e. the same value as before.)
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIB: ID=%02x ISR=%08x(%08x) F=%08x T=%T", aId, aIsr, aPtr, aFlags, aTied));
	TRACE_IRQ12(2, aId, aIsr, aPtr);
	TRACE_IRQ12(3, aId, aFlags, aTied);
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::InterruptBind");
	if (TUint(aId) >= TUint(NK_MAX_IRQS))
		{
		TRACE_IRQ8(4, aId, KErrArgument);
		return KErrArgument;
		}
	NIrq* pI = &Irq[aId];
	NIrqHandler* pH = 0;
	NSchedulable* pT = 0;
	if (aFlags & NKern::EIrqBind_Tied)
		{
		// default the tie target to the calling thread
		if (!aTied)
			aTied = NKern::CurrentThread();
		pT = aTied;
		}
	TInt r = KErrNoMemory;
	TInt handle = 0;
	NKern::ThreadEnterCS();
	if (!(aFlags & NKern::EIrqBind_Raw))
		{
		// normal bind: allocate a handler object from the pool
		pH = NIrqHandler::Alloc();
		if (!pH)
			goto out;
		pH->iFn = aIsr;
		pH->iPtr = aPtr;
		__e32_atomic_add_ord32(&pH->iGeneration, 1);
		if (aFlags & EIrqBind_Exclusive)
			pH->iHState |= NIrqHandler::EExclusive;
		if (aFlags & EIrqBind_Count)
			pH->iHState |= NIrqHandler::ECount;
		r = pI->Bind(pH);
		if (r==KErrNone)
			{
			handle = pH->iHandle;
			// We assume that aTied cannot disappear entirely before we return
			if (pT)
				{
				NKern::Lock();
				r = pT->AddTiedEvent(pH);
				NKern::Unlock();
				}
			if (r!=KErrNone)
				{
				// unbind
				pH->DoUnbind();
				}
			}
		if (r!=KErrNone)
			pH->Free();	// any failure: return the handler to the pool
		}
	else
		{
		// raw bind: ISR called directly from dispatch, no handler object
		if (aFlags & NKern::EIrqBind_Tied)
			r = KErrNotSupported;	// tying makes no sense for raw ISRs
		else
			r = pI->BindRaw(aIsr, aPtr);
		}
out:
	if (r==KErrNone && pH)
		{
		// clear ENotReady so handler can be enabled
		__e32_atomic_and_rel32(&pH->iHState, ~NIrqHandler::ENotReady);
		r = handle;
		}
	NKern::ThreadLeaveCS();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("<NKIB: %08x", r));
	TRACE_IRQ8(4, aId, r);
	return r;
	}
sl@0
   686
sl@0
   687
TInt NIrq::FromHandle(TInt& aHandle, NIrq*& aIrq, NIrqHandler*& aHandler)
	{
	// Decode aHandle into an NIrq and (if applicable) an NIrqHandler.
	// Handles with cookie bits set refer to a Handlers[] entry; plain small
	// integers refer to an NIrq directly (raw-bound, or a non-shared line
	// with exactly one handler, in which case aHandle is upgraded to the
	// cookie form). Returns KErrNone or KErrArgument.
	TRACE_IRQ4(5, aHandle);
	aIrq = 0;
	aHandler = 0;
	NIrqHandler* pH = 0;
	NIrqHandler* pH2 = 0;
	NIrq* pI = 0;
	SDblQueLink* anchor = 0;
	TUint32 i;
	TInt r = KErrArgument;
	if (aHandle & NKern::EIrqCookieMask)
		{
		// cookie-style handle: pool index + validation cookie
		i = aHandle & NKern::EIrqIndexMask;
		if (i>=NK_MAX_IRQ_HANDLERS)
			goto out;
		pH = &::Handlers[i];
		if (pH->iHandle != TUint(aHandle))
			goto out;	// stale cookie - handler was since unbound
		aHandler = pH;
		aIrq = pH->iIrq;
		r = KErrNone;
		goto out;
		}
	if (TUint32(aHandle)>=NK_MAX_IRQS)
		goto out;
	pI = &::Irq[aHandle];
	if (pI->iIState & NIrq::ERaw)
		{
		// raw-bound interrupt: no handler object to return
		aIrq = pI;
		r = KErrNone;
		goto out;
		}
	if (pI->iStaticFlags & NIrq::EShared)
		goto out;	// shared lines must be referenced via a handler cookie
	// non-shared line: resolve its single handler, validating that the link
	// really points at a Handlers[] entry attached to exactly this NIrq
	anchor = &pI->iHandlers.iA;
	pH = _LOFF(anchor->iNext, NIrqHandler, iIrqLink);
	i = pH - ::Handlers;
	if (i>=NK_MAX_IRQ_HANDLERS)
		goto out;
	pH2 = &::Handlers[i];
	if (pH2 != pH)
		goto out;
	if (pH->iIrq != pI || anchor->iPrev != anchor->iNext)
		goto out;	// not attached here, or more than one handler on the list
	aHandle = pH->iHandle;	// upgrade caller's handle to the cookie form
	aHandler = pH;
	aIrq = pI;
	r = KErrNone;
out:
	TRACE_IRQ4(6, r);
	TRACE_IRQ12(7, aHandle, aIrq, aHandler);
	return r;
	}
sl@0
   741
sl@0
   742
EXPORT_C TInt NKern::InterruptUnbind(TInt aId)
	{
	// Unbind the ISR identified by aId (raw ISR or handler cookie).
	TRACE_IRQ4(8, aId);
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIU: ID=%08x", aId));
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::InterruptUnbind");
	NIrq* irq;
	NIrqHandler* handler;
	TInt r = NIrq::FromHandle(aId, irq, handler);
	if (r!=KErrNone)
		return r;
	NKern::ThreadEnterCS();
	// No handler object means aId referred to a raw-bound ISR.
	r = handler ? handler->Unbind(aId, 0) : irq->UnbindRaw();
	NKern::ThreadLeaveCS();
	TRACE_IRQ4(9, r);
	return r;
	}
sl@0
   766
sl@0
   767
EXPORT_C TInt NKern::InterruptEnable(TInt aId)
	{
	// Enable the interrupt (or the individual handler) identified by aId.
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIE: ID=%08x", aId));
	TRACE_IRQ4(10, aId);
	NIrq* irq;
	NIrqHandler* handler;
	TInt r = NIrq::FromHandle(aId, irq, handler);
	if (r==KErrNone)
		{
		// Raw-bound interrupts have no handler object.
		if (handler)
			r = handler->Enable(aId);
		else
			r = irq->EnableRaw();
		}
	TRACE_IRQ4(11, r);
	return r;
	}
sl@0
   779
sl@0
   780
EXPORT_C TInt NKern::InterruptDisable(TInt aId)
	{
	// Disable the interrupt (or the individual handler) identified by aId.
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKID: ID=%08x", aId));
	TRACE_IRQ4(12, aId);
	NIrq* irq;
	NIrqHandler* handler;
	TInt r = NIrq::FromHandle(aId, irq, handler);
	if (r==KErrNone)
		{
		// Raw-bound interrupts have no handler object.
		if (handler)
			r = handler->Disable(FALSE, aId);
		else
			r = irq->DisableRaw(FALSE);
		}
	TRACE_IRQ4(13, r);
	return r;
	}
sl@0
   792
sl@0
   793
EXPORT_C TInt NKern::InterruptClear(TInt aId)
	{
	// Not supported in this implementation - always returns KErrNotSupported.
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIC: ID=%08x", aId));
	return KErrNotSupported;
	}
sl@0
   798
sl@0
   799
EXPORT_C TInt NKern::InterruptSetPriority(TInt aId, TInt aPri)
	{
	// Not supported in this implementation - always returns KErrNotSupported.
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIS: ID=%08x PRI=%08x", aId, aPri));
	return KErrNotSupported;
	}
sl@0
   804
sl@0
   805
EXPORT_C TInt NKern::InterruptSetCpuMask(TInt aId, TUint32 aMask)
	{
	// Not supported in this implementation - always returns KErrNotSupported.
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIM: ID=%08x M=%08x", aId, aMask));
	return KErrNotSupported;
	}
sl@0
   810
sl@0
   811
EXPORT_C void NKern::Interrupt(TInt aId)
	{
	// Software-trigger interrupt aId by invoking its dispatch routine
	// directly. Panics on an out-of-range id.
	__NK_ASSERT_ALWAYS(TUint(aId) < TUint(NK_MAX_IRQS));
	Irq[aId].HwIsr();
	}
sl@0
   817