os/kernelhwsrv/kernel/eka/nkernsmp/nk_irq.cpp
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\nk_irq.cpp
//
//

/**
 @file
 @internalTechnology
*/

#include <e32cmn.h>
#include <e32cmn_private.h>
#include "nk_priv.h"
#include <nk_irq.h>

NIrq		Irq[NK_MAX_IRQS];
NIrqHandler	Handlers[NK_MAX_IRQ_HANDLERS];
NIrqHandler* NIrqHandler::FirstFree;

extern "C" void send_irq_ipi(TSubScheduler*);

void StepCookie(volatile TUint16& p, TInt n)
	{
	TUint32 x = p<<17;
	while(n--)
		{
		TUint32 y = x;
		x<<=1;
		y^=x;
		x |= ((y>>31)<<17);
		}
	p = (TUint16)(x>>17);
	}
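// StepCookie() appears to advance a 15-bit Fibonacci LFSR: the register
// occupies bits 17..31 of x, and each step shifts in (bit31 XOR bit30) at
// bit 17, i.e. taps on the two most significant bits (polynomial
// x^15 + x^14 + 1, which is maximal-length, so a nonzero cookie cycles
// through all 2^15-1 nonzero states before repeating). A minimal sketch of
// how the rest of this file drives it (illustrative only):
//
//	TUint16 c = 1;		// cookies are seeded nonzero
//	StepCookie(c, 61);	// InterruptInit0() takes 61 steps per handler slot
//	StepCookie(c, 1);	// Disable(aUnbind=TRUE) takes 1 step to kill old handles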

NIrq::NIrq()
	:	iNIrqLock(TSpinLock::EOrderNIrq)
	{
	iIState = EWait;
	iEventsPending = 0;
	iEnabledEvents = 0;
	iHwId = 0;
	iX = 0;
	}

TInt NIrq::BindRaw(NIsr aIsr, TAny* aPtr)
	{
	// Call only from thread context
	TInt r = KErrNone;
	Wait();
	iNIrqLock.LockOnly();
	if (iStaticFlags & EShared)
		{
		r = KErrAccessDenied;
		goto error;
		}
	if ( (iIState & ERaw) || !iHandlers.IsEmpty())
		{
		r = KErrInUse;
		goto error;
		}
	iHandlers.iA.iNext = (SDblQueLink*)aIsr;
	iHandlers.iA.iPrev = (SDblQueLink*)aPtr;
	__e32_atomic_ior_rel32(&iIState, ERaw);
error:
	iNIrqLock.UnlockOnly();
	Done();
	return r;
	}
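// A raw binding bypasses the NIrqHandler machinery entirely: since a raw IRQ
// can have no handler queue, the queue anchor's link fields are reused to
// store the ISR pointer (iA.iNext) and its argument (iA.iPrev). A usage
// sketch via the public API, with hypothetical names EMyIrqId/MyRawIsr/MyDevice:
//
//	void MyRawIsr(TAny* aPtr)
//		{ /* runs in interrupt context from NIrq::HwIsr(), which issues the EOI */ }
//
//	TInt r = NKern::InterruptBind(EMyIrqId, &MyRawIsr, &MyDevice,
//									NKern::EIrqBind_Raw, 0);
//	if (r == KErrNone)
//		NKern::InterruptEnable(EMyIrqId);	// raw binds are addressed by IRQ ID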

TInt NIrq::UnbindRaw()
	{
	// Call only from thread context
	TInt r = DisableRaw(TRUE);
	if (r != KErrNone)
		return r;
	Wait();
	iNIrqLock.LockOnly();
	if (iIState & ERaw)
		{
		iHandlers.iA.iNext = 0;
		iHandlers.iA.iPrev = 0;
		++iGeneration;	// release anyone still waiting in Disable()
		__e32_atomic_and_rel32(&iIState, ~(ERaw|EUnbind));
		}
	iNIrqLock.UnlockOnly();
	Done();
	return r;
	}

TInt NIrq::DisableRaw(TBool aUnbind)
	{
	TBool wait = FALSE;
	TInt r = KErrNone;
	TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
	if (!(iIState & ERaw))
		r = KErrGeneral;
	else
		{
		wait = TRUE;
		if (aUnbind)
			__e32_atomic_ior_acq32(&iIState, EUnbind);
		if (!(iEnabledEvents & 1))
			{
			iEnabledEvents |= 1;
			HwDisable();
//			wait = TRUE;
			}
		}
	__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
	TInt c = NKern::CurrentContext();
	if (wait && c!=NKern::EInterrupt)
		{
		// wait for currently running handler to finish or interrupt to be reenabled
		if (c==NKern::EThread)
			NKern::ThreadEnterCS();
		HwWaitCpus();	// ensure other CPUs have had a chance to accept any outstanding interrupts
		TUint32 g = iGeneration;
		while ( ((iIState >> 16) || HwPending()) && (iGeneration == g))
			{
			__chill();
			}
		if (c==NKern::EThread)
			NKern::ThreadLeaveCS();
		}
	return r;
	}
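// DisableRaw() must not return while the raw ISR may still be running on
// another CPU, so after masking the interrupt it spins until the run count
// (top 16 bits of iIState) drains and no interrupt is pending, or until
// iGeneration moves on (EnableRaw()/UnbindRaw() bump it to release waiters).
// The generation-counter wait idiom used here and in NIrqHandler::Disable():
//
//	TUint32 g = iGeneration;		// sample before testing the condition
//	while (still_active() && iGeneration == g)
//		__chill();					// spin politely; exit if state moves on
//
// (still_active() stands in for the real predicate above.)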

TInt NIrq::EnableRaw()
	{
	TInt r = KErrNone;
	TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
	if (!(iIState & ERaw))
		r = KErrGeneral;
	else if (iIState & EUnbind)
		r = KErrNotReady;
	else if (iEnabledEvents & 1)
		{
		iEnabledEvents = 0;
		HwEnable();
		++iGeneration;
		}
	__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
	return r;
	}

TInt NIrq::Bind(NIrqHandler* aH)
	{
	// Call only from thread context
	TInt r = KErrInUse;
	Wait();
	if (!(iIState & ERaw))
		{
		r = KErrNone;
		TBool empty = iHandlers.IsEmpty();
		TBool shared = iStaticFlags & EShared;
		TBool exclusive = iIState & NIrqHandler::EExclusive;
		if (!empty)
			{
			if (!shared || exclusive)
				{
				r = KErrAccessDenied;
				goto error;
				}
			NIrqHandler* h = _LOFF(iHandlers.First(), NIrqHandler, iIrqLink);
			if (h->iHState & NIrqHandler::EExclusive)
				{
				r = KErrAccessDenied;
				goto error;
				}
			}
		aH->iIrq = this;
		iHandlers.Add(&aH->iIrqLink);
		}
error:
	Done();
	return r;
	}
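// Binding rules enforced above: a raw-bound IRQ accepts no handlers; a
// non-shared IRQ accepts at most one handler; a shared IRQ accepts several
// unless an existing handler was bound exclusive. An illustrative failure
// sketch with hypothetical drivers Dev1/Dev2 contending for the same IRQ:
//
//	TInt h1 = NKern::InterruptBind(EIrqId, Isr1, &Dev1, 0, 0);	// succeeds
//	TInt h2 = NKern::InterruptBind(EIrqId, Isr2, &Dev2, 0, 0);
//	// h2 == KErrAccessDenied unless the IRQ was initialised as shared
//	// (EShared in iStaticFlags, set from the flags passed to InterruptInit()).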

void NIrq::HwIsr()
	{
	TRACE_IRQ12(16, this, iVector, iIState);
	TBool eoi_done = FALSE;
	TUint32 rcf0 = EnterIsr();		// for initial run count
	TUint32 rcf1 = iIState;			// might have changed while we were waiting in EnterIsr()
	if (rcf1 & ERaw)
		{
		if (!(rcf1 & EUnbind))
			{
			NIsr f = (NIsr)iHandlers.iA.iNext;
			TAny* p = iHandlers.iA.iPrev;
			(*f)(p);
			}
		HwEoi();
		IsrDone();
		return;
		}
	if (rcf0 >> 16)
		{
		HwEoi();
		return;
		}
	if (!(iStaticFlags & ELevel))
		{
		eoi_done = TRUE;
		HwEoi();
		}
	do	{
		// Handler list can't be touched now
		SDblQueLink* anchor = &iHandlers.iA;
		SDblQueLink* p = anchor->iNext;
		while (p != anchor)
			{
			NIrqHandler* h = _LOFF(p, NIrqHandler, iIrqLink);
			h->Activate(1);
			p = p->iNext;
			}
		if (!eoi_done)
			{
			eoi_done = TRUE;
			HwEoi();
			}
		if ((iStaticFlags & ELevel) && iEventsPending)
			{
			// For a level triggered interrupt make sure interrupt is disabled until
			// all pending event handlers have run, to avoid a continuous interrupt.
			TInt irq = __SPIN_LOCK_IRQSAVE(iNIrqLock);
			if (iEventsPending)
				{
				iEnabledEvents |= 1;
				HwDisable();
				}
			__SPIN_UNLOCK_IRQRESTORE(iNIrqLock,irq);
			}
		} while (IsrDone());
	}
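// EOI ordering in HwIsr(): for edge-triggered interrupts the EOI is issued
// before the handlers run, so a new edge arriving mid-handler is latched and
// replayed via the IsrDone() loop; for level-triggered interrupts the EOI is
// deferred until after Activate() has queued the handlers, and the source is
// masked (bit 0 of iEnabledEvents) until run_event_handlers() has drained
// iEventsPending, since an unmasked level source would re-assert immediately.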

void NIrqHandler::Activate(TInt aCount)
	{
	TUint32 orig = DoActivate(aCount);
	TRACE_IRQ12(17, this, orig, aCount);
	if (orig & (EDisable|EUnbind|EActive))
		return;	// disabled or already active
	if (iTied)
		{
		// We need to enforce mutual exclusion between the event handler
		// and the tied thread or thread group, so the event handler must
		// run on the CPU to which the thread or group is currently attached.
		// Once the event has been attached to that CPU, the thread/group
		// can't be migrated until the event handler completes.
		// A pending event count for the tied thread/group tells us when
		// the thread/group can safely be migrated.
		TInt tied_cpu = iTied->BeginTiedEvent();
		TInt this_cpu = NKern::CurrentCpu();
		if (tied_cpu != this_cpu)
			{
			__e32_atomic_add_acq32(&iIrq->iEventsPending, 1);
			TheSubSchedulers[tied_cpu].QueueEventAndKick(this);
			// FIXME: move IRQ over to tied CPU if this is the only handler for that IRQ
			//			what to do about shared IRQs?
			return;
			}
		}
	// event can run on this CPU so run it now
	if (aCount)
		{
		orig = EventBegin();
		TRACE_IRQ8(18, this, orig);
		(*iFn)(iPtr);
		orig = EventDone();
		TRACE_IRQ8(19, this, orig);
		if (!(orig & EActive))
			{
			if (iTied)
				iTied->EndTiedEvent();
			return;	// that was the last occurrence, or the event is now disabled
			}
		}
	__e32_atomic_add_ord32(&iIrq->iEventsPending, 1);
	// add event to this CPU
	SubScheduler().QueueEventAndKick(this);
	}
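// Tied-event dispatch in Activate(), in outline: BeginTiedEvent() pins the
// tied thread/group to its current CPU and returns that CPU's index. If we
// are already on it, the handler runs synchronously here; otherwise it is
// queued to the tied CPU and the IPI path runs it there, keeping the handler
// and the tied thread strictly serialised. A sketch of the pairing contract
// (illustrative):
//
//	TInt cpu = iTied->BeginTiedEvent();	// blocks migration, returns tied CPU
//	// ... run the handler, or queue it to TheSubSchedulers[cpu] ...
//	iTied->EndTiedEvent();				// unblocks migration once the event is done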

NIrqHandler::NIrqHandler()
	{
	iIrqLink.iNext = 0;
	iIrq = 0;
	iTied = 0;
	iHState = EDisable|EBind|ENotReady|EEventHandlerIrq;
	iFn = 0;
	iPtr = 0;
	memclr(iNIrqHandlerSpare, sizeof(iNIrqHandlerSpare));
	}

void NIrqHandler::Free()
	{
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	if (!iTied)	// Only free if iTied has been cleared
		{
		iIrqLink.iNext = FirstFree;
		FirstFree = this;
		}
	NEventHandler::TiedLock.UnlockOnly();
	NKern::Unlock();
	}

NIrqHandler* NIrqHandler::Alloc()
	{
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	NIrqHandler* p = FirstFree;
	if (p)
		FirstFree = (NIrqHandler*)p->iIrqLink.iNext;
	NEventHandler::TiedLock.UnlockOnly();
	NKern::Unlock();
	if (p)
		new (p) NIrqHandler();
	return p;
	}
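// Handlers are never heap-allocated: they live in the static ::Handlers[]
// pool and recycle through an intrusive LIFO free list threaded via
// iIrqLink.iNext, guarded by TiedLock with preemption disabled. Lifecycle
// sketch:
//
//	NIrqHandler* h = NIrqHandler::Alloc();	// pop free list, placement-new resets state
//	// ... pI->Bind(h), Enable()/Disable(), Unbind() ...
//	h->Free();	// push back, but only once iTied has been cleared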

TInt NIrqHandler::Enable(TInt aHandle)
	{
	// call from any context
	TBool reactivate = FALSE;
	TInt r = KErrNotReady;
	NIrq* pI = iIrq;
	if (!pI)
		return KErrNotReady;
	TInt irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);	// OK since NIrq's are never deleted
	if (iIrq==pI && TUint(aHandle)==iHandle)	// check handler not unbound
		{
		TUint32 orig = DoSetEnabled();	// clear EDisable and EBind provided neither EUnbind nor ENotReady set
		if (!(orig & (EUnbind|ENotReady)))
			{
			r = KErrNone;
			if (orig & EDisable)	// check not already enabled
				{
				++iGeneration;
				TUint32 n = pI->iEnabledEvents;
				pI->iEnabledEvents += 2;
				if (n==0)
					pI->HwEnable();	// enable HW interrupt if this is first handler to be enabled
				if ((orig >> 16) && !(orig & EActive))
					// replay remembered interrupt(s)
					reactivate = TRUE;
				}
			}
		}
	if (reactivate)
		{
		pI->iNIrqLock.UnlockOnly();
		Activate(0);
		pI->iNIrqLock.LockOnly();
		}
	__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
	return r;
	}

TInt NIrqHandler::Disable(TBool aUnbind, TInt aHandle)
	{
	// call from any context
	NIrq* pI = iIrq;
	if (!pI)
		return KErrGeneral;
	TInt irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);	// OK since NIrq's are never deleted
	if (iIrq != pI || TUint(aHandle)!=iHandle)	// check handler not unbound
		{
		__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
		return KErrGeneral;
		}
	TInt r = aUnbind ? KErrGeneral : KErrNone;
	TUint32 f = aUnbind ? EUnbind|EDisable : EDisable;
	TUint32 orig = __e32_atomic_ior_acq32(&iHState, f);
	TUint32 g = iGeneration;
	if (!(orig & EDisable))	// check not already disabled
		{
		pI->iEnabledEvents -= 2;
		if (!pI->iEnabledEvents)
			pI->HwDisable();	// disable HW interrupt if no more enabled handlers
		}
	if (aUnbind && !(orig & EUnbind))
		{
		volatile TUint16& cookie = *(volatile TUint16*)(((TUint8*)&iHandle)+2);
		StepCookie(cookie, 1);
		r = KErrNone;
		}
	__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
	if (NKern::CurrentContext() != NKern::EInterrupt)
		{
		// wait for currently running handler to finish or interrupt to be reenabled
		while ((iHState & EActive) && (iGeneration == g))
			{
			__chill();
			}
		}
	return r;
	}
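// The cookie step above is what invalidates stale handles: the top 16 bits
// of iHandle (the cookie) are advanced in place, so the iHandle comparisons
// in Enable()/Disable() fail for any handle issued before the unbind. The
// byte arithmetic (((TUint8*)&iHandle)+2) assumes a little-endian layout of
//
//	iHandle == (cookie << 16) | index	// index selects the slot in ::Handlers[]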

TInt NIrqHandler::Unbind(TInt aId, NSchedulable* aTied)
	{
	TInt r = Disable(TRUE, aId);	// waits for any current activation of ISR to finish
	if (r==KErrNone || aTied)	// returns KErrGeneral if someone else already unbound this interrupt handler
		{
		// Possible race condition here between tied thread termination and interrupt unbind.
		// We need to be sure that the iTied field is NULL before the tied thread/group
		// is destroyed.
		NKern::Lock();
		NEventHandler::TiedLock.LockOnly();	// this guarantees pH->iTied cannot change
		NSchedulable* t = iTied;
		if (t)
			{
			// We need to guarantee the object pointed to by t cannot be deleted until we
			// have finished with it.
			t->AcqSLock();
			if (iTiedLink.iNext)
				{
				iTiedLink.Deque();
				iTiedLink.iNext = 0;
				iTied = 0;
				}
			if (aTied && aTied==t)
				iTied = 0;
			t->RelSLock();
			}
		NEventHandler::TiedLock.UnlockOnly();
		NKern::Unlock();
		}
	if (r==KErrNone)
		{
		DoUnbind();
		Free();
		}
	return r;
	}

void NIrqHandler::DoUnbind()
	{
	// Call only from thread context
	NIrq* pI = iIrq;
	pI->Wait();
	iIrqLink.Deque();
	iIrq = 0;
	pI->Done();
	}

TBool TSubScheduler::QueueEvent(NEventHandler* aEvent)
	{
	TInt irq = __SPIN_LOCK_IRQSAVE(iEventHandlerLock);
	TBool pending = iEventHandlersPending;
	iEventHandlersPending = TRUE;
	iEventHandlers.Add(aEvent);
	__SPIN_UNLOCK_IRQRESTORE(iEventHandlerLock,irq);
	return !pending;
	}

void TSubScheduler::QueueEventAndKick(NEventHandler* aEvent)
	{
	if (QueueEvent(aEvent))
		{
		// extra barrier ?
		send_irq_ipi(this);
		}
	}
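// QueueEvent() returns TRUE only on the empty-to-nonempty transition, so
// QueueEventAndKick() sends at most one IPI per burst of queued events; the
// kicked CPU then calls run_event_handlers() (below) to drain the whole
// queue. Cross-CPU dispatch sketch, matching what Activate() does above:
//
//	__e32_atomic_add_acq32(&iIrq->iEventsPending, 1);	// account before queuing
//	TheSubSchedulers[tied_cpu].QueueEventAndKick(this);	// IPI only if queue was empty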

extern "C" void run_event_handlers(TSubScheduler* aS)
	{
	while (aS->iEventHandlersPending)
		{
		TInt irq = __SPIN_LOCK_IRQSAVE(aS->iEventHandlerLock);
		if (aS->iEventHandlers.IsEmpty())
			{
			aS->iEventHandlersPending = FALSE;
			__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
			break;
			}
		NIrqHandler* h = (NIrqHandler*)aS->iEventHandlers.First()->Deque();
		if (aS->iEventHandlers.IsEmpty())
			aS->iEventHandlersPending = FALSE;
		TInt type = h->iHType;
		NSchedulable* tied = h->iTied;
		if (type == NEventHandler::EEventHandlerNTimer)
			{
			NEventFn f = h->iFn;
			TAny* p = h->iPtr;
			mb();	// make sure dequeue observed and iFn,iPtr,iTied sampled before state change observed
			h->i8888.iHState1 = NTimer::EIdle; // can't touch timer again after this
			__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
			(*f)(p);
			if (tied)
				tied->EndTiedEvent();
			continue;
			}
		__SPIN_UNLOCK_IRQRESTORE(aS->iEventHandlerLock, irq);
		TBool requeue = TRUE;
		switch (h->iHType)
			{
			case NEventHandler::EEventHandlerIrq:
				{
				TUint32 orig;
				// event can run on this CPU so run it now
				// if event tied, migration of tied thread/group will have been blocked
				orig = h->EventBegin();
				TRACE_IRQ8(20, h, orig);
				(*h->iFn)(h->iPtr);
				TRACE_IRQ4(21, h);
				if (!(h->iHState & NIrqHandler::ERunCountMask))	// if run count still nonzero, definitely still active
					{
					NIrq* pI = h->iIrq;
					irq = __SPIN_LOCK_IRQSAVE(pI->iNIrqLock);
					orig = h->EventDone();
					TRACE_IRQ8(22, h, orig);
					if (!(orig & NIrqHandler::EActive))
						{
						// handler is no longer active - can't touch it again
						// pI is OK since NIrq's are never deleted/reused
						requeue = FALSE;
						if (__e32_atomic_add_rel32(&pI->iEventsPending, TUint32(-1)) == 1)
							{
							if (pI->iEnabledEvents & 1)
								{
								pI->iEnabledEvents &= ~1;
								if (pI->iEnabledEvents)
									pI->HwEnable();
								}
							}
						}
					__SPIN_UNLOCK_IRQRESTORE(pI->iNIrqLock,irq);
					}
				break;
				}
			default:
				__KTRACE_OPT(KPANIC,DEBUGPRINT("h=%08x",h));
				__NK_ASSERT_ALWAYS(0);
			}
		if (tied && !requeue)
			{
			// If the tied thread/group has no more tied events outstanding
			// and has a migration pending, trigger the migration now.
			// Atomically change the tied_cpu to the target CPU here. An IDFC
			// can then effect the migration.
			// Note that the tied code can't run in parallel with us until
			// the tied_cpu is changed. However it could run as soon as the
			// tied_cpu is changed (e.g. if added to ready list after change)
			tied->EndTiedEvent();
			}
		if (requeue)
			{
			// still pending so put it back on the queue
			// leave interrupt disabled (if so) and migration of tied thread/group blocked
			aS->QueueEvent(h);
			}
		}
	}
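// Notes on the drain loop: it runs on the kicked CPU with the queue lock
// dropped around each callback. NTimer completions are special-cased because
// storing NTimer::EIdle to iHState1 publishes the timer for reuse or
// deletion, hence iFn/iPtr/iTied are sampled and an mb() issued first. For
// IRQ events, EventDone() under iNIrqLock decides between requeueing (run
// count still nonzero) and retiring the handler, at which point the last
// pending event re-enables a level-triggered source that HwIsr() had masked.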

/******************************************************************************
 * Public interrupt management functions
 ******************************************************************************/

void NKern::InterruptInit0()
	{
	TInt i;
	TUint16 cookie = 1;
	NIrqHandler::FirstFree = 0;
	for (i=NK_MAX_IRQ_HANDLERS-1; i>=0; --i)
		{
		StepCookie(cookie, 61);
		NIrqHandler* h = &::Handlers[i];
		__KTRACE_OPT(KBOOT,DEBUGPRINT("NIrqHandler[%d] at %08x", i, h));
		h->iGeneration = 0;
		h->iHandle = (cookie << 16) | i;
		h->iIrqLink.iNext = NIrqHandler::FirstFree;
		NIrqHandler::FirstFree = h;
		}
	NIrq::HwInit0();
	}
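// After init, handler slot i carries handle (cookie_i << 16) | i, with each
// cookie 61 LFSR steps from its neighbour's. The cookie being nonzero is
// what lets FromHandle() distinguish full handles from bare IRQ IDs. Decode
// sketch using the masks FromHandle() tests:
//
//	if (handle & NKern::EIrqCookieMask)
//		{ TUint32 index = handle & NKern::EIrqIndexMask; /* slot in ::Handlers[] */ }
//	else
//		{ /* bare IRQ number indexing ::Irq[] */ }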

EXPORT_C TInt NKern::InterruptInit(TInt aId, TUint32 aFlags, TInt aVector, TUint32 aHwId, TAny* aExt)
	{
	__KTRACE_OPT(KBOOT,DEBUGPRINT("NKII: ID=%02x F=%08x V=%03x HWID=%08x X=%08x", aId, aFlags, aVector, aHwId, aExt));
	TRACE_IRQ12(0, (aId|(aVector<<16)), aFlags, aHwId);
	if (TUint(aId) >= TUint(NK_MAX_IRQS))
		return KErrArgument;
	NIrq* pI = &Irq[aId];
	__KTRACE_OPT(KBOOT,DEBUGPRINT("NIrq[%02x] at %08x", aId, pI));
	TRACE_IRQ8(1, aId, pI);
	new (pI) NIrq;
	pI->iX = (NIrqX*)aExt;
	pI->iIndex = (TUint16)aId;
	pI->iHwId = aHwId;
	pI->iVector = aVector;
	pI->iStaticFlags = (TUint16)(aFlags & 0x13);
	if (aFlags & NKern::EIrqInit_Count)
		pI->iIState |= NIrq::ECount;
	pI->HwInit();
	__e32_atomic_and_rel32(&pI->iIState, ~NIrq::EWait);
	return KErrNone;
	}

EXPORT_C TInt NKern::InterruptBind(TInt aId, NIsr aIsr, TAny* aPtr, TUint32 aFlags, NSchedulable* aTied)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIB: ID=%02x ISR=%08x(%08x) F=%08x T=%T", aId, aIsr, aPtr, aFlags, aTied));
	TRACE_IRQ12(2, aId, aIsr, aPtr);
	TRACE_IRQ12(3, aId, aFlags, aTied);
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::InterruptBind");
	if (TUint(aId) >= TUint(NK_MAX_IRQS))
		{
		TRACE_IRQ8(4, aId, KErrArgument);
		return KErrArgument;
		}
	NIrq* pI = &Irq[aId];
	NIrqHandler* pH = 0;
	NSchedulable* pT = 0;
	if (aFlags & NKern::EIrqBind_Tied)
		{
		if (!aTied)
			aTied = NKern::CurrentThread();
		pT = aTied;
		}
	TInt r = KErrNoMemory;
	TInt handle = 0;
	NKern::ThreadEnterCS();
	if (!(aFlags & NKern::EIrqBind_Raw))
		{
		pH = NIrqHandler::Alloc();
		if (!pH)
			goto out;
		pH->iFn = aIsr;
		pH->iPtr = aPtr;
		__e32_atomic_add_ord32(&pH->iGeneration, 1);
		if (aFlags & EIrqBind_Exclusive)
			pH->iHState |= NIrqHandler::EExclusive;
		if (aFlags & EIrqBind_Count)
			pH->iHState |= NIrqHandler::ECount;
		r = pI->Bind(pH);
		if (r==KErrNone)
			{
			handle = pH->iHandle;
			// We assume that aTied cannot disappear entirely before we return
			if (pT)
				{
				NKern::Lock();
				r = pT->AddTiedEvent(pH);
				NKern::Unlock();
				}
			if (r!=KErrNone)
				{
				// unbind
				pH->DoUnbind();
				}
			}
		if (r!=KErrNone)
			pH->Free();
		}
	else
		{
		if (aFlags & NKern::EIrqBind_Tied)
			r = KErrNotSupported;
		else
			r = pI->BindRaw(aIsr, aPtr);
		}
out:
	if (r==KErrNone)
		{
		if (pH)
			{
			// clear ENotReady so the handler can be enabled; raw binds have no
			// NIrqHandler, so guard against dereferencing a null pH here
			__e32_atomic_and_rel32(&pH->iHState, ~NIrqHandler::ENotReady);
			}
		r = handle;	// 0 for raw binds, which are addressed by IRQ ID instead
		}
	NKern::ThreadLeaveCS();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("<NKIB: %08x", r));
	TRACE_IRQ8(4, aId, r);
	return r;
	}
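// End-to-end usage sketch for the handler-based path (names EMyIrqId, MyIsr,
// MyDevice are illustrative, not part of this API):
//
//	TInt h = NKern::InterruptBind(EMyIrqId, &MyIsr, &MyDevice, 0, 0);
//	if (h > 0)	// success returns the handle (cookie<<16)|index, never 0 here
//		{
//		NKern::InterruptEnable(h);	// unmasks HW on the first enabled handler
//		// ... interrupts delivered; MyIsr runs via the event-handler queue ...
//		NKern::InterruptDisable(h);	// waits out any running activation
//		NKern::InterruptUnbind(h);	// steps the cookie, invalidating h
//		}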

TInt NIrq::FromHandle(TInt& aHandle, NIrq*& aIrq, NIrqHandler*& aHandler)
	{
	TRACE_IRQ4(5, aHandle);
	aIrq = 0;
	aHandler = 0;
	NIrqHandler* pH = 0;
	NIrqHandler* pH2 = 0;
	NIrq* pI = 0;
	SDblQueLink* anchor = 0;
	TUint32 i;
	TInt r = KErrArgument;
	if (aHandle & NKern::EIrqCookieMask)
		{
		i = aHandle & NKern::EIrqIndexMask;
		if (i>=NK_MAX_IRQ_HANDLERS)
			goto out;
		pH = &::Handlers[i];
		if (pH->iHandle != TUint(aHandle))
			goto out;
		aHandler = pH;
		aIrq = pH->iIrq;
		r = KErrNone;
		goto out;
		}
	if (TUint32(aHandle)>=NK_MAX_IRQS)
		goto out;
	pI = &::Irq[aHandle];
	if (pI->iIState & NIrq::ERaw)
		{
		aIrq = pI;
		r = KErrNone;
		goto out;
		}
	if (pI->iStaticFlags & NIrq::EShared)
		goto out;
	anchor = &pI->iHandlers.iA;
	pH = _LOFF(anchor->iNext, NIrqHandler, iIrqLink);
	i = pH - ::Handlers;
	if (i>=NK_MAX_IRQ_HANDLERS)
		goto out;
	pH2 = &::Handlers[i];
	if (pH2 != pH)
		goto out;
	if (pH->iIrq != pI || anchor->iPrev != anchor->iNext)
		goto out;
	aHandle = pH->iHandle;
	aHandler = pH;
	aIrq = pI;
	r = KErrNone;
out:
	TRACE_IRQ4(6, r);
	TRACE_IRQ12(7, aHandle, aIrq, aHandler);
	return r;
	}
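// FromHandle() accepts two encodings. A value with cookie bits set is a full
// handle and is validated against the slot's current iHandle; a bare IRQ
// number resolves to the raw binding, or, for a non-shared IRQ with exactly
// one handler, is rewritten in place to that handler's real handle (which is
// why aHandle is passed by reference). Resolution sketch:
//
//	TInt id = aIdOrHandle;	// may be an IRQ number or a full handle
//	NIrq* irq; NIrqHandler* h;
//	if (NIrq::FromHandle(id, irq, h) == KErrNone && h)
//		{ /* id now holds h->iHandle, even if a bare IRQ number was passed */ }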

EXPORT_C TInt NKern::InterruptUnbind(TInt aId)
	{
	TRACE_IRQ4(8, aId);
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIU: ID=%08x", aId));
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"NKern::InterruptUnbind");
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r!=KErrNone)
		return r;
	NKern::ThreadEnterCS();
	if (!pH)
		{
		// raw ISR
		r = pI->UnbindRaw();
		}
	else
		{
		r = pH->Unbind(aId, 0);
		}
	NKern::ThreadLeaveCS();
	TRACE_IRQ4(9, r);
	return r;
	}

EXPORT_C TInt NKern::InterruptEnable(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIE: ID=%08x", aId));
	TRACE_IRQ4(10, aId);
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r==KErrNone)
		r = pH ? pH->Enable(aId) : pI->EnableRaw();
	TRACE_IRQ4(11, r);
	return r;
	}

EXPORT_C TInt NKern::InterruptDisable(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKID: ID=%08x", aId));
	TRACE_IRQ4(12, aId);
	NIrq* pI;
	NIrqHandler* pH;
	TInt r = NIrq::FromHandle(aId, pI, pH);
	if (r==KErrNone)
		r = pH ? pH->Disable(FALSE, aId) : pI->DisableRaw(FALSE);
	TRACE_IRQ4(13, r);
	return r;
	}

EXPORT_C TInt NKern::InterruptClear(TInt aId)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIC: ID=%08x", aId));
	return KErrNotSupported;
	}

EXPORT_C TInt NKern::InterruptSetPriority(TInt aId, TInt aPri)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIS: ID=%08x PRI=%08x", aId, aPri));
	return KErrNotSupported;
	}

EXPORT_C TInt NKern::InterruptSetCpuMask(TInt aId, TUint32 aMask)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT(">NKIM: ID=%08x M=%08x", aId, aMask));
	return KErrNotSupported;
	}

EXPORT_C void NKern::Interrupt(TInt aId)
	{
	__NK_ASSERT_ALWAYS(TUint(aId) < TUint(NK_MAX_IRQS));
	NIrq* pI = &Irq[aId];
	pI->HwIsr();
	}