os/kernelhwsrv/kernel/eka/nkernsmp/dfcs.cpp
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/nkernsmp/dfcs.cpp	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,930 @@
     1.4 +// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32\nkernsmp\dfcs.cpp
    1.18 +// DFCs
    1.19 +// 
    1.20 +//
    1.21 +
    1.22 +// NThreadBase member data
    1.23 +#define __INCLUDE_NTHREADBASE_DEFINES__
    1.24 +
    1.25 +// TDfc member data
    1.26 +#define __INCLUDE_TDFC_DEFINES__
    1.27 +
    1.28 +#include "nk_priv.h"
    1.29 +
    1.30 +extern "C" void send_self_resched_ipi();
    1.31 +
    1.32 +/** Construct an IDFC
    1.33 +
    1.34 +	@param aFunction = function to call
    1.35 +	@param aPtr = parameter to be passed to function
    1.36 + */
    1.37 +EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr)
    1.38 +	{
    1.39 +	iPtr = aPtr;
    1.40 +	iFn = aFunction;
    1.41 +	iTied = 0;
    1.42 +	iHType = EEventHandlerIDFC;
    1.43 +	i8888.iHState0 = 0;
    1.44 +	i8888.iHState1 = 0;
    1.45 +	i8888.iHState2 = 0;
    1.46 +	iTiedLink.iNext = 0;
    1.47 +	}
    1.48 +
    1.49 +
    1.50 +/** Construct an IDFC tied to a thread or group
    1.51 +
    1.52 +	@param aTied = pointer to thread or group to which IDFC should be tied
    1.53 +	@param aFunction = function to call
    1.54 +	@param aPtr = parameter to be passed to function
    1.55 +
    1.56 +	@pre Call in thread context, interrupts enabled
    1.57 + */
    1.58 +EXPORT_C TDfc::TDfc(NSchedulable* aTied, TDfcFn aFunction, TAny* aPtr)
    1.59 +	{
    1.60 +	iPtr = aPtr;
    1.61 +	iFn = aFunction;
    1.62 +	iTied = 0;
    1.63 +	iHType = EEventHandlerIDFC;
    1.64 +	i8888.iHState0 = 0;
    1.65 +	i8888.iHState1 = 0;
    1.66 +	i8888.iHState2 = 0;
    1.67 +	iTiedLink.iNext = 0;
    1.68 +	if (aTied)
    1.69 +		{
    1.70 +		SetTied(aTied);
    1.71 +		}
    1.72 +	}
    1.73 +
    1.74 +
    1.75 +/** Construct a DFC without specifying a DFC queue.
    1.76 +	The DFC queue must be set before the DFC may be queued.
    1.77 +
    1.78 +	@param aFunction = function to call
    1.79 +	@param aPtr = parameter to be passed to function
    1.80 +	@param aPriority = priority of DFC within the queue (0 to 7, where 7 is highest)
    1.81 + */
    1.82 +EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr, TInt aPriority)
    1.83 +	{
    1.84 +	__NK_ASSERT_DEBUG((TUint)aPriority<(TUint)KNumDfcPriorities);
    1.85 +	iPtr = aPtr;
    1.86 +	iFn = aFunction;
    1.87 +	iTied = 0;
    1.88 +	iHType = TUint8(aPriority);
    1.89 +	i8888.iHState0 = 0;
    1.90 +	i8888.iHState1 = 0;
    1.91 +	i8888.iHState2 = 0;
    1.92 +	iTiedLink.iNext = 0;
    1.93 +	}
    1.94 +
    1.95 +
    1.96 +/** Construct a DFC specifying a DFC queue.
    1.97 +
    1.98 +	@param aFunction = function to call
    1.99 +	@param aPtr = parameter to be passed to function
   1.100 +	@param aDfcQ = pointer to DFC queue which this DFC should use
    1.101 +	@param aPriority = priority of DFC within the queue (0 to 7, where 7 is highest)
   1.102 + */
   1.103 +EXPORT_C TDfc::TDfc(TDfcFn aFunction, TAny* aPtr, TDfcQue* aDfcQ, TInt aPriority)
   1.104 +	{
   1.105 +	__NK_ASSERT_DEBUG((TUint)aPriority<(TUint)KNumDfcPriorities);
   1.106 +	iPtr = aPtr;
   1.107 +	iFn = aFunction;
   1.108 +	iDfcQ = aDfcQ;
   1.109 +	iHType = TUint8(aPriority);
   1.110 +	i8888.iHState0 = 0;
   1.111 +	i8888.iHState1 = 0;
   1.112 +	i8888.iHState2 = 0;
   1.113 +	iTiedLink.iNext = 0;
   1.114 +	}
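          +
          +/* Usage sketch (illustrative only): typical ways the constructors above might be
          +   used by a driver. MyIDfcFn, MyDfcFn, MyDriver and gMyDfcQ are hypothetical names,
          +   not part of the kernel API.
          +   @code
          +	TDfc idfc(&MyIDfcFn, &MyDriver);			// IDFC
          +	TDfc dfc1(&MyDfcFn, &MyDriver, 3);			// DFC, queue set later (e.g. via SetDfcQ())
          +	TDfc dfc2(&MyDfcFn, &MyDriver, gMyDfcQ, 3);	// DFC bound to a queue, priority 3
          +   @endcode
          +*/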
   1.115 +
   1.116 +
   1.117 +/** Tie an IDFC to a thread or group
   1.118 +
   1.119 +	@param	aTied = pointer to thread or group to which IDFC should be tied
   1.120 +	@return	KErrNone if successful
   1.121 +	@return	KErrDied if thread has exited or group has been destroyed.
   1.122 +
   1.123 +	@pre Call in thread context, interrupts enabled
   1.124 +	@pre Must be IDFC not DFC
   1.125 +	@pre IDFC must not be queued or running
   1.126 +	@pre IDFC must not already be tied
   1.127 + */
   1.128 +EXPORT_C TInt TDfc::SetTied(NSchedulable* aTied)
   1.129 +	{
   1.130 +	__NK_ASSERT_ALWAYS(IsIDFC() && i8816.iHState16==0);
   1.131 +	__NK_ASSERT_ALWAYS(aTied && !iTied);
   1.132 +	NKern::Lock();
   1.133 +	TInt r = aTied->AddTiedEvent(this);
   1.134 +	__NK_ASSERT_ALWAYS(r==KErrNone || r==KErrDied);
   1.135 +	NKern::Unlock();
   1.136 +	return r;
   1.137 +	}
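          +
          +/* Usage sketch (illustrative only): tying an IDFC to the current thread; SetTied()
          +   returns KErrDied if the thread has already exited. MyIDfcFn and MyDriver are
          +   hypothetical names.
          +   @code
          +	TDfc idfc(&MyIDfcFn, &MyDriver);				// untied IDFC
          +	TInt r = idfc.SetTied(NKern::CurrentThread());	// KErrNone or KErrDied
          +   @endcode
          +*/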
   1.138 +
   1.139 +
   1.140 +/** Destroy a DFC or IDFC
   1.141 +
   1.142 +	@pre Call from thread context with interrupts and preemption enabled
   1.143 +	@pre Calling thread holds no fast mutex
   1.144 +	@pre Calling thread in critical section
   1.145 + */
   1.146 +EXPORT_C TDfc::~TDfc()
   1.147 +	{
   1.148 +	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"TDfc::~TDfc");
   1.149 +	NKern::Lock();
   1.150 +	NEventHandler::TiedLock.LockOnly();
   1.151 +	NSchedulable* tied = iTied;
   1.152 +	if (IsDFC() || (IsIDFC() && !tied))
   1.153 +		{
   1.154 +		Cancel();
   1.155 +		iHType = (TUint8)EEventHandlerDummy;
   1.156 +		}
   1.157 +	if (IsIDFC())
   1.158 +		{
   1.159 +		__NK_ASSERT_ALWAYS(tied!=0);
   1.160 +		tied->AcqSLock();
   1.161 +		if (iTiedLink.iNext)
   1.162 +			{
   1.163 +			iTiedLink.Deque();
   1.164 +			iTiedLink.iNext = 0;
   1.165 +			}
   1.166 +		tied->RelSLock();
   1.167 +		Cancel();
   1.168 +		iHType = (TUint8)EEventHandlerDummy;
   1.169 +		iTied = 0;
   1.170 +		}
   1.171 +	NEventHandler::TiedLock.UnlockOnly();
   1.172 +	NKern::Unlock();
   1.173 +	}
   1.174 +
   1.175 +
   1.176 +/** Construct a DFC queue
   1.177 +	Kern::DfcQInit() should be called on the new DFC queue before it can be used.
   1.178 + */
   1.179 +EXPORT_C TDfcQue::TDfcQue()
   1.180 +	: iThread(NULL)
   1.181 +	{}
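          +
          +/* Usage sketch (illustrative only): initialising a statically allocated queue with
          +   Kern::DfcQInit(), as noted above, before any DFC is queued on it. gMyDfcQ,
          +   KMyDfcQName and KMyDfcThreadPriority are hypothetical names.
          +   @code
          +	TDfcQue gMyDfcQ;
          +	_LIT(KMyDfcQName, "MyDfcQ");
          +	TInt r = Kern::DfcQInit(&gMyDfcQ, KMyDfcThreadPriority, &KMyDfcQName);
          +   @endcode
          +*/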
   1.182 +
   1.183 +
   1.184 +
   1.185 +/** Queue an IDFC or a DFC from an ISR
   1.186 +
   1.187 +	This function is the only way to queue an IDFC and is the only way to queue
   1.188 +	a DFC from an ISR. To queue a DFC from an IDFC or a thread either Enque()
   1.189 +	or DoEnque() should be used.
   1.190 +
   1.191 +	This function does nothing if the IDFC/DFC is already queued.
   1.192 +
   1.193 +	@pre Call only from ISR, IDFC or thread with preemption disabled.
   1.194 +	@pre Do not call from thread with preemption enabled.
   1.195 +	@return	TRUE if DFC was actually queued by this call
   1.196 +			FALSE if DFC was already queued on entry so this call did nothing
   1.197 +
   1.198 +	@see TDfc::DoEnque()
   1.199 +	@see TDfc::Enque()
   1.200 + */
   1.201 +EXPORT_C TBool TDfc::Add()
   1.202 +	{
   1.203 +	__ASSERT_DEBUG(NKern::CurrentContext()!=NKern::EThread || NKern::KernelLocked(), *(int*)0xdfcadd01=0);
   1.204 +	__ASSERT_DEBUG(IsIDFC() || (IsDFC() && iDfcQ), *(int*)0xdfcadd03=0);
   1.205 +//	__ASSERT_WITH_MESSAGE_DEBUG(  NKern::CurrentContext()!=NKern::EThread  ||  NKern::KernelLocked(),"Do not call from thread with preemption enabled","TDfc::Add");
   1.206 +//	__ASSERT_WITH_MESSAGE_DEBUG(  IsIDFC() || (IsDFC() && iDfcQ), "DFC queue not set", "TDfc::Add");
   1.207 +#ifdef __WINS__
   1.208 +	__NK_ASSERT_ALWAYS(Interrupt.InInterrupt() || NKern::KernelLocked());
   1.209 +#endif
   1.210 +	TInt irq = NKern::DisableAllInterrupts();
   1.211 +	TSubScheduler& ss = SubScheduler();
   1.212 +	TUint32 orig = 0xFF00;
   1.213 +
   1.214 +	// Transition the state to 'on normal IDFC queue'
   1.215 +	// 0000->008n
   1.216 +	// 00Cn->00En
   1.217 +	// All other states unchanged
   1.218 +	// Return original state
   1.219 +	if (IsValid())	// don't add if tied and tied thread/group is being/has been destroyed
   1.220 +		orig = AddStateChange();
   1.221 +	if (orig==0)
   1.222 +		{
   1.223 +		// wasn't already queued
   1.224 +		i8888.iHState0 = 0;	// BeginTiedEvent() not done
   1.225 +		ss.iDfcs.Add(this);
   1.226 +		ss.iDfcPendingFlag = 1;
   1.227 +#ifdef _DEBUG
   1.228 +		TUint32 st8 = DFC_STATE(this) & 0xFF;
   1.229 +		if (st8 != (0x80|ss.iCpuNum))
   1.230 +			__crash();
   1.231 +#endif
   1.232 +		}
   1.233 +	NKern::RestoreInterrupts(irq);
   1.234 +	return (orig==0 || (orig&0xFFE0)==0x00C0);
   1.235 +	}
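          +
          +/* Usage sketch (illustrative only): the usual interrupt-to-DFC split. The ISR does
          +   the minimum work and queues the DFC; because Add() does nothing when the DFC is
          +   already queued, a burst of interrupts produces a single DFC run. MyIsr and
          +   MyDevice (with an iRxDfc member) are hypothetical names.
          +   @code
          +	void MyIsr(TAny* aPtr)
          +		{
          +		MyDevice& d = *(MyDevice*)aPtr;
          +		// ... service/acknowledge the interrupt source ...
          +		d.iRxDfc.Add();		// returns FALSE if the DFC was already queued
          +		}
          +   @endcode
          +*/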
   1.236 +
   1.237 +
   1.238 +/** Queue an IDFC or a DFC from any context
   1.239 +
    1.240 +	This function is equivalent to TDfc::Add() but performs no checks for correct usage and
    1.241 +	contains no instrumentation code; it also sends a reschedule IPI to ensure the current CPU runs the DFC.
   1.242 +
   1.243 +	@return	TRUE if DFC was actually queued by this call
   1.244 +			FALSE if DFC was already queued on entry so this call did nothing
   1.245 +
   1.246 +	@see TDfc::DoEnque()
   1.247 +	@see TDfc::Enque()
   1.248 +	@see TDfc::Add()
   1.249 + */
   1.250 +EXPORT_C TBool TDfc::RawAdd()
   1.251 +	{
   1.252 +	TInt irq = NKern::DisableAllInterrupts();
   1.253 +	TSubScheduler& ss = SubScheduler();
   1.254 +	TUint32 orig = 0xFF00;
   1.255 +	if (IsValid())	// don't add if tied and tied thread/group is being/has been destroyed
   1.256 +		orig = AddStateChange();
   1.257 +	if (orig==0)
   1.258 +		{
   1.259 +		// wasn't already queued
   1.260 +		i8888.iHState0 = 0;	// BeginTiedEvent() not done
   1.261 +		ss.iDfcs.Add(this);
   1.262 +		ss.iDfcPendingFlag = 1;
   1.263 +		send_self_resched_ipi();	// ensure current CPU runs the DFC
   1.264 +#ifdef _DEBUG
   1.265 +		TUint32 st8 = DFC_STATE(this) & 0xFF;
   1.266 +		if (st8 != (0x80|ss.iCpuNum))
   1.267 +			__crash();
   1.268 +#endif
   1.269 +		// FIXME: Need to wait to ensure IRQ is active before reenabling interrupts
   1.270 +		}
   1.271 +	NKern::RestoreInterrupts(irq);
   1.272 +	return (orig==0 || (orig&0xFFE0)==0x00C0);
   1.273 +	}
   1.274 +
   1.275 +
   1.276 +/** Queue a DFC (not an IDFC) from an IDFC or thread with preemption disabled.
   1.277 +
   1.278 +	This function is the preferred way to queue a DFC from an IDFC. It should not
   1.279 +	be used to queue an IDFC - use TDfc::Add() for this.
   1.280 +
   1.281 +	This function does nothing if the DFC is already queued.
   1.282 +
   1.283 +	@pre Call only from IDFC or thread with preemption disabled.
   1.284 +	@pre Do not call from ISR or thread with preemption enabled.
   1.285 +	@return	TRUE if DFC was actually queued by this call
   1.286 +			FALSE if DFC was already queued on entry so this call did nothing
   1.287 +
   1.288 +	@see TDfc::Add()
   1.289 +	@see TDfc::Enque()
   1.290 + */
   1.291 +EXPORT_C TBool TDfc::DoEnque()
   1.292 +	{
   1.293 +	__ASSERT_WITH_MESSAGE_DEBUG(  (NKern::CurrentContext()==NKern::EIDFC )||( NKern::CurrentContext()==NKern::EThread && NKern::KernelLocked()),"Do not call from ISR or thread with preemption enabled","TDfc::DoEnque");
   1.294 +	__NK_ASSERT_DEBUG(IsDFC());
   1.295 +	__ASSERT_WITH_MESSAGE_DEBUG(iDfcQ, "DFC queue not set", "TDfc::DoEnque");
   1.296 +
   1.297 +	// Check not already queued and then mark queued to prevent ISRs touching this DFC
   1.298 +	TDfcQue* q = iDfcQ;
   1.299 +	NThreadBase* t = q->iThread;
   1.300 +	t->AcqSLock();	// also protects DFC queue
   1.301 +	TUint16 expect = 0;
   1.302 +	TBool ok = __e32_atomic_cas_acq16(&iDfcState, &expect, 1);
   1.303 +	if (ok)
   1.304 +		{
   1.305 +		// wasn't already queued, now marked as on final queue, which means
   1.306 +		// attempts to cancel will block on the thread spin lock
   1.307 +		TUint present = q->iPresent[0];
   1.308 +		q->Add((TPriListLink*)this);
   1.309 +		if (!present)
   1.310 +			t->iWaitState.UnBlockT(NThreadBase::EWaitDfc, q, KErrNone);
   1.311 +		}
   1.312 +	t->RelSLock();	// also protects DFC queue
   1.313 +	return ok;
   1.314 +	}
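          +
          +/* Usage sketch (illustrative only): an IDFC handing work on to a DFC thread.
          +   Preemption is already disabled in IDFC context, so DoEnque() may be called
          +   directly. MyIDfcFn and MyDevice (with an iWorkDfc member) are hypothetical names.
          +   @code
          +	void MyIDfcFn(TAny* aPtr)
          +		{
          +		MyDevice& d = *(MyDevice*)aPtr;
          +		// ... time-critical processing ...
          +		d.iWorkDfc.DoEnque();	// pass the remaining work to the DFC thread
          +		}
          +   @endcode
          +*/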
   1.315 +
   1.316 +void TDfcQue::ThreadFunction(TAny* aDfcQ)
   1.317 +	{
   1.318 +	TDfcQue& q = *(TDfcQue*)aDfcQ;
   1.319 +	NThreadBase* t = NKern::CurrentThread();
   1.320 +	FOREVER
   1.321 +		{
   1.322 +		NKern::Lock();
   1.323 +		t->AcqSLock();	// also protects DFC queue
   1.324 +		if (q.IsEmpty())
   1.325 +			{
   1.326 +			t->iWaitState.SetUpWait(NThreadBase::EWaitDfc, 0, &q);
   1.327 +			RescheduleNeeded();
   1.328 +			t->RelSLock();	// also protects DFC queue
   1.329 +			NKern::Unlock();
   1.330 +			}
   1.331 +		else
   1.332 +			{
   1.333 +			TDfc* d = q.First();
   1.334 +			q.Remove((TPriListLink*)d);
   1.335 +			TDfcFn f = d->iFn;
   1.336 +			TAny* p = d->iPtr;
   1.337 +			d->ResetState();
   1.338 +			t->RelSLock();	// also protects DFC queue
   1.339 +			NKern::Unlock();
   1.340 +			(*f)(p);
   1.341 +			}
   1.342 +		}
   1.343 +	}
   1.344 +
   1.345 +
   1.346 +
   1.347 +void TCancelIPI::Send(TDfc* aDfc, TInt aCpu)
   1.348 +	{
   1.349 +	iDfc = aDfc;
   1.350 +	Queue(&Isr, 1u<<aCpu);
   1.351 +	}
   1.352 +
   1.353 +void TCancelIPI::Isr(TGenericIPI* aIPI)
   1.354 +	{
   1.355 +	TCancelIPI* p = (TCancelIPI*)aIPI;
   1.356 +	TDfc* d = p->iDfc;
   1.357 +	if (d->iNext)
   1.358 +		{
   1.359 +		// QueueDfcs() hasn't dequeued it yet
   1.360 +		// just dequeue it here and reset the state - QueueDfcs() will never see it
   1.361 +		// Note that this means we have to release the tied thread/group if necessary
   1.362 +		// BeginTiedEvent() has occurred if iHState0 is set and it's actually an IDFC not an NTimer
   1.363 +		NSchedulable* tied = (d->iHType==NEventHandler::EEventHandlerIDFC && d->i8888.iHState0) ? d->iTied : 0;
   1.364 +		d->Deque();
   1.365 +		d->ResetState();
   1.366 +		if (tied)
   1.367 +			tied->EndTiedEvent();
   1.368 +		}
   1.369 +	else
   1.370 +		{
   1.371 +		// QueueDfcs() has already dequeued it
   1.372 +		// state transition:
   1.373 +		//		XXYY->XX00
   1.374 +		//		XX00->0000
   1.375 +		// QueueDfcs() will take care of the tied thread/group
   1.376 +		d->CancelFinalStateChange();
   1.377 +		}
   1.378 +	}
   1.379 +
   1.380 +
   1.381 +/** Cancels an IDFC or DFC.
   1.382 +
   1.383 +	This function does nothing if the IDFC or DFC is not queued.
   1.384 +
   1.385 +	For any DFC or IDFC the following identity holds:
   1.386 +			Number of times Add() is called and returns TRUE
   1.387 +		+	Number of times DoEnque() is called and returns TRUE
   1.388 +		+	Number of times Enque() is called and returns TRUE
   1.389 +		+	Number of times QueueOnIdle() is called and returns TRUE
   1.390 +		=	Number of times Cancel() is called and returns TRUE
   1.391 +		+	Number of times the DFC/IDFC function executes
   1.392 +
   1.393 +	@pre IDFC or thread context. Do not call from ISRs.
   1.394 +
   1.395 +	@pre If the DFC function accesses the DFC object itself, the user must ensure that
   1.396 +	     Cancel() cannot be called while the DFC function is running.
   1.397 +
   1.398 +	@return	TRUE	if the DFC was actually dequeued by this call - i.e. an
   1.399 +					instance of the DFC's execution has been prevented. It
   1.400 +					is still possible that a previous execution is still in
   1.401 +					progress.
   1.402 +			FALSE	if the DFC was not queued on entry to the call, or was in
   1.403 +					the process of being executed or cancelled. In this case
   1.404 +					it is possible that the DFC executes after this call
   1.405 +					returns.
   1.406 +
    1.407 +	@post	In either case it is safe to delete the DFC object on return
    1.408 +			from this call, provided only that the DFC function does not
   1.409 +			refer to the DFC object itself.
   1.410 + */
   1.411 +EXPORT_C TBool TDfc::Cancel()
   1.412 +	{
   1.413 +	enum TAction { EDeque=1, EReset=2, EIdleUnlock=4, ESendIPI=8, EWait=16 };
   1.414 +
   1.415 +	CHECK_PRECONDITIONS(MASK_NOT_ISR|MASK_INTERRUPTS_ENABLED,"TDfc::Cancel");
   1.416 +	if (!iDfcState)
   1.417 +		return FALSE;
   1.418 +	TUint action = EIdleUnlock;
   1.419 +	TBool ret = FALSE;
   1.420 +	TInt cpu = -1;
   1.421 +	NSchedulable* tied = 0;
   1.422 +	TDfcQue* q = 0;
   1.423 +	NThreadBase* t = 0;
   1.424 +	NKern::Lock();
   1.425 +	TSubScheduler& ss0 = SubScheduler();
   1.426 +	if (IsDFC())
   1.427 +		q = iDfcQ, t = q->iThread, t->AcqSLock();
   1.428 +	TInt irq = NKern::DisableAllInterrupts();
   1.429 +	TheScheduler.iIdleSpinLock.LockOnly();
   1.430 +
   1.431 +	// 0000->0000, XX00->ZZ00, xxYY->zzYY
   1.432 +	TUint state = CancelInitialStateChange();
   1.433 +	TUint stt = state >> 5;
   1.434 +	if (state & 0xFF00)
   1.435 +		{
   1.436 +		// someone else cancelling at the same time - just wait for them to finish
   1.437 +		action = EWait|EIdleUnlock;
   1.438 +		goto end;
   1.439 +		}
   1.440 +	if (state == 0)	// DFC not active
   1.441 +		goto end;
   1.442 +
   1.443 +	// possible states here are 0001, 002g, 006m, 008m, 00Am, 00Cm, 00Em
   1.444 +	ret = (stt!=6);	// if running but not pending, Cancel() will not have prevented an execution
   1.445 +	if (state == TUint(TheScheduler.iIdleGeneration | 0x20))
   1.446 +		{
   1.447 +		// was on idle queue, BeginTiedEvent() isn't called until QueueDfcs() runs
   1.448 +		action = EDeque|EReset|EIdleUnlock;
   1.449 +		goto end;
   1.450 +		}
   1.451 +	if (state == 1)
   1.452 +		{
   1.453 +		// was on final queue, must be DFC not IDFC
   1.454 +		q->Remove((TPriListLink*)this);
   1.455 +		action = EReset|EIdleUnlock;
   1.456 +		goto end;
   1.457 +		}
   1.458 +
   1.459 +	// possible states here are 002g (spilled), 006m, 008m, 00Am, 00Cm, 00Em
   1.460 +	// i.e. either on IDFC queue, ExIDFC queue or running
   1.461 +	// For IDFCs, tied thread/group is now in play.
   1.462 +	cpu = state & 0x1f;	// CPU it's on for states 006m, 008m, 00Am, 00Cm, 00Em
   1.463 +	if (stt==3 || stt==6 || stt==7)
   1.464 +		{
   1.465 +		// It's actually running - must be IDFC. A re-queue may also be pending.
   1.466 +		TheScheduler.iIdleSpinLock.UnlockOnly();
   1.467 +		TSubScheduler* ss = TheSubSchedulers + cpu;
   1.468 +		TDfc* expect = this;
   1.469 +		TBool done = __e32_atomic_cas_acq_ptr(&ss->iCurrentIDFC, &expect, 0);
   1.470 +		if (done)
   1.471 +			{
   1.472 +			// We cleared iCurrentIDFC so QueueDfcs() won't touch this again - we reset the state and finish up
   1.473 +			// We must also release the tied thread/group
   1.474 +			tied = iTied;
   1.475 +			action = EReset;
   1.476 +			goto end;
   1.477 +			}
   1.478 +		// QueueDfcs() got to iCurrentIDFC before we did, so we interlock with it
   1.479 +		// and we can leave the EndTiedEvent to it as well
   1.480 +		// State transition:
   1.481 +		//		XXAm->XX00, wait
   1.482 +		//		XX00->0000, don't wait
   1.483 +		TUint32 orig = CancelFinalStateChange() & 0xFF;
   1.484 +		__NK_ASSERT_ALWAYS(orig==0 || orig==state);
   1.485 +		action = orig ? EWait : 0;
   1.486 +		goto end;
   1.487 +		}
   1.488 +
   1.489 +	// possible states here 002g (propagated), 008m, 00Am so it's either on the endogenous or exogenous IDFC queue
   1.490 +	if (stt==5)
   1.491 +		{
   1.492 +		// it's on the exogenous IDFC queue
   1.493 +		TheScheduler.iIdleSpinLock.UnlockOnly();
   1.494 +		TSubScheduler* ss = TheSubSchedulers + cpu;
   1.495 +		ss->iExIDfcLock.LockOnly();
   1.496 +		if (iNext)
   1.497 +			{
   1.498 +			// we got to it before QueueDfcs() on the other CPU so we can finish up here
   1.499 +			// QueueDfcs() will never see it again so we must release tied thread/group
   1.500 +			Deque();
   1.501 +			tied = iTied;
   1.502 +			ss->iExIDfcLock.UnlockOnly();
   1.503 +			action = EReset;
   1.504 +			goto end;
   1.505 +			}
   1.506 +		// QueueDfcs() on other CPU has already dequeued it - we must now interlock with RunIDFCStateChange()
   1.507 +		ss->iExIDfcLock.UnlockOnly();
   1.508 +		// State transition:
   1.509 +		//		XXAm->XX00, wait
   1.510 +		//		XX00->0000, don't wait
   1.511 +		// QueueDfcs() will take care of tied thread/group
   1.512 +		TUint32 orig = CancelFinalStateChange() & 0xFF;
   1.513 +		__NK_ASSERT_ALWAYS(orig==0 || orig==state);
   1.514 +		action = orig ? EWait : 0;
   1.515 +		goto end;
   1.516 +		}
   1.517 +
   1.518 +	// possible states here 002g (propagated idle) or 008m (IDFC or DFC on endogenous DFC queue)
   1.519 +	if (stt==1)	// propagated idle
   1.520 +		cpu = TheScheduler.iIdleSpillCpu;
   1.521 +
   1.522 +	// if it's on this CPU's IDFC queue we can just remove it and reset the state here
   1.523 +	// otherwise we send a cancel IPI to the CPU it's on
   1.524 +	// We are guaranteed to dequeue the DFC before it executes since the
   1.525 +	// QueueDfcs() on the target CPU will notice that a cancel is in progress and
   1.526 +	// so will not run the DFC even if it dequeues it.
   1.527 +	// QueueDfcs() takes care of the tied thread/group if it sees the DFC/IDFC again, otherwise
   1.528 +	// we must do it here.
   1.529 +	if (TUint(cpu) == ss0.iCpuNum)
   1.530 +		{
   1.531 +		if (IsIDFC())
   1.532 +			tied = iTied;
   1.533 +		action = EDeque|EReset|EIdleUnlock;
   1.534 +		}
   1.535 +	else
   1.536 +		action = EIdleUnlock|ESendIPI|EWait;
   1.537 +
   1.538 +end:
   1.539 +	// Common exit point
   1.540 +	if (action & EDeque)
   1.541 +		Deque();
   1.542 +	if (action & EReset)
   1.543 +		{
   1.544 +		ResetState();
   1.545 +		}
   1.546 +	if (action & EIdleUnlock)
   1.547 +		TheScheduler.iIdleSpinLock.UnlockOnly();
   1.548 +	NKern::RestoreInterrupts(irq);
   1.549 +	if (t)
   1.550 +		t->RelSLock();
   1.551 +
   1.552 +	// on another CPU's IDFC queue so send IPI to remove it
   1.553 +	if (action & ESendIPI)
   1.554 +		{
   1.555 +		TCancelIPI ipi;
   1.556 +		ipi.Send(this, cpu);
   1.557 +		ipi.WaitCompletion();
   1.558 +		tied = 0;
   1.559 +		}
   1.560 +
   1.561 +	// wait for cancel to complete
   1.562 +	if (action & EWait)
   1.563 +		{
   1.564 +		TUint n = 0x01000000;
   1.565 +		while ((iDfcState>>8) & ss0.iCpuMask)
   1.566 +			{
   1.567 +			__chill();
   1.568 +			if (!--n)
   1.569 +				__crash();
   1.570 +			}
   1.571 +		}
   1.572 +
   1.573 +	// release tied thread/group if waiting for IDFC to complete
   1.574 +	if (tied)
   1.575 +		tied->EndTiedEvent();
   1.576 +	NKern::Unlock();
   1.577 +	return ret;
   1.578 +	}
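          +
          +/* Usage sketch (illustrative only): a typical shutdown sequence. Once the interrupt
          +   source can no longer queue the DFC, Cancel() makes it safe to destroy the TDfc,
          +   as described in the @post condition above. MyDevice and iRxDfc are hypothetical names.
          +   @code
          +	void MyDevice::Shutdown()
          +		{
          +		// ... disable the interrupt source so no further Add() can occur ...
          +		iRxDfc.Cancel();	// TRUE if a pending execution was prevented
          +		// iRxDfc may now be destroyed safely
          +		}
          +   @endcode
          +*/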
   1.579 +
   1.580 +
   1.581 +/** Queues a DFC (not an IDFC) from a thread.
   1.582 +
   1.583 +	Does nothing if DFC is already queued.
   1.584 +
   1.585 +    NOTE: Although this can be called in an IDFC context, it is more efficient to call
   1.586 +    DoEnque() in this case.
   1.587 +    
   1.588 +    @pre    Call either in a thread or an IDFC context.
   1.589 +	@pre	Do not call from an ISR.
   1.590 +	@return	TRUE if DFC was actually queued by this call
   1.591 +			FALSE if DFC was already queued on entry so this call did nothing
   1.592 + */
   1.593 +EXPORT_C TBool TDfc::Enque()
   1.594 +	{
   1.595 +	CHECK_PRECONDITIONS(MASK_NOT_ISR,"TDfc::Enque()");		
   1.596 +	NKern::Lock();
   1.597 +	TBool ret = DoEnque();
   1.598 +	NKern::Unlock();
   1.599 +	return ret;
   1.600 +	}
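          +
          +/* Usage sketch (illustrative only): queueing from ordinary thread code; no explicit
          +   kernel locking is required. gLogDfc is a hypothetical name.
          +   @code
          +	gLogDfc.Enque();	// TRUE if this call queued the DFC
          +   @endcode
          +*/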
   1.601 +
   1.602 +
    1.603 +/** Queues a DFC (not an IDFC) from a thread and also signals a fast mutex.
   1.604 +
   1.605 +	The DFC is unaffected if it is already queued.
   1.606 +
   1.607 +	The fast mutex is signalled before preemption is reenabled to avoid potential
   1.608 +	scheduler thrashing.
   1.609 +
   1.610 +	@param	aMutex =	pointer to fast mutex to be signalled;
   1.611 +						NULL means system lock mutex.
   1.612 +	@return	TRUE if DFC was actually queued by this call
   1.613 +			FALSE if DFC was already queued on entry so this call did nothing
   1.614 +	@pre	Call in a thread context.
   1.615 +	@pre	Kernel must be unlocked.
   1.616 +	@pre	Do not call from an ISR.
   1.617 +	@pre    Do not call from an IDFC.
   1.618 + */
   1.619 +EXPORT_C TBool TDfc::Enque(NFastMutex* aMutex)
   1.620 +	{
   1.621 +	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_NOT_ISR|MASK_NOT_IDFC,"TDfc::Enque(NFastMutex* aMutex)");		
   1.622 +	if (!aMutex)
   1.623 +		aMutex=&TheScheduler.iLock;
   1.624 +	NKern::Lock();
   1.625 +	TBool ret = DoEnque();
   1.626 +	aMutex->Signal();
   1.627 +	NKern::Unlock();
   1.628 +	return ret;
   1.629 +	}
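          +
          +/* Usage sketch (illustrative only): queueing a DFC while holding the system lock;
          +   the mutex is signalled before preemption is re-enabled, as described above.
          +   gNotifyDfc is a hypothetical name.
          +   @code
          +	NKern::LockSystem();
          +	// ... update state protected by the system lock ...
          +	gNotifyDfc.Enque(NULL);		// NULL means signal the system lock
          +   @endcode
          +*/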
   1.630 +
   1.631 +
   1.632 +/** Returns a pointer to the thread on which a DFC runs
   1.633 +
   1.634 +	@return	If this is a DFC and the DFC queue has been set, a pointer to the
   1.635 +			thread which will run the DFC.
   1.636 +			NULL if this is an IDFC or the DFC queue has not been set.
   1.637 + */
   1.638 +EXPORT_C NThreadBase* TDfc::Thread()
   1.639 +	{
   1.640 +	if (!IsDFC())
   1.641 +		return 0;
   1.642 +	return iDfcQ ? iDfcQ->iThread : 0;
   1.643 +	}
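          +
          +/* Usage sketch (illustrative only): asserting that code is running in the DFC's own
          +   queue thread. iMyDfc is a hypothetical name.
          +   @code
          +	__NK_ASSERT_DEBUG(iMyDfc.Thread() == NKern::CurrentThread());
          +   @endcode
          +*/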
   1.644 +
   1.645 +
   1.646 +/******************************************************************************
   1.647 + * Idle notification
   1.648 + ******************************************************************************/
   1.649 +
   1.650 +/** Register an IDFC or a DFC to be called when the system goes idle
   1.651 +
   1.652 +	This function does nothing if the IDFC/DFC is already queued.
   1.653 +
   1.654 +	@return	TRUE if DFC was actually queued by this call
   1.655 +			FALSE if DFC was already queued on entry so this call did nothing
   1.656 + */
   1.657 +EXPORT_C TBool TDfc::QueueOnIdle()
   1.658 +	{
   1.659 +	TInt irq = TheScheduler.iIdleSpinLock.LockIrqSave();
   1.660 +	TUint32 orig = 0xFF00;
   1.661 +
   1.662 +	// Transition the state to 'on normal idle queue'
   1.663 +	// 0000->002g
   1.664 +	// 00Cn->006n
   1.665 +	// All other states unchanged
   1.666 +	// Return original state
   1.667 +	if (IsValid())	// don't add if tied and tied thread/group is being/has been destroyed
   1.668 +		orig = QueueOnIdleStateChange();
   1.669 +	if (orig==0)
   1.670 +		{
   1.671 +		i8888.iHState0 = 0;	// BeginTiedEvent() not done
   1.672 +		TheScheduler.iIdleDfcs.Add(this);
   1.673 +		}
   1.674 +
   1.675 +	TheScheduler.iIdleSpinLock.UnlockIrqRestore(irq);
   1.676 +	return (orig==0 || (orig&0xFFE0)==0x00C0);
   1.677 +	}
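          +
          +/* Usage sketch (illustrative only): deferring low-priority housekeeping until the
          +   system next goes idle; the registration can be removed again with Cancel().
          +   gIdleDfc is a hypothetical name.
          +   @code
          +	gIdleDfc.QueueOnIdle();		// run when the system next goes idle
          +	// ... later, if the work is no longer needed ...
          +	gIdleDfc.Cancel();
          +   @endcode
          +*/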
   1.678 +
   1.679 +
   1.680 +/******************************************************************************
   1.681 + * Scheduler IDFC/DFC Processing
   1.682 + ******************************************************************************/
   1.683 +
   1.684 +void TSubScheduler::QueueDfcs()
   1.685 +//
   1.686 +// Enter with interrupts off and kernel locked
   1.687 +// Leave with interrupts off and kernel locked
   1.688 +//
   1.689 +// In state descriptions:
   1.690 +//		XX=8 bits not all zero (bitmask representing cancelling CPUs)
   1.691 +//		xx=8 bits (bitmask representing cancelling CPUs)
   1.692 +//		YY=8 bits not all zero
   1.693 +//		ZZ=XX with an additional bit set corresponding to the current CPU
   1.694 +//		zz=xx with an additional bit set corresponding to the current CPU
   1.695 +//		n = current CPU number
   1.696 +//		m = another CPU number
   1.697 +//		g = idle generation number
   1.698 +	{
   1.699 +	__KTRACE_OPT(KSCHED2,DEBUGPRINT("^"));
   1.700 +	iInIDFC = TRUE;
   1.701 +	BTrace0(BTrace::ECpuUsage, BTrace::EIDFCStart);
   1.702 +	TDfc* d = 0;
   1.703 +	NSchedulable* tied = 0;
   1.704 +	FOREVER
   1.705 +		{
   1.706 +		NKern::DisableAllInterrupts();
   1.707 +		// remove from pending queue with interrupts disabled
   1.708 +		d = (TDfc*)iDfcs.GetFirst();
   1.709 +		if (d)
   1.710 +			{
   1.711 +			d->iNext = 0;
   1.712 +#ifdef _DEBUG
   1.713 +			TUint32 st8 = DFC_STATE(d) & 0xFF;
   1.714 +			if (st8 != TUint(0x80|iCpuNum) && st8 != TUint(0x21^TheScheduler.iIdleGeneration))
   1.715 +				__crash();
   1.716 +#endif
   1.717 +			if (d->IsDFC())	// also true for mutating NTimer
   1.718 +				{
   1.719 +				NKern::EnableAllInterrupts();
   1.720 +				TDfcQue* q = d->iDfcQ;
   1.721 +				NThreadBase* t = q->iThread;
   1.722 +				t->AcqSLock();	// also protects DFC queue
   1.723 +
   1.724 +				// transition to 'final queue' state
   1.725 +				// 002g->0001, ok=TRUE
   1.726 +				// 008n->0001, ok=TRUE
   1.727 +				// XXYY->XX00, ok=FALSE
   1.728 +				// XX00->0000, ok=FALSE
   1.729 +				// other starting states invalid
   1.730 +				TUint32 orig = d->MoveToFinalQStateChange() >> 5;
   1.731 +				if (orig==1 || orig==4)
   1.732 +					{
   1.733 +					// wasn't being cancelled, now marked as on final queue, which means
   1.734 +					// attempts to cancel will block on the thread spin lock
   1.735 +					TUint present = q->iPresent[0];
   1.736 +					q->Add((TPriListLink*)d);
   1.737 +					if (!present)
   1.738 +						t->iWaitState.UnBlockT(NThreadBase::EWaitDfc, q, KErrNone);
   1.739 +					}
   1.740 +				t->RelSLock();	// also protects DFC queue
   1.741 +				continue;
   1.742 +				}
   1.743 +			// endogenous IDFC - could be tied in which case may need to be punted over to another CPU
   1.744 +			// can't be mutating NTimer since that would have gone into IsDFC() path
   1.745 +			tied = d->iTied;
   1.746 +			if (tied && !d->i8888.iHState0)	// if tied and BeginTiedEvent() not already done
   1.747 +				{
   1.748 +				d->i8888.iHState0 = 1;		// flag that BeginTiedEvent() done
   1.749 +				TInt cpu = tied->BeginTiedEvent();
   1.750 +				if (TUint(cpu) != iCpuNum)
   1.751 +					{
   1.752 +					// punt over to other CPU
   1.753 +					TBool kick = FALSE;
   1.754 +					TSubScheduler* ss = TheSubSchedulers + cpu;
   1.755 +					ss->iExIDfcLock.LockOnly();
   1.756 +					// transition state here to handle cancel
   1.757 +					// XXYY->XX00, ok=FALSE
   1.758 +					// XX00->0000, ok=FALSE
   1.759 +					// 008n->00Am, ok=TRUE
   1.760 +					// 002g->00Am, ok=TRUE
   1.761 +					// other starting states invalid
   1.762 +					TUint32 orig = d->TransferIDFCStateChange(cpu) >> 5;
   1.763 +					if (orig==1 || orig==4)
   1.764 +						{
   1.765 +						kick = !ss->iExIDfcPendingFlag;
   1.766 +						ss->iExIDfcPendingFlag = TRUE;
   1.767 +						ss->iExIDfcs.Add(d);
   1.768 +						}
   1.769 +					ss->iExIDfcLock.UnlockOnly();
   1.770 +					if (kick)
   1.771 +						send_resched_ipi(cpu);
   1.772 +					NKern::EnableAllInterrupts();	// let interrupts in
   1.773 +					if (orig >= 8)
   1.774 +						tied->EndTiedEvent();		// IDFC cancelled so release tied thread/group
   1.775 +					continue;
   1.776 +					}
   1.777 +				}
   1.778 +			}
   1.779 +		else
   1.780 +			{
   1.781 +			if (!iExIDfcPendingFlag)
   1.782 +				break;
   1.783 +			iExIDfcLock.LockOnly();
   1.784 +			d = (TDfc*)iExIDfcs.GetFirst();
   1.785 +			if (!d)
   1.786 +				{
   1.787 +				iExIDfcPendingFlag = 0;
   1.788 +				iExIDfcLock.UnlockOnly();
   1.789 +				break;
   1.790 +				}
   1.791 +			d->iNext = 0;
   1.792 +			tied = d->iTied;
   1.793 +			__NK_ASSERT_ALWAYS(d->IsIDFC() && tied);	// only tied IDFCs should get here
   1.794 +#ifdef _DEBUG
   1.795 +			TUint32 st8 = DFC_STATE(d) & 0xFF;
   1.796 +			if (st8 != (0xA0|iCpuNum))
   1.797 +				__crash();
   1.798 +#endif
   1.799 +			iExIDfcLock.UnlockOnly();
   1.800 +			}
   1.801 +
   1.802 +		// endogenous or exogenous IDFC
   1.803 +		// if tied, we are on correct CPU
   1.804 +		TDfcFn f = d->iFn;
   1.805 +		TAny* p = d->iPtr;
   1.806 +
   1.807 +		// If Cancel() finds the IDFC in the running state (00Cn or 00En) it will do the following
   1.808 +		// atomic { if (iCurrentIDFC==d) iCurrentIDFC=0; }
   1.809 +		// We must guarantee that the following access is observed before the state change in RunIDFCStateChange()
   1.810 +		// We assume the latter has full barrier semantics to guarantee this.
   1.811 +		iCurrentIDFC = d;
   1.812 +
   1.813 +		// transition to running state
   1.814 +		// 002g->00Cn, ok=TRUE
   1.815 +		// 008n->00Cn, ok=TRUE
   1.816 +		// 00An->00Cn, ok=TRUE
   1.817 +		// XXYY->XX00, ok=FALSE
   1.818 +		// XX00->0000, ok=FALSE
   1.819 +		// other starting states invalid
   1.820 +		TUint32 orig = d->RunIDFCStateChange() >> 5;
   1.821 +		NKern::EnableAllInterrupts();
   1.822 +		if (orig==1 || orig==4 || orig==5)
   1.823 +			{
   1.824 +			(*f)(p);
   1.825 +
   1.826 +			// transition to idle state or rerun if necessary
   1.827 +			// first swap iCurrentIDFC with 0 - if original value != d, don't touch d again, return 0xFFFFFFFF
   1.828 +			// 00Cn->0000
   1.829 +			// 00En->008n
   1.830 +			// 006n->006n
   1.831 +			// XXCn->XX00
   1.832 +			// XXEn->XX00
   1.833 +			// XX6n->XX00
   1.834 +			// other starting states invalid
   1.835 +			// return original state
   1.836 +			NKern::DisableAllInterrupts();
   1.837 +			TUint32 orig = d->EndIDFCStateChange(this);
   1.838 +			if ((orig>>5)==7)
   1.839 +				{
   1.840 +				iDfcs.Add(d);
   1.841 +#ifdef _DEBUG
   1.842 +				TUint32 st8 = DFC_STATE(d) & 0xFF;
   1.843 +				if (st8 != (0x80|iCpuNum))
   1.844 +					__crash();
   1.845 +#endif
   1.846 +				continue;
   1.847 +				}
   1.848 +			else if ((orig>>5)==3)
   1.849 +				{
   1.850 +				TheScheduler.iIdleSpinLock.LockOnly();
   1.851 +				// 006n->002g
   1.852 +				// XX6n->XX00
   1.853 +				orig = d->EndIDFCStateChange2();
   1.854 +				if ((orig>>5)==3)
   1.855 +					TheScheduler.iIdleDfcs.Add(d);
   1.856 +				TheScheduler.iIdleSpinLock.UnlockOnly();
   1.857 +				}
   1.858 +			NKern::EnableAllInterrupts();
   1.859 +			if (tied && orig<0x10000)
   1.860 +				tied->EndTiedEvent(); // if we set iCurrentIDFC back to 0, we release the tied thread/group
   1.861 +			}
   1.862 +		else
   1.863 +			{
   1.864 +			iCurrentIDFC = 0;
   1.865 +			if (tied)
   1.866 +				tied->EndTiedEvent();		// IDFC cancelled so release tied thread/group
   1.867 +			}
   1.868 +		}
   1.869 +	iDfcPendingFlag = 0;
   1.870 +	BTrace0(BTrace::ECpuUsage, BTrace::EIDFCEnd);
   1.871 +	iInIDFC = 0;
   1.872 +	__KTRACE_OPT(KSCHED2,DEBUGPRINT("~"));
   1.873 +	}
   1.874 +
   1.875 +
   1.876 +/******************************************************************************
   1.877 + * Kernel-side asynchronous request DFCs
   1.878 + ******************************************************************************/
   1.879 +
   1.880 +EXPORT_C TAsyncRequest::TAsyncRequest(TDfcFn aFunction, TDfcQue* aDfcQ, TInt aPriority)
   1.881 +	: TDfc(aFunction, this, aDfcQ, aPriority), iCompletionObject(0), iCancel(0), iResult(0)
   1.882 +	{
   1.883 +	}
   1.884 +
   1.885 +
   1.886 +EXPORT_C void TAsyncRequest::Send(TDfc* aCompletionDfc)
   1.887 +	{
   1.888 +	__NK_ASSERT_DEBUG(!iCompletionObject);
   1.889 +	iCancel = EFalse;
   1.890 +	iCompletionObject = (TAny*)((TLinAddr)aCompletionDfc|1);
   1.891 +	TDfc::Enque();
   1.892 +	}
   1.893 +
   1.894 +
   1.895 +EXPORT_C void TAsyncRequest::Send(NFastSemaphore* aCompletionSemaphore)
   1.896 +	{
   1.897 +	__NK_ASSERT_DEBUG(!iCompletionObject);
   1.898 +	iCancel = EFalse;
   1.899 +	iCompletionObject = aCompletionSemaphore;
   1.900 +	TDfc::Enque();
   1.901 +	}
   1.902 +
   1.903 +
   1.904 +EXPORT_C TInt TAsyncRequest::SendReceive()
   1.905 +	{
   1.906 +	NFastSemaphore signal;
   1.907 +	NKern::FSSetOwner(&signal, 0);
   1.908 +	Send(&signal);
   1.909 +	NKern::FSWait(&signal);
   1.910 +	return iResult;
   1.911 +	}
   1.912 +
   1.913 +
   1.914 +EXPORT_C void TAsyncRequest::Cancel()
   1.915 +	{
   1.916 +	iCancel = ETrue;
   1.917 +	if(TDfc::Cancel())
   1.918 +		Complete(KErrCancel);
   1.919 +	}
   1.920 +
   1.921 +
   1.922 +EXPORT_C void TAsyncRequest::Complete(TInt aResult)
   1.923 +	{
   1.924 +	TLinAddr signal = (TLinAddr)__e32_atomic_swp_ord_ptr(&iCompletionObject, 0);
   1.925 +	if(signal)
   1.926 +		{
   1.927 +		iResult = aResult;
   1.928 +		if(signal&1)
   1.929 +			((TDfc*)(signal&~1))->Enque();
   1.930 +		else
   1.931 +			NKern::FSSignal((NFastSemaphore*)signal);
   1.932 +		}
   1.933 +	}
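          +
          +
          +/* Usage sketch (illustrative only): the request/complete pattern implemented by
          +   TAsyncRequest. A derived class supplies the DFC function, which performs the work
          +   (honouring iCancel) and calls Complete(); a client can block with SendReceive().
          +   TMyRequest and DoMyRequest are hypothetical names.
          +   @code
          +	class TMyRequest : public TAsyncRequest
          +		{
          +	public:
          +		TMyRequest(TDfcQue* aQ) : TAsyncRequest(&DoMyRequest, aQ, 1) {}
          +		static void DoMyRequest(TAny* aPtr)
          +			{
          +			TMyRequest& r = *(TMyRequest*)aPtr;	// aPtr is the TAsyncRequest itself
          +			// ... do the work, checking r.iCancel periodically ...
          +			r.Complete(KErrNone);
          +			}
          +		};
          +
          +	TInt RunRequest(TMyRequest& aReq)
          +		{
          +		return aReq.SendReceive();	// queue, wait, then return iResult
          +		}
          +   @endcode
          +*/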