// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\nkernsmp\sched.cpp
//
//

// NThreadBase member data
#define __INCLUDE_NTHREADBASE_DEFINES__

// TDfc member data
#define __INCLUDE_TDFC_DEFINES__

#include "nk_priv.h"
#include <nk_irq.h>

TSpinLock NEventHandler::TiedLock(TSpinLock::EOrderEventHandlerTied);

/******************************************************************************
 * TScheduler
 ******************************************************************************/

// TScheduler resides in .bss so other fields are zero-initialised
TScheduler::TScheduler()
	:	iActiveCpus1(1),	// only boot CPU for now
		iActiveCpus2(1),	// only boot CPU for now
		iIdleSpinLock(TSpinLock::EOrderIdleDFCList),
		iCpusNotIdle(1)		// only boot CPU for now
	{
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler* s = TheSubSchedulers + i;
		iSub[i] = s;
		s->iScheduler = this;
		s->iCpuNum = TUint32(i);
		s->iCpuMask = 1u<<i;
		}
	}


/** Return a pointer to the scheduler
	Intended for use by the crash debugger, not for general device driver use.

	@return	Pointer to the scheduler object
	@internalTechnology
*/
EXPORT_C TScheduler* TScheduler::Ptr()
	{
	return &TheScheduler;
	}
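
// Illustrative sketch (not part of the original source): how a crash-time
// debug routine might use TScheduler::Ptr() to walk the per-CPU sub-schedulers
// and report the current thread on each. DumpCurrentThreads() is a
// hypothetical name; iNumCpus, iSub[] and iCurrentThread are the fields used
// elsewhere in this file.
#if 0	// example only, never compiled
void DumpCurrentThreads()
	{
	TScheduler* s = TScheduler::Ptr();
	for (TInt i=0; i<s->iNumCpus; ++i)
		{
		TSubScheduler* ss = s->iSub[i];
		DEBUGPRINT("CPU %d current thread %T", i, ss->iCurrentThread);
		}
	}
#endif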


/******************************************************************************
 * TSubScheduler
 ******************************************************************************/

// TSubScheduler resides in .bss so other fields are zero-initialised
TSubScheduler::TSubScheduler()
	:	TPriListBase(KNumPriorities),
		iExIDfcLock(TSpinLock::EOrderExIDfcQ),
		iReadyListLock(TSpinLock::EOrderReadyList),
		iKernLockCount(1),
		iEventHandlerLock(TSpinLock::EOrderEventHandlerList)
	{
	}


/******************************************************************************
 * NSchedulable
 ******************************************************************************/
void NSchedulable::AcqSLock()
	{
	iSSpinLock.LockOnly();
	if (iParent!=this && iParent)
		iParent->AcqSLock();
	}

void NSchedulable::RelSLock()
	{
	if (iParent!=this && iParent)
		iParent->RelSLock();
	iSSpinLock.UnlockOnly();
	}

void NSchedulable::LAcqSLock()
	{
	NKern::Lock();
	AcqSLock();
	}

void NSchedulable::RelSLockU()
	{
	RelSLock();
	NKern::Unlock();
	}
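
// Illustrative sketch (not part of the original source): the intended pairing
// of the lock helpers above. LAcqSLock()/RelSLockU() bracket the kernel lock
// around the schedulable's spin lock (with the parent group's lock nested
// inside), so a thread-context caller can touch scheduling state safely; the
// plain AcqSLock()/RelSLock() pair is for callers already holding the kernel
// lock. ExampleAdjust() and its body are hypothetical.
#if 0	// example only, never compiled
void ExampleAdjust(NSchedulable* aS)
	{
	aS->LAcqSLock();	// NKern::Lock() then AcqSLock()
	// ... examine or update aS's scheduling state here ...
	aS->RelSLockU();	// RelSLock() then NKern::Unlock()
	}
#endif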

void NSchedulable::UnPauseT()
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NSchedulable::UnPauseT");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nUnPauseT",this));
	__NK_ASSERT_DEBUG(iPauseCount);
	if (--iPauseCount || iReady || iSuspended || (iParent && ((NThread*)this)->iWaitState.ThreadIsBlocked()))
		return;
	ReadyT(EUnPause);
	}

void NSchedulable::DeferredReadyIDfcFn(TAny* aPtr)
	{
	NSchedulable* a = (NSchedulable*)aPtr;
	a->AcqSLock();
	TUint32 evs = __e32_atomic_and_acq32(&a->iEventState, ~EDeferredReady);
	if (evs & EDeferredReady)
		{
		if (a->iParent)
			{
			// thread
			a->UnPauseT();
			}
		else
			{
			// thread group
			NThreadGroup* g = (NThreadGroup*)a;
			__KTRACE_OPT(KNKERN,DEBUGPRINT("%G nDeferredReady",g));
			__NK_ASSERT_DEBUG(g->iPauseCount);
			if (!--g->iPauseCount && g->iNThreadList.NonEmpty())
				g->ReadyT(EUnPause);
			}
		}
	a->RelSLock();
	}

TInt NSchedulable::AddTiedEvent(NEventHandler* aEvent)
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T AddEv %08x",this,aEvent));
	TInt r = KErrGeneral;
	NEventHandler::TiedLock.LockOnly();
	AcqSLock();
	if (iStopping)
		r = KErrDied;
	else if (!aEvent->iTied)
		{
		aEvent->iTied = this;
		iEvents.Add(&aEvent->iTiedLink);
		r = KErrNone;
		}
	RelSLock();
	NEventHandler::TiedLock.UnlockOnly();
	return r;
	}
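
// Illustrative sketch (not part of the original source): tying an event
// handler to a thread. Assuming an NTimer constructed against the thread,
// AddTiedEvent() registers it so that DetachTiedEvents() (below) cancels it
// automatically when the thread exits; KErrDied means the thread is already
// stopping. ExampleTieTimer() is a hypothetical name.
#if 0	// example only, never compiled
TInt ExampleTieTimer(NThreadBase* aT, NTimer* aTimer)
	{
	TInt r = aT->AddTiedEvent(aTimer);	// on success aTimer->iTied points to aT
	if (r == KErrDied)
		{
		// thread is exiting - caller must dispose of the timer itself
		}
	return r;
	}
#endif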

void ipi_dummy(TGenericIPI*)
	{
	}

/** Detach and cancel any tied events attached to this thread/group

	Call in a thread context with interrupts and preemption enabled.
	Calling thread in critical section.

	@internalComponent
*/
void NSchedulable::DetachTiedEvents()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T DetTiedEv",this));
	NKern::Lock();
	NEventHandler::TiedLock.LockOnly();
	AcqSLock();
	iStopping = TRUE;
	if (!iParent)
		{
		// can't destroy a group until all threads have detached from it
		NThreadGroup* g = (NThreadGroup*)this;
		__NK_ASSERT_ALWAYS(g->iThreadCount==0 && g->iNThreadList.IsEmpty());
		}
	RelSLock();
	NEventHandler::TiedLock.UnlockOnly();

	// send IPI to all processors to synchronise
	// after this, any tied IDFCs can only proceed to completion
	// they can't be queued again
	TGenericIPI ipi;
	ipi.QueueAllOther(&ipi_dummy);
	NKern::Unlock();
	ipi.WaitCompletion();

	FOREVER
		{
		NKern::Lock();
		NEventHandler::TiedLock.LockOnly();
		AcqSLock();
		NEventHandler* h = 0;
		TInt type = -1;
		if (!iEvents.IsEmpty())
			{
			h = _LOFF(iEvents.First()->Deque(), NEventHandler, iTiedLink);
			h->iTiedLink.iNext = 0;
			type = h->iHType;
			}
		RelSLock();
		if (type == NEventHandler::EEventHandlerNTimer)
			{
			// everything's easy for a timer since we can just cancel it here
			NTimer* tmr = (NTimer*)h;
			tmr->DoCancel(NTimer::ECancelDestroy);
			tmr->iTied = 0;
			}
		else if (type == NEventHandler::EEventHandlerIDFC)
			{
			// can just cancel the IDFC with TiedLock held
			// EndTiedEvent() may be delayed, but we wait for that further down
			// iTied will have been captured before the IDFC state is reset
			// Cancel() waits for the state to be reset
			TDfc* d = (TDfc*)h;
			d->Cancel();
			d->iHType = (TUint8)NEventHandler::EEventHandlerDummy;
			d->iTied = 0;
			}
		NEventHandler::TiedLock.UnlockOnly();
		NKern::Unlock();
		if (!h)
			break;
		switch (type)
			{
			case NEventHandler::EEventHandlerIrq:
				{
				NIrqHandler* pH = (NIrqHandler*)h;
				// pH can't have been freed since we dequeued it but left iTied set
				pH->Unbind(pH->iHandle, this);
				break;
				}
			case NEventHandler::EEventHandlerNTimer:
			case NEventHandler::EEventHandlerIDFC:
			case NEventHandler::EEventHandlerDummy:
				// nothing left to do
				break;
			default:
				__NK_ASSERT_ALWAYS(0);
				break;
			}
		}

	// Wait for any remaining tied event handlers to complete
	while (iEventState & EEventCountMask)
		{
		__chill();
		}
	}
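
// Illustrative sketch (not part of the original source): the cross-CPU
// synchronisation idiom used above. Queuing a do-nothing IPI on every other
// CPU and waiting for completion guarantees every CPU has passed through the
// interrupt path at least once, so any handler that observed the old state
// (here, iStopping clear) has finished running. ExampleQuiesce() is
// hypothetical.
#if 0	// example only, never compiled
void ExampleQuiesce()
	{
	NKern::Lock();
	TGenericIPI ipi;
	ipi.QueueAllOther(&ipi_dummy);	// run the no-op on every other CPU
	NKern::Unlock();				// this CPU may be preempted meanwhile
	ipi.WaitCompletion();			// returns once every CPU has run it
	// ... state published before the IPI is now visible to all CPUs ...
	}
#endif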

/******************************************************************************
 * NThreadGroup
 ******************************************************************************/


/******************************************************************************
 * NThreadBase
 ******************************************************************************/

/** Makes a nanothread ready.

	For use by RTOS personality layers.

	@pre	Kernel must be locked.
	@pre	Call either in a thread or an IDFC context.
	@pre	The thread being made ready must not be explicitly suspended

	@post	Kernel is locked.
*/
void NSchedulable::ReadyT(TUint aMode)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_ISR,"NSchedulable::ReadyT");
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nReadyT(%x)",this,aMode));
	NThreadBase* t = (NThreadBase*)this;
#ifdef _DEBUG
	if (!iParent)
		t = (NThreadBase*)0xface0fff;
#endif
	__NK_ASSERT_DEBUG(!iReady && (!iParent || (!t->iWaitState.iWtC.iWtStFlags && !t->iPauseCount && !t->iSuspended)));
	TSubScheduler& ss0 = SubScheduler();
	NSchedulable* g = this;
	if (iParent != this && iParent)
		{
		NThreadGroup* tg = (NThreadGroup*)iParent;
		iReady = EReadyGroup;
		if (tg->iReady)
			{
			// extra thread added to group - change priority if necessary
			tg->iNThreadList.Add(this);
			TInt gp = tg->iPriority;
			TSubScheduler& ss = TheSubSchedulers[tg->iReady & EReadyCpuMask];
			ss.iReadyListLock.LockOnly();
			TInt hp = ss.HighestPriority();
			if (iPriority>gp)
				ss.ChangePriority(tg, iPriority);
			if (iPriority>hp || (iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
				{
				if (&ss == &ss0)
					RescheduleNeeded();					// reschedule on this processor
				else
					ss0.iReschedIPIs |= ss.iCpuMask;	// will kick the other CPU when this CPU reenables preemption
				}
			if ((aMode & ENewTimeslice) && t->iTime==0 && (iNext!=this || ss.iQueue[iPriority]))
				t->iTime = t->iTimeslice;
			ss.iReadyListLock.UnlockOnly();
			return;
			}
		tg->iNThreadList.Add(this);
		tg->iPriority = iPriority;	// first in group
		g = tg;						// fall through to add group to subscheduler
		}
	TInt cpu = -1;
	if (aMode & EUnPause)
		{
		cpu = (g->iEventState & EThreadCpuMask)>>EThreadCpuShift;
		if (CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
			goto cpu_ok;
		}
	else if (g->iFreezeCpu)
		{
		cpu = g->iLastCpu;
		if (!CheckCpuAgainstAffinity(cpu, g->iCpuAffinity))
			g->iCpuChange = TRUE;
		}
	else if (!(g->iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK))
		cpu = g->iCpuAffinity;
	else if ((aMode & EPreferSameCpu) && (g->iCpuAffinity & ss0.iCpuMask))
		cpu = ss0.iCpuNum;
	if (cpu < 0)
		{
		// pick a cpu
		TScheduler& s = TheScheduler;
		TUint32 m = g->iCpuAffinity & s.iActiveCpus1;
		TInt i;
		TInt lowest_p = KMaxTInt;
		for (i=0; i<s.iNumCpus; ++i)
			{
			TSubScheduler& ss = *s.iSub[i];
			if (!(m & ss.iCpuMask))
				continue;
			TInt hp = ss.HighestPriority();
			if (hp < lowest_p)
				{
				lowest_p = hp;
				cpu = i;
				continue;
				}
			if (hp > lowest_p)
				continue;
			if (cpu>=0 && g->iLastCpu!=i)
				continue;
			lowest_p = hp;
			cpu = i;
			}
		}
cpu_ok:
	__NK_ASSERT_ALWAYS(cpu>=0);
	if (g->TiedEventReadyInterlock(cpu))
		{
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %dD",cpu));
		++g->iPauseCount;
//		((TDfc*)g->i_IDfcMem)->Add();
		return;
		}
	__KTRACE_OPT(KSCHED2,DEBUGPRINT("ReadyT->CPU %d",cpu));
	TSubScheduler& ss = TheSubSchedulers[cpu];
	ss.iReadyListLock.LockOnly();
	TInt hp = ss.HighestPriority();
	if (g->iPriority>hp || (g->iPriority==hp && ss.iCurrentThread && ss.iCurrentThread->iTime==0))
		{
		if (&ss == &ss0)
			RescheduleNeeded();					// reschedule on this processor
		else
			ss0.iReschedIPIs |= ss.iCpuMask;	// will kick the other CPU when this CPU reenables preemption
		}
	ss.Add(g);
	g->iReady = TUint8(cpu | EReadyOffset);
	if ((aMode & ENewTimeslice) && iParent && t->iTime==0 && g->iNext!=g)
		t->iTime = t->iTimeslice;
	ss.iReadyListLock.UnlockOnly();
	}
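
// Illustrative sketch (not part of the original source): how an RTOS
// personality layer might call ReadyT(), respecting the documented
// preconditions - kernel locked, thread not explicitly suspended. The
// function name and the guard checks are hypothetical.
#if 0	// example only, never compiled
void ExampleMakeReady(NThreadBase* aT)
	{
	aT->LAcqSLock();	// kernel lock + thread spin lock (ReadyT() needs the kernel lock)
	if (!aT->iSuspended && !aT->iPauseCount && !aT->iReady)
		aT->ReadyT(NThreadBase::ENewTimeslice);	// fresh timeslice if queued behind an equal-priority thread
	aT->RelSLockU();	// drop the spin lock, then NKern::Unlock()
	}
#endif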


NThread* TSubScheduler::SelectNextThread()
	{
	NThread* ot = iCurrentThread;
	NThread* t = 0;
	TBool migrate = FALSE;
	TBool gmigrate = FALSE;
	TBool fmd_done = FALSE;
	TBool fmd_res = FALSE;
	if (!ot)
		{
		iReadyListLock.LockOnly();
		iRescheduleNeededFlag = FALSE;
		goto no_ot;
		}
	ot->AcqSLock();
	if (ot->iNewParent)
		ot->iNewParent->AcqSLock();
	SaveTimesliceTimer(ot);	// remember how much of current thread's timeslice remains
	if (ot->iCsFunction==NThreadBase::ECSDivertPending && ot->iWaitState.iWtC.iWtStFlags)
		{
		// thread about to exit so cancel outstanding wait
		ot->DoReleaseT(KErrDied,0);
		}
	if (ot->iWaitState.iWtC.iWtStFlags==0)
		{
		// ASSUMPTION: If iNewParent set, ot can't hold a fast mutex (assertion in JoinGroup)
		TBool pfmd = (ot->iParent!=ot && !ot->iFastMutexDefer);
		if (ot->iTime==0 || pfmd)
			{
			// ot's timeslice has expired
			fmd_res = ot->CheckFastMutexDefer();
			fmd_done = TRUE;
			if (fmd_res)
				{
				if (ot->iTime == 0)
					ot->iTime = 0x80000000;	// mark deferred timeslice expiry
				if (pfmd)
					{
					ot->iFastMutexDefer = 1;
					++ot->iParent->iFreezeCpu;
					}
				}
			}
		}
	iReadyListLock.LockOnly();
	iRescheduleNeededFlag = FALSE;

	// process outstanding suspend/kill/CPU change on ot

	__NK_ASSERT_DEBUG(!(ot->iWaitState.iWtC.iWtStFlags & NThreadWaitState::EWtStWaitActive));
	if (ot->iWaitState.iWtC.iWtStFlags || ot->iPauseCount || ot->iSuspended)
		{
		// ot is no longer ready to run
		__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T WS: %02x %02x (%08x) P:%02x S:%1x", ot,
						ot->iWaitState.iWtC.iWtStFlags, ot->iWaitState.iWtC.iWtObjType, ot->iWaitState.iWtC.iWtObj, ot->iPauseCount, ot->iSuspended));
		TInt wtst = ot->iWaitState.DoWait();
		if (wtst>=0 && wtst!=NThread::EWaitFastMutex)
			ot->iTime = ot->iTimeslice;
		ot->UnReadyT();
		if (ot->iNewParent)
			{
			ot->iParent = ot->iNewParent, ++((NThreadGroup*)ot->iParent)->iThreadCount;
			wmb();	// must make sure iParent is updated before iNewParent is cleared
			ot->iNewParent = 0;
			}
		ot->iCpuChange = FALSE;
		}
	else if (ot->iNewParent)
		{
		__NK_ASSERT_ALWAYS(ot->iParent==ot && !ot->iHeldFastMutex && !ot->iFreezeCpu);
		ot->UnReadyT();
		migrate = TRUE;
		ot->iParent = ot->iNewParent;
		ot->iCpuChange = FALSE;
		++((NThreadGroup*)ot->iParent)->iThreadCount;
		wmb();	// must make sure iParent is updated before iNewParent is cleared
		ot->iNewParent = 0;
		}
	else if (ot->iParent->iCpuChange && !ot->iParent->iFreezeCpu)
		{
		if (!CheckCpuAgainstAffinity(iCpuNum, ot->iParent->iCpuAffinity))
			{
			if (ot->iParent==ot)
				{
				if (!fmd_done)
					fmd_res = ot->CheckFastMutexDefer(), fmd_done = TRUE;
				if (!fmd_res)
					{
					__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T A:%08x",ot,ot->iParent->iCpuAffinity));
					ot->UnReadyT();
					migrate = TRUE;
					ot->iCpuChange = FALSE;
					}
				}
			else
				{
				__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T GA:%08x",ot,ot->iParent->iCpuAffinity));
				Remove(ot->iParent);
				ot->iParent->iReady = 0;
				gmigrate = TRUE;
				ot->iCpuChange = FALSE;
				ot->iParent->iCpuChange = FALSE;
				}
			}
		else
			{
			ot->iCpuChange = FALSE;
			ot->iParent->iCpuChange = FALSE;
			}
		}
no_ot:
	NSchedulable* g = (NSchedulable*)First();
	TBool rrcg = FALSE;
	if (g && g->IsGroup())
		{
		t = (NThread*)((NThreadGroup*)g)->iNThreadList.First();
		if (g->iNext!=g)
			rrcg = TRUE;
		}
	else
		t = (NThread*)g;
	TBool rrct = (t && t->iNext!=t);
	if (t && t->iTime==0 && (rrcg || rrct))
		{
		// candidate thread's timeslice has expired and there is another at the same priority
		if (t==ot)
			{
			if (ot->iParent!=ot)
				{
				((NThreadGroup*)ot->iParent)->iNThreadList.iQueue[ot->iPriority] = ot->iNext;
				iQueue[ot->iParent->iPriority] = ot->iParent->iNext;
				}
			else
				iQueue[ot->iPriority] = ot->iNext;
			ot->iTime = ot->iTimeslice;
			NSchedulable* g2 = (NSchedulable*)First();
			if (g2->IsGroup())
				t = (NThread*)((NThreadGroup*)g2)->iNThreadList.First();
			else
				t = (NThread*)g2;
			if (t->iTime==0)
				{
				// loop again since we need to lock t before round robining it
				__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T RRL",ot));
				iRescheduleNeededFlag = TRUE;
				}
			else
				{
				__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T RR",ot));
				}
/*			if (ot->iCpuAffinity & NTHREADBASE_CPU_AFFINITY_MASK)
				{
				ot->UnReadyT();
				migrate = TRUE;
				}
			else
				ot->iTime = ot->iTimeslice;
*/
			}
		else	// loop again since we need to lock t before round robining it
			{
			__KTRACE_OPT(KSCHED2,DEBUGPRINT("Rschd<-%T LL",ot));
			iRescheduleNeededFlag = TRUE;
			}
		}
	if (t != ot)
		{
		if (ot)
			{
			ot->iCurrent = 0;
			ot->iParent->iCurrent = 0;
			ot->CompleteContextSave();
			}
		if (t)
			{
			t->iLastCpu = iCpuNum;
			t->iParent->iLastCpu = iCpuNum;
			t->iCurrent = TUint8(iCpuNum | NSchedulable::EReadyOffset);
			t->iParent->iCurrent = t->iCurrent;
			}
		iCurrentThread = t;
		}
	UpdateThreadTimes(ot,t);	// update ot's run time and set up the timeslice timer for t
	iReadyListLock.UnlockOnly();
	if (migrate)
		ot->ReadyT(NThreadBase::ENewTimeslice);	// new timeslice if it's queued behind another thread at same priority
	if (gmigrate)
		ot->iParent->ReadyT(0);	// new timeslice if it's queued behind another thread at same priority
	if (ot)
		{
		ot->RelSLock();

		// DFC to signal thread is now dead
		if (ot->iWaitState.ThreadIsDead() && ot->iWaitState.iWtC.iKillDfc)
			ot->iWaitState.iWtC.iKillDfc->DoEnque();
		}
	__KTRACE_OPT(KSCHED,DEBUGPRINT("Rschd->%T",t));
	__NK_ASSERT_ALWAYS(!t || t->iParent);	// must be a thread not a group
	return t;	// could return NULL
	}


void NThreadBase::UnReadyT()
	{
	if (iParent!=this)
		{
		NThreadGroup& g = *(NThreadGroup*)iParent;
		TPriListBase& l = g.iNThreadList;
		l.Remove(this);
		if (g.iReady)
			{
			TSubScheduler& ss = TheSubSchedulers[g.iReady & EReadyCpuMask];
			if (l.IsEmpty())
				{
//				__KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT (G=%G-)",this,&g));
				ss.Remove(&g);
				g.iReady = 0;
				g.iPriority = 0;
				}
			else
				{
//				__KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT (G=%G)",this,&g));
				ss.ChangePriority(&g, l.HighestPriority());
				}
			}
		}
	else
		{
//		__KTRACE_OPT(KNKERN,DEBUGPRINT("%T UnReadyT",this));
		TheSubSchedulers[iReady & EReadyCpuMask].Remove(this);
		}
	iReady = 0;
	}


void NThreadBase::ChangeReadyThreadPriority()
	{
	TInt newp = iMutexPri>iBasePri ? iMutexPri : iBasePri;
	TInt oldp = iPriority;
	TSubScheduler* ss0 = &SubScheduler();
	TSubScheduler* ss = 0;
	if (iParent->iReady)
		{
		ss = TheSubSchedulers + (iParent->iReady & EReadyCpuMask);
		ss->iReadyListLock.LockOnly();
		}
	TBool resched = FALSE;
	NSchedulable* g = iParent;
	if (g!=this)
		{
		NThreadGroup* tg = (NThreadGroup*)g;
		tg->iNThreadList.ChangePriority(this, newp);
		if (ss)
			{
			TInt ngp = tg->iNThreadList.HighestPriority();
			if (ngp!=tg->iPriority)
				ss->ChangePriority(tg, ngp);
			}
		}
	else
		ss->ChangePriority(this, newp);
	if (iCurrent)	// can't be current if parent not ready
		{
		TInt nhp = ss->HighestPriority();
		if (newp<oldp && (newp<nhp || (newp==nhp && iTime==0)))
			resched = TRUE;
		}
	else if (ss)
		{
		NThreadBase* ct = ss->iCurrentThread;
		TInt cp = ct ? ct->iPriority : -1;
		if (newp>cp || (newp==cp && ct->iTime==0))
			resched = TRUE;
		}
	if (resched)
		{
		if (ss == ss0)
			RescheduleNeeded();
		else
			ss0->iReschedIPIs |= ss->iCpuMask;	// will kick the other CPU when this CPU reenables preemption
		}
	if (ss)
		ss->iReadyListLock.UnlockOnly();
	}


/** Changes the priority of a nanokernel thread.

	For use by RTOS personality layers.
	Do not use this function directly on a Symbian OS thread.

	The thread's unknown state handler will be invoked with function EChangePriority
	and parameter newp if the current NState is not recognised and the new priority
	is not equal to the original priority.

	@param	newp The new nanokernel priority (0 <= newp < KNumPriorities).

	@pre	Kernel must be locked.
	@pre	Call in a thread context.

	@post	Kernel is locked.
*/
EXPORT_C void NThreadBase::SetPriority(TInt newp)
	{
	CHECK_PRECONDITIONS(MASK_KERNEL_LOCKED|MASK_NOT_IDFC|MASK_NOT_ISR,"NThreadBase::SetPriority");
	AcqSLock();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetPri %d(%d)->%d(%d)",this,iPriority,iBasePri,newp,iMutexPri));
	iBasePri = TUint8(newp);
	if (iMutexPri > iBasePri)
		newp = iMutexPri;
	TInt oldp = iPriority;
	if (newp == oldp)
		{
		RelSLock();
		return;
		}
	NFastMutex* wfm = 0;
	if (iLinkedObj && iLinkedObjType==EWaitFastMutex)
		wfm = (NFastMutex*)iLinkedObj;
	if (wfm)
		{
		// if thread is attached to/waiting on a fast mutex, need to acquire mutex lock
		++iPauseCount;
		RelSLock();
		wfm->iMutexLock.LockOnly();
		AcqSLock();
		UnPauseT();
		wfm->iWaitQ.ChangePriority(&iWaitLink, newp);	// change position of this thread on mutex wait queue
		}
	if (iReady)
		{
		ChangeReadyThreadPriority();
		RelSLock();
		if (wfm && newp<=wfm->iWaitQ.HighestPriority())
			{
			// this thread was contending for the mutex but there may be other waiting threads
			// with higher or equal priority, so wake up the first thread on the list.
			NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink);
			pT->AcqSLock();

			// if thread is still blocked on this fast mutex, release it but leave it on the wait queue
			// NOTE: it can't be suspended
			pT->iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
			pT->RelSLock();
			}
		}
	else
		{
		iPriority = (TUint8)newp;
		if (wfm && newp>oldp)
			{
			NThreadBase* pT = _LOFF(wfm->iWaitQ.First(), NThreadBase, iWaitLink);	// highest priority waiting thread
			if (pT==this)
				{
				// this is now highest priority waiting thread so wake it up
				iWaitState.UnBlockT(NThreadBase::EWaitFastMutex, wfm, KErrNone);
				}
			}
		RelSLock();
		}
	if (wfm)
		{
		NThreadBase* t = (NThreadBase*)(TLinAddr(wfm->iHoldingThread)&~3);
		if (t)
			t->SetMutexPriority(wfm);
		wfm->iMutexLock.UnlockOnly();
		}
	}
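
// Illustrative sketch (not part of the original source): a personality layer
// changing a nanothread's priority under the preconditions documented above.
// ExampleSetPriority() is a hypothetical name; KNumPriorities bounds the
// valid range per the @param note.
#if 0	// example only, never compiled
void ExampleSetPriority(NThreadBase* aT, TInt aNewPri)
	{
	__NK_ASSERT_DEBUG(aNewPri>=0 && aNewPri<KNumPriorities);
	NKern::Lock();				// SetPriority() requires the kernel lock
	aT->SetPriority(aNewPri);	// takes and releases aT's spin lock internally
	NKern::Unlock();
	}
#endif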


/** Set the inherited priority of a nanokernel thread.

	@pre	Kernel must be locked.
	@pre	Call in a thread context.
	@pre	The thread holds a fast mutex

	@post	Kernel is locked.
*/
void NThreadBase::SetMutexPriority(NFastMutex* aM)
	{
	TInt newp = aM->iWaitQ.HighestPriority();
	if (newp<0)
		newp = 0;
	AcqSLock();
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nSetMPri %d->%d Base %d (mutex %08x)",this,iMutexPri,newp,iBasePri,aM));
	iMutexPri = TUint8(newp);
	if (iMutexPri < iBasePri)
		newp = iBasePri;
	TInt oldp = iPriority;
	if (newp == oldp)
		{
		RelSLock();
		return;
		}
	if (iReady)
		ChangeReadyThreadPriority();
	else
		iPriority = (TUint8)newp;
	RelSLock();
	}
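
// Illustrative sketch (not part of the original source): the priority
// inheritance effect of SetMutexPriority(). When a waiter blocks on a held
// fast mutex, the wait path calls SetMutexPriority() on the holder, so the
// holder runs at max(its base priority, highest waiter priority) until it
// releases the mutex and LoseInheritedPriorityT() (below) drops it back.
// The thread/mutex names and priority values here are hypothetical.
#if 0	// example only, never compiled
void ExampleInheritance(NThreadBase* aHolder, NFastMutex* aM)
	{
	// suppose aHolder has iBasePri 20 and a priority-40 waiter is on aM's wait queue;
	// called with aM->iMutexLock held, as in SetPriority() above:
	aHolder->SetMutexPriority(aM);	// aHolder->iMutexPri = 40, so aHolder runs at 40
	// ... aHolder releases aM, then LoseInheritedPriorityT() restores priority 20 ...
	}
#endif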


void NThreadBase::LoseInheritedPriorityT()
	{
	__KTRACE_OPT(KNKERN,DEBUGPRINT("%T nLoseInhPri %d->%d",this,iPriority,iBasePri));
	TSubScheduler* ss = &SubScheduler();
	TInt newp = iBasePri;
	NSchedulable* g = iParent;
	ss->iReadyListLock.LockOnly();
	if (g!=this)
		{
		NThreadGroup* tg = (NThreadGroup*)g;
		tg->iNThreadList.ChangePriority(this, newp);
		TInt hp = tg->iNThreadList.HighestPriority();
		if (hp == tg->iPriority)
			{
			if (newp <= hp)
				RescheduleNeeded();
			goto out;
			}
		newp = hp;
		g = tg;
		}
	if (newp <= ss->HighestPriority())
		RescheduleNeeded();
	ss->ChangePriority(g, newp);
out:
	ss->iReadyListLock.UnlockOnly();
	}
