1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/include/nkern/nkern.h Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,975 @@
1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\include\nkern\nkern.h
1.18 +//
1.19 +// WARNING: This file contains some APIs which are internal and are subject
1.20 +// to change without notice. Such APIs should therefore not be used
1.21 +// outside the Kernel and Hardware Services package.
1.22 +//
1.23 +
1.24 +#ifndef __NKERN_H__
1.25 +#define __NKERN_H__
1.26 +
1.27 +#ifdef __STANDALONE_NANOKERNEL__
1.28 +#undef __IN_KERNEL__
1.29 +#define __IN_KERNEL__
1.30 +#endif
1.31 +
1.32 +#include <e32const.h>
1.33 +#include <nklib.h>
1.34 +#include <dfcs.h>
1.35 +#include <nk_trace.h>
1.36 +#include <e32atomics.h>
1.37 +
1.38 +extern "C" {
1.39 +/** @internalComponent */
1.40 +IMPORT_C void NKFault(const char* file, TInt line);
1.41 +/** @internalComponent */
1.42 +void NKIdle(TInt aStage);
1.43 +}
1.44 +
1.45 +/**
1.46 +@publishedPartner
1.47 +@released
1.48 +*/
1.49 +#define FAULT() NKFault(__FILE__,__LINE__)
1.50 +
1.51 +#ifdef _DEBUG
1.52 +
1.53 +/**
1.54 +@publishedPartner
1.55 +@released
1.56 +*/
1.57 +#define __NK_ASSERT_DEBUG(c) ((void) ((c)||(FAULT(),0)) )
1.58 +
1.59 +#else
1.60 +
1.61 +#define __NK_ASSERT_DEBUG(c)
1.62 +
1.63 +#endif
1.64 +
1.65 +/**
1.66 +@publishedPartner
1.67 +@released
1.68 +*/
1.69 +#define __NK_ASSERT_ALWAYS(c) ((void) ((c)||(FAULT(),0)) )
1.70 +
1.71 +/**
1.72 + @publishedPartner
1.73 + @released
1.74 +*/
1.75 +const TInt KNumPriorities=64;
1.76 +
1.77 +const TInt KMaxCpus=8;
1.78 +
1.79 +class NThread;
1.80 +
1.81 +
/** Spin lock

	Used for protecting a code fragment against both interrupts and concurrent
	execution on another processor.

	@internalComponent
*/
class TSpinLock
	{
public:
	/** Lock-order values used for deadlock-prevention checking.

		A thread may only acquire a spin lock whose order is lower than that
		of any spin lock it already holds -- TODO confirm direction of the
		ordering rule against the lock implementation.
	*/
	enum TOrder
		{
		// Bit 7 of order clear for locks used with interrupts disabled
		EOrderGenericIrqLow0	=0x00u,		// Device driver spin locks, low range
		EOrderGenericIrqLow1	=0x01u,		// Device driver spin locks, low range
		EOrderGenericIrqLow2	=0x02u,		// Device driver spin locks, low range
		EOrderGenericIrqLow3	=0x03u,		// Device driver spin locks, low range
		EOrderGenericIrqHigh0	=0x18u,		// Device driver spin locks, high range
		EOrderGenericIrqHigh1	=0x19u,		// Device driver spin locks, high range
		EOrderGenericIrqHigh2	=0x1Au,		// Device driver spin locks, high range
		EOrderGenericIrqHigh3	=0x1Bu,		// Device driver spin locks, high range

		// Bit 7 of order set for locks used with interrupts enabled, preemption disabled
		EOrderGenericPreLow0	=0x80u,		// Device driver spin locks, low range
		EOrderGenericPreLow1	=0x81u,		// Device driver spin locks, low range
		EOrderGenericPreHigh0	=0x9Eu,		// Device driver spin locks, high range
		EOrderGenericPreHigh1	=0x9Fu,		// Device driver spin locks, high range

		EOrderNone				=0xFFu		// No order check required (e.g. for dynamic ordering)
		};
public:
	IMPORT_C TSpinLock(TUint aOrder);	// aOrder is one of the TOrder values above
private:
	// 64-bit lock state word; the exact bit layout (lock state, order byte,
	// holder information) is defined by the implementation -- TODO confirm.
	volatile TUint64 iLock;
	};
1.117 +
/** Macro to disable interrupts and acquire the lock.

	NOTE(review): in this build the macro does not reference the spin lock at
	all - the `lock` argument is unused and only the interrupt state changes.
	Presumably this is the uniprocessor variant where disabling interrupts is
	sufficient - TODO confirm an SMP build provides real lock operations.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQ(lock)					((void)NKern::DisableAllInterrupts())

/** Macro to release the lock and enable interrupts.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQ(lock)					(NKern::EnableAllInterrupts())

/** Macro to see if someone else is waiting for the lock, enabling IRQs
	then disabling IRQs again.

	Here it unconditionally opens an interrupt window and reports TRUE.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQ(lock)					(NKern::EnableAllInterrupts(),(void)NKern::DisableAllInterrupts(),((TBool)TRUE))

/** Macro to remember original interrupt state then disable interrupts
	and acquire the lock.

	Evaluates to the saved interrupt state, for use with
	__SPIN_UNLOCK_IRQRESTORE().

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQSAVE(lock)				(NKern::DisableAllInterrupts())

/** Macro to release the lock then restore original interrupt state to that
	supplied.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQRESTORE(lock,irq)		(NKern::RestoreInterrupts(irq))

/** Macro to see if someone else is waiting for the lock, enabling IRQs to
	the original state supplied then disabling IRQs again.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQRESTORE(lock,irq)		(NKern::RestoreInterrupts(irq),((void)NKern::DisableAllInterrupts()),((TBool)TRUE))

/** Macro to acquire the lock. This assumes the caller has already disabled
	interrupts/preemption.

	If interrupts/preemption is not disabled a run-time assert will occur
	This is to protect against unsafe code that might lead to same core
	deadlock.

	In device driver code it is safer to use __SPIN_LOCK_IRQSAVE() instead,
	although not as efficient should interrupts already be disabled for the
	duration the lock is held.

	Expands to nothing in this build (see note on __SPIN_LOCK_IRQ above).

@publishedPartner
@prototype
*/
#define __SPIN_LOCK(lock)

/** Macro to release the lock, don't change interrupt/preemption state.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK(lock)

/** Flash the lock without changing interrupt/preemption state.
	Always reports FALSE (no contention possible in this build).

@internalComponent
*/
#define __SPIN_FLASH(lock)						((TBool)FALSE)

/** Macro to see if someone else is waiting for the lock, enabling preemption
	then disabling it again.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_PREEMPT(lock)				((TBool)NKern::PreemptionPoint())
1.199 +
1.200 +
/** Read/Write Spin lock

	A spin lock distinguishing read (shared) from write (exclusive) access;
	used via the __SPIN_*_R / __SPIN_*_W macros below.

	@internalComponent
*/
class TRWSpinLock
	{
public:
	IMPORT_C TRWSpinLock(TUint aOrder);		// Uses same order space as TSpinLock
private:
	// 64-bit lock state word; bit layout is defined by the implementation.
	volatile TUint64 iLock;
	};
1.212 +
1.213 +
/** Acquire the lock for reading, disabling interrupts first.
	NOTE(review): as with __SPIN_LOCK_IRQ, the lock argument is unused in
	this build - only the interrupt state changes.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQ_R(lock)					((void)NKern::DisableAllInterrupts())

/** Release the read lock and enable interrupts.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQ_R(lock)				(NKern::EnableAllInterrupts())

/** Flash the read lock: enable then re-disable interrupts; reports TRUE.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQ_R(lock)				(NKern::EnableAllInterrupts(),(void)NKern::DisableAllInterrupts(),((TBool)TRUE))

/** Acquire the lock for writing, disabling interrupts first.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQ_W(lock)					((void)NKern::DisableAllInterrupts())

/** Release the write lock and enable interrupts.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQ_W(lock)				(NKern::EnableAllInterrupts())

/** Flash the write lock: enable then re-disable interrupts; reports TRUE.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQ_W(lock)				(NKern::EnableAllInterrupts(),(void)NKern::DisableAllInterrupts(),((TBool)TRUE))


/** Acquire the read lock; caller must already have disabled
	interrupts/preemption. Expands to nothing in this build.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_R(lock)

/** Release the read lock without changing interrupt/preemption state.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_R(lock)

/** Flash the read lock; always FALSE (no contention in this build).

@internalComponent
*/
#define __SPIN_FLASH_R(lock)					((TBool)FALSE)

/** Acquire the write lock; caller must already have disabled
	interrupts/preemption. Expands to nothing in this build.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_W(lock)

/** Release the write lock without changing interrupt/preemption state.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_W(lock)

/** Flash the write lock; always FALSE (no contention in this build).

@internalComponent
*/
#define __SPIN_FLASH_W(lock)					((TBool)FALSE)


/** Save interrupt state, disable interrupts, acquire the read lock.
	Evaluates to the saved state for __SPIN_UNLOCK_IRQRESTORE_R().

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQSAVE_R(lock)				(NKern::DisableAllInterrupts())

/** Release the read lock and restore the supplied interrupt state.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQRESTORE_R(lock,irq)	(NKern::RestoreInterrupts(irq))

/** Flash the read lock via the supplied interrupt state; reports TRUE.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQRESTORE_R(lock,irq)		(NKern::RestoreInterrupts(irq),((void)NKern::DisableAllInterrupts()),((TBool)TRUE))

/** Save interrupt state, disable interrupts, acquire the write lock.

@publishedPartner
@prototype
*/
#define __SPIN_LOCK_IRQSAVE_W(lock)				(NKern::DisableAllInterrupts())

/** Release the write lock and restore the supplied interrupt state.

@publishedPartner
@prototype
*/
#define __SPIN_UNLOCK_IRQRESTORE_W(lock,irq)	(NKern::RestoreInterrupts(irq))

/** Flash the write lock via the supplied interrupt state; reports TRUE.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_IRQRESTORE_W(lock,irq)		(NKern::RestoreInterrupts(irq),((void)NKern::DisableAllInterrupts()),((TBool)TRUE))


/** Flash the read lock via a preemption point.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_PREEMPT_R(lock)			((TBool)NKern::PreemptionPoint())

/** Flash the write lock via a preemption point.

@publishedPartner
@prototype
*/
#define __SPIN_FLASH_PREEMPT_W(lock)			((TBool)NKern::PreemptionPoint())
1.335 +
/** Nanokernel fast semaphore

	A light-weight semaphore class that only supports a single waiting thread,
	suitable for the Symbian OS thread I/O semaphore.

	Initialising a NFastSemaphore involves two steps:

	- Constructing the semaphore
	- Setting the semaphore owning thread (the one allowed to wait on it)

	For example, creating one for the current thread to wait on:

	@code
	NFastSemaphore sem;
	sem.iOwningThread = NKern::CurrentThread();
	@endcode

	@publishedPartner
	@released
*/
class NFastSemaphore
	{
public:
	inline NFastSemaphore();
	inline NFastSemaphore(NThreadBase* aThread);
	IMPORT_C void SetOwner(NThreadBase* aThread);	/**< Set the thread allowed to wait on the semaphore */
	IMPORT_C void Wait();		/**< Wait on the semaphore; only the owning thread may call this */
	IMPORT_C void Signal();		/**< Signal the semaphore once */
	IMPORT_C void SignalN(TInt aCount);		/**< Signal the semaphore aCount times */
	IMPORT_C void Reset();		/**< Reset the semaphore -- presumably zeroes the count, releasing any waiter; TODO confirm */
	void WaitCancel();			/**< @internalComponent Cancel an in-progress wait */
public:
	TInt iCount; /**< @internalComponent Semaphore count; negative presumably indicates a waiting thread -- TODO confirm */

	/** The thread allowed to wait on the semaphore
	@internalComponent
	*/
	NThreadBase* iOwningThread;
	};
1.375 +
1.376 +/** Create a fast semaphore
1.377 +
1.378 + @publishedPartner
1.379 + @released
1.380 +*/
1.381 +inline NFastSemaphore::NFastSemaphore()
1.382 + : iCount(0), iOwningThread(NULL)
1.383 + {}
1.384 +
/** Nanokernel fast mutex

	A light-weight priority-inheritance mutex that can be used if the following
	conditions apply:

	- Threads that hold the mutex never block.
	- The mutex is never acquired in a nested fashion

	If either of these conditions is not met, a DMutex object is more appropriate.

	@publishedPartner
	@released
*/
class NFastMutex
	{
public:
	IMPORT_C NFastMutex();
	IMPORT_C void Wait();		/**< Acquire the mutex, waiting if it is already held */
	IMPORT_C void Signal();		/**< Release the mutex */
	IMPORT_C TBool HeldByCurrentThread();	/**< @internalComponent True if the current thread holds this mutex */
public:
	NThreadBase* iHoldingThread; /**< @internalComponent Holding thread; presumably NULL when the mutex is free -- TODO confirm */

	/** MUST ALWAYS BE 0 or 1
	@internalComponent
	*/
	TInt iWaiting;
	};
1.413 +
1.414 +
1.415 +/**
1.416 +@publishedPartner
1.417 +@released
1.418 +
1.419 +The type of the callback function used by the nanokernel timer.
1.420 +
1.421 +@see NTimer
1.422 +*/
1.423 +typedef void (*NTimerFn)(TAny*);
1.424 +
1.425 +
1.426 +
1.427 +
1.428 +/**
1.429 +@publishedPartner
1.430 +@released
1.431 +
1.432 +A basic relative timer provided by the nanokernel.
1.433 +
1.434 +It can generate either a one-shot interrupt or periodic interrupts.
1.435 +
1.436 +A timeout handler is called when the timer expires, either:
- from the timer ISR - if the timer is queued via OneShot(TInt aTime) or OneShot(TInt aTime, EFalse), or
- from the nanokernel timer DFC thread - if the timer is queued via OneShot(TInt aTime, ETrue), or
- from the DFC thread that the supplied DFC belongs to - if the timer is queued via OneShot(TInt aTime, TDfc& aDfc).
1.440 +Call-back mechanism cannot be changed in the life time of a timer.
1.441 +
1.442 +These timer objects may be manipulated from any context.
1.443 +The timers are driven from a periodic system tick interrupt,
1.444 +usually a 1ms period.
1.445 +
1.446 +@see NTimerFn
1.447 +*/
class NTimer : public SDblQueLink
	{
public:
	/**
	Default constructor.

	NOTE(review): only iState is initialised here; iPtr, iFunction and the
	other fields are left indeterminate and must be set before the timer is
	queued -- TODO confirm callers always do so.
	*/
	inline NTimer()
		: iState(EIdle)
		{}
	/**
	Constructor taking a callback function and a pointer to be passed
	to the callback function.

	@param aFunction The callback function.
	@param aPtr A pointer to be passed to the callback function
	when called.
	*/
	inline NTimer(NTimerFn aFunction, TAny* aPtr)
		: iPtr(aPtr), iFunction(aFunction), iState(EIdle)
		{}
	IMPORT_C TInt OneShot(TInt aTime);				/**< Queue a one-shot timeout, callback in ISR context */
	IMPORT_C TInt OneShot(TInt aTime, TBool aDfc);	/**< Queue a one-shot timeout, callback in ISR or DFC context */
	IMPORT_C TInt OneShot(TInt aTime, TDfc& aDfc);	/**< Queue a one-shot timeout which queues the given DFC on expiry */
	IMPORT_C TInt Again(TInt aTime);				/**< Re-queue relative to the last expiry (for periodic use) -- TODO confirm reference point */
	IMPORT_C TBool Cancel();						/**< Cancel a pending timer */
	IMPORT_C TBool IsPending();						/**< True if the timer is currently queued */
public:
/**
	@internalComponent
*/
	enum TState
		{
		EIdle=0,			// not queued
		ETransferring=1,	// being transferred from holding to ordered queue
		EHolding=2,			// on holding queue
		EOrdered=3,			// on ordered queue
		ECritical=4,		// on ordered queue and in use by queue walk routine
		EFinal=5,			// on final queue
		};
public:
	/** Argument for callback function or the pointer to TDfc */
	TAny* iPtr; /**< @internalComponent */

	/** Pointer to callback function. NULL value indicates that queuing of provided Dfc queue will be done
		instead of calling callback function on completion */
	NTimerFn iFunction; /**< @internalComponent */

	TUint32 iTriggerTime; /**< @internalComponent Tick count at which the timer expires -- TODO confirm */
	TUint8 iCompleteInDfc; /**< @internalComponent Non-zero if completion runs in the timer DFC rather than the ISR */
	TUint8 iState; /**< @internalComponent One of the TState values */
	TUint8 iPad1; /**< @internalComponent Padding */

	/** Available for timer client to use.
	@internalTechnology */
	TUint8 iUserFlags;
	};
1.504 +
1.505 +/**
1.506 +@internalTechnology
1.507 +*/
1.508 +#define i_NTimer_iUserFlags iUserFlags
1.509 +
1.510 +/**
1.511 +@internalComponent
1.512 +*/
1.513 +#define i_NTimer_iState iState
1.514 +
1.515 +/**
1.516 + @publishedPartner
1.517 + @released
1.518 +*/
1.519 +typedef void (*NThreadFunction)(TAny*);
1.520 +
1.521 +/**
1.522 + @publishedPartner
1.523 + @released
1.524 +*/
1.525 +typedef TDfc* (*NThreadExitHandler)(NThread*);
1.526 +
1.527 +/**
1.528 + @publishedPartner
1.529 + @released
1.530 +*/
1.531 +typedef void (*NThreadStateHandler)(NThread*,TInt,TInt);
1.532 +
1.533 +/**
1.534 + @publishedPartner
1.535 + @released
1.536 +*/
1.537 +typedef void (*NThreadExceptionHandler)(TAny*,NThread*);
1.538 +
1.539 +/**
1.540 + @publishedPartner
1.541 + @released
1.542 +*/
1.543 +typedef void (*NThreadTimeoutHandler)(NThread*,TInt);
1.544 +
/** Handler functions invoked by the nanokernel on behalf of a nanothread.

	Any entry may be the corresponding NTHREAD_DEFAULT_*_HANDLER value.

 @publishedPartner
 @released
*/
struct SNThreadHandlers
	{
	NThreadExitHandler iExitHandler;			// called when the thread exits; returns a TDfc* -- see NThreadExitHandler
	NThreadStateHandler iStateHandler;			// called on state changes the nanokernel does not handle itself
	NThreadExceptionHandler iExceptionHandler;	// called when the thread takes an exception
	NThreadTimeoutHandler iTimeoutHandler;		// called when a wait times out
	};
1.556 +
1.557 +/** @internalComponent */
1.558 +extern void NThread_Default_State_Handler(NThread*, TInt, TInt);
1.559 +
1.560 +/** @internalComponent */
1.561 +extern void NThread_Default_Exception_Handler(TAny*, NThread*);
1.562 +
1.563 +/** @internalComponent */
1.564 +#define NTHREAD_DEFAULT_EXIT_HANDLER ((NThreadExitHandler)0)
1.565 +
1.566 +/** @internalComponent */
1.567 +#define NTHREAD_DEFAULT_STATE_HANDLER (&NThread_Default_State_Handler)
1.568 +
1.569 +/** @internalComponent */
1.570 +#define NTHREAD_DEFAULT_EXCEPTION_HANDLER (&NThread_Default_Exception_Handler)
1.571 +
1.572 +/** @internalComponent */
1.573 +#define NTHREAD_DEFAULT_TIMEOUT_HANDLER ((NThreadTimeoutHandler)0)
1.574 +
1.575 +
/** Table of fast executive calls for a nanothread.

 @publishedPartner
 @released
*/
struct SFastExecTable
	{
	TInt iFastExecCount;		// number of fast exec calls; includes implicit function#0
	TLinAddr iFunction[1];		// flexible array; first entry is for call number 1
	};
1.585 +
1.586 +/**
1.587 + @publishedPartner
1.588 + @released
1.589 +*/
1.590 +const TUint32 KExecFlagClaim=0x80000000; // claim system lock
1.591 +
1.592 +/**
1.593 + @publishedPartner
1.594 + @released
1.595 +*/
1.596 +const TUint32 KExecFlagRelease=0x40000000; // release system lock
1.597 +
1.598 +/**
1.599 + @publishedPartner
1.600 + @released
1.601 +*/
1.602 +const TUint32 KExecFlagPreprocess=0x20000000; // preprocess
1.603 +
1.604 +/**
1.605 + @publishedPartner
1.606 + @released
1.607 +*/
1.608 +const TUint32 KExecFlagExtraArgMask=0x1C000000; // 3 bits indicating additional arguments
1.609 +
1.610 +/**
1.611 + @publishedPartner
1.612 + @released
1.613 +*/
1.614 +const TUint32 KExecFlagExtraArgs2=0x04000000; // 2 additional arguments
1.615 +
1.616 +/**
1.617 + @publishedPartner
1.618 + @released
1.619 +*/
1.620 +const TUint32 KExecFlagExtraArgs3=0x08000000; // 3 additional arguments
1.621 +
1.622 +/**
1.623 + @publishedPartner
1.624 + @released
1.625 +*/
1.626 +const TUint32 KExecFlagExtraArgs4=0x0C000000; // 4 additional arguments
1.627 +
1.628 +/**
1.629 + @publishedPartner
1.630 + @released
1.631 +*/
1.632 +const TUint32 KExecFlagExtraArgs5=0x10000000; // 5 additional arguments
1.633 +
1.634 +/**
1.635 + @publishedPartner
1.636 + @released
1.637 +*/
1.638 +const TUint32 KExecFlagExtraArgs6=0x14000000; // 6 additional arguments
1.639 +
1.640 +/**
1.641 + @publishedPartner
1.642 + @released
1.643 +*/
1.644 +const TUint32 KExecFlagExtraArgs7=0x18000000; // 7 additional arguments
1.645 +
1.646 +/**
1.647 + @publishedPartner
1.648 + @released
1.649 +*/
1.650 +const TUint32 KExecFlagExtraArgs8=0x1C000000; // 8 additional arguments
1.651 +
1.652 +
/** One entry in the slow executive call table.

 @publishedPartner
 @released
*/
struct SSlowExecEntry
	{
	TUint32 iFlags;			// information about call; KExecFlag* values defined above
	TLinAddr iFunction;		// address of function to be called
	};
1.662 +
1.663 +
/** Table of slow executive calls for a nanothread.

 @publishedPartner
 @released
*/
struct SSlowExecTable
	{
	TInt iSlowExecCount;				// number of entries in iEntries
	TLinAddr iInvalidExecHandler;		// used if call number invalid
	TLinAddr iPreprocessHandler;		// used for handle lookups (KExecFlagPreprocess)
	SSlowExecEntry iEntries[1];			// flexible array; first entry is for call number 0
	};
1.675 +
1.676 +// Thread iAttributes Constants
1.677 +const TUint8 KThreadAttImplicitSystemLock=1; /**< @internalComponent */
1.678 +const TUint8 KThreadAttAddressSpace=2; /**< @internalComponent */
1.679 +const TUint8 KThreadAttLoggable=4; /**< @internalComponent */
1.680 +const TUint8 KThreadAttDelayed=8; /**< @internalComponent */
1.681 +
1.682 +
1.683 +// Thread CPU
1.684 +const TUint32 KCpuAffinityAny=0xffffffffu; /**< @internalComponent */
1.685 +
/** Information needed for creating a nanothread.

	@publishedPartner
	@released
*/
struct SNThreadCreateInfo
	{
	NThreadFunction iFunction;			// thread entry point
	TAny* iStackBase;					// base address of the thread's stack
	TInt iStackSize;					// stack size in bytes
	TInt iPriority;						// initial priority (0..KNumPriorities-1 -- TODO confirm range)
	TInt iTimeslice;					// initial timeslice (units per TimesliceTicks -- TODO confirm)
	TUint8 iAttributes;					// combination of the KThreadAtt* flags
	TUint32 iCpuAffinity;				// CPU affinity, or KCpuAffinityAny
	const SNThreadHandlers* iHandlers;	// exit/state/exception/timeout handlers
	const SFastExecTable* iFastExecTable;	// fast exec call table for the thread
	const SSlowExecTable* iSlowExecTable;	// slow exec call table for the thread
	const TUint32* iParameterBlock;		// initial data for the thread (see below)
	TInt iParameterBlockSize;	// if zero, iParameterBlock _is_ the initial data
								// otherwise it points to n bytes of initial data
	};
1.707 +
1.708 +/** Constant for use with NKern:: functions which release a fast mutex as well
1.709 + as performing some other operations.
1.710 +
1.711 + @publishedPartner
1.712 + @released
1.713 +*/
1.714 +#define SYSTEM_LOCK (NFastMutex*)0
1.715 +
1.716 +
1.717 +/** Idle handler function
1.718 + Pointer to a function which is called whenever a CPU goes idle
1.719 +
1.720 + @param aPtr The iPtr stored in the SCpuIdleHandler structure
1.721 + @param aStage If positive, the number of processors still active
1.722 + If zero, indicates all processors are now idle
1.723 + -1 indicates that postamble processing is required after waking up
1.724 +
1.725 + @internalComponent
1.726 +*/
1.727 +typedef void (*TCpuIdleHandlerFn)(TAny* aPtr, TInt aStage);
1.728 +
/** Idle handler structure

	Registers a TCpuIdleHandlerFn together with its argument and the flag
	indicating whether postamble processing is needed after wakeup.

	@internalComponent
*/
struct SCpuIdleHandler
	{
	TCpuIdleHandlerFn iHandler;			// function called when a CPU goes idle
	TAny* iPtr;							// passed as aPtr to iHandler
	volatile TBool iPostambleRequired;	// set when post-wakeup processing is needed
	};
1.739 +
1.740 +
/** Reason code passed to a TUserModeCallbackFunc.

@internalComponent
*/
enum TUserModeCallbackReason
	{
	EUserModeCallbackRun,		// callback is being executed normally
	EUserModeCallbackCancel,	// callback is being removed without running
	};
1.749 +
1.750 +
1.751 +/**
1.752 +A callback function executed when a thread returns to user mode.
1.753 +
1.754 +@internalComponent
1.755 +*/
1.756 +typedef void (*TUserModeCallbackFunc)(TAny* aThisPtr, TUserModeCallbackReason aReasonCode);
1.757 +
1.758 +
/**
An object representing a queued callback to be executed when a thread returns to user mode.

Queued as a singly-linked list through iNext; an unqueued object is marked
with KUserModeCallbackUnqueued.

@internalComponent
*/
class TUserModeCallback
	{
public:
	TUserModeCallback(TUserModeCallbackFunc);
	~TUserModeCallback();

public:
	TUserModeCallback* volatile iNext;	// next callback in the thread's queue
	TUserModeCallbackFunc iFunc;		// function to run (or cancel) on return to user mode
	};
1.774 +
1.775 +TUserModeCallback* const KUserModeCallbackUnqueued = ((TUserModeCallback*)1);
1.776 +
1.777 +
/** Nanokernel functions

	Static interface to the nanokernel: thread control, fast semaphores and
	mutexes, scheduler and interrupt control, atomic operations, timing and
	miscellaneous services.

	@publishedPartner
	@released
*/
class NKern
	{
public:
	/** Bitmask values used when blocking a nanothread.
		@see NKern::Block()
	*/
	enum TBlockMode
		{
		EEnterCS=1,		/**< Enter thread critical section before blocking */
		ERelease=2,		/**< Release specified fast mutex before blocking */
		EClaim=4,		/**< Re-acquire specified fast mutex when unblocked */
		EObstruct=8,	/**< Signifies obstruction of thread rather than lack of work to do */
		};

	/** Values that specify the context of the processor.
		@see NKern::CurrentContext()
	*/
	enum TContext
		{
		EThread=0,			/**< The processor is in a thread context*/
		EIDFC=1,			/**< The processor is in an IDFC context*/
		EInterrupt=2,		/**< The processor is in an interrupt context*/
		EEscaped=KMaxTInt	/**< Not a valid processor context on target hardware*/
		};

public:
	// Threads
	IMPORT_C static TInt ThreadCreate(NThread* aThread, SNThreadCreateInfo& anInfo);
	IMPORT_C static TBool ThreadSuspend(NThread* aThread, TInt aCount);
	IMPORT_C static TBool ThreadResume(NThread* aThread);
	IMPORT_C static TBool ThreadResume(NThread* aThread, NFastMutex* aMutex);
	IMPORT_C static TBool ThreadForceResume(NThread* aThread);
	IMPORT_C static TBool ThreadForceResume(NThread* aThread, NFastMutex* aMutex);
	IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue);
	IMPORT_C static void ThreadRelease(NThread* aThread, TInt aReturnValue, NFastMutex* aMutex);
	IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority);
	IMPORT_C static void ThreadSetPriority(NThread* aThread, TInt aPriority, NFastMutex* aMutex);
	IMPORT_C static void ThreadRequestSignal(NThread* aThread);
	IMPORT_C static void ThreadRequestSignal(NThread* aThread, NFastMutex* aMutex);
	IMPORT_C static void ThreadRequestSignal(NThread* aThread, TInt aCount);
	IMPORT_C static void ThreadKill(NThread* aThread);
	IMPORT_C static void ThreadKill(NThread* aThread, NFastMutex* aMutex);
	IMPORT_C static void ThreadEnterCS();
	IMPORT_C static void ThreadLeaveCS();
	static NThread* _ThreadEnterCS();		/**< @internalComponent */
	static void _ThreadLeaveCS();			/**< @internalComponent */
	IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode, NFastMutex* aMutex);
	IMPORT_C static TInt Block(TUint32 aTimeout, TUint aMode);
	IMPORT_C static void NanoBlock(TUint32 aTimeout, TUint aState, TAny* aWaitObj);
	IMPORT_C static void ThreadGetUserContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
	IMPORT_C static void ThreadSetUserContext(NThread* aThread, TAny* aContext);
	IMPORT_C static void ThreadGetSystemContext(NThread* aThread, TAny* aContext, TUint32& aAvailRegistersMask);
	static void ThreadModifyUsp(NThread* aThread, TLinAddr aUsp);
	IMPORT_C static TInt FreezeCpu();											/**< @internalComponent */
	IMPORT_C static void EndFreezeCpu(TInt aCookie);							/**< @internalComponent */
	IMPORT_C static TUint32 ThreadSetCpuAffinity(NThread* aThread, TUint32 aAffinity);	/**< @internalComponent */
	IMPORT_C static void ThreadSetTimeslice(NThread* aThread, TInt aTimeslice);	/**< @internalComponent */
	IMPORT_C static TUint64 ThreadCpuTime(NThread* aThread);					/**< @internalComponent */
	IMPORT_C static TUint32 CpuTimeMeasFreq();									/**< @internalComponent */
	static TInt QueueUserModeCallback(NThreadBase* aThread, TUserModeCallback* aCallback);	/**< @internalComponent */
	static void MoveUserModeCallbacks(NThreadBase* aSrcThread, NThreadBase* aDestThread);	/**< @internalComponent */
	static void CancelUserModeCallbacks();										/**< @internalComponent */

	// Fast semaphores
	IMPORT_C static void FSSetOwner(NFastSemaphore* aSem,NThreadBase* aThread);
	IMPORT_C static void FSWait(NFastSemaphore* aSem);
	IMPORT_C static void FSSignal(NFastSemaphore* aSem);
	IMPORT_C static void FSSignal(NFastSemaphore* aSem, NFastMutex* aMutex);
	IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount);
	IMPORT_C static void FSSignalN(NFastSemaphore* aSem, TInt aCount, NFastMutex* aMutex);

	// Fast mutexes
	IMPORT_C static void FMWait(NFastMutex* aMutex);
	IMPORT_C static void FMSignal(NFastMutex* aMutex);
	IMPORT_C static TBool FMFlash(NFastMutex* aMutex);

	// Scheduler
	IMPORT_C static void Lock();
	IMPORT_C static NThread* LockC();
	IMPORT_C static void Unlock();
	IMPORT_C static TInt PreemptionPoint();

	// Interrupts
	IMPORT_C static TInt DisableAllInterrupts();
	IMPORT_C static TInt DisableInterrupts(TInt aLevel);
	IMPORT_C static void RestoreInterrupts(TInt aRestoreData);
	IMPORT_C static void EnableAllInterrupts();

	// Read-modify-write
	// NOTE(review): these wrap the __e32_atomic_* primitives with full
	// (ordered) memory-barrier semantics; return-value conventions are
	// defined in e32atomics.h -- consult it before relying on them.
	inline static TInt LockedInc(TInt& aCount)
		{ return __e32_atomic_add_ord32(&aCount,1); }
	inline static TInt LockedDec(TInt& aCount)
		{ return __e32_atomic_add_ord32(&aCount,0xffffffff); }
	inline static TInt LockedAdd(TInt& aDest, TInt aSrc)
		{ return __e32_atomic_add_ord32(&aDest,aSrc); }
	inline static TInt64 LockedInc(TInt64& aCount)
		{ return __e32_atomic_add_ord64(&aCount,1); }
	inline static TInt64 LockedDec(TInt64& aCount)
		{ return __e32_atomic_add_ord64(&aCount,TUint64(TInt64(-1))); }
	inline static TInt64 LockedAdd(TInt64& aDest, TInt64 aSrc)		/**< @internalComponent */
		{ return __e32_atomic_add_ord64(&aDest,aSrc); }
	inline static TUint32 LockedSetClear(TUint32& aDest, TUint32 aClearMask, TUint32 aSetMask)
		{ return __e32_atomic_axo_ord32(&aDest,~(aClearMask|aSetMask),aSetMask); }
	inline static TUint16 LockedSetClear16(TUint16& aDest, TUint16 aClearMask, TUint16 aSetMask)	/**< @internalComponent */
		{ return __e32_atomic_axo_ord16(&aDest,TUint16(~(aClearMask|aSetMask)),aSetMask); }
	inline static TUint8 LockedSetClear8(TUint8& aDest, TUint8 aClearMask, TUint8 aSetMask)
		{ return __e32_atomic_axo_ord8(&aDest,TUint8(~(aClearMask|aSetMask)),aSetMask); }
	inline static TInt SafeInc(TInt& aCount)
		{ return __e32_atomic_tas_ord32(&aCount,1,1,0); }
	inline static TInt SafeDec(TInt& aCount)
		{ return __e32_atomic_tas_ord32(&aCount,1,-1,0); }
	inline static TInt AddIfGe(TInt& aCount, TInt aLimit, TInt aInc)	/**< @internalComponent */
		{ return __e32_atomic_tas_ord32(&aCount,aLimit,aInc,0); }
	inline static TInt AddIfLt(TInt& aCount, TInt aLimit, TInt aInc)	/**< @internalComponent */
		{ return __e32_atomic_tas_ord32(&aCount,aLimit,0,aInc); }
	inline static TAny* SafeSwap(TAny* aNewValue, TAny*& aPtr)
		{ return __e32_atomic_swp_ord_ptr(&aPtr, aNewValue); }
	inline static TUint8 SafeSwap8(TUint8 aNewValue, TUint8& aPtr)
		{ return __e32_atomic_swp_ord8(&aPtr, aNewValue); }
	inline static TUint16 SafeSwap16(TUint16 aNewValue, TUint16& aPtr)	/**< @internalComponent */
		{ return __e32_atomic_swp_ord16(&aPtr, aNewValue); }
	inline static TBool CompareAndSwap(TAny*& aPtr, TAny* aExpected, TAny* aNew)	/**< @internalComponent */
		{ return __e32_atomic_cas_ord_ptr(&aPtr, &aExpected, aNew); }
	inline static TBool CompareAndSwap8(TUint8& aPtr, TUint8 aExpected, TUint8 aNew)	/**< @internalComponent */
		{ return __e32_atomic_cas_ord8(&aPtr, (TUint8*)&aExpected, (TUint8)aNew); }
	inline static TBool CompareAndSwap16(TUint16& aPtr, TUint16 aExpected, TUint16 aNew)	/**< @internalComponent */
		{ return __e32_atomic_cas_ord16(&aPtr, (TUint16*)&aExpected, (TUint16)aNew); }
	inline static TUint32 SafeSwap(TUint32 aNewValue, TUint32& aPtr)	/**< @internalComponent */
		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
	inline static TUint SafeSwap(TUint aNewValue, TUint& aPtr)			/**< @internalComponent */
		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
	inline static TInt SafeSwap(TInt aNewValue, TInt& aPtr)				/**< @internalComponent */
		{ return __e32_atomic_swp_ord32(&aPtr, aNewValue); }
	inline static TBool CompareAndSwap(TUint32& aPtr, TUint32 aExpected, TUint32 aNew)	/**< @internalComponent */
		{ return __e32_atomic_cas_ord32(&aPtr, &aExpected, aNew); }
	inline static TBool CompareAndSwap(TUint& aPtr, TUint aExpected, TUint aNew)		/**< @internalComponent */
		{ return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }
	inline static TBool CompareAndSwap(TInt& aPtr, TInt aExpected, TInt aNew)			/**< @internalComponent */
		{ return __e32_atomic_cas_ord32(&aPtr, (TUint32*)&aExpected, (TUint32)aNew); }


	// Miscellaneous
	IMPORT_C static NThread* CurrentThread();
	IMPORT_C static TInt CurrentCpu();										/**< @internalComponent */
	IMPORT_C static TInt NumberOfCpus();									/**< @internalComponent */
	IMPORT_C static void LockSystem();
	IMPORT_C static void UnlockSystem();
	IMPORT_C static TBool FlashSystem();
	IMPORT_C static void WaitForAnyRequest();
	IMPORT_C static void Sleep(TUint32 aTime);
	IMPORT_C static void Exit();
	IMPORT_C static void DeferredExit();
	IMPORT_C static void YieldTimeslice();									/**< @internalComponent */
	IMPORT_C static void RotateReadyList(TInt aPriority);
	IMPORT_C static void RotateReadyList(TInt aPriority, TInt aCpu);		/**< @internalTechnology */
	IMPORT_C static void RecordIntLatency(TInt aLatency, TInt aIntMask);	/**< @internalTechnology */
	IMPORT_C static void RecordThreadLatency(TInt aLatency);				/**< @internalTechnology */
	IMPORT_C static TUint32 TickCount();
	IMPORT_C static TInt TickPeriod();
	IMPORT_C static TInt TimerTicks(TInt aMilliseconds);
	IMPORT_C static TInt TimesliceTicks(TUint32 aMicroseconds);				/**< @internalTechnology */
	IMPORT_C static TInt CurrentContext();
	IMPORT_C static TUint32 FastCounter();
	IMPORT_C static TInt FastCounterFrequency();
	static void Init0(TAny* aVariantData);
	static void Init(NThread* aThread, SNThreadCreateInfo& anInfo);
	IMPORT_C static TBool KernelLocked(TInt aCount=0);						/**< @internalTechnology */
	IMPORT_C static NFastMutex* HeldFastMutex();							/**< @internalTechnology */
	static void Idle();
	IMPORT_C static SCpuIdleHandler* CpuIdleHandler();						/**< @internalTechnology */
	static void NotifyCrash(const TAny* a0, TInt a1);						/**< @internalTechnology */
	IMPORT_C static TBool Crashed();
	static TUint32 IdleGenerationCount();

	// Debugger support
	typedef void (*TRescheduleCallback)(NThread*);
	IMPORT_C static void SchedulerHooks(TLinAddr& aStart, TLinAddr& aEnd);
	IMPORT_C static void InsertSchedulerHooks();
	IMPORT_C static void RemoveSchedulerHooks();
	IMPORT_C static void SetRescheduleCallback(TRescheduleCallback aCallback);
	};
1.964 +
1.965 +
1.966 +/** Create a fast semaphore
1.967 +
1.968 + @publishedPartner
1.969 + @released
1.970 +*/
1.971 +inline NFastSemaphore::NFastSemaphore(NThreadBase* aThread)
1.972 + : iCount(0),
1.973 + iOwningThread(aThread ? aThread : (NThreadBase*)NKern::CurrentThread())
1.974 + {
1.975 + }
1.976 +
1.977 +
1.978 +#endif