os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mexport.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
//
sl@0
    15
sl@0
    16
#include "memmodel.h"
sl@0
    17
#include "mm.h"
sl@0
    18
#include "mmu.h"
sl@0
    19
sl@0
    20
#include "mrom.h"
sl@0
    21
sl@0
    22
/**	Returns the amount of free RAM currently available.
sl@0
    23
sl@0
    24
@return The number of bytes of free RAM currently available.
sl@0
    25
@pre	any context
sl@0
    26
 */
sl@0
    27
EXPORT_C TInt Kern::FreeRamInBytes()
sl@0
    28
	{
sl@0
    29
	TUint numPages = TheMmu.FreeRamInPages();
sl@0
    30
	// hack, clip free RAM to fit into a signed integer...
sl@0
    31
	if(numPages>(KMaxTInt>>KPageShift))
sl@0
    32
		return KMaxTInt;
sl@0
    33
	return numPages*KPageSize;
sl@0
    34
	}
sl@0
    35
sl@0
    36
sl@0
    37
/**	Rounds up the argument to the size of a MMU page.
sl@0
    38
sl@0
    39
	To find out the size of a MMU page:
sl@0
    40
	@code
sl@0
    41
	size = Kern::RoundToPageSize(1);
sl@0
    42
	@endcode
sl@0
    43
sl@0
    44
	@param aSize Value to round up
sl@0
    45
	@pre any context
sl@0
    46
 */
sl@0
    47
EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize)
sl@0
    48
	{
sl@0
    49
	return (aSize+KPageMask)&~KPageMask;
sl@0
    50
	}
sl@0
    51
sl@0
    52
sl@0
    53
/**	Rounds up the argument to the amount of memory mapped by a MMU page 
sl@0
    54
	directory entry.
sl@0
    55
sl@0
    56
	Chunks occupy one or more consecutive page directory entries (PDE) and
sl@0
    57
	therefore the amount of linear and physical memory allocated to a chunk is
sl@0
    58
	always a multiple of the amount of memory mapped by a page directory entry.
sl@0
    59
 */
sl@0
    60
EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize)
sl@0
    61
	{
sl@0
    62
	return (aSize+KChunkMask)&~KChunkMask;
sl@0
    63
	}
sl@0
    64
sl@0
    65
sl@0
    66
//
// Epoc class
// 
#ifdef BTRACE_KERNEL_MEMORY
// Counters exposed when the BTrace kernel-memory trace category is compiled in.
// NOTE(review): neither counter is updated anywhere in this file — presumably
// maintained by the allocation paths elsewhere in the memory model; confirm
// against the BTrace instrumentation before relying on their meaning.
TInt   Epoc::DriverAllocdPhysRam = 0;
TInt   Epoc::KernelMiscPages = 0;
#endif
sl@0
    73
sl@0
    74
sl@0
    75
/**
sl@0
    76
Allows the variant to specify the details of the RAM zones. This should be invoked 
sl@0
    77
by the variant in its implementation of the pure virtual function Asic::Init1().
sl@0
    78
sl@0
    79
There are some limitations to how the RAM zones can be specified:
sl@0
    80
- Each RAM zone's address space must be distinct and not overlap with any 
sl@0
    81
other RAM zone's address space
sl@0
    82
- Each RAM zone's address space must have a size that is multiples of the 
sl@0
    83
ASIC's MMU small page size and be aligned to the ASIC's MMU small page size, 
sl@0
    84
usually 4KB on ARM MMUs.
sl@0
    85
- When taken together all of the RAM zones must cover the whole of the physical RAM
sl@0
    86
address space as specified by the bootstrap in the SuperPage members iTotalRamSize
sl@0
    87
and iRamBootData;.
sl@0
    88
- There can be no more than KMaxRamZones RAM zones specified by the base port
sl@0
    89
sl@0
    90
Note the verification of the RAM zone data is not performed here but by the ram 
sl@0
    91
allocator later in the boot up sequence.  This is because it is only possible to
sl@0
    92
verify the zone data once the physical RAM configuration has been read from 
sl@0
    93
the super page. Any verification errors result in a "RAM-ALLOC" panic 
sl@0
    94
faulting the kernel during initialisation.
sl@0
    95
sl@0
    96
@param aZones Pointer to an array of SRamZone structs containing the details for all 
sl@0
    97
the zones. The end of the array is specified by an element with an iSize of zero. The array must 
sl@0
    98
remain in memory at least until the kernel has successfully booted.
sl@0
    99
sl@0
   100
@param aCallback Pointer to a call back function that the kernel may invoke to request 
sl@0
   101
one of the operations specified by TRamZoneOp.
sl@0
   102
sl@0
   103
@return KErrNone if successful, otherwise one of the system wide error codes
sl@0
   104
sl@0
   105
@see TRamZoneOp
sl@0
   106
@see SRamZone
sl@0
   107
@see TRamZoneCallback
sl@0
   108
*/
sl@0
   109
EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
sl@0
   110
	{
sl@0
   111
	TRamZoneCallback dummy;
sl@0
   112
	// Ensure this is only called once and only while we are initialising the kernel
sl@0
   113
	if (!K::Initialising || TheMmu.RamZoneConfig(dummy) != NULL)
sl@0
   114
		{// fault kernel, won't return
sl@0
   115
		K::Fault(K::EBadSetRamZoneConfig);
sl@0
   116
		}
sl@0
   117
sl@0
   118
	if (NULL == aZones)
sl@0
   119
		{
sl@0
   120
		return KErrArgument;
sl@0
   121
		}
sl@0
   122
	TheMmu.SetRamZoneConfig(aZones, aCallback);
sl@0
   123
	return KErrNone;
sl@0
   124
	}
sl@0
   125
sl@0
   126
sl@0
   127
/**
sl@0
   128
Modify the specified RAM zone's flags.
sl@0
   129
sl@0
   130
This allows the BSP or device driver to configure which type of pages, if any,
sl@0
   131
can be allocated into a RAM zone by the system.
sl@0
   132
sl@0
   133
Note: updating a RAM zone's flags can result in
sl@0
   134
	1 - memory allocations failing despite there being enough free RAM in the system.
sl@0
   135
	2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone()
sl@0
   136
	or TRamDefragRequest::DefragRam() never succeeding.
sl@0
   137
sl@0
   138
The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc
sl@0
   139
are intended to be used with this method.
sl@0
   140
sl@0
   141
@param aId			The ID of the RAM zone to modify.
sl@0
   142
@param aClearMask	The bit mask to clear, each flag of which must already be set on the RAM zone.
sl@0
   143
@param aSetMask		The bit mask to set.
sl@0
   144
sl@0
   145
@return KErrNone on success, KErrArgument if the RAM zone of aId not found or if 
sl@0
   146
aSetMask contains invalid flag bits.
sl@0
   147
sl@0
   148
@see TRamDefragRequest::EmptyRamZone()
sl@0
   149
@see TRamDefragRequest::ClaimRamZone()
sl@0
   150
@see TRamDefragRequest::DefragRam()
sl@0
   151
sl@0
   152
@see KRamZoneFlagDiscardOnly
sl@0
   153
@see KRamZoneFlagMovAndDisOnly
sl@0
   154
@see KRamZoneFlagNoAlloc
sl@0
   155
*/
sl@0
   156
EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
sl@0
   157
	{
sl@0
   158
	RamAllocLock::Lock();
sl@0
   159
	TInt r = TheMmu.ModifyRamZoneFlags(aId, aClearMask, aSetMask);
sl@0
   160
	RamAllocLock::Unlock();
sl@0
   161
	return r;
sl@0
   162
	}
sl@0
   163
sl@0
   164
sl@0
   165
/**
sl@0
   166
Gets the current count of a particular RAM zone's pages by type.
sl@0
   167
sl@0
   168
@param aId The ID of the RAM zone to enquire about
sl@0
   169
@param aPageData If successful, on return this contains the page count
sl@0
   170
sl@0
   171
@return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or
sl@0
   172
one of the system wide error codes 
sl@0
   173
sl@0
   174
@pre Calling thread must be in a critical section.
sl@0
   175
@pre Interrupts must be enabled.
sl@0
   176
@pre Kernel must be unlocked.
sl@0
   177
@pre No fast mutex can be held.
sl@0
   178
@pre Call in a thread context.
sl@0
   179
sl@0
   180
@see SRamZonePageCount
sl@0
   181
*/
sl@0
   182
EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
sl@0
   183
	{
sl@0
   184
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount");
sl@0
   185
	RamAllocLock::Lock();
sl@0
   186
	TInt r = TheMmu.GetRamZonePageCount(aId, aPageData);
sl@0
   187
	RamAllocLock::Unlock();
sl@0
   188
	return r;
sl@0
   189
	}
sl@0
   190
sl@0
   191
sl@0
   192
/**
sl@0
   193
Allocate a block of physically contiguous RAM with a physical address aligned
sl@0
   194
to a specified power of 2 boundary.
sl@0
   195
When the RAM is no longer required it should be freed using
sl@0
   196
Epoc::FreePhysicalRam()
sl@0
   197
sl@0
   198
@param	aSize		The size in bytes of the required block. The specified size
sl@0
   199
					is rounded up to the page size, since only whole pages of
sl@0
   200
					physical RAM can be allocated.
sl@0
   201
@param	aPhysAddr	Receives the physical address of the base of the block on
sl@0
   202
					successful allocation.
sl@0
   203
@param	aAlign		Specifies the number of least significant bits of the
sl@0
   204
					physical address which are required to be zero. If a value
sl@0
   205
					less than log2(page size) is specified, page alignment is
sl@0
   206
					assumed. Pass 0 for aAlign if there are no special alignment
sl@0
   207
					constraints (other than page alignment).
sl@0
   208
@return	KErrNone if the allocation was successful.
sl@0
   209
		KErrNoMemory if a sufficiently large physically contiguous block of free
sl@0
   210
		RAM	with the specified alignment could not be found.
sl@0
   211
@pre Calling thread must be in a critical section.
sl@0
   212
@pre Interrupts must be enabled.
sl@0
   213
@pre Kernel must be unlocked.
sl@0
   214
@pre No fast mutex can be held.
sl@0
   215
@pre Call in a thread context.
sl@0
   216
@pre Can be used in a device driver.
sl@0
   217
*/
sl@0
   218
EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
sl@0
   219
	{
sl@0
   220
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam");
sl@0
   221
	RamAllocLock::Lock();
sl@0
   222
	TInt r = TheMmu.AllocPhysicalRam
sl@0
   223
		(
sl@0
   224
		aPhysAddr,
sl@0
   225
		MM::RoundToPageCount(aSize),
sl@0
   226
		MM::RoundToPageShift(aAlign),
sl@0
   227
		(Mmu::TRamAllocFlags)EMemAttStronglyOrdered
sl@0
   228
		);
sl@0
   229
	RamAllocLock::Unlock();
sl@0
   230
	return r;
sl@0
   231
	}
sl@0
   232
sl@0
   233
sl@0
   234
/**
sl@0
   235
Allocate a block of physically contiguous RAM with a physical address aligned
sl@0
   236
to a specified power of 2 boundary from the specified zone.
sl@0
   237
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
sl@0
   238
sl@0
   239
Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
sl@0
   240
to allocate regardless of whether the other flags are set for the specified RAM zones 
sl@0
   241
or not.
sl@0
   242
sl@0
   243
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
sl@0
   244
sl@0
   245
@param 	aZoneId		The ID of the zone to attempt to allocate from.
sl@0
   246
@param	aSize		The size in bytes of the required block. The specified size
sl@0
   247
					is rounded up to the page size, since only whole pages of
sl@0
   248
					physical RAM can be allocated.
sl@0
   249
@param	aPhysAddr	Receives the physical address of the base of the block on
sl@0
   250
					successful allocation.
sl@0
   251
@param	aAlign		Specifies the number of least significant bits of the
sl@0
   252
					physical address which are required to be zero. If a value
sl@0
   253
					less than log2(page size) is specified, page alignment is
sl@0
   254
					assumed. Pass 0 for aAlign if there are no special alignment
sl@0
   255
					constraints (other than page alignment).
sl@0
   256
@return	KErrNone if the allocation was successful.
sl@0
   257
		KErrNoMemory if a sufficiently large physically contiguous block of free
sl@0
   258
		RAM	with the specified alignment could not be found within the specified 
sl@0
   259
		zone.
sl@0
   260
		KErrArgument if a RAM zone of the specified ID can't be found or if the
sl@0
   261
		RAM zone has a total number of physical pages which is less than those 
sl@0
   262
		requested for the allocation.
sl@0
   263
sl@0
   264
@pre Calling thread must be in a critical section.
sl@0
   265
@pre Interrupts must be enabled.
sl@0
   266
@pre Kernel must be unlocked.
sl@0
   267
@pre No fast mutex can be held.
sl@0
   268
@pre Call in a thread context.
sl@0
   269
@pre Can be used in a device driver.
sl@0
   270
*/
sl@0
   271
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
sl@0
   272
	{
sl@0
   273
	return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign);
sl@0
   274
	}
sl@0
   275
sl@0
   276
sl@0
   277
/**
sl@0
   278
Allocate a block of physically contiguous RAM with a physical address aligned
sl@0
   279
to a specified power of 2 boundary from the specified RAM zones.
sl@0
   280
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
sl@0
   281
sl@0
   282
RAM will be allocated into the RAM zones in the order they are specified in the 
sl@0
   283
aZoneIdList parameter. If the contiguous allocations are intended to span RAM zones 
sl@0
   284
when required then aZoneIdList should be listed with the RAM zones in ascending 
sl@0
   285
physical address order.
sl@0
   286
sl@0
   287
Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
sl@0
   288
to allocate regardless of whether the other flags are set for the specified RAM zones 
sl@0
   289
or not.
sl@0
   290
sl@0
   291
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
sl@0
   292
sl@0
   293
@param 	aZoneIdList	A pointer to an array of RAM zone IDs of the RAM zones to 
sl@0
   294
					attempt to allocate from.
sl@0
   295
@param 	aZoneIdCount The number of RAM zone IDs contained in aZoneIdList.
sl@0
   296
@param	aSize		The size in bytes of the required block. The specified size
sl@0
   297
					is rounded up to the page size, since only whole pages of
sl@0
   298
					physical RAM can be allocated.
sl@0
   299
@param	aPhysAddr	Receives the physical address of the base of the block on
sl@0
   300
					successful allocation.
sl@0
   301
@param	aAlign		Specifies the number of least significant bits of the
sl@0
   302
					physical address which are required to be zero. If a value
sl@0
   303
					less than log2(page size) is specified, page alignment is
sl@0
   304
					assumed. Pass 0 for aAlign if there are no special alignment
sl@0
   305
					constraints (other than page alignment).
sl@0
   306
@return	KErrNone if the allocation was successful.
sl@0
   307
		KErrNoMemory if a sufficiently large physically contiguous block of free
sl@0
   308
		RAM	with the specified alignment could not be found within the specified 
sl@0
   309
		zone.
sl@0
   310
		KErrArgument if a RAM zone of a specified ID can't be found or if the
sl@0
   311
		RAM zones have a total number of physical pages which is less than those 
sl@0
   312
		requested for the allocation.
sl@0
   313
sl@0
   314
@pre Calling thread must be in a critical section.
sl@0
   315
@pre Interrupts must be enabled.
sl@0
   316
@pre Kernel must be unlocked.
sl@0
   317
@pre No fast mutex can be held.
sl@0
   318
@pre Call in a thread context.
sl@0
   319
@pre Can be used in a device driver.
sl@0
   320
*/
sl@0
   321
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
sl@0
   322
	{
sl@0
   323
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam");
sl@0
   324
	RamAllocLock::Lock();
sl@0
   325
	TInt r = TheMmu.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
sl@0
   326
	RamAllocLock::Unlock();
sl@0
   327
	return r;
sl@0
   328
	}
sl@0
   329
sl@0
   330
sl@0
   331
/**
sl@0
   332
Attempt to allocate discontiguous RAM pages.
sl@0
   333
sl@0
   334
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
sl@0
   335
sl@0
   336
@param	aNumPages	The number of discontiguous pages required to be allocated
sl@0
   337
@param	aPageList	This should be a pointer to a previously allocated array of
sl@0
   338
					aNumPages TPhysAddr elements.  On a successful allocation it 
sl@0
   339
					will receive the physical addresses of each page allocated.
sl@0
   340
sl@0
   341
@return	KErrNone if the allocation was successful.
sl@0
   342
		KErrNoMemory if the requested number of pages can't be allocated
sl@0
   343
sl@0
   344
@pre Calling thread must be in a critical section.
sl@0
   345
@pre Interrupts must be enabled.
sl@0
   346
@pre Kernel must be unlocked.
sl@0
   347
@pre No fast mutex can be held.
sl@0
   348
@pre Call in a thread context.
sl@0
   349
@pre Can be used in a device driver.
sl@0
   350
*/
sl@0
   351
EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
sl@0
   352
	{
sl@0
   353
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam");
sl@0
   354
	RamAllocLock::Lock();
sl@0
   355
	TInt r = TheMmu.AllocPhysicalRam(aPageList,aNumPages,(Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
sl@0
   356
	RamAllocLock::Unlock();
sl@0
   357
	return r;
sl@0
   358
	}
sl@0
   359
sl@0
   360
sl@0
   361
/**
sl@0
   362
Attempt to allocate discontiguous RAM pages from the specified zone.
sl@0
   363
sl@0
   364
Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
sl@0
   365
to allocate regardless of whether the other flags are set for the specified RAM zones 
sl@0
   366
or not.
sl@0
   367
sl@0
   368
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
sl@0
   369
sl@0
   370
@param 	aZoneId		The ID of the zone to attempt to allocate from.
sl@0
   371
@param	aNumPages	The number of discontiguous pages required to be allocated 
sl@0
   372
					from the specified zone.
sl@0
   373
@param	aPageList	This should be a pointer to a previously allocated array of
sl@0
   374
					aNumPages TPhysAddr elements.  On a successful 
sl@0
   375
					allocation it will receive the physical addresses of each 
sl@0
   376
					page allocated.
sl@0
   377
@return	KErrNone if the allocation was successful.
sl@0
   378
		KErrNoMemory if the requested number of pages can't be allocated from the 
sl@0
   379
		specified zone.
sl@0
   380
		KErrArgument if a RAM zone of the specified ID can't be found or if the
sl@0
   381
		RAM zone has a total number of physical pages which is less than those 
sl@0
   382
		requested for the allocation.
sl@0
   383
sl@0
   384
@pre Calling thread must be in a critical section.
sl@0
   385
@pre Interrupts must be enabled.
sl@0
   386
@pre Kernel must be unlocked.
sl@0
   387
@pre No fast mutex can be held.
sl@0
   388
@pre Call in a thread context.
sl@0
   389
@pre Can be used in a device driver.
sl@0
   390
*/
sl@0
   391
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList)
sl@0
   392
	{
sl@0
   393
	return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList);
sl@0
   394
	}
sl@0
   395
sl@0
   396
sl@0
   397
/**
sl@0
   398
Attempt to allocate discontiguous RAM pages from the specified RAM zones.
sl@0
   399
The RAM pages will be allocated into the RAM zones in the order that they are specified 
sl@0
   400
in the aZoneIdList parameter, the RAM zone preferences will be ignored.
sl@0
   401
sl@0
   402
Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
sl@0
   403
to allocate regardless of whether the other flags are set for the specified RAM zones 
sl@0
   404
or not.
sl@0
   405
sl@0
   406
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
sl@0
   407
sl@0
   408
@param 	aZoneIdList	A pointer to an array of RAM zone IDs of the RAM zones to 
sl@0
   409
					attempt to allocate from.
sl@0
   410
@param	aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList.
sl@0
   411
@param	aNumPages	The number of discontiguous pages required to be allocated 
sl@0
   412
					from the specified zone.
sl@0
   413
@param	aPageList	This should be a pointer to a previously allocated array of
sl@0
   414
					aNumPages TPhysAddr elements.  On a successful 
sl@0
   415
					allocation it will receive the physical addresses of each 
sl@0
   416
					page allocated.
sl@0
   417
@return	KErrNone if the allocation was successful.
sl@0
   418
		KErrNoMemory if the requested number of pages can't be allocated from the 
sl@0
   419
		specified zone.
sl@0
   420
		KErrArgument if a RAM zone of a specified ID can't be found or if the
sl@0
   421
		RAM zones have a total number of physical pages which is less than those 
sl@0
   422
		requested for the allocation.
sl@0
   423
sl@0
   424
@pre Calling thread must be in a critical section.
sl@0
   425
@pre Interrupts must be enabled.
sl@0
   426
@pre Kernel must be unlocked.
sl@0
   427
@pre No fast mutex can be held.
sl@0
   428
@pre Call in a thread context.
sl@0
   429
@pre Can be used in a device driver.
sl@0
   430
*/
sl@0
   431
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
sl@0
   432
	{
sl@0
   433
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam");
sl@0
   434
	RamAllocLock::Lock();
sl@0
   435
	TInt r = TheMmu.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList);
sl@0
   436
	RamAllocLock::Unlock();
sl@0
   437
	return r;
sl@0
   438
	}
sl@0
   439
sl@0
   440
sl@0
   441
/**
sl@0
   442
Free a previously-allocated block of physically contiguous RAM.
sl@0
   443
sl@0
   444
Specifying one of the following may cause the system to panic: 
sl@0
   445
a) an invalid physical RAM address.
sl@0
   446
b) valid physical RAM addresses where some had not been previously allocated.
sl@0
   447
c) an address not aligned to a page boundary.
sl@0
   448
sl@0
   449
@param	aPhysAddr	The physical address of the base of the block to be freed.
sl@0
   450
					This must be the address returned by a previous call to
sl@0
   451
					Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(), 
sl@0
   452
					Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone().
sl@0
   453
@param	aSize		The size in bytes of the required block. The specified size
sl@0
   454
					is rounded up to the page size, since only whole pages of
sl@0
   455
					physical RAM can be allocated.
sl@0
   456
@return	KErrNone if the operation was successful.
sl@0
   457
sl@0
   458
sl@0
   459
sl@0
   460
@pre Calling thread must be in a critical section.
sl@0
   461
@pre Interrupts must be enabled.
sl@0
   462
@pre Kernel must be unlocked.
sl@0
   463
@pre No fast mutex can be held.
sl@0
   464
@pre Call in a thread context.
sl@0
   465
@pre Can be used in a device driver.
sl@0
   466
*/
sl@0
   467
EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
sl@0
   468
	{
sl@0
   469
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
sl@0
   470
	RamAllocLock::Lock();
sl@0
   471
	TheMmu.FreePhysicalRam(aPhysAddr,MM::RoundToPageCount(aSize));
sl@0
   472
	RamAllocLock::Unlock();
sl@0
   473
	return KErrNone;
sl@0
   474
	}
sl@0
   475
sl@0
   476
sl@0
   477
/**
sl@0
   478
Free a number of physical RAM pages that were previously allocated using
sl@0
   479
Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam().
sl@0
   480
sl@0
   481
Specifying one of the following may cause the system to panic: 
sl@0
   482
a) an invalid physical RAM address.
sl@0
   483
b) valid physical RAM addresses where some had not been previously allocated.
sl@0
   484
c) an address not aligned to a page boundary.
sl@0
   485
sl@0
   486
@param	aNumPages	The number of pages to be freed.
sl@0
   487
@param	aPageList	An array of aNumPages TPhysAddr elements.  Where each element
sl@0
   488
					should contain the physical address of each page to be freed.
sl@0
   489
					This must be the same set of addresses as those returned by a 
sl@0
   490
					previous call to Epoc::AllocPhysicalRam() or 
sl@0
   491
					Epoc::ZoneAllocPhysicalRam().
sl@0
   492
@return	KErrNone if the operation was successful.
sl@0
   493
  
sl@0
   494
@pre Calling thread must be in a critical section.
sl@0
   495
@pre Interrupts must be enabled.
sl@0
   496
@pre Kernel must be unlocked.
sl@0
   497
@pre No fast mutex can be held.
sl@0
   498
@pre Call in a thread context.
sl@0
   499
@pre Can be used in a device driver.
sl@0
   500
		
sl@0
   501
*/
sl@0
   502
EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
sl@0
   503
	{
sl@0
   504
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
sl@0
   505
	RamAllocLock::Lock();
sl@0
   506
	TheMmu.FreePhysicalRam(aPageList,aNumPages);
sl@0
   507
	RamAllocLock::Unlock();
sl@0
   508
	return KErrNone;
sl@0
   509
	}
sl@0
   510
sl@0
   511
sl@0
   512
/**
sl@0
   513
Allocate a specific block of physically contiguous RAM, specified by physical
sl@0
   514
base address and size.
sl@0
   515
If and when the RAM is no longer required it should be freed using
sl@0
   516
Epoc::FreePhysicalRam()
sl@0
   517
sl@0
   518
@param	aPhysAddr	The physical address of the base of the required block.
sl@0
   519
@param	aSize		The size in bytes of the required block. The specified size
sl@0
   520
					is rounded up to the page size, since only whole pages of
sl@0
   521
					physical RAM can be allocated.
sl@0
   522
@return	KErrNone if the operation was successful.
sl@0
   523
		KErrArgument if the range of physical addresses specified included some
sl@0
   524
					which are not valid physical RAM addresses.
sl@0
   525
		KErrInUse	if the range of physical addresses specified are all valid
sl@0
   526
					physical RAM addresses but some of them have already been
sl@0
   527
					allocated for other purposes.
sl@0
   528
@pre Calling thread must be in a critical section.
sl@0
   529
@pre Interrupts must be enabled.
sl@0
   530
@pre Kernel must be unlocked.
sl@0
   531
@pre No fast mutex can be held.
sl@0
   532
@pre Call in a thread context.
sl@0
   533
@pre Can be used in a device driver.
sl@0
   534
*/
sl@0
   535
EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
sl@0
   536
	{
sl@0
   537
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam");
sl@0
   538
	RamAllocLock::Lock();
sl@0
   539
	TInt r = TheMmu.ClaimPhysicalRam
sl@0
   540
		(
sl@0
   541
		aPhysAddr,
sl@0
   542
		MM::RoundToPageCount(aSize),
sl@0
   543
		(Mmu::TRamAllocFlags)EMemAttStronglyOrdered
sl@0
   544
		);
sl@0
   545
	RamAllocLock::Unlock();
sl@0
   546
	return r;
sl@0
   547
	}
sl@0
   548
sl@0
   549
sl@0
   550
/**
Translate a virtual address to the corresponding physical address.

@param	aLinAddr	The virtual address to be translated.
@return	The physical address corresponding to the given virtual address, or
		KPhysAddrInvalid if the specified virtual address is unmapped.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr)
	{
//	This precondition is violated by various parts of the system under some conditions,
//	e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by
//	a higher-level RTOS for which these conditions are meaningless. Thus, it's been
//	disabled for now.
//	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical");

	// When called by a higher-level OS we may not be in a DThread context, so avoid looking up the
	// current process in the DThread for a global address.  Addresses at or above
	// KGlobalMemoryBase are translated in the kernel's address space.
	TInt osAsid = KKernelOsAsid;
	if (aLinAddr < KGlobalMemoryBase)
		{
		// Get the os asid of current thread's process so no need to open a reference on it.
		DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
		osAsid = pP->OsAsid();
		}
	
#if 1
	// Unchecked variant: unlike the disabled alternative below, this does not
	// take the MmuLock around the translation.
	return Mmu::UncheckedLinearToPhysical(aLinAddr, osAsid);
#else
	MmuLock::Lock();
	TPhysAddr addr =  Mmu::LinearToPhysical(aLinAddr, osAsid);
	MmuLock::Unlock();
	return addr;
#endif
	}
sl@0
   588
sl@0
   589
sl@0
   590
//
sl@0
   591
// Misc
sl@0
   592
//
sl@0
   593
sl@0
   594
EXPORT_C TInt TInternalRamDrive::MaxSize()
sl@0
   595
	{
sl@0
   596
	TUint maxPages = (TUint(TheSuperPage().iRamDriveSize)>>KPageShift)+TheMmu.FreeRamInPages(); // current size plus spare memory
sl@0
   597
	TUint maxPages2 = TUint(PP::RamDriveMaxSize)>>KPageShift;
sl@0
   598
	if(maxPages>maxPages2)
sl@0
   599
		maxPages = maxPages2;
sl@0
   600
	return maxPages*KPageSize;
sl@0
   601
	}
sl@0
   602
sl@0
   603
sl@0
   604
/** Returns the size in bytes of an MMU page. */
TInt M::PageSizeInBytes()
	{
	return KPageSize;
	}
sl@0
   608
sl@0
   609
sl@0
   610
#ifdef BTRACE_KERNEL_MEMORY
sl@0
   611
void M::BTracePrime(TUint aCategory)
sl@0
   612
	{
sl@0
   613
	// TODO:
sl@0
   614
	}
sl@0
   615
#endif
sl@0
   616
sl@0
   617
sl@0
   618
sl@0
   619
//
sl@0
   620
// DPlatChunkHw
sl@0
   621
//
sl@0
   622
sl@0
   623
/**
sl@0
   624
Create a hardware chunk object, optionally mapping a specified block of physical
addresses with specified access permissions and cache policy.

When the mapping is no longer required, close the chunk using chunk->Close(0);
Note that closing a chunk does not free any RAM pages which were mapped by the
chunk - these must be freed separately using Epoc::FreePhysicalRam().

@param	aChunk	Upon successful completion this parameter receives a pointer to
				the newly created chunk. Upon unsuccessful completion it is
				written with a NULL pointer. The virtual address of the mapping
				can subsequently be discovered using the LinearAddress()
				function on the chunk.
@param	aAddr	The base address of the physical region to be mapped. This will
				be rounded down to a multiple of the hardware page size before
				being used.
@param	aSize	The size of the physical address region to be mapped. This will
				be rounded up to a multiple of the hardware page size before
				being used; the rounding is such that the entire range from
				aAddr to aAddr+aSize-1 inclusive is mapped. For example if
				aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an
				8KB range of physical addresses from 0xB0001000 to 0xB0002FFF
				inclusive will be mapped.
@param	aMapAttr Mapping attributes required for the mapping. This is formed
				by ORing together values from the TMappingAttributes enumeration
				to specify the access permissions and caching policy.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
@see TMappingAttributes
*/
sl@0
   658
EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
	__KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr));

	// Written with NULL up front so the output is well-defined on every
	// error-return path below.
	aChunk = NULL;

	// check size...
	if(aSize<=0)
		return KErrArgument;
	TPhysAddr end = aAddr+aSize-1;
	if(end<aAddr) // overflow?
		return KErrArgument;
	// Round the base down to a page boundary; pageCount then spans every page
	// touched by [aAddr,end], i.e. the range is rounded outwards at both ends.
	aAddr &= ~KPageMask;
	TUint pageCount = (end>>KPageShift)-(aAddr>>KPageShift)+1;

	// check attributes...
	// Translate the legacy TMappingAttributes word into the flexible memory
	// model's separate permission and memory-attribute values, rejecting any
	// combination the model cannot express.
	TMappingPermissions perm;
	TInt r = MM::MappingPermissions(perm,*(TMappingAttributes2*)&aMapAttr);
	if(r!=KErrNone)
		return r;
	TMemoryAttributes attr;
	r = MM::MemoryAttributes(attr,*(TMappingAttributes2*)&aMapAttr);
	if(r!=KErrNone)
		return r;

	// construct a hardware chunk...
	DMemModelChunkHw* pC = new DMemModelChunkHw;
	if(!pC)
		return KErrNoMemory;

	// set the executable flags based on the specified mapping permissions...
	TMemoryCreateFlags flags = EMemoryCreateDefault;
	if(perm&EExecute)
		flags = (TMemoryCreateFlags)(flags|EMemoryCreateAllowExecution);

	// Build up the chunk in three steps: create the memory object, attach the
	// contiguous physical range to it, then map it into the kernel's address
	// space. Each step only proceeds if the previous one succeeded.
	r = MM::MemoryNew(pC->iMemoryObject, EMemoryObjectHardware, pageCount, flags, attr);
	if(r==KErrNone)
		{
		r = MM::MemoryAddContiguous(pC->iMemoryObject,0,pageCount,aAddr);
		if(r==KErrNone)
			{
			r = MM::MappingNew(pC->iKernelMapping,pC->iMemoryObject,perm,KKernelOsAsid);
			if(r==KErrNone)
				{
				// Success - record the final (page-rounded) geometry on the chunk.
				pC->iPhysAddr = aAddr;
				pC->iLinAddr = MM::MappingBase(pC->iKernelMapping);
				pC->iSize = pageCount<<KPageShift;
				const TMappingAttributes2& lma = MM::LegacyMappingAttributes(attr,perm); // not needed, but keep in case someone uses this internal member
				*(TMappingAttributes2*)&pC->iAttribs = lma;
				}
			}
		}

	// On any failure, Close() tears down whichever of the memory object and
	// mapping were successfully created (see DMemModelChunkHw::Close).
	if(r==KErrNone)
		aChunk = pC;
	else
		pC->Close(NULL);
	return r;
	}
sl@0
   718
sl@0
   719
sl@0
   720
TInt DMemModelChunkHw::Close(TAny*)
	{
	__KTRACE_OPT2(KOBJECT,KMMU,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this));
	// Drop one reference; Dec() returns the count held before decrementing.
	const TInt prevCount = Dec();
	if(prevCount!=1)
		return prevCount;
	// That was the last reference - tear down the kernel mapping and the
	// underlying memory object, then free the chunk itself. Note this does
	// not free the physical RAM pages which were attached to the chunk.
	MM::MappingDestroy(iKernelMapping);
	MM::MemoryDestroy(iMemoryObject);
	DBase::Delete(this);
	return prevCount;
	}
sl@0
   732
sl@0
   733
sl@0
   734
sl@0
   735
//
sl@0
   736
// Demand Paging
sl@0
   737
//
sl@0
   738
sl@0
   739
#ifdef _DEBUG
sl@0
   740
/**
Debug hook called from assembler code to assert that taking a paging fault
is safe in the current context.

If M::CheckPagingSafe reports the access is unsafe, prints the caller's
program counter and link register then faults the system.

Fix: parameter was misspelled 'aStartAddres'.

@param aPC           Program counter of the asserting code, for the failure message.
@param aLR           Link register of the asserting code, for the failure message.
@param aStartAddress Start of the memory region about to be accessed.
@param aLength       Length in bytes of the memory region about to be accessed.
*/
extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)
	{
	if(M::CheckPagingSafe(EFalse, aStartAddress, aLength))
		return;
	Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR);
	__NK_ASSERT_ALWAYS(0);
	}
sl@0
   747
sl@0
   748
/**
Debug hook called from assembler code to check that taking a data-paging
fault is safe in the current context.

Unlike ASMCheckPagingSafe this does not fault the system on failure; it only
emits a KDATAPAGEWARN trace, since the check is advisory for data paging.

Fix: parameter was misspelled 'aStartAddres'.

@param aPC           Program counter of the asserting code, for the warning message.
@param aLR           Link register of the asserting code, for the warning message.
@param aStartAddress Start of the memory region about to be accessed.
@param aLength       Length in bytes of the memory region about to be accessed.
*/
extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)
	{
	if(M::CheckPagingSafe(ETrue, aStartAddress, aLength))
		return;
	__KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR));
	}
sl@0
   754
#endif
sl@0
   755
sl@0
   756
sl@0
   757
// Debug-build helper: returns the most recently acquired mutex held by the
// current thread if its order value is at or below KMutexOrdPageOut (i.e. it
// would violate mutex ordering to take a paging mutex now); otherwise NULL.
// In release builds this always returns NULL.
DMutex* CheckMutexOrder()
	{
#ifdef _DEBUG
	SDblQue& heldMutexes = TheCurrentThread->iMutexList;
	if(!heldMutexes.IsEmpty())
		{
		DMutex* firstHeld = _LOFF(heldMutexes.First(), DMutex, iOrderLink);
		if(firstHeld->iOrder <= KMutexOrdPageOut)
			return firstHeld;
		}
#endif
	return NULL;
	}
sl@0
   769
sl@0
   770
sl@0
   771
// Returns ETrue if it is safe to take a paging fault on the region
// [aStartAddr, aStartAddr+aLength) in the current context, EFalse otherwise.
// aDataPaging selects the (more lenient) data-paging variant of the check:
// violations there only make the check fail when data paging is actually
// enabled in the memory model.
TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength)
	{
	// Nothing can be paged while the kernel is still initialising.
	if(K::Initialising)
		return ETrue;
	
	NThread* nt = NCurrentThread();
	if(!nt)
		return ETrue; // We've not booted properly yet!

	if(aStartAddr>=KUserMemoryLimit)
		return ETrue; // kernel memory can't be paged

	// Unpaged ROM is always present, so accessing it can't fault.
	if(IsUnpagedRom(aStartAddr,aLength))
		return ETrue;

	TBool dataPagingEnabled = K::MemModelAttributes&EMemModelAttrDataPaging;

	// Holding a fast mutex is unsafe for paging, except for the special case
	// of holding the system lock with a paging exception trap installed.
	DThread* thread = _LOFF(nt,DThread,iNThread);
	NFastMutex* fm = NKern::HeldFastMutex();
	if(fm)
		{
		if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock)
			{
			if (!aDataPaging)
				{
				__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held"));
				return EFalse;
				}
			else
				{
				// For the data-paging check this is only fatal if data paging
				// is actually enabled; otherwise just warn.
				__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held"));
				return !dataPagingEnabled;
				}
			}
		}

	// Holding a mutex at or below the page-out order would deadlock against
	// the paging system (debug builds only - see CheckMutexOrder).
	DMutex* m = CheckMutexOrder();
	if (m)
		{
		if (!aDataPaging)
			{
			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m));
			return EFalse;
			}
		else
			{
			__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O mem=%x+%x",m,aStartAddr,aLength));
			return !dataPagingEnabled;
			}
		}
	
	return ETrue;
	}
sl@0
   824
sl@0
   825
sl@0
   826
sl@0
   827
EXPORT_C void DPagingDevice::NotifyIdle()
	{
	// Default implementation: does nothing.
	// NOTE(review): presumably a hook for derived paging devices to react
	// when the device goes idle - confirm against DPagingDevice's users.
	}
sl@0
   830
sl@0
   831
EXPORT_C void DPagingDevice::NotifyBusy()
	{
	// Default implementation: does nothing.
	// NOTE(review): presumably the counterpart of NotifyIdle, called when the
	// device becomes busy again - confirm against DPagingDevice's users.
	}