// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\mmubase\mmubase.cpp
//
//

#include <memmodel/epoc/mmubase/mmubase.h>
#include <mmubase.inl>
#include <ramcache.h>
#include <demand_paging.h>
#include "cache_maintenance.h"
#include "highrestimer.h"
#include <defrag.h>
#include <ramalloc.h>


__ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift));

_LIT(KLitRamAlloc,"RamAlloc");
_LIT(KLitHwChunk,"HwChunk");


DMutex* MmuBase::HwChunkMutex;
DMutex* MmuBase::RamAllocatorMutex;
#ifdef BTRACE_KERNEL_MEMORY
TInt   Epoc::DriverAllocdPhysRam = 0;
TInt   Epoc::KernelMiscPages = 0;
#endif

/******************************************************************************
 * Code common to all MMU memory models
 ******************************************************************************/

const TInt KFreePagesStepSize=16;

void MmuBase::Panic(TPanic aPanic)
	{
	Kern::Fault("MMUBASE",aPanic);
	}

void SPageInfo::Lock()
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Lock");
	++iLockCount;
	if(!iLockCount)
		MmuBase::Panic(MmuBase::EPageLockedTooManyTimes);
	}

TInt SPageInfo::Unlock()
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Unlock");
	if(!iLockCount)
		MmuBase::Panic(MmuBase::EPageUnlockedTooManyTimes);
	return --iLockCount;
	}

#ifdef _DEBUG
void SPageInfo::Set(TType aType, TAny* aOwner, TUint32 aOffset)
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Set");
	(TUint16&)iType = aType; // also sets iState to EStateNormal

	iOwner = aOwner;
	iOffset = aOffset;
	iModifier = 0;
	}

void SPageInfo::Change(TType aType,TState aState)
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Change");
	iType = aType;
	iState = aState;
	iModifier = 0;
	}

void SPageInfo::SetState(TState aState)
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::SetState");
	iState = aState;
	iModifier = 0;
	}

void SPageInfo::SetModifier(TAny* aModifier)
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::SetModifier");
	iModifier = aModifier;
	}

TInt SPageInfo::CheckModified(TAny* aModifier)
	{
	CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::CheckModified");
	return iModifier!=aModifier;
	}

void SPageInfo::SetZone(TUint8 aZoneIndex)
	{
	__ASSERT_ALWAYS(K::Initialising,Kern::Fault("SPageInfo::SetZone",0));
	iZone = aZoneIndex;
	}


#endif

MmuBase::MmuBase()
	: iRamCache(NULL), iDefrag(NULL)
	{
	}

TUint32 MmuBase::RoundToPageSize(TUint32 aSize)
	{
	return (aSize+KPageMask)&~KPageMask;
	}

TUint32 MmuBase::RoundToChunkSize(TUint32 aSize)
	{
	TUint32 mask=TheMmu->iChunkMask;
	return (aSize+mask)&~mask;
	}

TInt MmuBase::RoundUpRangeToPageSize(TUint32& aBase, TUint32& aSize)
	{
	TUint32 mask=KPageMask;
	TUint32 shift=KPageShift;
	TUint32 offset=aBase&mask;
	aBase&=~mask;
	aSize=(aSize+offset+mask)&~mask;
	return TInt(aSize>>shift);
	}
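
// Worked example of the rounding helpers above (illustrative, assuming the
// common 4KB page size, i.e. KPageShift==12 and KPageMask==0xFFF):
//   RoundToPageSize(0x1001)                            -> 0x2000
//   RoundUpRangeToPageSize(aBase=0x8123, aSize=0x1F00)
//     -> aBase=0x8000, aSize=0x3000, returns 3 (the number of pages spanned)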

void MmuBase::Wait()
	{
	Kern::MutexWait(*RamAllocatorMutex);
	if (RamAllocatorMutex->iHoldCount==1)
		{
		MmuBase& m=*TheMmu;
		m.iInitialFreeMemory=Kern::FreeRamInBytes();
		m.iAllocFailed=EFalse;
		}
	}

void MmuBase::Signal()
	{
	if (RamAllocatorMutex->iHoldCount>1)
		{
		Kern::MutexSignal(*RamAllocatorMutex);
		return;
		}
	MmuBase& m=*TheMmu;
	TInt initial=m.iInitialFreeMemory;
	TBool failed=m.iAllocFailed;
	TInt final=Kern::FreeRamInBytes();
	Kern::MutexSignal(*RamAllocatorMutex);
	K::CheckFreeMemoryLevel(initial,final,failed);
	}
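
// Locking pattern expected of callers (sketch only; see Wait()/Signal() above):
//
//   MmuBase::Wait();     // first holder snapshots free RAM, clears iAllocFailed
//   // ... allocate or free pages under the RAM allocator mutex ...
//   MmuBase::Signal();   // outermost release runs K::CheckFreeMemoryLevel()
//
// The mutex counts nested holds via iHoldCount, so inner Wait()/Signal() pairs
// skip the free-memory bookkeeping; only the outermost pair performs it.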

void MmuBase::WaitHwChunk()
	{
	Kern::MutexWait(*HwChunkMutex);
	}

void MmuBase::SignalHwChunk()
	{
	Kern::MutexSignal(*HwChunkMutex);
	}


void MmuBase::MapRamPage(TLinAddr aAddr, TPhysAddr aPage, TPte aPtePerm)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapRamPage %08x@%08x perm %08x", aPage, aAddr, aPtePerm));
	TInt ptid=PageTableId(aAddr);
	NKern::LockSystem();
	MapRamPages(ptid,SPageInfo::EInvalid,0,aAddr,&aPage,1,aPtePerm);
	NKern::UnlockSystem();
	}

//
// Unmap and free pages from a global area
//
void MmuBase::UnmapAndFree(TLinAddr aAddr, TInt aNumPages)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::UnmapAndFree(%08x,%d)",aAddr,aNumPages));
	while(aNumPages)
		{
		TInt pt_np=(iChunkSize-(aAddr&iChunkMask))>>iPageShift;
		TInt np=Min(aNumPages,pt_np);
		aNumPages-=np;
		TInt id=PageTableId(aAddr);
		if (id>=0)
			{
			while(np)
				{
				TInt np2=Min(np,KFreePagesStepSize);
				TPhysAddr phys[KFreePagesStepSize];
				TInt nptes;
				TInt nfree;
				NKern::LockSystem();
				UnmapPages(id,aAddr,np2,phys,true,nptes,nfree,NULL);
				NKern::UnlockSystem();
				if (nfree)
					{
					if (iDecommitThreshold)
						CacheMaintenanceOnDecommit(phys, nfree);
					iRamPageAllocator->FreeRamPages(phys,nfree,EPageFixed);
					}
				np-=np2;
				aAddr+=(np2<<iPageShift);
				}
			}
		else
			{
			aAddr+=(np<<iPageShift);
			}
		}
	}

void MmuBase::FreePages(TPhysAddr* aPageList, TInt aCount, TZonePageType aPageType)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePages(%08x,%d)",aPageList,aCount));
	if (!aCount)
		return;
	TBool sync_decommit = (TUint(aCount)<iDecommitThreshold);
	TPhysAddr* ppa=aPageList;
	TPhysAddr* ppaE=ppa+aCount;
	NKern::LockSystem();
	while (ppa<ppaE)
		{
		TPhysAddr pa=*ppa++;
		SPageInfo* pi=SPageInfo::SafeFromPhysAddr(pa);
		if (pi)
			{
			pi->SetUnused();
			if (pi->LockCount())
				ppa[-1]=KPhysAddrInvalid;	// don't free page if it's locked down
			else if (sync_decommit)
				{
				NKern::UnlockSystem();
				CacheMaintenanceOnDecommit(pa);
				NKern::LockSystem();
				}
			}
		if (!sync_decommit)
			NKern::FlashSystem();
		}
	NKern::UnlockSystem();
	if (iDecommitThreshold && !sync_decommit)
		CacheMaintenance::SyncPhysicalCache_All();
	iRamPageAllocator->FreeRamPages(aPageList,aCount, aPageType);
	}
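
// Design note: for small frees (aCount below iDecommitThreshold) the cache is
// maintained per page, dropping the system lock around each decommit call; for
// larger frees it is cheaper to mark every page unused first and then perform
// a single global CacheMaintenance::SyncPhysicalCache_All().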

TInt MmuBase::InitPageTableInfo(TInt aId)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::InitPageTableInfo(%x)",aId));
	TInt ptb=aId>>iPtBlockShift;
	if (++iPtBlockCount[ptb]==1)
		{
		// expand page table info array
		TPhysAddr pagePhys;
		if (AllocRamPages(&pagePhys,1, EPageFixed)!=KErrNone)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
			iPtBlockCount[ptb]=0;
			iAllocFailed=ETrue;
			return KErrNoMemory;
			}
#ifdef BTRACE_KERNEL_MEMORY
		BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
		++Epoc::KernelMiscPages;
#endif
		TLinAddr pil=PtInfoBlockLinAddr(ptb);
		NKern::LockSystem();
		SPageInfo::FromPhysAddr(pagePhys)->SetPtInfo(ptb);
		NKern::UnlockSystem();
		MapRamPage(pil, pagePhys, iPtInfoPtePerm);
		memclr((TAny*)pil, iPageSize);
		}
	return KErrNone;
	}

TInt MmuBase::DoAllocPageTable(TPhysAddr& aPhysAddr)
//
// Allocate a new page table but don't map it.
// Return page table id and page number/phys address of new page if any.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoAllocPageTable()"));
#ifdef _DEBUG
	if(K::CheckForSimulatedAllocFail())
		return KErrNoMemory;
#endif
	TInt id=iPageTableAllocator?iPageTableAllocator->Alloc():-1;
	if (id<0)
		{
		// need to allocate a new page
		if (AllocRamPages(&aPhysAddr,1, EPageFixed)!=KErrNone)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
			iAllocFailed=ETrue;
			return KErrNoMemory;
			}

		// allocate an ID for the new page
		id=iPageTableLinearAllocator->Alloc();
		if (id>=0)
			{
			id<<=iPtClusterShift;
			__KTRACE_OPT(KMMU,Kern::Printf("Allocated ID %04x",id));
			}
		if (id<0 || InitPageTableInfo(id)!=KErrNone)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page table info"));
			iPageTableLinearAllocator->Free(id>>iPtClusterShift);
			if (iDecommitThreshold)
				CacheMaintenanceOnDecommit(aPhysAddr);

			iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed);
			iAllocFailed=ETrue;
			return KErrNoMemory;
			}

		// Set up page info for new page
		NKern::LockSystem();
		SPageInfo::FromPhysAddr(aPhysAddr)->SetPageTable(id>>iPtClusterShift);
		NKern::UnlockSystem();
#ifdef BTRACE_KERNEL_MEMORY
		BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
		++Epoc::KernelMiscPages;
#endif
		// mark all subpages other than first as free for use as page tables
		if (iPtClusterSize>1)
			iPageTableAllocator->Free(id+1,iPtClusterSize-1);
		}
	else
		aPhysAddr=KPhysAddrInvalid;

	__KTRACE_OPT(KMMU,Kern::Printf("DoAllocPageTable returns %d (%08x)",id,aPhysAddr));
	PtInfo(id).SetUnused();
	return id;
	}
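
// Cluster arithmetic sketch (illustrative; assumes four page tables per page,
// i.e. iPtClusterShift==2 and iPtClusterSize==4):
//   iPageTableLinearAllocator->Alloc() returns page index P
//   first page table id = P<<2; subpage ids (P<<2)+1 .. (P<<2)+3 are released
//   into iPageTableAllocator so later allocations reuse the same page before
//   another RAM page has to be committed.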

TInt MmuBase::MapPageTable(TInt aId, TPhysAddr aPhysAddr, TBool aAllowExpand)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapPageTable(%d,%08x)",aId,aPhysAddr));
	TLinAddr ptLin=PageTableLinAddr(aId);
	TInt ptg=aId>>iPtGroupShift;
	if (++iPtGroupCount[ptg]==1)
		{
		// need to allocate a new page table
		__ASSERT_ALWAYS(aAllowExpand, Panic(EMapPageTableBadExpand));
		TPhysAddr xptPhys;
		TInt xptid=DoAllocPageTable(xptPhys);
		if (xptid<0)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate extra page table"));
			iPtGroupCount[ptg]=0;
			return KErrNoMemory;
			}
		if (xptPhys==KPhysAddrInvalid)
			xptPhys=aPhysAddr + ((xptid-aId)<<iPageTableShift);
		BootstrapPageTable(xptid, xptPhys, aId, aPhysAddr);	// initialise XPT and map it
		}
	else
		MapRamPage(ptLin, aPhysAddr, iPtPtePerm);
	return KErrNone;
	}

TInt MmuBase::AllocPageTable()
//
// Allocate a new page table, mapped at the correct linear address.
// Clear all entries to Not Present. Return page table id.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::AllocPageTable()"));
	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);

	TPhysAddr ptPhys;
	TInt id=DoAllocPageTable(ptPhys);
	if (id<0)
		return KErrNoMemory;
	if (ptPhys!=KPhysAddrInvalid)
		{
		TInt r=MapPageTable(id,ptPhys);
		if (r!=KErrNone)
			{
			DoFreePageTable(id);
			SPageInfo* pi=SPageInfo::FromPhysAddr(ptPhys);
			NKern::LockSystem();
			pi->SetUnused();
			NKern::UnlockSystem();
			if (iDecommitThreshold)
				CacheMaintenanceOnDecommit(ptPhys);

			iRamPageAllocator->FreeRamPage(ptPhys, EPageFixed);
			return r;
			}
		}
	ClearPageTable(id);
	__KTRACE_OPT(KMMU,Kern::Printf("AllocPageTable returns %d",id));
	return id;
	}

TBool MmuBase::DoFreePageTable(TInt aId)
//
// Free an empty page table. We assume that all pages mapped by the page table have
// already been unmapped and freed.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoFreePageTable(%d)",aId));
	SPageTableInfo& s=PtInfo(aId);
	__NK_ASSERT_DEBUG(!s.iCount); // shouldn't have any pages mapped
	s.SetUnused();

	TInt id=aId &~ iPtClusterMask;
	if (iPageTableAllocator)
		{
		iPageTableAllocator->Free(aId);
		if (iPageTableAllocator->NotFree(id,iPtClusterSize))
			{
			// some subpages still in use
			return ETrue;
			}
		__KTRACE_OPT(KMMU,Kern::Printf("Freeing whole page, id=%d",id));
		// whole page is now free
		// remove it from the page table allocator
		iPageTableAllocator->Alloc(id,iPtClusterSize);
		}

	TInt ptb=aId>>iPtBlockShift;
	if (--iPtBlockCount[ptb]==0)
		{
		// shrink page table info array
		TLinAddr pil=PtInfoBlockLinAddr(ptb);
		UnmapAndFree(pil,1);	// remove PTE, null page info, free page
#ifdef BTRACE_KERNEL_MEMORY
		BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
		--Epoc::KernelMiscPages;
#endif
		}

	// free the page table linear address
	iPageTableLinearAllocator->Free(id>>iPtClusterShift);
	return EFalse;
	}

void MmuBase::FreePageTable(TInt aId)
//
// Free an empty page table. We assume that all pages mapped by the page table have
// already been unmapped and freed.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePageTable(%d)",aId));
	if (DoFreePageTable(aId))
		return;

	TInt id=aId &~ iPtClusterMask;

	// calculate linear address of page
	TLinAddr ptLin=PageTableLinAddr(id);
	__KTRACE_OPT(KMMU,Kern::Printf("Page lin %08x",ptLin));

	// unmap and free the page
	UnmapAndFree(ptLin,1);
#ifdef BTRACE_KERNEL_MEMORY
	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
	--Epoc::KernelMiscPages;
#endif

	TInt ptg=aId>>iPtGroupShift;
	--iPtGroupCount[ptg];
	// don't shrink the page table mapping for now
	}

TInt MmuBase::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() size=%x align=%d",aSize,aAlign));
	TInt r=AllocContiguousRam(aSize, aPhysAddr, EPageFixed, aAlign);
	if (r!=KErrNone)
		{
		iAllocFailed=ETrue;
		return r;
		}
	TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
	SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* pE=pI+n;
	for (; pI<pE; ++pI)
		{
		NKern::LockSystem();
		__NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused);
		pI->Lock();
		NKern::UnlockSystem();
		}
	return KErrNone;
	}

/** Attempt to allocate a contiguous block of RAM from the specified zones.

@param aZoneIdList	An array of the IDs of the RAM zones to allocate from.
@param aZoneIdCount	The number of RAM zone IDs listed in aZoneIdList.
@param aSize 		The number of contiguous bytes to allocate
@param aPhysAddr 	The physical address of the start of the contiguous block of
					memory allocated
@param aAlign		Required alignment
@return KErrNone on success, KErrArgument if zone doesn't exist or aSize is larger than the
size of the RAM zone or KErrNoMemory when the RAM zone is too full.
*/
TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() size=0x%x align=%d", aSize, aAlign));
	TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, EPageFixed, aAlign);
	if (r!=KErrNone)
		{
		iAllocFailed=ETrue;
		return r;
		}
	TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
	SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* pE=pI+n;
	for (; pI<pE; ++pI)
		{
		NKern::LockSystem();
		__NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused);
		pI->Lock();
		NKern::UnlockSystem();
		}
	return KErrNone;
	}
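
// Usage sketch (zone IDs here are hypothetical; real callers hold the RAM
// allocator mutex via MmuBase::Wait()/Signal()):
//   TUint zones[2] = { 1, 2 };         // IDs as defined by the base port
//   TPhysAddr pa;
//   TInt r = MmuBase::TheMmu->ZoneAllocPhysicalRam(zones, 2, 4*KPageSize, pa, 0);
//   // on KErrNone, pa is the base of four contiguous, locked, fixed pages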


/** Attempt to allocate discontiguous RAM pages.

@param aNumPages	The number of pages to allocate.
@param aPageList 	Pointer to an array where each element will be the physical
					address of each page allocated.
@return KErrNone on success, KErrNoMemory otherwise
*/
TInt MmuBase::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() numpages=%x", aNumPages));
	TInt r = AllocRamPages(aPageList, aNumPages, EPageFixed);
	if (r!=KErrNone)
		{
		iAllocFailed=ETrue;
		return r;
		}
	TPhysAddr* pageEnd = aPageList + aNumPages;
	for (TPhysAddr* page = aPageList; page < pageEnd; page++)
		{
		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
		NKern::LockSystem();
		__NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused);
		pageInfo->Lock();
		NKern::UnlockSystem();
		}
	return KErrNone;
	}


/** Attempt to allocate discontiguous RAM pages from the specified RAM zones.

@param aZoneIdList	An array of the IDs of the RAM zones to allocate from.
@param aZoneIdCount	The number of RAM zone IDs listed in aZoneIdList.
@param aNumPages	The number of pages to allocate.
@param aPageList 	Pointer to an array where each element will be the physical
					address of each page allocated.
@return KErrNone on success, KErrArgument if zone doesn't exist or aNumPages is
larger than the total number of pages in the RAM zone or KErrNoMemory when the RAM
zone is too full.
*/
TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() numpages 0x%x zones 0x%x", aNumPages, aZoneIdCount));
	TInt r = ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed);
	if (r!=KErrNone)
		{
		iAllocFailed=ETrue;
		return r;
		}

	TPhysAddr* pageEnd = aPageList + aNumPages;
	for (TPhysAddr* page = aPageList; page < pageEnd; page++)
		{
		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
		NKern::LockSystem();
		__NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused);
		pageInfo->Lock();
		NKern::UnlockSystem();
		}
	return KErrNone;
	}


TInt MmuBase::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%x)",aPhysAddr,aSize));

	TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
	SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* pE=pI+n;
	for (; pI<pE; ++pI)
		{
		NKern::LockSystem();
		__ASSERT_ALWAYS(pI->Type()==SPageInfo::EUnused && pI->Unlock()==0, Panic(EBadFreePhysicalRam));
		NKern::UnlockSystem();
		}
	TInt r=iRamPageAllocator->FreePhysicalRam(aPhysAddr, aSize);
	return r;
	}

/** Free discontiguous RAM pages that were previously allocated using the discontiguous
overload of MmuBase::AllocPhysicalRam() or MmuBase::ZoneAllocPhysicalRam().

Specifying one of the following may cause the system to panic:
a) an invalid physical RAM address.
b) valid physical RAM addresses where some had not been previously allocated.
c) an address not aligned to a page boundary.

@param aNumPages	Number of pages to free
@param aPageList	Array of the physical address of each page to free

@return KErrNone if the operation was successful.
*/
TInt MmuBase::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%08x)", aNumPages, aPageList));

	TPhysAddr* pageEnd = aPageList + aNumPages;
	TInt r = KErrNone;

	for (TPhysAddr* page = aPageList; page < pageEnd && r == KErrNone; page++)
		{
		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
		NKern::LockSystem();
		__ASSERT_ALWAYS(pageInfo->Type()==SPageInfo::EUnused && pageInfo->Unlock()==0, Panic(EBadFreePhysicalRam));
		NKern::UnlockSystem();

		// Free the page
		r = iRamPageAllocator->FreePhysicalRam(*page, KPageSize);
		}
	return r;
	}


TInt MmuBase::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(%08x,%x)",aPhysAddr,aSize));
	TUint32 pa=aPhysAddr;
	TUint32 size=aSize;
	TInt n=RoundUpRangeToPageSize(pa,size);
	TInt r=iRamPageAllocator->ClaimPhysicalRam(pa, size);
	if (r==KErrNone)
		{
		SPageInfo* pI=SPageInfo::FromPhysAddr(pa);
		SPageInfo* pE=pI+n;
		for (; pI<pE; ++pI)
			{
			NKern::LockSystem();
			__NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused && pI->LockCount()==0);
			pI->Lock();
			NKern::UnlockSystem();
			}
		}
	return r;
	}

/**
Allocate a set of discontiguous RAM pages from the specified zones.

@param aZoneIdList	The array of IDs of the RAM zones to allocate from.
@param aZoneIdCount	The number of RAM zone IDs in aZoneIdList.
@param aPageList 	Preallocated array of TPhysAddr elements that will receive the
physical address of each page allocated.
@param aNumPages 	The number of pages to allocate.
@param aPageType 	The type of the pages being allocated.

@return KErrNone on success, KErrArgument if a zone of aZoneIdList doesn't exist,
KErrNoMemory if there aren't enough free pages in the zone
*/
TInt MmuBase::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType)
	{
#ifdef _DEBUG
	if(K::CheckForSimulatedAllocFail())
		return KErrNoMemory;
#endif
	__NK_ASSERT_DEBUG(aPageType == EPageFixed);

	return iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, aPageType);
	}


TInt MmuBase::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType, TUint aBlockedZoneId, TBool aBlockRest)
	{
#ifdef _DEBUG
	if(K::CheckForSimulatedAllocFail())
		return KErrNoMemory;
#endif
	TInt missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest);

	// If missing some pages, ask the RAM cache to donate some of its pages.
	// Don't ask it for discardable pages as those are intended for itself.
	if(missing && aPageType != EPageDiscard && iRamCache->GetFreePages(missing))
		missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest);
	return missing ? KErrNoMemory : KErrNone;
	}


TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
	{
#ifdef _DEBUG
	if(K::CheckForSimulatedAllocFail())
		return KErrNoMemory;
#endif
	__NK_ASSERT_DEBUG(aPageType == EPageFixed);
	TUint contigPages = (aSize + KPageSize - 1) >> KPageShift;
	TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
	if (r == KErrNoMemory && contigPages > KMaxFreeableContiguousPages)
		{// Allocation failed; as this is a large allocation, flush the RAM cache
		// and reattempt it, since a large allocation wouldn't have discarded pages.
		iRamCache->FlushAll();
		r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
		}
	return r;
	}


/**
Allocate contiguous RAM from the specified RAM zones.
@param aZoneIdList	An array of IDs of the RAM zones to allocate from
@param aZoneIdCount	The number of IDs listed in aZoneIdList
@param aSize		The number of bytes to allocate
@param aPhysAddr 	Will receive the physical base address of the allocated RAM
@param aPageType 	The type of the pages being allocated
@param aAlign 		The log base 2 alignment required
*/
TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign)
	{
#ifdef _DEBUG
	if(K::CheckForSimulatedAllocFail())
		return KErrNoMemory;
#endif
	return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aPageType, aAlign);
	}

SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
	{
	TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift);
	TUint flags = ((TUint8*)KPageInfoMap)[index>>3];
	TUint mask = 1<<(index&7);
	if(!(flags&mask))
		return 0; // no SPageInfo for aAddress
	SPageInfo* info = FromPhysAddr(aAddress);
	if(info->Type()==SPageInfo::EInvalid)
		return 0;
	return info;
	}
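
// Lookup sketch for the bitmap test above (illustrative; with KPageShift==12
// and KPageInfoShift==5, one page-info page describes 128 pages, i.e. 512KB of
// physical address space):
//   index = aAddress >> (12+12-5)                  // which page-info page
//   bit   = KPageInfoMap[index>>3] & (1<<(index&7))
// A clear bit means no page-info page exists for that physical region, so NULL
// is returned instead of dereferencing an unmapped SPageInfo entry.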

/** HAL Function wrapper for the RAM allocator.
 */
TInt RamHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
	{
	DRamAllocator *pRamAlloc = MmuBase::TheMmu->iRamPageAllocator;

	if (pRamAlloc)
		return pRamAlloc->HalFunction(aFunction, a1, a2);
	return KErrNotSupported;
	}


/******************************************************************************
 * Initialisation
 ******************************************************************************/

void MmuBase::Init1()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init1"));
	iInitialFreeMemory=0;
	iAllocFailed=EFalse;
	}

void MmuBase::Init2()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init2"));
	TInt total_ram=TheSuperPage().iTotalRamSize;
	TInt total_ram_pages=total_ram>>iPageShift;
	iNumPages = total_ram_pages;
	const SRamInfo& info=*(const SRamInfo*)TheSuperPage().iRamBootData;
	iRamPageAllocator=DRamAllocator::New(info, RamZoneConfig, RamZoneCallback);

	TInt max_pt=total_ram>>iPageTableShift;
	if (max_pt<iMaxPageTables)
		iMaxPageTables=max_pt;
	iMaxPageTables &= ~iPtClusterMask;
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iMaxPageTables=%d",iMaxPageTables));
	TInt max_ptpg=iMaxPageTables>>iPtClusterShift;
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptpg=%d",max_ptpg));
	iPageTableLinearAllocator=TBitMapAllocator::New(max_ptpg,ETrue);
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableLinearAllocator=%08x",iPageTableLinearAllocator));
	__ASSERT_ALWAYS(iPageTableLinearAllocator,Panic(EPtLinAllocCreateFailed));
	if (iPtClusterShift)	// if more than one page table per page
		{
		iPageTableAllocator=TBitMapAllocator::New(iMaxPageTables,EFalse);
		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableAllocator=%08x",iPageTableAllocator));
		__ASSERT_ALWAYS(iPageTableAllocator,Panic(EPtAllocCreateFailed));
		}
	TInt max_ptb=(iMaxPageTables+iPtBlockMask)>>iPtBlockShift;
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptb=%d",max_ptb));
	iPtBlockCount=(TInt*)Kern::AllocZ(max_ptb*sizeof(TInt));
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtBlockCount=%08x",iPtBlockCount));
	__ASSERT_ALWAYS(iPtBlockCount,Panic(EPtBlockCountCreateFailed));
	TInt max_ptg=(iMaxPageTables+iPtGroupMask)>>iPtGroupShift;
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ptg_shift=%d, max_ptg=%d",iPtGroupShift,max_ptg));
	iPtGroupCount=(TInt*)Kern::AllocZ(max_ptg*sizeof(TInt));
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtGroupCount=%08x",iPtGroupCount));
	__ASSERT_ALWAYS(iPtGroupCount,Panic(EPtGroupCountCreateFailed));


	// Clear the initial (and only so far) page table info page so all unused
	// page tables will be marked as unused.
	memclr((TAny*)KPageTableInfoBase, KPageSize);

	// look for page tables - assume first page table (id=0) maps page tables
	TPte* pPte=(TPte*)iPageTableLinBase;
	TInt i;
	for (i=0; i<iChunkSize/iPageSize; ++i)
		{
		TPte pte=*pPte++;
		if (!PteIsPresent(pte))	// after boot, page tables are contiguous
			break;
		iPageTableLinearAllocator->Alloc(i,1);
		TPhysAddr ptpgPhys=PtePhysAddr(pte, i);
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys);
		__ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot));
		pi->SetPageTable(i);
		pi->Lock();
		TInt id=i<<iPtClusterShift;
		TInt ptb=id>>iPtBlockShift;
		++iPtBlockCount[ptb];
		TInt ptg=id>>iPtGroupShift;
		++iPtGroupCount[ptg];
		}

	// look for mapped pages
	TInt npdes=1<<(32-iChunkShift);
	TInt npt=0;
	for (i=0; i<npdes; ++i)
		{
		TLinAddr cAddr=TLinAddr(i<<iChunkShift);
		if (cAddr>=PP::RamDriveStartAddress && TUint32(cAddr-PP::RamDriveStartAddress)<TUint32(PP::RamDriveRange))
			continue;	// leave RAM drive for now
		TInt ptid=PageTableId(cAddr);
		TPhysAddr pdePhys = PdePhysAddr(cAddr);	// check for whole PDE mapping
		pPte = NULL;
		if (ptid>=0)
			{
			++npt;
			__KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> page table %d", cAddr, ptid));
			pPte=(TPte*)PageTableLinAddr(ptid);
			}
#ifdef KMMU
		if (pdePhys != KPhysAddrInvalid)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", cAddr, pdePhys));
			}
#endif
		if (ptid>=0 || pdePhys != KPhysAddrInvalid)
			{
			TInt j;
			TInt np=0;
			for (j=0; j<iChunkSize/iPageSize; ++j)
				{
				TBool present = ETrue;	// all pages present if whole PDE mapping
				TPte pte = 0;
				if (pPte)
					{
					pte = pPte[j];
					present = PteIsPresent(pte);
					}
				if (present)
					{
					++np;
					TPhysAddr pa = pPte ? PtePhysAddr(pte, j) : (pdePhys + (j<<iPageShift));
					SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
					__KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x PA=%08x",
														cAddr+(j<<iPageShift), pa));
					if (pi)	// ignore non-RAM mappings
						{// these pages will never be freed and can't be moved
						TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed);
						// allow KErrAlreadyExists since it's possible that a page is doubly mapped
						__ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
						SetupInitialPageInfo(pi,cAddr,j);
#ifdef BTRACE_KERNEL_MEMORY
						if(r==KErrNone)
							++Epoc::KernelMiscPages;
#endif
						}
					}
				}
			__KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x #PTEs=%d",cAddr,np));
			if (ptid>=0)
				SetupInitialPageTableInfo(ptid,cAddr,np);
			}
		}

	TInt oddpt=npt & iPtClusterMask;
	if (oddpt)
		oddpt=iPtClusterSize-oddpt;
	__KTRACE_OPT(KBOOT,Kern::Printf("Total page tables %d, left over subpages %d",npt,oddpt));
	if (oddpt)
		iPageTableAllocator->Free(npt,oddpt);

	DoInit2();

	// Save current free RAM size - there can never be more free RAM than this
	TInt max_free = Kern::FreeRamInBytes();
	K::MaxFreeRam = max_free;
	if (max_free < PP::RamDriveMaxSize)
		PP::RamDriveMaxSize = max_free;

	if (K::ColdStart)
		ClearRamDrive(PP::RamDriveStartAddress);
	else
		RecoverRamDrive();

	TInt r=K::MutexCreate((DMutex*&)RamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc);
	if (r!=KErrNone)
		Panic(ERamAllocMutexCreateFailed);
	r=K::MutexCreate((DMutex*&)HwChunkMutex, KLitHwChunk, NULL, EFalse, KMutexOrdHwChunk);
	if (r!=KErrNone)
		Panic(EHwChunkMutexCreateFailed);

#ifdef __DEMAND_PAGING__
	if (DemandPaging::RomPagingRequested() || DemandPaging::CodePagingRequested())
		iRamCache = DemandPaging::New();
	else
		iRamCache = new RamCache;
#else
	iRamCache = new RamCache;
#endif
	if (!iRamCache)
		Panic(ERamCacheAllocFailed);
	iRamCache->Init2();
	RamCacheBase::TheRamCache = iRamCache;

	// Get the allocator to signal to the variant which RAM zones are in use so far
	iRamPageAllocator->InitialCallback();
	}

void MmuBase::Init3()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init3"));

	// Initialise demand paging
#ifdef __DEMAND_PAGING__
	M::DemandPagingInit();
#endif

	// Register a HAL Function for the Ram allocator.
	TInt r = Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	//
	// Perform the initialisation for page moving and RAM defrag object.
	//

	// allocate a page to use as an alt stack
	MmuBase::Wait();
	TPhysAddr stackpage;
	r = AllocPhysicalRam(KPageSize, stackpage);
	MmuBase::Signal();
	if (r!=KErrNone)
		Panic(EDefragStackAllocFailed);

	// map it at a predetermined address
	TInt ptid = PageTableId(KDefragAltStackAddr);
	TPte perm = PtePermissions(EKernelStack);
	NKern::LockSystem();
	MapRamPages(ptid, SPageInfo::EFixed, NULL, KDefragAltStackAddr, &stackpage, 1, perm);
	NKern::UnlockSystem();
	iAltStackBase = KDefragAltStackAddr + KPageSize;

	__KTRACE_OPT(KMMU,Kern::Printf("Allocated defrag alt stack page at %08x, mapped to %08x, base is now %08x", stackpage, KDefragAltStackAddr, iAltStackBase));

	// Create the actual defrag object and initialise it.
	iDefrag = new Defrag;
	if (!iDefrag)
		Panic(EDefragAllocFailed);
	iDefrag->Init3(iRamPageAllocator);
	}

void MmuBase::CreateKernelSection(TLinAddr aEnd, TInt aHwChunkAlign)
	{
	TLinAddr base=(TLinAddr)TheRomHeader().iKernelLimit;
	iKernelSection=TLinearSection::New(base, aEnd);
	__ASSERT_ALWAYS(iKernelSection!=NULL, Panic(ECreateKernelSectionFailed));
	iHwChunkAllocator=THwChunkAddressAllocator::New(aHwChunkAlign, iKernelSection);
	__ASSERT_ALWAYS(iHwChunkAllocator!=NULL, Panic(ECreateHwChunkAllocFailed));
	}

// Recover RAM drive contents after a reset
TInt MmuBase::RecoverRamDrive()
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::RecoverRamDrive()"));
	TLinAddr ptlin;
	TLinAddr chunk = PP::RamDriveStartAddress;
	TLinAddr end = chunk + (TLinAddr)PP::RamDriveRange;
	TInt size = 0;
	TInt limit = RoundToPageSize(TheSuperPage().iRamDriveSize);
	for( ; chunk<end; chunk+=iChunkSize)
		{
		if (size==limit)		// have reached end of ram drive
			break;
		TPhysAddr ptphys = 0;
		TInt ptid = BootPageTableId(chunk, ptphys);	// ret KErrNotFound if PDE not present, KErrUnknown if present but as yet unknown page table
		__KTRACE_OPT(KMMU,Kern::Printf("Addr %08x: PTID=%d PTPHYS=%08x", chunk, ptid, ptphys));
		if (ptid==KErrNotFound)
			break;		// no page table so stop here and clear to end of range
		TPhysAddr ptpgphys = ptphys & ~iPageMask;
		TInt r = iRamPageAllocator->MarkPageAllocated(ptpgphys, EPageMovable);
		__KTRACE_OPT(KMMU,Kern::Printf("MPA: r=%d",r));
		if (r==KErrArgument)
			break;		// page table address was invalid - stop here and clear to end of range
		if (r==KErrNone)
			{
			// this page was currently unallocated
			if (ptid>=0)
				break;	// ID has been allocated - bad news - bail here
			ptid = iPageTableLinearAllocator->Alloc();
			__ASSERT_ALWAYS(ptid>=0, Panic(ERecoverRamDriveAllocPTIDFailed));
			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgphys);
			__ASSERT_ALWAYS(pi, Panic(ERecoverRamDriveBadPageTable));
			pi->SetPageTable(ptid);	// id = cluster number here
			ptid <<= iPtClusterShift;
			MapPageTable(ptid, ptpgphys, EFalse);
			if (iPageTableAllocator)
				iPageTableAllocator->Free(ptid, iPtClusterSize);
			ptid |= ((ptphys>>iPageTableShift)&iPtClusterMask);
			ptlin = PageTableLinAddr(ptid);
			__KTRACE_OPT(KMMU,Kern::Printf("Page table ID %d lin %08x", ptid, ptlin));
			if (iPageTableAllocator)
				iPageTableAllocator->Alloc(ptid, 1);
			}
		else
			{
			// this page was already allocated
			if (ptid<0)
				break;	// ID not allocated - bad news - bail here
			ptlin = PageTableLinAddr(ptid);
			__KTRACE_OPT(KMMU,Kern::Printf("Page table lin %08x", ptlin));
			if (iPageTableAllocator)
				iPageTableAllocator->Alloc(ptid, 1);
			}
		TInt pte_index;
		TBool chunk_inc = 0;
		TPte* page_table = (TPte*)ptlin;
		for (pte_index=0; pte_index<(iChunkSize>>iPageShift); ++pte_index)
			{
			if (size==limit)		// have reached end of ram drive
				break;
			TPte pte = page_table[pte_index];
			if (PteIsPresent(pte))
				{
				TPhysAddr pa=PtePhysAddr(pte, pte_index);
				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
				if (!pi)
					break;
				TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageMovable);
				__ASSERT_ALWAYS(r==KErrNone, Panic(ERecoverRamDriveBadPage));
				size+=iPageSize;
				chunk_inc = iChunkSize;
				}
			}
		if (pte_index < (iChunkSize>>iPageShift))
			{
			// if we recovered pages in this page table, leave it in place
			chunk += chunk_inc;

			// clear from here on
			ClearPageTable(ptid, pte_index);
			break;
			}
		}
	if (chunk < end)
		ClearRamDrive(chunk);
	__KTRACE_OPT(KMMU,Kern::Printf("Recovered RAM drive size %08x",size));
	if (size<TheSuperPage().iRamDriveSize)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Truncating RAM drive from %08x to %08x", TheSuperPage().iRamDriveSize, size));
		TheSuperPage().iRamDriveSize=size;
		}
	return KErrNone;
	}
  1103
sl@0
  1104
TInt MmuBase::AllocShadowPage(TLinAddr aRomAddr)
sl@0
  1105
	{
sl@0
  1106
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase:AllocShadowPage(%08x)", aRomAddr));
sl@0
  1107
	aRomAddr &= ~iPageMask;
sl@0
  1108
	TPhysAddr orig_phys = KPhysAddrInvalid;
sl@0
  1109
	if (aRomAddr>=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize))
sl@0
  1110
		orig_phys = LinearToPhysical(aRomAddr);
sl@0
  1111
	__KTRACE_OPT(KMMU,Kern::Printf("OrigPhys = %08x",orig_phys));
sl@0
  1112
	if (orig_phys == KPhysAddrInvalid)
sl@0
  1113
		{
sl@0
  1114
		__KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
sl@0
  1115
		return KErrArgument;
sl@0
  1116
		}
sl@0
  1117
	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(orig_phys);
sl@0
  1118
	if (pi && pi->Type()==SPageInfo::EShadow)
sl@0
  1119
		{
sl@0
  1120
		__KTRACE_OPT(KMMU,Kern::Printf("ROM address already shadowed"));
sl@0
  1121
		return KErrAlreadyExists;
sl@0
  1122
		}
sl@0
  1123
	TInt ptid = PageTableId(aRomAddr);
sl@0
  1124
	__KTRACE_OPT(KMMU, Kern::Printf("Shadow PTID %d", ptid));
sl@0
  1125
	TInt newptid = -1;
sl@0
  1126
	if (ptid<0)
sl@0
  1127
		{
sl@0
  1128
		newptid = AllocPageTable();
sl@0
  1129
		__KTRACE_OPT(KMMU, Kern::Printf("New shadow PTID %d", newptid));
sl@0
  1130
		if (newptid<0)
sl@0
  1131
			return KErrNoMemory;
sl@0
  1132
		ptid = newptid;
sl@0
  1133
		PtInfo(ptid).SetShadow( (aRomAddr-iRomLinearBase)>>iChunkShift );
sl@0
  1134
		InitShadowPageTable(ptid, aRomAddr, orig_phys);
sl@0
  1135
		}
sl@0
  1136
	TPhysAddr shadow_phys;
sl@0
  1137
sl@0
  1138
	if (AllocRamPages(&shadow_phys, 1, EPageFixed) != KErrNone)
sl@0
  1139
		{
sl@0
  1140
		__KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
sl@0
  1141
		iAllocFailed=ETrue;
sl@0
  1142
		if (newptid>=0)
sl@0
  1143
			{
sl@0
  1144
			FreePageTable(newptid);
sl@0
  1145
			}
sl@0
  1146
		return KErrNoMemory;
sl@0
  1147
		}
sl@0
  1148
#ifdef BTRACE_KERNEL_MEMORY
sl@0
  1149
	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
sl@0
  1150
	++Epoc::KernelMiscPages;
sl@0
  1151
#endif
sl@0
  1152
	InitShadowPage(shadow_phys, aRomAddr);	// copy original ROM contents
sl@0
  1153
	NKern::LockSystem();
sl@0
  1154
	Pagify(ptid, aRomAddr);
sl@0
  1155
	MapRamPages(ptid, SPageInfo::EShadow, (TAny*)orig_phys, (aRomAddr-iRomLinearBase), &shadow_phys, 1, iShadowPtePerm);
sl@0
  1156
	NKern::UnlockSystem();
sl@0
  1157
	if (newptid>=0)
sl@0
  1158
		{
sl@0
  1159
		NKern::LockSystem();
sl@0
  1160
		AssignShadowPageTable(newptid, aRomAddr);
sl@0
  1161
		NKern::UnlockSystem();
sl@0
  1162
		}
sl@0
  1163
	FlushShadow(aRomAddr);
sl@0
  1164
	__KTRACE_OPT(KMMU,Kern::Printf("AllocShadowPage successful"));
sl@0
  1165
	return KErrNone;
sl@0
  1166
	}
sl@0
  1167
sl@0
  1168
TInt MmuBase::FreeShadowPage(TLinAddr aRomAddr)
sl@0
  1169
	{
sl@0
  1170
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreeShadowPage(%08x)", aRomAddr));
sl@0
  1171
	aRomAddr &= ~iPageMask;
sl@0
  1172
	TPhysAddr shadow_phys = KPhysAddrInvalid;
sl@0
  1173
	if (aRomAddr>=iRomLinearBase || aRomAddr<=(iRomLinearEnd-iPageSize))
sl@0
  1174
		shadow_phys = LinearToPhysical(aRomAddr);
sl@0
  1175
	__KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys));
sl@0
  1176
	if (shadow_phys == KPhysAddrInvalid)
sl@0
  1177
		{
sl@0
  1178
		__KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
sl@0
  1179
		return KErrArgument;
sl@0
  1180
		}
sl@0
  1181
	TInt ptid = PageTableId(aRomAddr);
sl@0
  1182
	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys);
sl@0
  1183
	if (ptid<0 || !pi || pi->Type()!=SPageInfo::EShadow)
sl@0
  1184
		{
sl@0
  1185
		__KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address"));
sl@0
  1186
		return KErrGeneral;
sl@0
  1187
		}
sl@0
  1188
	TPhysAddr orig_phys = (TPhysAddr)pi->Owner();
sl@0
  1189
	DoUnmapShadowPage(ptid, aRomAddr, orig_phys);
sl@0
  1190
	SPageTableInfo& pti = PtInfo(ptid);
sl@0
  1191
	if (pti.Attribs()==SPageTableInfo::EShadow && --pti.iCount==0)
sl@0
  1192
		{
sl@0
  1193
		TInt r = UnassignShadowPageTable(aRomAddr, orig_phys);
sl@0
  1194
		if (r==KErrNone)
sl@0
  1195
			FreePageTable(ptid);
sl@0
  1196
		else
sl@0
  1197
			pti.SetGlobal(aRomAddr>>iChunkShift);
sl@0
  1198
		}
sl@0
  1199
sl@0
  1200
	FreePages(&shadow_phys, 1, EPageFixed);
sl@0
  1201
	__KTRACE_OPT(KMMU,Kern::Printf("FreeShadowPage successful"));
sl@0
  1202
#ifdef BTRACE_KERNEL_MEMORY
sl@0
  1203
	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
sl@0
  1204
	--Epoc::KernelMiscPages;
sl@0
  1205
#endif
sl@0
  1206
	return KErrNone;
sl@0
  1207
	}
sl@0
  1208
sl@0
  1209
TInt MmuBase::FreezeShadowPage(TLinAddr aRomAddr)
sl@0
  1210
	{
sl@0
  1211
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreezeShadowPage(%08x)", aRomAddr));
sl@0
  1212
	aRomAddr &= ~iPageMask;
sl@0
  1213
	TPhysAddr shadow_phys = KPhysAddrInvalid;
sl@0
  1214
	if (aRomAddr>=iRomLinearBase || aRomAddr<=(iRomLinearEnd-iPageSize))
sl@0
  1215
		shadow_phys = LinearToPhysical(aRomAddr);
sl@0
  1216
	__KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys));
sl@0
  1217
	if (shadow_phys == KPhysAddrInvalid)
sl@0
  1218
		{
sl@0
  1219
		__KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
sl@0
  1220
		return KErrArgument;
sl@0
  1221
		}
sl@0
  1222
	TInt ptid = PageTableId(aRomAddr);
sl@0
  1223
	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys);
sl@0
  1224
	if (ptid<0 || pi==0)
sl@0
  1225
		{
sl@0
  1226
		__KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address"));
sl@0
  1227
		return KErrGeneral;
sl@0
  1228
		}
sl@0
  1229
	DoFreezeShadowPage(ptid, aRomAddr);
sl@0
  1230
	__KTRACE_OPT(KMMU,Kern::Printf("FreezeShadowPage successful"));
sl@0
  1231
	return KErrNone;
sl@0
  1232
	}
sl@0
  1233
sl@0
  1234
TInt MmuBase::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
sl@0
  1235
	{
sl@0
  1236
	memcpy ((TAny*)aDest, (const TAny*)aSrc, aLength);
sl@0
  1237
	return KErrNone;
sl@0
  1238
	}
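
// Shadow-page lifecycle sketch (illustrative; e.g. a debugger patching XIP ROM;
// romAddr, patchSrc and patchLen are hypothetical):
//   MmuBase::Wait();                              // RAM allocator mutex held
//   TInt r = TheMmu->AllocShadowPage(romAddr);    // RAM copy mapped over ROM
//   if (r == KErrNone)
//       TheMmu->CopyToShadowMemory(romAddr, patchSrc, patchLen);
//   MmuBase::Signal();
//   // ... later, FreezeShadowPage() to make the copy read-only, or
//   // FreeShadowPage() to restore the original ROM mapping.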

void M::BTracePrime(TUint aCategory)
	{
	(void)aCategory;

#ifdef BTRACE_KERNEL_MEMORY
	// Must check for -1 as that is the default value of aCategory for
	// BTrace::Prime() which is intended to prime all categories that are
	// currently enabled via a single invocation of BTrace::Prime().
	if(aCategory==BTrace::EKernelMemory || (TInt)aCategory == -1)
		{
		NKern::ThreadEnterCS();
		Mmu::Wait();
		BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryInitialFree,TheSuperPage().iTotalRamSize);
		BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryCurrentFree,Kern::FreeRamInBytes());
		BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, Epoc::KernelMiscPages<<KPageShift);
		#ifdef __DEMAND_PAGING__
		if (DemandPaging::ThePager)
			BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,DemandPaging::ThePager->iMinimumPageCount << KPageShift);
		#endif
		BTrace8(BTrace::EKernelMemory,BTrace::EKernelMemoryDrvPhysAlloc, Epoc::DriverAllocdPhysRam, -1);
		Mmu::Signal();
		NKern::ThreadLeaveCS();
		}
#endif

#ifdef BTRACE_RAM_ALLOCATOR
	// Must check for -1 as that is the default value of aCategory for
	// BTrace::Prime() which is intended to prime all categories that are
	// currently enabled via a single invocation of BTrace::Prime().
	if(aCategory==BTrace::ERamAllocator || (TInt)aCategory == -1)
		{
		NKern::ThreadEnterCS();
		Mmu::Wait();
		Mmu::Get().iRamPageAllocator->SendInitialBtraceLogs();
		Mmu::Signal();
		NKern::ThreadLeaveCS();
		}
#endif
	}

sl@0
  1280
sl@0
  1281
/******************************************************************************
sl@0
  1282
 * Code common to all virtual memory models
sl@0
  1283
 ******************************************************************************/
sl@0
  1284
sl@0
  1285
void RHeapK::Mutate(TInt aOffset, TInt aMaxLength)
sl@0
  1286
//
sl@0
  1287
// Used by the kernel to mutate a fixed heap into a chunk heap.
sl@0
  1288
//
sl@0
  1289
	{
sl@0
  1290
	iMinLength += aOffset;
sl@0
  1291
	iMaxLength = aMaxLength + aOffset;
sl@0
  1292
	iOffset = aOffset;
sl@0
  1293
	iChunkHandle = (TInt)K::HeapInfo.iChunk;
sl@0
  1294
	iPageSize = M::PageSizeInBytes();
sl@0
  1295
	iGrowBy = iPageSize;
sl@0
  1296
	iFlags = 0;
sl@0
  1297
	}
sl@0
  1298
sl@0
  1299
TInt M::PageSizeInBytes()
sl@0
  1300
	{
sl@0
  1301
	return KPageSize;
sl@0
  1302
	}
sl@0
  1303
sl@0
  1304
TInt MmuBase::FreeRamInBytes()
sl@0
  1305
	{
sl@0
  1306
	TInt free = iRamPageAllocator->FreeRamInBytes();
sl@0
  1307
	if(iRamCache)
sl@0
  1308
		free += iRamCache->NumberOfFreePages()<<iPageShift;
sl@0
  1309
	return free;
sl@0
  1310
	}
sl@0
  1311
sl@0
  1312
/**	Returns the amount of free RAM currently available.
sl@0
  1313
sl@0
  1314
@return The number of bytes of free RAM currently available.
sl@0
  1315
@pre	any context
sl@0
  1316
 */
sl@0
  1317
EXPORT_C TInt Kern::FreeRamInBytes()
sl@0
  1318
	{
sl@0
  1319
	return MmuBase::TheMmu->FreeRamInBytes();
sl@0
  1320
	}
sl@0
  1321
sl@0
  1322
sl@0
  1323
/**	Rounds up the argument to the size of a MMU page.

	To find out the size of a MMU page:
	@code
	size = Kern::RoundToPageSize(1);
	@endcode

	@param aSize Value to round up
	@pre any context
 */
EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize)
	{
	return MmuBase::RoundToPageSize(aSize);
	}


/**	Rounds up the argument to the amount of memory mapped by a MMU page
	directory entry.

	Chunks occupy one or more consecutive page directory entries (PDE) and
	therefore the amount of linear and physical memory allocated to a chunk is
	always a multiple of the amount of memory mapped by a page directory entry.
 */
EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize)
	{
	return MmuBase::RoundToChunkSize(aSize);
	}


/**
Allows the variant to specify the details of the RAM zones. This should be invoked
by the variant in its implementation of the pure virtual function Asic::Init1().

There are some limitations to how the RAM zones can be specified:
- Each RAM zone's address space must be distinct and not overlap with any
other RAM zone's address space.
- Each RAM zone's address space must have a size that is a multiple of the
ASIC's MMU small page size and be aligned to the ASIC's MMU small page size,
usually 4KB on ARM MMUs.
- When taken together all of the RAM zones must cover the whole of the physical RAM
address space as specified by the bootstrap in the SuperPage members iTotalRamSize
and iRamBootData.
- There can be no more than KMaxRamZones RAM zones specified by the base port.

Note the verification of the RAM zone data is not performed here but by the RAM
allocator later in the boot-up sequence.  This is because it is only possible to
verify the zone data once the physical RAM configuration has been read from
the super page. Any verification errors result in a "RAM-ALLOC" panic
faulting the kernel during initialisation.

@param aZones Pointer to an array of SRamZone structs containing the details for all
the zones. The end of the array is specified by an element with an iSize of zero. The array must
remain in memory at least until the kernel has successfully booted.

@param aCallback Pointer to a call back function that the kernel may invoke to request
one of the operations specified by TRamZoneOp.

@return KErrNone if successful, otherwise one of the system wide error codes.

@see TRamZoneOp
@see SRamZone
@see TRamZoneCallback
*/
EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
	{
	// Ensure this is only called once and only while we are initialising the kernel
	if (!K::Initialising || MmuBase::RamZoneConfig != NULL)
		{// fault kernel, won't return
		K::Fault(K::EBadSetRamZoneConfig);
		}

	if (NULL == aZones)
		{
		return KErrArgument;
		}
	MmuBase::RamZoneConfig=aZones;
	MmuBase::RamZoneCallback=aCallback;
	return KErrNone;
	}


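/*
Illustrative sketch only: a variant might describe its RAM zones in its
Asic::Init1() implementation along the following lines. MyAsic is a
hypothetical variant class and the zone IDs, base addresses and sizes are
invented for the example, not taken from any real base port.

@code
static SRamZone TheRamZones[3];

void MyAsic::Init1()
	{
	TheRamZones[0].iBase = 0x80000000u;		// first 16MB bank (example)
	TheRamZones[0].iSize = 0x01000000u;
	TheRamZones[0].iId = 0;
	TheRamZones[1].iBase = 0x81000000u;		// second 16MB bank (example)
	TheRamZones[1].iSize = 0x01000000u;
	TheRamZones[1].iId = 1;
	TheRamZones[2].iSize = 0;				// terminator element, iSize==0
	TInt r = Epoc::SetRamZoneConfig(TheRamZones, NULL);	// no callback in this sketch
	__NK_ASSERT_ALWAYS(r == KErrNone);
	// ... rest of Init1() ...
	}
@endcode
*/
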
/**
Modify the specified RAM zone's flags.

This allows the BSP or device driver to configure which type of pages, if any,
can be allocated into a RAM zone by the system.

Note: updating a RAM zone's flags can result in
	1 - memory allocations failing despite there being enough free RAM in the system.
	2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone()
	or TRamDefragRequest::DefragRam() never succeeding.

The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc
are intended to be used with this method.

@param aId			The ID of the RAM zone to modify.
@param aClearMask	The bit mask to clear, each flag of which must already be set on the RAM zone.
@param aSetMask		The bit mask to set.

@return KErrNone on success, KErrArgument if the RAM zone of aId is not found or if
aSetMask contains invalid flag bits.

@see TRamDefragRequest::EmptyRamZone()
@see TRamDefragRequest::ClaimRamZone()
@see TRamDefragRequest::DefragRam()

@see KRamZoneFlagDiscardOnly
@see KRamZoneFlagMovAndDisOnly
@see KRamZoneFlagNoAlloc
*/
EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
	{
	MmuBase& m = *MmuBase::TheMmu;
	MmuBase::Wait();

	TInt ret = m.ModifyRamZoneFlags(aId, aClearMask, aSetMask);

	MmuBase::Signal();
	return ret;
	}

TInt MmuBase::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
	{
	return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask);
	}


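/*
Usage sketch (the zone ID 5 is an invented example value): prevent any further
allocations into a zone, then later allow them again by clearing the flag.

@code
TInt r = Epoc::ModifyRamZoneFlags(5, 0, KRamZoneFlagNoAlloc);	// set no-alloc
// ... zone 5 now receives no new allocations ...
r = Epoc::ModifyRamZoneFlags(5, KRamZoneFlagNoAlloc, 0);		// clear it again
@endcode
*/
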
/**
Gets the current count of a particular RAM zone's pages by type.

@param aId The ID of the RAM zone to enquire about.
@param aPageData If successful, on return this contains the page count.

@return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or
one of the system wide error codes.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.

@see SRamZonePageCount
*/
EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount");

	MmuBase& m = *MmuBase::TheMmu;
	MmuBase::Wait(); // Gets RAM alloc mutex

	TInt r = m.GetRamZonePageCount(aId, aPageData);

	MmuBase::Signal();

	return r;
	}

TInt MmuBase::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
	{
	return iRamPageAllocator->GetZonePageCount(aId, aPageData);
	}

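/*
Usage sketch (zone ID invented; member names are those declared by
SRamZonePageCount): read the per-type page counts of one RAM zone.

@code
SRamZonePageCount counts;
TInt r = Epoc::GetRamZonePageCount(5, counts);
if (r == KErrNone)
	Kern::Printf("zone 5: %d free, %d fixed", counts.iFreePages, counts.iFixedPages);
@endcode
*/
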
/**
Replace a page of the system's execute-in-place (XIP) ROM image with a page of
RAM having the same contents. This RAM can subsequently be written to in order
to apply patches to the XIP ROM or to insert software breakpoints for debugging
purposes.
Call Epoc::FreeShadowPage() when you wish to revert to the original ROM page.

@param	aRomAddr	The virtual address of the ROM page to be replaced.
@return	KErrNone if the operation completed successfully.
		KErrArgument if the specified address is not a valid XIP ROM address.
		KErrNoMemory if the operation failed due to insufficient free RAM.
		KErrAlreadyExists if the XIP ROM page at the specified address has
			already been shadowed by a RAM page.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
*/
EXPORT_C TInt Epoc::AllocShadowPage(TLinAddr aRomAddr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocShadowPage");

	TInt r;
	r=M::LockRegion(aRomAddr,1);
	if(r!=KErrNone && r!=KErrNotFound)
		return r;
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::Wait();
	r=m.AllocShadowPage(aRomAddr);
	MmuBase::Signal();
	if(r!=KErrNone)
		M::UnlockRegion(aRomAddr,1);
	return r;
	}

/**
Copies data into shadow memory. Source data is presumed to be in kernel memory.

@param	aDest	Address to copy into.
@param	aSrc	Data to copy from.
@param	aLength	Number of bytes to copy. A maximum of 32 bytes of data can be copied.

@return	KErrNone 		if the operation completed successfully.
		KErrArgument 	if any part of the destination region is not a shadow page or
						if aLength is greater than 32 bytes.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
*/
EXPORT_C TInt Epoc::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::CopyToShadowMemory");

	if (aLength>32)
		return KErrArgument;
	MmuBase& m=*MmuBase::TheMmu;
	// This is a simple copy operation except on platforms with __CPU_MEMORY_TYPE_REMAPPING defined,
	// where the shadow page is read-only and has to be remapped before it is written into.
	return m.CopyToShadowMemory(aDest, aSrc, aLength);
	}

/**
Revert an XIP ROM address which has previously been shadowed to the original
page of ROM.

@param	aRomAddr	The virtual address of the ROM page to be reverted.
@return	KErrNone if the operation completed successfully.
		KErrArgument if the specified address is not a valid XIP ROM address.
		KErrGeneral if the specified address has not previously been shadowed
			using Epoc::AllocShadowPage().

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
*/
EXPORT_C TInt Epoc::FreeShadowPage(TLinAddr aRomAddr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreeShadowPage");
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r=m.FreeShadowPage(aRomAddr);
	MmuBase::Signal();
	if(r==KErrNone)
		M::UnlockRegion(aRomAddr,1);
	return r;
	}


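/*
Typical shadowing sequence (sketch; the ROM address and patch bytes are
invented and error handling is abbreviated): shadow a ROM page, patch it,
then revert to the original page when finished.

@code
TLinAddr rom = 0xF8001000;				// some XIP ROM page (example value)
TInt r = Epoc::AllocShadowPage(rom);	// back the page with writable RAM
if (r == KErrNone)
	{
	TUint8 patch[2] = {0x00, 0xBE};		// e.g. a Thumb BKPT encoding
	r = Epoc::CopyToShadowMemory(rom, (TLinAddr)patch, sizeof(patch));
	// ... run with the patched page ...
	Epoc::FreeShadowPage(rom);			// restore the original ROM page
	}
@endcode
*/
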
/**
Change the permissions on an XIP ROM address which has previously been shadowed
by a RAM page so that the RAM page may no longer be written to.

Note: A shadow page on the latest platforms (that use the reduced set of access
permissions: arm11mpcore, arm1176, cortex) is implemented with read-only
permissions. Therefore, calling this function is not necessary, as the shadow
page is already created as 'frozen'.

@param	aRomAddr	The virtual address of the shadow RAM page to be frozen.
@return	KErrNone if the operation completed successfully.
		KErrArgument if the specified address is not a valid XIP ROM address.
		KErrGeneral if the specified address has not previously been shadowed
			using Epoc::AllocShadowPage().

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
*/
EXPORT_C TInt Epoc::FreezeShadowPage(TLinAddr aRomAddr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreezeShadowPage");
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r=m.FreezeShadowPage(aRomAddr);
	MmuBase::Signal();
	return r;
	}


/**
Allocate a block of physically contiguous RAM with a physical address aligned
to a specified power of 2 boundary.
When the RAM is no longer required it should be freed using
Epoc::FreePhysicalRam().

@param	aSize		The size in bytes of the required block. The specified size
					is rounded up to the page size, since only whole pages of
					physical RAM can be allocated.
@param	aPhysAddr	Receives the physical address of the base of the block on
					successful allocation.
@param	aAlign		Specifies the number of least significant bits of the
					physical address which are required to be zero. If a value
					less than log2(page size) is specified, page alignment is
					assumed. Pass 0 for aAlign if there are no special alignment
					constraints (other than page alignment).
@return	KErrNone if the allocation was successful.
		KErrNoMemory if a sufficiently large physically contiguous block of free
		RAM with the specified alignment could not be found.
@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam");
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r=m.AllocPhysicalRam(aSize,aPhysAddr,aAlign);
	if (r == KErrNone)
		{
		// For the sake of platform security we have to clear the memory. E.g. the driver
		// could assign it to a chunk visible to user side.
		m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1));
#ifdef BTRACE_KERNEL_MEMORY
		TUint size = Kern::RoundToPageSize(aSize);
		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr);
		Epoc::DriverAllocdPhysRam += size;
#endif
		}
	MmuBase::Signal();
	return r;
	}

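/*
Usage sketch for a driver needing a physically contiguous, DMA-able buffer
(the size and alignment are illustrative): allocate the block, use it, then
return it with the matching Epoc::FreePhysicalRam() call.

@code
TPhysAddr phys;
TInt r = Epoc::AllocPhysicalRam(0x10000, phys, 16);	// 64KB on a 64KB boundary
if (r == KErrNone)
	{
	// ... map it, e.g. with DPlatChunkHw::New(), and perform the DMA ...
	Epoc::FreePhysicalRam(phys, 0x10000);
	}
@endcode
*/
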
/**
Allocate a block of physically contiguous RAM with a physical address aligned
to a specified power of 2 boundary from the specified zone.
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().

Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
to allocate regardless of whether the other flags are set for the specified RAM zones
or not.

@param 	aZoneId		The ID of the zone to attempt to allocate from.
@param	aSize		The size in bytes of the required block. The specified size
					is rounded up to the page size, since only whole pages of
					physical RAM can be allocated.
@param	aPhysAddr	Receives the physical address of the base of the block on
					successful allocation.
@param	aAlign		Specifies the number of least significant bits of the
					physical address which are required to be zero. If a value
					less than log2(page size) is specified, page alignment is
					assumed. Pass 0 for aAlign if there are no special alignment
					constraints (other than page alignment).
@return	KErrNone if the allocation was successful.
		KErrNoMemory if a sufficiently large physically contiguous block of free
		RAM with the specified alignment could not be found within the specified
		zone.
		KErrArgument if a RAM zone of the specified ID can't be found or if the
		RAM zone has a total number of physical pages which is less than those
		requested for the allocation.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign);
	}


/**
Allocate a block of physically contiguous RAM with a physical address aligned
to a specified power of 2 boundary from the specified RAM zones.
When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().

RAM will be allocated into the RAM zones in the order they are specified in the
aZoneIdList parameter. If the contiguous allocations are intended to span RAM zones
when required then aZoneIdList should be listed with the RAM zones in ascending
physical address order.

Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
to allocate regardless of whether the other flags are set for the specified RAM zones
or not.

@param 	aZoneIdList	A pointer to an array of RAM zone IDs of the RAM zones to
					attempt to allocate from.
@param 	aZoneIdCount The number of RAM zone IDs contained in aZoneIdList.
@param	aSize		The size in bytes of the required block. The specified size
					is rounded up to the page size, since only whole pages of
					physical RAM can be allocated.
@param	aPhysAddr	Receives the physical address of the base of the block on
					successful allocation.
@param	aAlign		Specifies the number of least significant bits of the
					physical address which are required to be zero. If a value
					less than log2(page size) is specified, page alignment is
					assumed. Pass 0 for aAlign if there are no special alignment
					constraints (other than page alignment).
@return	KErrNone if the allocation was successful.
		KErrNoMemory if a sufficiently large physically contiguous block of free
		RAM with the specified alignment could not be found within the specified
		zones.
		KErrArgument if a RAM zone of a specified ID can't be found or if the
		RAM zones have a total number of physical pages which is less than those
		requested for the allocation.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam");
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
	if (r == KErrNone)
		{
		// For the sake of platform security we have to clear the memory. E.g. the driver
		// could assign it to a chunk visible to user side.
		m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1));
#ifdef BTRACE_KERNEL_MEMORY
		TUint size = Kern::RoundToPageSize(aSize);
		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr);
		Epoc::DriverAllocdPhysRam += size;
#endif
		}
	MmuBase::Signal();
	return r;
	}


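/*
Usage sketch (zone IDs invented): try to satisfy a contiguous allocation from
zone 3 first, then zone 4. Listing the zones in ascending physical address
order lets the block span the zone boundary if that is required.

@code
TUint zones[2] = {3, 4};
TPhysAddr phys;
TInt r = Epoc::ZoneAllocPhysicalRam(zones, 2, 0x40000, phys, 0);	// 256KB
@endcode
*/
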
/**
Attempt to allocate discontiguous RAM pages.

When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().

@param	aNumPages	The number of discontiguous pages required to be allocated.
@param	aPageList	This should be a pointer to a previously allocated array of
					aNumPages TPhysAddr elements.  On a successful allocation it
					will receive the physical addresses of each page allocated.

@return	KErrNone if the allocation was successful.
		KErrNoMemory if the requested number of pages can't be allocated.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam");
	MmuBase& m = *MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r = m.AllocPhysicalRam(aNumPages, aPageList);
	if (r == KErrNone)
		{
		// For the sake of platform security we have to clear the memory. E.g. the driver
		// could assign it to a chunk visible to user side.
		m.ClearPages(aNumPages, aPageList);

#ifdef BTRACE_KERNEL_MEMORY
		if (BTrace::CheckFilter(BTrace::EKernelMemory))
			{// Only loop round each page if EKernelMemory tracing is enabled
			TPhysAddr* pAddr = aPageList;
			TPhysAddr* pAddrEnd = aPageList + aNumPages;
			while (pAddr < pAddrEnd)
				{
				BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
				Epoc::DriverAllocdPhysRam += KPageSize;
				}
			}
#endif
		}
	MmuBase::Signal();
	return r;
	}


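/*
Usage sketch (the page count is arbitrary): allocate a scatter list of pages
and free them again with the page-list overload of Epoc::FreePhysicalRam().

@code
const TInt KNumPages = 8;
TPhysAddr pages[KNumPages];
TInt r = Epoc::AllocPhysicalRam(KNumPages, pages);
if (r == KErrNone)
	{
	// ... map and use the pages ...
	Epoc::FreePhysicalRam(KNumPages, pages);
	}
@endcode
*/
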
/**
Attempt to allocate discontiguous RAM pages from the specified zone.

Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
to allocate regardless of whether the other flags are set for the specified RAM zones
or not.

When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().

@param 	aZoneId		The ID of the zone to attempt to allocate from.
@param	aNumPages	The number of discontiguous pages required to be allocated
					from the specified zone.
@param	aPageList	This should be a pointer to a previously allocated array of
					aNumPages TPhysAddr elements.  On a successful
					allocation it will receive the physical addresses of each
					page allocated.
@return	KErrNone if the allocation was successful.
		KErrNoMemory if the requested number of pages can't be allocated from the
		specified zone.
		KErrArgument if a RAM zone of the specified ID can't be found or if the
		RAM zone has a total number of physical pages which is less than those
		requested for the allocation.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList)
	{
	return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList);
	}


/**
Attempt to allocate discontiguous RAM pages from the specified RAM zones.
The RAM pages will be allocated into the RAM zones in the order that they are specified
in the aZoneIdList parameter, the RAM zone preferences will be ignored.

Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
to allocate regardless of whether the other flags are set for the specified RAM zones
or not.

When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().

@param 	aZoneIdList	A pointer to an array of RAM zone IDs of the RAM zones to
					attempt to allocate from.
@param	aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList.
@param	aNumPages	The number of discontiguous pages required to be allocated
					from the specified zones.
@param	aPageList	This should be a pointer to a previously allocated array of
					aNumPages TPhysAddr elements.  On a successful
					allocation it will receive the physical addresses of each
					page allocated.
@return	KErrNone if the allocation was successful.
		KErrNoMemory if the requested number of pages can't be allocated from the
		specified zones.
		KErrArgument if a RAM zone of a specified ID can't be found or if the
		RAM zones have a total number of physical pages which is less than those
		requested for the allocation.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam");
	MmuBase& m = *MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList);
	if (r == KErrNone)
		{
		// For the sake of platform security we have to clear the memory. E.g. the driver
		// could assign it to a chunk visible to user side.
		m.ClearPages(aNumPages, aPageList);

#ifdef BTRACE_KERNEL_MEMORY
		if (BTrace::CheckFilter(BTrace::EKernelMemory))
			{// Only loop round each page if EKernelMemory tracing is enabled
			TPhysAddr* pAddr = aPageList;
			TPhysAddr* pAddrEnd = aPageList + aNumPages;
			while (pAddr < pAddrEnd)
				{
				BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
				Epoc::DriverAllocdPhysRam += KPageSize;
				}
			}
#endif
		}
	MmuBase::Signal();
	return r;
	}

/**
Free a previously-allocated block of physically contiguous RAM.

Specifying one of the following may cause the system to panic:
a) an invalid physical RAM address.
b) valid physical RAM addresses where some had not been previously allocated.
c) an address not aligned to a page boundary.

@param	aPhysAddr	The physical address of the base of the block to be freed.
					This must be the address returned by a previous call to
					Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(),
					Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone().
@param	aSize		The size in bytes of the required block. The specified size
					is rounded up to the page size, since only whole pages of
					physical RAM can be allocated.
@return	KErrNone if the operation was successful.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r=m.FreePhysicalRam(aPhysAddr,aSize);
#ifdef BTRACE_KERNEL_MEMORY
	if (r == KErrNone)
		{
		TUint size = Kern::RoundToPageSize(aSize);
		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, size, aPhysAddr);
		Epoc::DriverAllocdPhysRam -= size;
		}
#endif
	MmuBase::Signal();
	return r;
	}


/**
Free a number of physical RAM pages that were previously allocated using
Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam().

Specifying one of the following may cause the system to panic:
a) an invalid physical RAM address.
b) valid physical RAM addresses where some had not been previously allocated.
c) an address not aligned to a page boundary.

@param	aNumPages	The number of pages to be freed.
@param	aPageList	An array of aNumPages TPhysAddr elements, where each element
					contains the physical address of a page to be freed.
					This must be the same set of addresses as those returned by a
					previous call to Epoc::AllocPhysicalRam() or
					Epoc::ZoneAllocPhysicalRam().
@return	KErrNone if the operation was successful.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r=m.FreePhysicalRam(aNumPages, aPageList);
#ifdef BTRACE_KERNEL_MEMORY
	if (r == KErrNone && BTrace::CheckFilter(BTrace::EKernelMemory))
		{// Only loop round each page if EKernelMemory tracing is enabled
		TPhysAddr* pAddr = aPageList;
		TPhysAddr* pAddrEnd = aPageList + aNumPages;
		while (pAddr < pAddrEnd)
			{
			BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, KPageSize, *pAddr++);
			Epoc::DriverAllocdPhysRam -= KPageSize;
			}
		}
#endif
	MmuBase::Signal();
	return r;
	}


/**
Allocate a specific block of physically contiguous RAM, specified by physical
base address and size.
If and when the RAM is no longer required it should be freed using
Epoc::FreePhysicalRam().

@param	aPhysAddr	The physical address of the base of the required block.
@param	aSize		The size in bytes of the required block. The specified size
					is rounded up to the page size, since only whole pages of
					physical RAM can be allocated.
@return	KErrNone if the operation was successful.
		KErrArgument if the range of physical addresses specified included some
					which are not valid physical RAM addresses.
		KErrInUse	if the range of physical addresses specified are all valid
					physical RAM addresses but some of them have already been
					allocated for other purposes.
@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
*/
EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam");
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r=m.ClaimPhysicalRam(aPhysAddr,aSize);
#ifdef BTRACE_KERNEL_MEMORY
	if(r==KErrNone)
		{
		TUint32 pa=aPhysAddr;
		TUint32 size=aSize;
		m.RoundUpRangeToPageSize(pa,size);
		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, pa);
		Epoc::DriverAllocdPhysRam += size;
		}
#endif
	MmuBase::Signal();
	return r;
	}


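/*
Usage sketch (the address and size are invented): claim a specific physical
region, e.g. one the base port deliberately keeps for a frame buffer, so the
RAM allocator will not hand it out to anyone else.

@code
const TPhysAddr KFrameBufferBase = 0x8F000000u;	// hypothetical reserved base
const TInt KFrameBufferSize = 0x00100000;		// 1MB
TInt r = Epoc::ClaimPhysicalRam(KFrameBufferBase, KFrameBufferSize);
@endcode
*/
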
/**
Translate a virtual address to the corresponding physical address.

@param	aLinAddr	The virtual address to be translated.
@return	The physical address corresponding to the given virtual address, or
		KPhysAddrInvalid if the specified virtual address is unmapped.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre Call in a thread context.
@pre Can be used in a device driver.
@pre Hold system lock if there is any possibility that the virtual address is
		unmapped, may become unmapped, or may be remapped during the operation.
	This will potentially be the case unless the virtual address refers to a
	hardware chunk or shared chunk under the control of the driver calling this
	function.
*/
EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr)
	{
//	This precondition is violated by various parts of the system under some conditions,
//	e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by
//	a higher-level RTOS for which these conditions are meaningless. Thus, it's been
//	disabled for now.
//	CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical");
	MmuBase& m=*MmuBase::TheMmu;
	TPhysAddr pa=m.LinearToPhysical(aLinAddr);
	return pa;
	}


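/*
Usage sketch: translate an address inside a mapping the driver itself
controls (so the translation cannot change underneath it and the system lock
is not needed). linAddr is assumed to be such an address.

@code
TPhysAddr phys = Epoc::LinearToPhysical(linAddr);
if (phys == KPhysAddrInvalid)
	{
	// ... the address was not mapped ...
	}
@endcode
*/
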
EXPORT_C TInt TInternalRamDrive::MaxSize()
	{
	return TheSuperPage().iRamDriveSize+Kern::FreeRamInBytes();
	}


/******************************************************************************
 * Address allocator
 ******************************************************************************/
TLinearSection* TLinearSection::New(TLinAddr aBase, TLinAddr aEnd)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("TLinearSection::New(%08x,%08x)", aBase, aEnd));
	MmuBase& m=*MmuBase::TheMmu;
	TUint npdes=(aEnd-aBase)>>m.iChunkShift;
	TInt nmapw=(npdes+31)>>5;
	TInt memsz=sizeof(TLinearSection)+(nmapw-1)*sizeof(TUint32);
	TLinearSection* p=(TLinearSection*)Kern::Alloc(memsz);
	if (p)
		{
		new(&p->iAllocator) TBitMapAllocator(npdes, ETrue);
		p->iBase=aBase;
		p->iEnd=aEnd;
		}
	__KTRACE_OPT(KMMU,Kern::Printf("TLinearSection at %08x", p));
	return p;
	}

/******************************************************************************
 * Address allocator for HW chunks
 ******************************************************************************/
THwChunkPageTable::THwChunkPageTable(TInt aIndex, TInt aSize, TPde aPdePerm)
	:	THwChunkRegion(aIndex, 0, aPdePerm),
		iAllocator(aSize, ETrue)
	{
	}

THwChunkPageTable* THwChunkPageTable::New(TInt aIndex, TPde aPdePerm)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable::New(%03x,%08x)",aIndex,aPdePerm));
	MmuBase& m=*MmuBase::TheMmu;
	TInt pdepages=m.iChunkSize>>m.iPageShift;
	TInt nmapw=(pdepages+31)>>5;
	TInt memsz=sizeof(THwChunkPageTable)+(nmapw-1)*sizeof(TUint32);
	THwChunkPageTable* p=(THwChunkPageTable*)Kern::Alloc(memsz);
	if (p)
		new (p) THwChunkPageTable(aIndex, pdepages, aPdePerm);
	__KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable at %08x",p));
	return p;
	}

THwChunkAddressAllocator::THwChunkAddressAllocator()
	{
	}

THwChunkAddressAllocator* THwChunkAddressAllocator::New(TInt aAlign, TLinearSection* aSection)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator::New(%d,%08x)",aAlign,aSection));
	THwChunkAddressAllocator* p=new THwChunkAddressAllocator;
	if (p)
		{
		p->iAlign=aAlign;
		p->iSection=aSection;
		}
	__KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator at %08x",p));
	return p;
	}

THwChunkRegion* THwChunkAddressAllocator::NewRegion(TInt aIndex, TInt aSize, TPde aPdePerm)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion(index=%x, size=%x, pde=%08x)",aIndex,aSize,aPdePerm));
	THwChunkRegion* p=new THwChunkRegion(aIndex, aSize, aPdePerm);
	if (p)
		{
		TInt r=InsertInOrder(p, Order);
		__KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r));
		if (r<0)
			delete p, p=NULL;
		}
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion ret %08x)",p));
	return p;
	}

THwChunkPageTable* THwChunkAddressAllocator::NewPageTable(TInt aIndex, TPde aPdePerm, TInt aInitB, TInt aInitC)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable(index=%x, pde=%08x, iB=%d, iC=%d)",aIndex,aPdePerm,aInitB,aInitC));
	THwChunkPageTable* p=THwChunkPageTable::New(aIndex, aPdePerm);
	if (p)
		{
		TInt r=InsertInOrder(p, Order);
		__KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r));
		if (r<0)
			delete p, p=NULL;
		else
			p->iAllocator.Alloc(aInitB, aInitC);
		}
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable ret %08x)",p));
	return p;
	}

TLinAddr THwChunkAddressAllocator::SearchExisting(TInt aNumPages, TInt aPageAlign, TInt aPageOffset, TPde aPdePerm)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx np=%03x align=%d offset=%03x pdeperm=%08x",
				aNumPages, aPageAlign, aPageOffset, aPdePerm));
	TInt c=Count();
	if (c==0)
		return 0;	// don't try to access [0] if array empty!
	THwChunkPageTable** pp=(THwChunkPageTable**)&(*this)[0];
	THwChunkPageTable** ppE=pp+c;
	while(pp<ppE)
		{
		THwChunkPageTable* p=*pp++;
		if (p->iRegionSize!=0 || p->iPdePerm!=aPdePerm)
			continue;	// if not a page table, or the PDE permissions are wrong, we can't use it
		TInt r=p->iAllocator.AllocAligned(aNumPages, aPageAlign, -aPageOffset, EFalse);
		__KTRACE_OPT(KMMU, Kern::Printf("r=%d", r));
		if (r<0)
			continue;	// not enough space in this page table

		// got enough space in existing page table, so use it
		p->iAllocator.Alloc(r, aNumPages);
		MmuBase& m=*MmuBase::TheMmu;
		TLinAddr a = iSection->iBase + (TLinAddr(p->iIndex)<<m.iChunkShift) + (r<<m.iPageShift);
		__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx OK, returning %08x", a));
		return a;
		}
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx not found"));
	return 0;
	}

TLinAddr THwChunkAddressAllocator::Alloc(TInt aSize, TInt aAlign, TInt aOffset, TPde aPdePerm)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc size=%08x align=%d offset=%08x pdeperm=%08x",
				aSize, aAlign, aOffset, aPdePerm));
	MmuBase& m=*MmuBase::TheMmu;
	TInt npages=(aSize+m.iPageMask)>>m.iPageShift;
	TInt align=Max(aAlign,iAlign);
	if (align>m.iChunkShift)
		return 0;
	TInt aligns=1<<align;
	TInt alignm=aligns-1;
	TInt offset=(aOffset&alignm)>>m.iPageShift;
	TInt pdepages=m.iChunkSize>>m.iPageShift;
	TInt pdepageshift=m.iChunkShift-m.iPageShift;
	MmuBase::WaitHwChunk();
	if (npages<pdepages)
		{
		// for small regions, first try to share an existing page table
		TLinAddr a=SearchExisting(npages, align-m.iPageShift, offset, aPdePerm);
		if (a)
			{
			MmuBase::SignalHwChunk();
			return a;
			}
		}

	// large region or no free space in existing page tables - allocate whole PDEs
	TInt npdes=(npages+offset+pdepages-1)>>pdepageshift;
	__KTRACE_OPT(KMMU, Kern::Printf("Allocate %d PDEs", npdes));
	MmuBase::Wait();
	TInt ix=iSection->iAllocator.AllocConsecutive(npdes, EFalse);
	if (ix>=0)
		iSection->iAllocator.Alloc(ix, npdes);
	MmuBase::Signal();
	TLinAddr a=0;
	if (ix>=0)
		a = iSection->iBase + (TLinAddr(ix)<<m.iChunkShift) + (TLinAddr(offset)<<m.iPageShift);

	// Create bitmaps for each page table and placeholders for section blocks.
	// We only create a bitmap for the first and last PDE and then only if they are not
	// fully occupied by this request
	THwChunkPageTable* first=NULL;
	THwChunkRegion* middle=NULL;
	TInt remain=npages;
	TInt nix=ix;
	if (a && (offset || npages<pdepages))
		{
		// first PDE is bitmap
		TInt first_count = Min(remain, pdepages-offset);
		first=NewPageTable(nix, aPdePerm, offset, first_count);
		++nix;
		remain -= first_count;
		if (!first)
			a=0;
		}
	if (a && remain>=pdepages)
		{
		// next need whole-PDE-block placeholder
		TInt whole_pdes=remain>>pdepageshift;
		middle=NewRegion(nix, whole_pdes, aPdePerm);
		nix+=whole_pdes;
		remain-=(whole_pdes<<pdepageshift);
		if (!middle)
			a=0;
		}
	if (a && remain)
		{
		// need final bitmap section
		if (!NewPageTable(nix, aPdePerm, 0, remain))
			a=0;
		}
	if (!a)
		{
		// alloc failed somewhere - free anything we did create
		if (middle)
			Discard(middle);
		if (first)
			Discard(first);
		if (ix>=0)
			{
			MmuBase::Wait();
			iSection->iAllocator.Free(ix, npdes);
			MmuBase::Signal();
			}
		}
	MmuBase::SignalHwChunk();
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc returns %08x", a));
	return a;
	}

void THwChunkAddressAllocator::Discard(THwChunkRegion* aRegion)
	{
	// remove a region from the array and destroy it
	TInt r=FindInOrder(aRegion, Order);
	if (r>=0)
		Remove(r);
	Kern::Free(aRegion);
	}

TInt THwChunkAddressAllocator::Order(const THwChunkRegion& a1, const THwChunkRegion& a2)
	{
	// order two regions by address
	return a1.iIndex-a2.iIndex;
	}

THwChunkRegion* THwChunkAddressAllocator::Free(TLinAddr aAddr, TInt aSize)
	{
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free addr=%08x size=%08x", aAddr, aSize));
	__ASSERT_ALWAYS(aAddr>=iSection->iBase && (aAddr+aSize)<=iSection->iEnd,
										MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid));
	THwChunkRegion* list=NULL;
	MmuBase& m=*MmuBase::TheMmu;
	TInt ix=(aAddr - iSection->iBase)>>m.iChunkShift;
	TInt remain=(aSize+m.iPageMask)>>m.iPageShift;
	TInt pdepageshift=m.iChunkShift-m.iPageShift;
	TInt offset=(aAddr&m.iChunkMask)>>m.iPageShift;
	THwChunkRegion find(ix, 0, 0);
	MmuBase::WaitHwChunk();
	TInt r=FindInOrder(&find, Order);
	__ASSERT_ALWAYS(r>=0, MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid));
	while (remain)
		{
		THwChunkPageTable* p=(THwChunkPageTable*)(*this)[r];
		__ASSERT_ALWAYS(p->iIndex==ix, MmuBase::Panic(MmuBase::EFreeHwChunkIndexInvalid));
		if (p->iRegionSize)
			{
			// multiple-whole-PDE region
			TInt rsz=p->iRegionSize;
			remain-=(rsz<<pdepageshift);
			Remove(r);	// r now indexes following array entry
			ix+=rsz;
			}
		else
			{
			// bitmap region
			TInt n=Min(remain, (1<<pdepageshift)-offset);
			p->iAllocator.Free(offset, n);
			remain-=n;
			++ix;
			if (p->iAllocator.iAvail < p->iAllocator.iSize)
				{
				// bitmap still in use
				offset=0;
				++r;	// r indexes following array entry
				continue;
				}
			Remove(r);	// r now indexes following array entry
			}
		offset=0;
		p->iNext=list;
		list=p;			// chain free region descriptors together
		}
	MmuBase::SignalHwChunk();
	__KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free returns %08x", list));
	return list;
	}

/********************************************
 * Hardware chunk abstraction
 ********************************************/
THwChunkAddressAllocator* MmuBase::MappingRegion(TUint)
	{
	return iHwChunkAllocator;
	}

TInt MmuBase::AllocateAllPageTables(TLinAddr aLinAddr, TInt aSize, TPde aPdePerm, TInt aMapShift, SPageTableInfo::TAttribs aAttrib)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("AllocateAllPageTables lin=%08x, size=%x, pde=%08x, mapshift=%d attribs=%d",
																aLinAddr, aSize, aPdePerm, aMapShift, aAttrib));
	TInt offset=aLinAddr&iChunkMask;
	TInt remain=aSize;
	TLinAddr a=aLinAddr&~iChunkMask;
	TInt newpts=0;
	for (; remain>0; a+=iChunkSize)
		{
		// don't need a page table if a whole-PDE mapping is permitted here
		if (aMapShift<iChunkShift || offset || remain<iChunkSize)
			{
			// need to check for a page table at a
			TInt id=PageTableId(a);
			if (id<0)
				{
				// no page table - must allocate one
				id = AllocPageTable();
				if (id<0)
					break;
				// got page table, assign it
				// AssignPageTable(TInt aId, TInt aUsage, TAny* aObject, TLinAddr aAddr, TPde aPdePerm)
				AssignPageTable(id, aAttrib, NULL, a, aPdePerm);
				++newpts;
				}
			}
		remain -= (iChunkSize-offset);
		offset=0;
		}
	if (remain<=0)
		return KErrNone;	// completed OK

	// ran out of memory somewhere - free page tables which were allocated
	for (; newpts; --newpts)
		{
		a-=iChunkSize;
		TInt id=UnassignPageTable(a);
		FreePageTable(id);
		}
	return KErrNoMemory;
	}


/**
Create a hardware chunk object mapping a specified block of physical addresses
with specified access permissions and cache policy.

When the mapping is no longer required, close the chunk using chunk->Close(0);
Note that closing a chunk does not free any RAM pages which were mapped by the
chunk - these must be freed separately using Epoc::FreePhysicalRam().

@param	aChunk	Upon successful completion this parameter receives a pointer to
				the newly created chunk. Upon unsuccessful completion it is
				written with a NULL pointer. The virtual address of the mapping
				can subsequently be discovered using the LinearAddress()
				function on the chunk.
@param	aAddr	The base address of the physical region to be mapped. This will
				be rounded down to a multiple of the hardware page size before
				being used.
@param	aSize	The size of the physical address region to be mapped. This will
				be rounded up to a multiple of the hardware page size before
				being used; the rounding is such that the entire range from
				aAddr to aAddr+aSize-1 inclusive is mapped. For example if
				aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an
				8KB range of physical addresses from 0xB0001000 to 0xB0002FFF
				inclusive will be mapped.
@param	aMapAttr Mapping attributes required for the mapping. This is formed
				by ORing together values from the TMappingAttributes enumeration
				to specify the access permissions and caching policy.

@pre Calling thread must be in a critical section.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Call in a thread context.
@pre Can be used in a device driver.
@see TMappingAttributes
*/
EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
	{
	if (aAddr == KPhysAddrInvalid)
		return KErrNotSupported;
	return DoNew(aChunk, aAddr, aSize, aMapAttr);
	}

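/*
Usage sketch (the register base address is invented): map a 4KB block of
device registers uncached for a driver, then close the mapping when done.
EMapAttrSupRw and EMapAttrFullyBlocking are TMappingAttributes values.

@code
DPlatChunkHw* hwChunk = NULL;
TInt r = DPlatChunkHw::New(hwChunk, 0x40000000u, 0x1000,
							EMapAttrSupRw | EMapAttrFullyBlocking);
if (r == KErrNone)
	{
	volatile TUint32* regs = (volatile TUint32*)hwChunk->LinearAddress();
	// ... access the device registers through regs ...
	hwChunk->Close(0);
	}
@endcode
*/
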
TInt DPlatChunkHw::DoNew(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
	__KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr));
	if (aSize<=0)
		return KErrArgument;
	MmuBase& m=*MmuBase::TheMmu;
	aChunk=NULL;
	TPhysAddr pa=aAddr!=KPhysAddrInvalid ? aAddr&~m.iPageMask : 0;
	TInt size=((aAddr+aSize+m.iPageMask)&~m.iPageMask)-pa;
	__KTRACE_OPT(KMMU,Kern::Printf("Rounded %08x+%x", pa, size));
	DMemModelChunkHw* pC=new DMemModelChunkHw;
	if (!pC)
		return KErrNoMemory;
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunkHw created at %08x",pC));
	pC->iPhysAddr=aAddr;
	pC->iSize=size;
	TUint mapattr=aMapAttr;
	TPde pdePerm=0;
	TPte ptePerm=0;
	TInt r=m.PdePtePermissions(mapattr, pdePerm, ptePerm);
	if (r==KErrNone)
		{
		pC->iAllocator=m.MappingRegion(mapattr);
		pC->iAttribs=mapattr;	// save actual mapping attributes
		r=pC->AllocateLinearAddress(pdePerm);
		if (r>=0)
			{
			TInt map_shift=r;
			MmuBase::Wait();
			r=m.AllocateAllPageTables(pC->iLinAddr, size, pdePerm, map_shift, SPageTableInfo::EGlobal);
			if (r==KErrNone && aAddr!=KPhysAddrInvalid)
				m.Map(pC->iLinAddr, pa, size, pdePerm, ptePerm, map_shift);
			MmuBase::Signal();
			}
		}
	if (r==KErrNone)
		aChunk=pC;
	else
		pC->Close(NULL);
	return r;
	}

TInt DMemModelChunkHw::AllocateLinearAddress(TPde aPdePerm)
sl@0
  2506
	{
sl@0
  2507
	__KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::AllocateLinearAddress(%08x)", aPdePerm));
sl@0
  2508
	__KTRACE_OPT(KMMU, Kern::Printf("iAllocator=%08x iPhysAddr=%08x iSize=%08x", iAllocator, iPhysAddr, iSize));
sl@0
  2509
	MmuBase& m=*MmuBase::TheMmu;
sl@0
  2510
	TInt map_shift = (iPhysAddr<0xffffffffu) ? 30 : m.iPageShift;
sl@0
  2511
	for (; map_shift>=m.iPageShift; --map_shift)
sl@0
  2512
		{
sl@0
  2513
		TUint32 map_size = 1<<map_shift;
sl@0
  2514
		TUint32 map_mask = map_size-1;
sl@0
  2515
		if (!(m.iMapSizes & map_size))
sl@0
  2516
			continue;	// map_size is not supported on this hardware
sl@0
  2517
		TPhysAddr base = (iPhysAddr+map_mask) &~ map_mask;	// base rounded up
sl@0
  2518
		TPhysAddr end = (iPhysAddr+iSize)&~map_mask;		// end rounded down
sl@0
  2519
		if ((base-end)<0x80000000u && map_shift>m.iPageShift)
sl@0
  2520
			continue;	// region not big enough to use this mapping size
sl@0
  2521
		__KTRACE_OPT(KMMU, Kern::Printf("Try map size %08x", map_size));
sl@0
  2522
		iLinAddr=iAllocator->Alloc(iSize, map_shift, iPhysAddr, aPdePerm);
sl@0
  2523
		if (iLinAddr)
sl@0
  2524
			break;		// done
sl@0
  2525
		}
sl@0
  2526
	TInt r=iLinAddr ? map_shift : KErrNoMemory;
sl@0
  2527
	__KTRACE_OPT(KMMU, Kern::Printf("iLinAddr=%08x, returning %d", iLinAddr, r));
sl@0
  2528
	return r;
sl@0
  2529
	}

void DMemModelChunkHw::DeallocateLinearAddress()
	{
	__KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::DeallocateLinearAddress %O", this));
	MmuBase& m=*MmuBase::TheMmu;
	MmuBase::WaitHwChunk();
	THwChunkRegion* rgn=iAllocator->Free(iLinAddr, iSize);
	iLinAddr=0;
	MmuBase::SignalHwChunk();
	TLinAddr base = iAllocator->iSection->iBase;
	TBitMapAllocator& section_allocator = iAllocator->iSection->iAllocator;
	while (rgn)
		{
		MmuBase::Wait();
		if (rgn->iRegionSize)
			{
			// free address range
			__KTRACE_OPT(KMMU, Kern::Printf("Freeing range %03x+%03x", rgn->iIndex, rgn->iRegionSize));
			section_allocator.Free(rgn->iIndex, rgn->iRegionSize);

			// Although this is a large region, it can still be made up of page tables
			// (not sections). Check each chunk and remove its page table if necessary.
			TInt i = 0;
			TLinAddr a = base + (TLinAddr(rgn->iIndex)<<m.iChunkShift);
			for (; i<rgn->iRegionSize ; i++,a+=m.iChunkSize)
				{
				TInt id = m.UnassignPageTable(a);
				if (id>=0)
					m.FreePageTable(id);
				}
			}
		else
			{
			// free address and page table if it exists
			__KTRACE_OPT(KMMU, Kern::Printf("Freeing index %03x", rgn->iIndex));
			section_allocator.Free(rgn->iIndex);
			TLinAddr a = base + (TLinAddr(rgn->iIndex)<<m.iChunkShift);
			TInt id = m.UnassignPageTable(a);
			if (id>=0)
				m.FreePageTable(id);
			}
		MmuBase::Signal();
		THwChunkRegion* free=rgn;
		rgn=rgn->iNext;
		Kern::Free(free);
		}
	}


//
// RamCacheBase
//


RamCacheBase* RamCacheBase::TheRamCache = NULL;


RamCacheBase::RamCacheBase()
	{
	}


void RamCacheBase::Init2()
	{
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">RamCacheBase::Init2"));
	iMmu = MmuBase::TheMmu;
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<RamCacheBase::Init2"));
	}


void RamCacheBase::ReturnToSystem(SPageInfo* aPageInfo)
	{
	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
	__ASSERT_SYSTEM_LOCK;
	aPageInfo->SetUnused();
	--iNumberOfFreePages;
	__NK_ASSERT_DEBUG(iNumberOfFreePages>=0);
	// Release system lock before using the RAM allocator.
	NKern::UnlockSystem();
	iMmu->iRamPageAllocator->FreeRamPage(aPageInfo->PhysAddr(), EPageDiscard);
	NKern::LockSystem();
	}


SPageInfo* RamCacheBase::GetPageFromSystem(TUint aBlockedZoneId, TBool aBlockRest)
	{
	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
	SPageInfo* pageInfo;
	TPhysAddr pagePhys;
	TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard, aBlockedZoneId, aBlockRest);
	if(r==KErrNone)
		{
		NKern::LockSystem();
		pageInfo = SPageInfo::FromPhysAddr(pagePhys);
		pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead);
		++iNumberOfFreePages;
		NKern::UnlockSystem();
		}
	else
		pageInfo = NULL;
	return pageInfo;
	}
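
// Note on lock ordering in the two helpers above: both are entered with the RAM
// allocator mutex held, but the physical allocator must not be called with the
// system lock held. ReturnToSystem therefore drops the system lock around
// FreeRamPage and re-acquires it before returning, while GetPageFromSystem only
// takes the system lock for the short window in which the SPageInfo state and
// iNumberOfFreePages are updated together.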


//
// RamCache
//


void RamCache::Init2()
	{
	__KTRACE_OPT(KBOOT,Kern::Printf(">RamCache::Init2"));
	RamCacheBase::Init2();
	__KTRACE_OPT(KBOOT,Kern::Printf("<RamCache::Init2"));
	}


TInt RamCache::Init3()
	{
	return KErrNone;
	}

void RamCache::RemovePage(SPageInfo& aPageInfo)
	{
	__NK_ASSERT_DEBUG(aPageInfo.Type() == SPageInfo::EPagedCache);
	__NK_ASSERT_DEBUG(aPageInfo.State() == SPageInfo::EStatePagedYoung);
	aPageInfo.iLink.Deque();
	aPageInfo.SetState(SPageInfo::EStatePagedDead);
	}

TBool RamCache::GetFreePages(TInt aNumPages)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages));
	NKern::LockSystem();

	while(aNumPages>0 && NumberOfFreePages()>=aNumPages)
		{
		// steal a page from cache list and return it to the free pool...
		SPageInfo* pageInfo = SPageInfo::FromLink(iPageList.First()->Deque());
		pageInfo->SetState(SPageInfo::EStatePagedDead);
		SetFree(pageInfo);
		ReturnToSystem(pageInfo);
		--aNumPages;
		}

	NKern::UnlockSystem();
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: <GetFreePages %d",!aNumPages));
	return !aNumPages;
	}


void RamCache::DonateRamCachePage(SPageInfo* aPageInfo)
	{
	SPageInfo::TType type = aPageInfo->Type();
	if(type==SPageInfo::EChunk)
		{
		// Must not donate a locked page. An example is DMA-transferred memory.
		__NK_ASSERT_DEBUG(0 == aPageInfo->LockCount());

		aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung);
		iPageList.Add(&aPageInfo->iLink);
		++iNumberOfFreePages;
		// Update ram allocator counts as this page has changed its type
		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
		iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard);

#ifdef BTRACE_PAGING
		BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkDonatePage, chunk, aPageInfo->Offset());
#endif
		return;
		}
	// allow already donated pages...
	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
	}


TBool RamCache::ReclaimRamCachePage(SPageInfo* aPageInfo)
	{
	SPageInfo::TType type = aPageInfo->Type();
//	Kern::Printf("DemandPaging::ReclaimRamCachePage %x %d free=%d",aPageInfo,type,iNumberOfFreePages);

	if(type==SPageInfo::EChunk)
		return ETrue; // page already reclaimed

	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
	__NK_ASSERT_DEBUG(aPageInfo->State()==SPageInfo::EStatePagedYoung);
	// Update ram allocator counts as this page has changed its type
	DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
	iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
	aPageInfo->iLink.Deque();
	--iNumberOfFreePages;
	aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal);

#ifdef BTRACE_PAGING
	BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkReclaimPage, chunk, aPageInfo->Offset());
#endif
	return ETrue;
	}


/**
Discard the specified page.
Should only be called on a page if a previous call to IsPageDiscardable()
returned ETrue and the system lock hasn't been released between the calls.

@param aPageInfo The page info of the page to be discarded
@param aBlockedZoneId Not used by this overload.
@param aBlockRest Not used by this overload.
@return ETrue if the page was successfully discarded.

@pre System lock held.
@post System lock held.
*/
TBool RamCache::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest)
	{
	__NK_ASSERT_DEBUG(iNumberOfFreePages > 0);
	RemovePage(aPageInfo);
	SetFree(&aPageInfo);
	ReturnToSystem(&aPageInfo);
	return ETrue;
	}


/**
First stage in discarding a list of pages.

Must ensure that the pages will still be discardable even if the system lock is released.
To be used in conjunction with RamCacheBase::DoDiscardPages1().

@param aPageList A NULL terminated list of the pages to be discarded
@return KErrNone on success.

@pre System lock held
@post System lock held
*/
TInt RamCache::DoDiscardPages0(SPageInfo** aPageList)
	{
	__ASSERT_SYSTEM_LOCK;

	SPageInfo* pageInfo;
	while((pageInfo = *aPageList++) != 0)
		{
		RemovePage(*pageInfo);
		}
	return KErrNone;
	}


/**
Final stage in discarding a list of pages.
Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0().
This overload doesn't actually need to do anything.

@param aPageList A NULL terminated list of the pages to be discarded
@return KErrNone on success.

@pre System lock held
@post System lock held
*/
TInt RamCache::DoDiscardPages1(SPageInfo** aPageList)
	{
	__ASSERT_SYSTEM_LOCK;
	SPageInfo* pageInfo;
	while((pageInfo = *aPageList++) != 0)
		{
		SetFree(pageInfo);
		ReturnToSystem(pageInfo);
		}
	return KErrNone;
	}


/**
Check whether the specified page can be discarded by the RAM cache.

@param aPageInfo The page info of the page being queried.
@return ETrue when the page can be discarded, EFalse otherwise.
@pre System lock held.
@post System lock held.
*/
TBool RamCache::IsPageDiscardable(SPageInfo& aPageInfo)
	{
	SPageInfo::TType type = aPageInfo.Type();
	SPageInfo::TState state = aPageInfo.State();
	return (type == SPageInfo::EPagedCache && state == SPageInfo::EStatePagedYoung);
	}


/**
@return ETrue when the unmapped page should be freed, EFalse otherwise
*/
TBool RamCache::PageUnmapped(SPageInfo* aPageInfo)
	{
	SPageInfo::TType type = aPageInfo->Type();
//	Kern::Printf("DemandPaging::PageUnmapped %x %d",aPageInfo,type);
	if(type!=SPageInfo::EPagedCache)
		return ETrue;
	SPageInfo::TState state = aPageInfo->State();
	if(state==SPageInfo::EStatePagedYoung)
		{
		// This page will be freed by DChunk::DoDecommit as it was originally
		// allocated, so update the page counts in the ram allocator
		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
		iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
		aPageInfo->iLink.Deque();
		--iNumberOfFreePages;
		}
	return ETrue;
	}


void RamCache::Panic(TFault aFault)
	{
	Kern::Fault("RamCache",aFault);
	}

/**
Flush all cache pages.

@pre RAM allocator mutex held
@post RAM allocator mutex held
*/
void RamCache::FlushAll()
	{
	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
#ifdef _DEBUG
	// Should always succeed
	__NK_ASSERT_DEBUG(GetFreePages(iNumberOfFreePages));
#else
	GetFreePages(iNumberOfFreePages);
#endif
	}


//
// Demand Paging
//

#ifdef __DEMAND_PAGING__

DemandPaging* DemandPaging::ThePager = 0;
TBool DemandPaging::PseudoRandInitialised = EFalse;
volatile TUint32 DemandPaging::PseudoRandSeed = 0;


void M::DemandPagingInit()
	{
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">M::DemandPagingInit"));
	TInt r = RamCacheBase::TheRamCache->Init3();
	if (r != KErrNone)
		DemandPaging::Panic(DemandPaging::EInitialiseFailed);

	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<M::DemandPagingInit"));
	}


TInt M::DemandPagingFault(TAny* aExceptionInfo)
	{
	DemandPaging* pager = DemandPaging::ThePager;
	if(pager)
		return pager->Fault(aExceptionInfo);
	return KErrAbort;
	}

#ifdef _DEBUG
extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)
	{
	if(M::CheckPagingSafe(EFalse, aStartAddress, aLength))
		return;
	Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR);
	__NK_ASSERT_ALWAYS(0);
	}

extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)
	{
	if(M::CheckPagingSafe(ETrue, aStartAddress, aLength))
		return;
	__KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR));
	}
#endif


TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength)
	{
	DemandPaging* pager = DemandPaging::ThePager;
	if(!pager || K::Initialising)
		return ETrue;

	NThread* nt = NCurrentThread();
	if(!nt)
		return ETrue; // We've not booted properly yet!

	if (!pager->NeedsMutexOrderCheck(aStartAddr, aLength))
		return ETrue;

	TBool dataPagingEnabled = EFalse; // data paging not supported on moving or multiple models

	DThread* thread = _LOFF(nt,DThread,iNThread);
	NFastMutex* fm = NKern::HeldFastMutex();
	if(fm)
		{
		if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock)
			{
			if (!aDataPaging)
				{
				__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held"));
				return EFalse;
				}
			else
				{
				__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held"));
				return !dataPagingEnabled;
				}
			}
		}

	DMutex* m = pager->CheckMutexOrder();
	if (m)
		{
		if (!aDataPaging)
			{
			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m));
			return EFalse;
			}
		else
			{
			__KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O",m));
			return !dataPagingEnabled;
			}
		}

	return ETrue;
	}


TInt M::LockRegion(TLinAddr aStart,TInt aSize)
	{
	DemandPaging* pager = DemandPaging::ThePager;
	if(pager)
		return pager->LockRegion(aStart,aSize,NULL);
	return KErrNone;
	}


TInt M::UnlockRegion(TLinAddr aStart,TInt aSize)
	{
	DemandPaging* pager = DemandPaging::ThePager;
	if(pager)
		return pager->UnlockRegion(aStart,aSize,NULL);
	return KErrNone;
	}

#else // !__DEMAND_PAGING__

TInt M::LockRegion(TLinAddr /*aStart*/,TInt /*aSize*/)
	{
	return KErrNone;
	}


TInt M::UnlockRegion(TLinAddr /*aStart*/,TInt /*aSize*/)
	{
	return KErrNone;
	}

#endif // __DEMAND_PAGING__




//
// DemandPaging
//

#ifdef __DEMAND_PAGING__


const TUint16 KDefaultYoungOldRatio = 3;
const TUint KDefaultMinPages = 256;
const TUint KDefaultMaxPages = KMaxTUint >> KPageShift;

/*	Need at least 4 mapped pages to guarantee being able to execute all ARM instructions.
	(Worst case is a THUMB2 STM instruction with both the instruction and the data
	straddling page boundaries.)
*/
const TUint KMinYoungPages = 4;
const TUint KMinOldPages = 1;

/*	A minimum young/old ratio of 1 means that we need at least twice KMinYoungPages pages...
*/
const TUint KAbsoluteMinPageCount = 2*KMinYoungPages;

__ASSERT_COMPILE(KMinOldPages<=KAbsoluteMinPageCount/2);

class DMissingPagingDevice : public DPagingDevice
	{
	TInt Read(TThreadMessage* /*aReq*/,TLinAddr /*aBuffer*/,TUint /*aOffset*/,TUint /*aSize*/,TInt /*aDrvNumber*/)
		{ DemandPaging::Panic(DemandPaging::EDeviceMissing); return 0; }
	};


TBool DemandPaging::RomPagingRequested()
	{
	return TheRomHeader().iPageableRomSize != 0;
	}


TBool DemandPaging::CodePagingRequested()
	{
	return (TheSuperPage().KernelConfigFlags() & EKernelConfigCodePagingPolicyDefaultPaged) != EKernelConfigCodePagingPolicyNoPaging;
	}


DemandPaging::DemandPaging()
	{
	}


void DemandPaging::Init2()
	{
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init2"));

	RamCacheBase::Init2();

	// initialise live list...
	SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig;

	iMinimumPageCount = KDefaultMinPages;
	if(config.iMinPages)
		iMinimumPageCount = config.iMinPages;
	if(iMinimumPageCount<KAbsoluteMinPageCount)
		iMinimumPageCount = KAbsoluteMinPageCount;
	iInitMinimumPageCount = iMinimumPageCount;

	iMaximumPageCount = KDefaultMaxPages;
	if(config.iMaxPages)
		iMaximumPageCount = config.iMaxPages;
	iInitMaximumPageCount = iMaximumPageCount;

	iYoungOldRatio = KDefaultYoungOldRatio;
	if(config.iYoungOldRatio)
		iYoungOldRatio = config.iYoungOldRatio;
	TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
	if(iYoungOldRatio>ratioLimit)
		iYoungOldRatio = ratioLimit;

	iMinimumPageLimit = (KMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio;
	if(iMinimumPageLimit<KAbsoluteMinPageCount)
		iMinimumPageLimit = KAbsoluteMinPageCount;

	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InitialiseLiveList min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));

	if(iMaximumPageCount<iMinimumPageCount)
		Panic(EInitialiseBadArgs);

	//
	// This routine doesn't acquire any mutexes because it should be called before the system
	// is fully up and running. I.e. called before another thread can preempt this.
	//

	// Calculate page counts
	iOldCount = iMinimumPageCount/(1+iYoungOldRatio);
	if(iOldCount<KMinOldPages)
		Panic(EInitialiseBadArgs);
	iYoungCount = iMinimumPageCount-iOldCount;
	if(iYoungCount<KMinYoungPages)
		Panic(EInitialiseBadArgs); // Need at least 4 pages mapped to execute an ARM LDM instruction in THUMB2 mode
	iNumberOfFreePages = 0;

	// Allocate RAM pages and put them all on the old list
	iYoungCount = 0;
	iOldCount = 0;
	for(TUint i=0; i<iMinimumPageCount; i++)
		{
		// Allocate a single page
		TPhysAddr pagePhys;
		TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard);
		if(r!=0)
			Panic(EInitialiseFailed);
		AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
		}

	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<DemandPaging::Init2"));
	}
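
// Worked example of the live-list arithmetic above, using the built-in defaults
// (illustrative only; a ROM image can override all three values through
// SDemandPagingConfig): with iMinimumPageCount=256 and iYoungOldRatio=3,
// iOldCount = 256/(1+3) = 64 and iYoungCount = 256-64 = 192, so young pages
// outnumber old pages three to one. The iMinimumPageLimit calculation then
// guarantees room for at least KMinYoungPages young pages at that ratio:
// (4 * (1+3))/3 = 5, which is raised to KAbsoluteMinPageCount (8).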


TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2);

TInt DemandPaging::Init3()
	{
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init3"));
	TInt r;

	// construct iBufferChunk
	iDeviceBufferSize = 2*KPageSize;
	TChunkCreateInfo info;
	info.iType = TChunkCreateInfo::ESharedKernelMultiple;
	info.iMaxSize = iDeviceBufferSize*KMaxPagingDevices;
	info.iMapAttr = EMapAttrCachedMax;
	info.iOwnsMemory = ETrue;
	TUint32 mapAttr;
	r = Kern::ChunkCreate(info,iDeviceBuffersChunk,iDeviceBuffers,mapAttr);
	if(r!=KErrNone)
		return r;

	// Install 'null' paging devices which panic if used...
	DMissingPagingDevice* missingPagingDevice = new DMissingPagingDevice;
	for(TInt i=0; i<KMaxPagingDevices; i++)
		{
		iPagingDevices[i].iInstalled = EFalse;
		iPagingDevices[i].iDevice = missingPagingDevice;
		}

	// Initialise ROM info...
	const TRomHeader& romHeader = TheRomHeader();
	iRomLinearBase = (TLinAddr)&romHeader;
	iRomSize = iMmu->RoundToPageSize(romHeader.iUncompressedSize);
	if(romHeader.iRomPageIndex)
		iRomPageIndex = (SRomPageInfo*)((TInt)&romHeader+romHeader.iRomPageIndex);

	TLinAddr pagedStart = romHeader.iPageableRomSize ? (TLinAddr)&romHeader+romHeader.iPageableRomStart : 0;
	if(pagedStart)
		{
		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("ROM=%x+%x PagedStart=%x",iRomLinearBase,iRomSize,pagedStart));
		__NK_ASSERT_ALWAYS(TUint(pagedStart-iRomLinearBase)<TUint(iRomSize));
		iRomPagedLinearBase = pagedStart;
		iRomPagedSize = iRomLinearBase+iRomSize-pagedStart;
		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::Init3, ROM Paged start(0x%x), size(0x%x)",iRomPagedLinearBase,iRomPagedSize));

#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
		// Get physical addresses of ROM pages
		iOriginalRomPageCount = iMmu->RoundToPageSize(iRomSize)>>KPageShift;
		iOriginalRomPages = new TPhysAddr[iOriginalRomPageCount];
		__NK_ASSERT_ALWAYS(iOriginalRomPages);
		TPhysAddr romPhysAddress;
		iMmu->LinearToPhysical(iRomLinearBase,iRomSize,romPhysAddress,iOriginalRomPages);
#endif
		}

	r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
	__NK_ASSERT_ALWAYS(r==KErrNone);

#ifdef __DEMAND_PAGING_BENCHMARKS__
	for (TInt i = 0 ; i < EMaxPagingBm ; ++i)
		ResetBenchmarkData((TPagingBenchmark)i);
#endif

	// Initialisation now complete
	ThePager = this;
	return KErrNone;
	}


DemandPaging::~DemandPaging()
	{
#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
	delete[] iOriginalRomPages;
#endif
	for (TUint i = 0 ; i < iPagingRequestCount ; ++i)
		delete iPagingRequests[i];
	}


TInt DemandPaging::InstallPagingDevice(DPagingDevice* aDevice)
	{
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InstallPagingDevice name='%s' type=%d",aDevice->iName,aDevice->iType));

	if(aDevice->iReadUnitShift>KPageShift)
		Panic(EInvalidPagingDevice);

	TInt i;
	TInt r = KErrNone;
	TBool createRequestObjects = EFalse;

	if ((aDevice->iType & DPagingDevice::ERom) && RomPagingRequested())
		{
		r = DoInstallPagingDevice(aDevice, 0);
		if (r != KErrNone)
			goto done;
		K::MemModelAttributes|=EMemModelAttrRomPaging;
		createRequestObjects = ETrue;
		}

	if ((aDevice->iType & DPagingDevice::ECode) && CodePagingRequested())
		{
		for (i = 0 ; i < KMaxLocalDrives ; ++i)
			{
			if (aDevice->iDrivesSupported & (1<<i))
				{
				r = DoInstallPagingDevice(aDevice, i + 1);
				if (r != KErrNone)
					goto done;
				}
			}
		K::MemModelAttributes|=EMemModelAttrCodePaging;
		createRequestObjects = ETrue;
		}

	if (createRequestObjects)
		{
		for (i = 0 ; i < KPagingRequestsPerDevice ; ++i)
			{
			r = CreateRequestObject();
			if (r != KErrNone)
				goto done;
			}
		}

done:
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<DemandPaging::InstallPagingDevice returns %d",r));
	return r;
	}

TInt DemandPaging::DoInstallPagingDevice(DPagingDevice* aDevice, TInt aId)
	{
	NKern::LockSystem();
	SPagingDevice* device = &iPagingDevices[aId];
	if(device->iInstalled)
		{
		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("**** Attempt to install more than one ROM paging device !!!!!!!! ****"));
		//Panic(EDeviceAlreadyExists);
		NKern::UnlockSystem();
		return KErrNone;
		}

	aDevice->iDeviceId = aId;
	device->iDevice = aDevice;
	device->iInstalled = ETrue;
	NKern::UnlockSystem();

	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::InstallPagingDevice id=%d, device=%08x",aId,device));

	return KErrNone;
	}

DemandPaging::DPagingRequest::~DPagingRequest()
	{
	if (iMutex)
		iMutex->Close(NULL);
	}

TInt DemandPaging::CreateRequestObject()
	{
	_LIT(KLitPagingRequest,"PagingRequest-");

	TInt index;
	TInt id = (TInt)__e32_atomic_add_ord32(&iNextPagingRequestCount, 1);
	TLinAddr offset = id * iDeviceBufferSize;
	TUint32 physAddr = 0;
	TInt r = Kern::ChunkCommitContiguous(iDeviceBuffersChunk,offset,iDeviceBufferSize, physAddr);
	if(r != KErrNone)
		return r;

	DPagingRequest* req = new DPagingRequest();
	if (!req)
		return KErrNoMemory;

	req->iBuffer = iDeviceBuffers + offset;
	AllocLoadAddress(*req, id);

	TBuf<16> mutexName(KLitPagingRequest);
	mutexName.AppendNum(id);
	r = K::MutexCreate(req->iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
	if (r!=KErrNone)
		goto done;

	// Ensure there are enough young pages to cope with new request object
	r = ResizeLiveList(iMinimumPageCount, iMaximumPageCount);
	if (r!=KErrNone)
		goto done;

	NKern::LockSystem();
	index = iPagingRequestCount++;
	__NK_ASSERT_ALWAYS(index < KMaxPagingRequests);
	iPagingRequests[index] = req;
	iFreeRequestPool.AddHead(req);
	NKern::UnlockSystem();

done:
	if (r != KErrNone)
		delete req;

	return r;
	}
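
// Note on the buffer layout used above: each request object owns a private
// iDeviceBufferSize window of the shared iDeviceBuffersChunk, committed as
// physically contiguous memory at offset id*iDeviceBufferSize. The window is
// two pages (2*KPageSize) so that a read rounded out to whole read units on
// both sides of the wanted data still fits - ReadRomPage() below asserts
// (readSize<<readUnitShift)<=iDeviceBufferSize before issuing the read.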

DemandPaging::DPagingRequest* DemandPaging::AcquireRequestObject()
	{
	__ASSERT_SYSTEM_LOCK;
	__NK_ASSERT_DEBUG(iPagingRequestCount > 0);

	DPagingRequest* req = NULL;

	// The system lock is used to serialise access to our data structures, as we have
	// to hold it anyway when we wait on the mutex

	req = (DPagingRequest*)iFreeRequestPool.GetFirst();
	if (req != NULL)
		__NK_ASSERT_DEBUG(req->iUsageCount == 0);
	else
		{
		// Pick a random request object to wait on
		TUint index = (FastPseudoRand() * TUint64(iPagingRequestCount)) >> 32;
		__NK_ASSERT_DEBUG(index < iPagingRequestCount);
		req = iPagingRequests[index];
		__NK_ASSERT_DEBUG(req->iUsageCount > 0);
		}

#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
	++iWaitingCount;
	if (iWaitingCount > iMaxWaitingCount)
		iMaxWaitingCount = iWaitingCount;
#endif

	++req->iUsageCount;
	TInt r = req->iMutex->Wait();
	__NK_ASSERT_ALWAYS(r == KErrNone);

#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
	--iWaitingCount;
	++iPagingCount;
	if (iPagingCount > iMaxPagingCount)
		iMaxPagingCount = iPagingCount;
#endif

	return req;
	}
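
// The index expression above is a standard fixed-point trick for mapping a
// 32-bit pseudo-random value r uniformly onto [0, iPagingRequestCount):
// r*count is a 64-bit product in [0, count*2^32), and shifting right by 32
// divides by 2^32, leaving a value in [0, count) without a modulo operation.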

void DemandPaging::ReleaseRequestObject(DPagingRequest* aReq)
	{
	__ASSERT_SYSTEM_LOCK;

#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
	--iPagingCount;
#endif

	// If there are no threads waiting on the mutex then return it to the free pool
	__NK_ASSERT_DEBUG(aReq->iUsageCount > 0);
	if (--aReq->iUsageCount == 0)
		iFreeRequestPool.AddHead(aReq);

	aReq->iMutex->Signal();
	NKern::LockSystem();
	}

TInt DemandPaging::ReadRomPage(const DPagingRequest* aReq, TLinAddr aRomAddress)
	{
	START_PAGING_BENCHMARK;

	TInt pageSize = KPageSize;
	TInt dataOffset = aRomAddress-iRomLinearBase;
	TInt pageNumber = dataOffset>>KPageShift;
	TInt readUnitShift = RomPagingDevice().iDevice->iReadUnitShift;
	TInt r;
	if(!iRomPageIndex)
		{
		// ROM not broken into pages, so just read it in directly
		START_PAGING_BENCHMARK;
		r = RomPagingDevice().iDevice->Read(const_cast<TThreadMessage*>(&aReq->iMessage),aReq->iLoadAddr,dataOffset>>readUnitShift,pageSize>>readUnitShift,-1/*token for ROM paging*/);
		END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
		}
	else
		{
		// Work out where data for page is located
		SRomPageInfo* romPageInfo = iRomPageIndex+pageNumber;
		dataOffset = romPageInfo->iDataStart;
		TInt dataSize = romPageInfo->iDataSize;
		if(!dataSize)
			{
			// empty page, fill it with 0xff...
			memset((void*)aReq->iLoadAddr,-1,pageSize);
			r = KErrNone;
			}
		else
			{
			__NK_ASSERT_ALWAYS(romPageInfo->iPagingAttributes&SRomPageInfo::EPageable);

			// Read data for page...
			TThreadMessage* msg= const_cast<TThreadMessage*>(&aReq->iMessage);
			TLinAddr buffer = aReq->iBuffer;
			TUint readStart = dataOffset>>readUnitShift;
			TUint readSize = ((dataOffset+dataSize-1)>>readUnitShift)-readStart+1;
			__NK_ASSERT_DEBUG((readSize<<readUnitShift)<=iDeviceBufferSize);
			START_PAGING_BENCHMARK;
			r = RomPagingDevice().iDevice->Read(msg,buffer,readStart,readSize,-1/*token for ROM paging*/);
			END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
			if(r==KErrNone)
				{
				// Decompress data...
				TLinAddr data = buffer+dataOffset-(readStart<<readUnitShift);
				r = Decompress(romPageInfo->iCompressionType,aReq->iLoadAddr,data,dataSize);
				if(r>=0)
					{
					__NK_ASSERT_ALWAYS(r==pageSize);
					r = KErrNone;
					}
				}
			}
		}

	END_PAGING_BENCHMARK(this, EPagingBmReadRomPage);
	return r;
	}
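
// Worked example of the read-unit rounding above (illustrative numbers): with
// readUnitShift=9 (512-byte read units), dataOffset=0x1234 and dataSize=0x300,
// readStart = 0x1234>>9 = 9 and readSize = ((0x1234+0x300-1)>>9)-9+1 = 10-9+1 = 2,
// i.e. the two read units covering bytes 0x1200..0x15ff. The decompression
// source is then buffer+0x1234-(9<<9) = buffer+0x34, the offset of the wanted
// data within the first read unit.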

TInt ReadFunc(TAny* aArg1, TAny* aArg2, TLinAddr aBuffer, TInt aBlockNumber, TInt aBlockCount)
	{
	START_PAGING_BENCHMARK;
	TInt drive = (TInt)aArg1;
	TThreadMessage* msg= (TThreadMessage*)aArg2;
	DemandPaging::SPagingDevice& device = DemandPaging::ThePager->CodePagingDevice(drive);
	TInt r = device.iDevice->Read(msg, aBuffer, aBlockNumber, aBlockCount, drive);
	END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
	return r;
	}

TInt DemandPaging::ReadCodePage(const DPagingRequest* aReq, DMmuCodeSegMemory* aCodeSegMemory, TLinAddr aCodeAddress)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("ReadCodePage buffer = %08x, csm == %08x, addr == %08x", aReq->iLoadAddr, aCodeSegMemory, aCodeAddress));

	START_PAGING_BENCHMARK;

	// Get the paging device for this drive
	SPagingDevice& device = CodePagingDevice(aCodeSegMemory->iCodeLocalDrive);

	// Work out which bit of the file to read
	SRamCodeInfo& ri = aCodeSegMemory->iRamInfo;
	TInt codeOffset = aCodeAddress - ri.iCodeRunAddr;
	TInt pageNumber = codeOffset >> KPageShift;
	TBool compressed = aCodeSegMemory->iCompressionType != SRomPageInfo::ENoCompression;
	TInt dataOffset, dataSize;
	if (compressed)
		{
		dataOffset = aCodeSegMemory->iCodePageOffsets[pageNumber];
		dataSize = aCodeSegMemory->iCodePageOffsets[pageNumber + 1] - dataOffset;
		__KTRACE_OPT(KPAGING,Kern::Printf("  compressed, file offset == %x, size == %d", dataOffset, dataSize));
		}
	else
		{
		dataOffset = codeOffset + aCodeSegMemory->iCodeStartInFile;
		dataSize = Min(KPageSize, aCodeSegMemory->iBlockMap.DataLength() - dataOffset);
		__NK_ASSERT_DEBUG(dataSize >= 0);
		__KTRACE_OPT(KPAGING,Kern::Printf("  uncompressed, file offset == %x, size == %d", dataOffset, dataSize));
		}

	TInt bufferStart = aCodeSegMemory->iBlockMap.Read(aReq->iBuffer,
												dataOffset,
												dataSize,
												device.iDevice->iReadUnitShift,
												ReadFunc,
												(TAny*)aCodeSegMemory->iCodeLocalDrive,
												(TAny*)&aReq->iMessage);


	TInt r = KErrNone;
	if(bufferStart<0)
		{
		r = bufferStart; // return error
		__NK_ASSERT_DEBUG(0);
		}
	else
		{
		TLinAddr data = aReq->iBuffer + bufferStart;
		if (compressed)
			{
			r = Decompress(aCodeSegMemory->iCompressionType, aReq->iLoadAddr, data, dataSize);
			if(r>=0)
				{
				dataSize = Min(KPageSize, ri.iCodeSize - codeOffset);
				if(r!=dataSize)
					{
					__NK_ASSERT_DEBUG(0);
					r = KErrCorrupt;
					}
				else
					r = KErrNone;
				}
			else
				{
				__NK_ASSERT_DEBUG(0);
				}
			}
		else
			{
			#ifdef BTRACE_PAGING_VERBOSE
			BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,SRomPageInfo::ENoCompression);
			#endif
			memcpy((TAny*)aReq->iLoadAddr, (TAny*)data, dataSize);
			#ifdef BTRACE_PAGING_VERBOSE
			BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd);
			#endif
			}
		}

	if(r==KErrNone)
		if (dataSize < KPageSize)
			memset((TAny*)(aReq->iLoadAddr + dataSize), 0x03, KPageSize - dataSize);	// fill unused part of page with 0x03

	END_PAGING_BENCHMARK(this, EPagingBmReadCodePage);

	return r;
	}


#include "decompress.h"


TInt DemandPaging::Decompress(TInt aCompressionType,TLinAddr aDst,TLinAddr aSrc,TUint aSrcSize)
	{
#ifdef BTRACE_PAGING_VERBOSE
	BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,aCompressionType);
#endif
	TInt r;
	switch(aCompressionType)
		{
	case SRomPageInfo::ENoCompression:
		memcpy((void*)aDst,(void*)aSrc,aSrcSize);
		r = aSrcSize;
		break;

	case SRomPageInfo::EBytePair:
		{
		START_PAGING_BENCHMARK;
		TUint8* srcNext=0;
		r=BytePairDecompress((TUint8*)aDst,KPageSize,(TUint8*)aSrc,aSrcSize,srcNext);
		if (r == KErrNone)
			__NK_ASSERT_ALWAYS((TLinAddr)srcNext == aSrc + aSrcSize);
		END_PAGING_BENCHMARK(this, EPagingBmDecompress);
		}
		break;

	default:
		r = KErrNotSupported;
		break;
		}
#ifdef BTRACE_PAGING_VERBOSE
	BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd);
#endif
	return r;
	}


void DemandPaging::BalanceAges()
	{
	if(iOldCount*iYoungOldRatio>=iYoungCount)
		return; // We have enough old pages

	// make one young page into an old page...

	__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
	__NK_ASSERT_DEBUG(iYoungCount);
	SDblQueLink* link = iYoungList.Last()->Deque();
	--iYoungCount;

	SPageInfo* pageInfo = SPageInfo::FromLink(link);
	pageInfo->SetState(SPageInfo::EStatePagedOld);

	iOldList.AddHead(link);
	++iOldCount;

	SetOld(pageInfo);

#ifdef BTRACE_PAGING_VERBOSE
	BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,pageInfo->PhysAddr());
#endif
	}
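
// BalanceAges maintains the invariant iOldCount*iYoungOldRatio >= iYoungCount,
// i.e. with the default ratio of 3 at least a quarter of the live list is kept
// "old". SetOld makes an old page inaccessible to the MMU, so the next access
// to it faults cheaply and Rejuvenate() moves it back to the young list; this
// is what gives the live list its approximate least-recently-used ordering.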


void DemandPaging::AddAsYoungest(SPageInfo* aPageInfo)
	{
#ifdef _DEBUG
	SPageInfo::TType type = aPageInfo->Type();
	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode || type==SPageInfo::EPagedData || type==SPageInfo::EPagedCache);
#endif
	aPageInfo->SetState(SPageInfo::EStatePagedYoung);
	iYoungList.AddHead(&aPageInfo->iLink);
	++iYoungCount;
	}


void DemandPaging::AddAsFreePage(SPageInfo* aPageInfo)
	{
#ifdef BTRACE_PAGING
	TPhysAddr phys = aPageInfo->PhysAddr();
	BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,phys);
#endif
	aPageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedOld);
	iOldList.Add(&aPageInfo->iLink);
	++iOldCount;
	}


void DemandPaging::RemovePage(SPageInfo* aPageInfo)
	{
	switch(aPageInfo->State())
		{
	case SPageInfo::EStatePagedYoung:
		__NK_ASSERT_DEBUG(iYoungCount);
		aPageInfo->iLink.Deque();
		--iYoungCount;
		break;

	case SPageInfo::EStatePagedOld:
		__NK_ASSERT_DEBUG(iOldCount);
		aPageInfo->iLink.Deque();
		--iOldCount;
		break;

	case SPageInfo::EStatePagedLocked:
		break;

	default:
		__NK_ASSERT_DEBUG(0);
		}
	aPageInfo->SetState(SPageInfo::EStatePagedDead);
	}


SPageInfo* DemandPaging::GetOldestPage()
	{
	// remove oldest from list...
	SDblQueLink* link;
	if(iOldCount)
		{
		__NK_ASSERT_DEBUG(!iOldList.IsEmpty());
		link = iOldList.Last()->Deque();
		--iOldCount;
		}
	else
		{
		__NK_ASSERT_DEBUG(iYoungCount);
		__NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
		link = iYoungList.Last()->Deque();
		--iYoungCount;
		}
	SPageInfo* pageInfo = SPageInfo::FromLink(link);
	pageInfo->SetState(SPageInfo::EStatePagedDead);

	// put page in a free state...
	SetFree(pageInfo);
	pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead);

	// keep live list balanced...
	BalanceAges();

	return pageInfo;
	}


TBool DemandPaging::GetFreePages(TInt aNumPages)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages));
	NKern::LockSystem();

	while(aNumPages>0 && NumberOfFreePages()>=aNumPages)
		{
		// steal a page from live page list and return it to the free pool...
		ReturnToSystem(GetOldestPage());
		--aNumPages;
		}

	NKern::UnlockSystem();
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: <GetFreePages %d",!aNumPages));
	return !aNumPages;
	}


void DemandPaging::DonateRamCachePage(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
	SPageInfo::TType type = aPageInfo->Type();
	if(type==SPageInfo::EChunk)
		{
		// Must not donate a locked page. An example is DMA-transferred memory.
		__NK_ASSERT_DEBUG(0 == aPageInfo->LockCount());

		aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung);

		// Update ram allocator counts as this page has changed its type
		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
		iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard);

		AddAsYoungest(aPageInfo);
		++iNumberOfFreePages;
		if (iMinimumPageCount + iNumberOfFreePages > iMaximumPageCount)
			ReturnToSystem(GetOldestPage());
		BalanceAges();
		return;
		}
	// allow already donated pages...
	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
	}


TBool DemandPaging::ReclaimRamCachePage(SPageInfo* aPageInfo)
	{
	SPageInfo::TType type = aPageInfo->Type();
	if(type==SPageInfo::EChunk)
		return ETrue; // page already reclaimed

	__NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);

	if(!iNumberOfFreePages)
		return EFalse;
	--iNumberOfFreePages;

	RemovePage(aPageInfo);
	aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal);

	// Update ram allocator counts as this page has changed its type
	DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
	iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
	return ETrue;
	}


SPageInfo* DemandPaging::AllocateNewPage()
	{
	__ASSERT_SYSTEM_LOCK;
	SPageInfo* pageInfo;

	NKern::UnlockSystem();
	MmuBase::Wait();
	NKern::LockSystem();

	// Try getting a free page from our active page list
	if(iOldCount)
		{
		pageInfo = SPageInfo::FromLink(iOldList.Last());
		if(pageInfo->Type()==SPageInfo::EPagedFree)
			{
			pageInfo = GetOldestPage();
			goto done;
			}
		}

	// Try getting a free page from the system pool
	if(iMinimumPageCount+iNumberOfFreePages<iMaximumPageCount)
		{
		NKern::UnlockSystem();
		pageInfo = GetPageFromSystem();
		NKern::LockSystem();
		if(pageInfo)
			goto done;
		}

	// As a last resort, steal one from our list of active pages
	pageInfo = GetOldestPage();

done:
	NKern::UnlockSystem();
	MmuBase::Signal();
	NKern::LockSystem();
	return pageInfo;
	}
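
// Note the lock juggling at the top of AllocateNewPage: the caller holds the
// system lock, but a thread may not wait on a DMutex (here the RAM allocator
// mutex, via MmuBase::Wait) while holding it, so the system lock is dropped
// and re-taken around the wait. The same unlock/relock dance happens around
// GetPageFromSystem and again before returning, which means any state checked
// before one of these gaps may have changed and is re-examined afterwards.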


void DemandPaging::Rejuvenate(SPageInfo* aPageInfo)
	{
	SPageInfo::TState state = aPageInfo->State();
	if(state==SPageInfo::EStatePagedOld)
		{
		// move page from old list to head of young list...
		__NK_ASSERT_DEBUG(iOldCount);
		aPageInfo->iLink.Deque();
		--iOldCount;
		AddAsYoungest(aPageInfo);
		BalanceAges();
		}
	else if(state==SPageInfo::EStatePagedYoung)
		{
		// page was already young, move it to the start of the list (make it the youngest)
		aPageInfo->iLink.Deque();
		iYoungList.AddHead(&aPageInfo->iLink);
		}
	else
		{
		// leave locked pages alone
		__NK_ASSERT_DEBUG(state==SPageInfo::EStatePagedLocked);
		}
	}


TInt DemandPaging::CheckRealtimeThreadFault(DThread* aThread, TAny* aContext)
	{
	TInt r = KErrNone;
	DThread* client = aThread->iIpcClient;

	// If iIpcClient is set then we are accessing the address space of a remote thread. If we
	// are in an IPC trap, this will contain information about the local and remote addresses
	// being accessed. If this is not set then we assume that any fault must be the fault of a
	// bad remote address.
	TIpcExcTrap* ipcTrap = (TIpcExcTrap*)aThread->iExcTrap;
	if (ipcTrap && !ipcTrap->IsTIpcExcTrap())
		ipcTrap = 0;
	if (client && (!ipcTrap || ipcTrap->ExcLocation(aThread, aContext) == TIpcExcTrap::EExcRemote))
		{
		// Kill client thread...
		NKern::UnlockSystem();
		if(K::IllegalFunctionForRealtimeThread(client,"Access to Paged Memory (by other thread)"))
			{
			// Treat memory access as bad...
			r = KErrAbort;
			}
		// else thread is in 'warning only' state so allow paging
		}
	else
		{
		// Kill current thread...
		NKern::UnlockSystem();
		if(K::IllegalFunctionForRealtimeThread(NULL,"Access to Paged Memory"))
			{
			// If the current thread is in a critical section, then the above kill will be
			// deferred and we will continue executing. We handle this by returning an error,
			// which means that the thread will take an exception (which is hopefully XTRAPed!)
			r = KErrAbort;
			}
		// else thread is in 'warning only' state so allow paging
		}

	NKern::LockSystem();
	return r;
	}


TInt DemandPaging::ResizeLiveList(TUint aMinimumPageCount,TUint aMaximumPageCount)
	{
	if(!aMaximumPageCount)
		{
		aMinimumPageCount = iInitMinimumPageCount;
		aMaximumPageCount = iInitMaximumPageCount;
		}

	// Min must not be greater than max...
	if(aMinimumPageCount>aMaximumPageCount)
		return KErrArgument;

	NKern::ThreadEnterCS();
	MmuBase::Wait();

	NKern::LockSystem();

	// Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
	iMinimumPageLimit = ((KMinYoungPages + iNextPagingRequestCount) * (1 + iYoungOldRatio)) / iYoungOldRatio;
	if(iMinimumPageLimit<KAbsoluteMinPageCount)
		iMinimumPageLimit = KAbsoluteMinPageCount;
	if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount)
		aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
	if(aMaximumPageCount<aMinimumPageCount)
		aMaximumPageCount=aMinimumPageCount;

	// Increase iMaximumPageCount?
	TInt extra = aMaximumPageCount-iMaximumPageCount;
	if(extra>0)
		iMaximumPageCount += extra;

	// Reduce iMinimumPageCount?
	TInt spare = iMinimumPageCount-aMinimumPageCount;
	if(spare>0)
		{
		iMinimumPageCount -= spare;
		iNumberOfFreePages += spare;
		}

	// Increase iMinimumPageCount?
	TInt r=KErrNone;
	while(aMinimumPageCount>iMinimumPageCount)
		{
		if(iNumberOfFreePages==0)	// Need more pages?
			{
			// get a page from the system
			NKern::UnlockSystem();
			SPageInfo* pageInfo = GetPageFromSystem();
			NKern::LockSystem();
			if(!pageInfo)
				{
				r=KErrNoMemory;
				break;
				}
			AddAsFreePage(pageInfo);
			}
		++iMinimumPageCount;
		--iNumberOfFreePages;
		NKern::FlashSystem();
		}

	// Reduce iMaximumPageCount?
	while(iMaximumPageCount>aMaximumPageCount)
		{
		if (iMinimumPageCount+iNumberOfFreePages==iMaximumPageCount)	// Need to free pages?
			{
			ReturnToSystem(GetOldestPage());
			}
		--iMaximumPageCount;
		NKern::FlashSystem();
		}

#ifdef BTRACE_KERNEL_MEMORY
	BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,ThePager->iMinimumPageCount << KPageShift);
#endif

	__NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);

	NKern::UnlockSystem();

	MmuBase::Signal();
	NKern::ThreadLeaveCS();

	return r;
	}


TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
	{
	DemandPaging* pager = DemandPaging::ThePager;
	switch(aFunction)
		{
	case EVMHalFlushCache:
		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalFlushCache)")))
			K::UnlockedPlatformSecurityPanic();
		pager->FlushAll();
		return KErrNone;

	case EVMHalSetCacheSize:
		{
		if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetCacheSize)")))
			K::UnlockedPlatformSecurityPanic();
		TUint min = (TUint)a1>>KPageShift;
		if((TUint)a1&KPageMask)
			++min;
		TUint max = (TUint)a2>>KPageShift;
		if((TUint)a2&KPageMask)
			++max;
		return pager->ResizeLiveList(min,max);
		}

	case EVMHalGetCacheSize:
		{
		SVMCacheInfo info;
		NKern::LockSystem(); // lock system to ensure a consistent set of values is read...
		info.iMinSize = pager->iMinimumPageCount<<KPageShift;
		info.iMaxSize = pager->iMaximumPageCount<<KPageShift;
		info.iCurrentSize = (pager->iMinimumPageCount+pager->iNumberOfFreePages)<<KPageShift;
		info.iMaxFreeSize = pager->iNumberOfFreePages<<KPageShift;
		NKern::UnlockSystem();
		kumemput32(a1,&info,sizeof(info));
		}
		return KErrNone;

	case EVMHalGetEventInfo:
		{
		SVMEventInfo info;
		NKern::LockSystem(); // lock system to ensure a consistent set of values is read...
		info = pager->iEventInfo;
		NKern::UnlockSystem();
		Kern::InfoCopy(*(TDes8*)a1,(TUint8*)&info,sizeof(info));
		}
		return KErrNone;

	case EVMHalResetEventInfo:
		NKern::LockSystem();
		memclr(&pager->iEventInfo, sizeof(pager->iEventInfo));
		NKern::UnlockSystem();
		return KErrNone;

#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
	case EVMHalGetOriginalRomPages:
		*(TPhysAddr**)a1 = pager->iOriginalRomPages;
		*(TInt*)a2 = pager->iOriginalRomPageCount;
		return KErrNone;
#endif

	case EVMPageState:
		return pager->PageState((TLinAddr)a1);

#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
	case EVMHalGetConcurrencyInfo:
		{
		NKern::LockSystem();
		SPagingConcurrencyInfo info = { pager->iMaxWaitingCount, pager->iMaxPagingCount };
		NKern::UnlockSystem();
		kumemput32(a1,&info,sizeof(info));
		}
		return KErrNone;

	case EVMHalResetConcurrencyInfo:
		NKern::LockSystem();
		pager->iMaxWaitingCount = 0;
		pager->iMaxPagingCount = 0;
		NKern::UnlockSystem();
		return KErrNone;
#endif

#ifdef __DEMAND_PAGING_BENCHMARKS__
	case EVMHalGetPagingBenchmark:
		{
		TUint index = (TInt) a1;
		if (index >= EMaxPagingBm)
			return KErrNotFound;
		NKern::LockSystem();
		SPagingBenchmarkInfo info = pager->iBenchmarkInfo[index];
		NKern::UnlockSystem();
		kumemput32(a2,&info,sizeof(info));
		}
		return KErrNone;

	case EVMHalResetPagingBenchmark:
		{
		TUint index = (TInt) a1;
		if (index >= EMaxPagingBm)
			return KErrNotFound;
		NKern::LockSystem();
		pager->ResetBenchmarkData((TPagingBenchmark)index);
		NKern::UnlockSystem();
		}
		return KErrNone;
#endif

	default:
		return KErrNotSupported;
		}
	}
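
// Usage sketch (editor's illustration, not part of the original source): user
// code reaches VMHalFunction() through the VM HAL group. EHalGroupVM and the
// EVMHal* op-codes are assumed to come from u32hal.h; treat the exact names as
// an assumption, not a definitive API reference.
//
//	#include <e32svr.h>		// UserSvr::HalFunction()
//	#include <u32hal.h>		// EHalGroupVM, EVMHal* op-codes
//
//	// Flush the whole paging cache (caller needs WriteDeviceData).
//	TInt r = UserSvr::HalFunction(EHalGroupVM, EVMHalFlushCache, 0, 0);
//
//	// Request a 256KB minimum / 1MB maximum live list; VMHalFunction()
//	// rounds both sizes up to whole pages before calling ResizeLiveList().
//	r = UserSvr::HalFunction(EHalGroupVM, EVMHalSetCacheSize,
//							 (TAny*)(256*1024), (TAny*)(1024*1024));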

void DemandPaging::Panic(TFault aFault)
	{
	Kern::Fault("DEMAND-PAGING",aFault);
	}


DMutex* DemandPaging::CheckMutexOrder()
	{
#ifdef _DEBUG
	SDblQue& ml = TheCurrentThread->iMutexList;
	if(ml.IsEmpty())
		return NULL;
	DMutex* mm = _LOFF(ml.First(), DMutex, iOrderLink);
	if (KMutexOrdPageIn >= mm->iOrder)
		return mm;
#endif
	return NULL;
	}


TBool DemandPaging::ReservePage()
	{
	__ASSERT_SYSTEM_LOCK;
	__ASSERT_CRITICAL;

	NKern::UnlockSystem();
	MmuBase::Wait();
	NKern::LockSystem();

	__NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount);
	while (iMinimumPageCount == iMinimumPageLimit + iReservePageCount &&
		   iNumberOfFreePages == 0)
		{
		NKern::UnlockSystem();
		SPageInfo* pageInfo = GetPageFromSystem();
		if(!pageInfo)
			{
			MmuBase::Signal();
			NKern::LockSystem();
			return EFalse;
			}
		NKern::LockSystem();
		AddAsFreePage(pageInfo);
		}
	if (iMinimumPageCount == iMinimumPageLimit + iReservePageCount)
		{
		++iMinimumPageCount;
		--iNumberOfFreePages;
		if (iMinimumPageCount > iMaximumPageCount)
			iMaximumPageCount = iMinimumPageCount;
		}
	++iReservePageCount;
	__NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount);
	__NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);

	NKern::UnlockSystem();
	MmuBase::Signal();
	NKern::LockSystem();
	return ETrue;
	}
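
// Editor's note on the accounting above: a successful ReservePage() either
// consumes an existing free page or grows the cache by one page taken from
// the system. For example, with iMinimumPageLimit==10, iReservePageCount==2
// and iMinimumPageCount==12, the next call must first obtain a fresh page
// (the while loop), after which iMinimumPageCount becomes 13 and
// iReservePageCount 3, preserving the asserted invariant
// iMinimumPageCount >= iMinimumPageLimit + iReservePageCount.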


TInt DemandPaging::LockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion(%08x,%x)",aStart,aSize));
	NKern::ThreadEnterCS();

	// calculate the number of pages required to lock aSize bytes
	TUint32 mask=KPageMask;
	TUint32 offset=aStart&mask;
	TInt numPages = (aSize+offset+mask)>>KPageShift;

	// Lock pages...
	TInt r=KErrNone;
	TLinAddr page = aStart;

	NKern::LockSystem();
	while(--numPages>=0)
		{
		if (!ReservePage())
			break;
		TPhysAddr phys;
		r = LockPage(page,aProcess,phys);
		NKern::FlashSystem();
		if(r!=KErrNone)
			break;
		page += KPageSize;
		}

	NKern::UnlockSystem();

	// If error, unlock whatever we managed to lock...
	if(r!=KErrNone)
		{
		while((page-=KPageSize)>=aStart)
			{
			NKern::LockSystem();
			UnlockPage(page,aProcess,KPhysAddrInvalid);
			--iReservePageCount;
			NKern::UnlockSystem();
			}
		}

	NKern::ThreadLeaveCS();
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion returns %d",r));
	return r;
	}


TInt DemandPaging::UnlockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockRegion(%08x,%x)",aStart,aSize));
	TUint32 mask=KPageMask;
	TUint32 offset=aStart&mask;
	TInt numPages = (aSize+offset+mask)>>KPageShift;
	NKern::LockSystem();
	__NK_ASSERT_DEBUG(iReservePageCount >= (TUint)numPages);
	while(--numPages>=0)
		{
		UnlockPage(aStart,aProcess,KPhysAddrInvalid);
		--iReservePageCount;
		NKern::FlashSystem();
		aStart += KPageSize;
		}
	NKern::UnlockSystem();
	return KErrNone;
	}
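
// Pairing sketch (editor's illustration): every region successfully locked
// with LockRegion() must later be released with UnlockRegion() over the same
// range, as ApplyCodeFixupsOnLoad() below does around its page buffer:
//
//	DemandPaging* pager = DemandPaging::ThePager;
//	if (pager->LockRegion(addr, KPageSize, process) == KErrNone)
//		{
//		// ... access the locked range without taking paging faults ...
//		pager->UnlockRegion(addr, KPageSize, process);
//		}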


void DemandPaging::FlushAll()
	{
	NKern::ThreadEnterCS();
	MmuBase::Wait();
	// look at all RAM pages in the system, and unmap all those used for paging
	const TUint32* piMap = (TUint32*)KPageInfoMap;
	const TUint32* piMapEnd = piMap+(KNumPageInfoPages>>5);
	SPageInfo* pi = (SPageInfo*)KPageInfoLinearBase;
	NKern::LockSystem();
	do
		{
		SPageInfo* piNext = pi+(KPageInfosPerPage<<5);
		// each set bit in the page info map marks a present page of SPageInfo structures
		for(TUint32 piFlags=*piMap++; piFlags; piFlags>>=1)
			{
			if(!(piFlags&1))
				{
				pi += KPageInfosPerPage;
				continue;
				}
			SPageInfo* piEnd = pi+KPageInfosPerPage;
			do
				{
				SPageInfo::TState state = pi->State();
				if(state==SPageInfo::EStatePagedYoung || state==SPageInfo::EStatePagedOld)
					{
					RemovePage(pi);
					SetFree(pi);
					AddAsFreePage(pi);
					NKern::FlashSystem();
					}
				++pi;
				const TUint KFlashCount = 64; // flash every 64 page infos (must be a power-of-2)
				__ASSERT_COMPILE((TUint)KPageInfosPerPage >= KFlashCount);
				if(((TUint)pi&((KFlashCount-1)<<KPageInfoShift))==0)
					NKern::FlashSystem();
				}
			while(pi<piEnd);
			}
		pi = piNext;
		}
	while(piMap<piMapEnd);
	NKern::UnlockSystem();

	// reduce live page list to a minimum
	while(GetFreePages(1)) {};

	MmuBase::Signal();
	NKern::ThreadLeaveCS();
	}


TInt DemandPaging::LockPage(TLinAddr aPage, DProcess *aProcess, TPhysAddr& aPhysAddr)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: LockPage() %08x",aPage));
	__ASSERT_SYSTEM_LOCK;

	aPhysAddr = KPhysAddrInvalid;

	TInt r = EnsurePagePresent(aPage,aProcess);
	if (r != KErrNone)
		return KErrArgument; // page doesn't exist

	// get info about page to be locked...
	TPhysAddr phys = LinearToPhysical(aPage,aProcess);
retry:
	__NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);

	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys);
	if(!pageInfo)
		return KErrNotFound;

	// lock it...
	SPageInfo::TType type = pageInfo->Type();
	if(type==SPageInfo::EShadow)
		{
		// get the page which is being shadowed and lock that
		phys = (TPhysAddr)pageInfo->Owner();
		goto retry;
		}

	switch(pageInfo->State())
		{
	case SPageInfo::EStatePagedLocked:
		// already locked, so just increment lock count...
		++pageInfo->PagedLock();
		break;

	case SPageInfo::EStatePagedYoung:
		{
		if(type!=SPageInfo::EPagedROM && type!=SPageInfo::EPagedCode)
			{
			// not implemented yet
			__NK_ASSERT_ALWAYS(0);
			}

		// remove page to be locked from live list...
		RemovePage(pageInfo);

		// change to locked state...
		pageInfo->SetState(SPageInfo::EStatePagedLocked);
		pageInfo->PagedLock() = 1; // Start with lock count of one

		// open reference on memory...
		if(type==SPageInfo::EPagedCode)
			{
			DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner();
			if(codeSegMemory->Open()!=KErrNone)
				{
				__NK_ASSERT_DEBUG(0);
				}
			}
		}
		break;

	case SPageInfo::EStatePagedOld:
		// can't happen because we forced the page to be accessible earlier
		__NK_ASSERT_ALWAYS(0);
		return KErrCorrupt;

	default:
		return KErrNotFound;
		}

	aPhysAddr = phys;

#ifdef BTRACE_PAGING
	BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,phys,pageInfo->PagedLock());
#endif
	return KErrNone;
	}


TInt DemandPaging::UnlockPage(TLinAddr aPage, DProcess* aProcess, TPhysAddr aPhysAddr)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockPage() %08x",aPage));
	__ASSERT_SYSTEM_LOCK;
	__ASSERT_CRITICAL;

	// Get info about page to be unlocked
	TPhysAddr phys = LinearToPhysical(aPage,aProcess);
	if(phys==KPhysAddrInvalid)
		{
		phys = aPhysAddr;
		if(phys==KPhysAddrInvalid)
			return KErrNotFound;
		}
retry:
	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys);
	if(!pageInfo)
		return KErrNotFound;

	SPageInfo::TType type = pageInfo->Type();
	if(type==SPageInfo::EShadow)
		{
		// Get the page which is being shadowed and unlock that
		phys = (TPhysAddr)pageInfo->Owner();
		goto retry;
		}

	__NK_ASSERT_DEBUG(phys==aPhysAddr || aPhysAddr==KPhysAddrInvalid);

	// Unlock it...
	switch(pageInfo->State())
		{
	case SPageInfo::EStatePagedLocked:
#ifdef BTRACE_PAGING
		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,phys,pageInfo->PagedLock());
#endif
		if(!(--pageInfo->PagedLock()))
			{
			// get pointer to memory...
			DMemModelCodeSegMemory* codeSegMemory = 0;
			if(type==SPageInfo::EPagedCode)
				codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner();

			// put page back on live list...
			AddAsYoungest(pageInfo);
			BalanceAges();

			// close reference on memory...
			if(codeSegMemory)
				{
				NKern::UnlockSystem();
				codeSegMemory->Close();
				NKern::LockSystem();
				}
			}
		break;

	default:
		return KErrNotFound;
		}

	return KErrNone;
	}



TInt DemandPaging::ReserveAlloc(TInt aSize, DDemandPagingLock& aLock)
	{
	__NK_ASSERT_DEBUG(aLock.iPages == NULL);

	// calculate the number of pages required to lock aSize bytes
	TInt numPages = ((aSize-1+KPageMask)>>KPageShift)+1;

	__KTRACE_OPT(KPAGING,Kern::Printf("DP: ReserveAlloc() pages %d",numPages));

	NKern::ThreadEnterCS();

	aLock.iPages = (TPhysAddr*)Kern::Alloc(numPages*sizeof(TPhysAddr));
	if(!aLock.iPages)
		{
		NKern::ThreadLeaveCS();
		return KErrNoMemory;
		}

	MmuBase::Wait();
	NKern::LockSystem();

	// reserve pages, adding more if necessary
	while (aLock.iReservedPageCount < numPages)
		{
		if (!ReservePage())
			break;
		++aLock.iReservedPageCount;
		}

	NKern::UnlockSystem();
	MmuBase::Signal();

	TBool enoughPages = aLock.iReservedPageCount == numPages;
	if(!enoughPages)
		ReserveFree(aLock);

	NKern::ThreadLeaveCS();
	return enoughPages ? KErrNone : KErrNoMemory;
	}



void DemandPaging::ReserveFree(DDemandPagingLock& aLock)
	{
	NKern::ThreadEnterCS();

	// make sure pages aren't still locked
	ReserveUnlock(aLock);

	NKern::LockSystem();
	__NK_ASSERT_DEBUG(iReservePageCount >= (TUint)aLock.iReservedPageCount);
	iReservePageCount -= aLock.iReservedPageCount;
	aLock.iReservedPageCount = 0;
	NKern::UnlockSystem();

	// free page array...
	Kern::Free(aLock.iPages);
	aLock.iPages = 0;

	NKern::ThreadLeaveCS();
	}



TBool DemandPaging::ReserveLock(DThread* aThread, TLinAddr aStart,TInt aSize, DDemandPagingLock& aLock)
	{
	if(aLock.iLockedPageCount)
		Panic(ELockTwice);

	// calculate the number of pages that need to be locked...
	TUint32 mask=KPageMask;
	TUint32 offset=aStart&mask;
	TInt numPages = (aSize+offset+mask)>>KPageShift;
	if(numPages>aLock.iReservedPageCount)
		Panic(ELockTooBig);

	NKern::LockSystem();

	// lock the pages
	TBool locked = EFalse; // becomes true if any pages were locked
	DProcess* process = aThread->iOwningProcess;
	TLinAddr page=aStart;
	TInt count=numPages;
	TPhysAddr* physPages = aLock.iPages;
	while(--count>=0)
		{
		if(LockPage(page,process,*physPages)==KErrNone)
			locked = ETrue;
		NKern::FlashSystem();
		page += KPageSize;
		++physPages;
		}

	// if any pages were locked, save the lock info...
	if(locked)
		{
		if(aLock.iLockedPageCount)
			Panic(ELockTwice);
		aLock.iLockedStart = aStart;
		aLock.iLockedPageCount = numPages;
		aLock.iProcess = process;
		aLock.iProcess->Open();
		}

	NKern::UnlockSystem();
	return locked;
	}



void DemandPaging::ReserveUnlock(DDemandPagingLock& aLock)
	{
	NKern::ThreadEnterCS();

	DProcess* process = NULL;
	NKern::LockSystem();
	TInt numPages = aLock.iLockedPageCount;
	TLinAddr page = aLock.iLockedStart;
	TPhysAddr* physPages = aLock.iPages;
	while(--numPages>=0)
		{
		UnlockPage(page, aLock.iProcess,*physPages);
		NKern::FlashSystem();
		page += KPageSize;
		++physPages;
		}
	process = aLock.iProcess;
	aLock.iProcess = NULL;
	aLock.iLockedPageCount = 0;
	NKern::UnlockSystem();
	if (process)
		process->Close(NULL);

	NKern::ThreadLeaveCS();
	}

/**
Check whether the specified page can be discarded by the RAM cache.

@param aPageInfo The page info of the page being queried.
@return ETrue when the page can be discarded, EFalse otherwise.
@pre System lock held.
@post System lock held.
*/
TBool DemandPaging::IsPageDiscardable(SPageInfo& aPageInfo)
	{
	// on live list?
	SPageInfo::TState state = aPageInfo.State();
	return (state == SPageInfo::EStatePagedYoung || state == SPageInfo::EStatePagedOld);
	}


/**
Discard the specified page.
Should only be called on a page if a previous call to IsPageDiscardable()
returned ETrue and the system lock hasn't been released between the calls.

@param aPageInfo The page info of the page to be discarded
@param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into.
@param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached
in preference ordering.  EFalse otherwise.
@return ETrue if the page could be discarded, EFalse otherwise.

@pre System lock held.
@post System lock held.
*/
TBool DemandPaging::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest)
	{
	__ASSERT_SYSTEM_LOCK;
	// Ensure that we don't reduce the cache beyond its minimum.
	if (iNumberOfFreePages == 0)
		{
		NKern::UnlockSystem();
		SPageInfo* newPage = GetPageFromSystem(aBlockedZoneId, aBlockRest);
		NKern::LockSystem();
		if (newPage == NULL)
			{// couldn't allocate a new page
			return EFalse;
			}
		if (IsPageDiscardable(aPageInfo))
			{// page can still be discarded, so use the new page
			// and discard the old one
			AddAsFreePage(newPage);
			RemovePage(&aPageInfo);
			SetFree(&aPageInfo);
			ReturnToSystem(&aPageInfo);
			BalanceAges();
			return ETrue;
			}
		else
			{// page no longer discardable, so the new page is no longer required
			ReturnToSystem(newPage);
			return EFalse;
			}
		}

	// Discard the page
	RemovePage(&aPageInfo);
	SetFree(&aPageInfo);
	ReturnToSystem(&aPageInfo);
	BalanceAges();

	return ETrue;
	}


/**
First stage in discarding a list of pages.

Must ensure that the pages will still be discardable even if the system lock is released.
To be used in conjunction with RamCacheBase::DoDiscardPages1().

@param aPageList A NULL terminated list of the pages to be discarded
@return KErrNone on success.

@pre System lock held
@post System lock held
*/
TInt DemandPaging::DoDiscardPages0(SPageInfo** aPageList)
	{
	__ASSERT_SYSTEM_LOCK;

	SPageInfo* pageInfo;
	while((pageInfo = *aPageList++) != 0)
		{
		RemovePage(pageInfo);
		}
	return KErrNone;
	}


/**
Final stage in discarding a list of pages.
Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0().

@param aPageList A NULL terminated list of the pages to be discarded
@return KErrNone on success.

@pre System lock held
@post System lock held
*/
TInt DemandPaging::DoDiscardPages1(SPageInfo** aPageList)
	{
	__ASSERT_SYSTEM_LOCK;

	SPageInfo* pageInfo;
	while((pageInfo = *aPageList++)!=0)
		{
		SetFree(pageInfo);
		ReturnToSystem(pageInfo);
		BalanceAges();
		}
	return KErrNone;
	}


TBool DemandPaging::MayBePaged(TLinAddr aStartAddr, TUint aLength)
	{
	TLinAddr endAddr = aStartAddr + aLength;
	// unsigned subtraction turns each pair of bounds checks into one compare:
	// TUint(addr - base) < size  <=>  base <= addr < base + size
	TBool rangeTouchesPagedRom =
		TUint(aStartAddr - iRomPagedLinearBase) < iRomSize  ||
		TUint(endAddr - iRomPagedLinearBase) < iRomSize;
	TBool rangeTouchesCodeArea =
		TUint(aStartAddr - iCodeLinearBase) < iCodeSize  ||
		TUint(endAddr - iCodeLinearBase) < iCodeSize;
	return rangeTouchesPagedRom || rangeTouchesCodeArea;
	}


#ifdef __DEMAND_PAGING_BENCHMARKS__

void DemandPaging::ResetBenchmarkData(TPagingBenchmark aBm)
	{
	SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
	info.iCount = 0;
	info.iTotalTime = 0;
	info.iMaxTime = 0;
	info.iMinTime = KMaxTInt;
	}

void DemandPaging::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
	{
	SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
	++info.iCount;
#if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
	TInt64 elapsed = aEndTime - aStartTime;
#else
	TInt64 elapsed = aStartTime - aEndTime;
#endif
	info.iTotalTime += elapsed;
	if (elapsed > info.iMaxTime)
		info.iMaxTime = elapsed;
	if (elapsed < info.iMinTime)
		info.iMinTime = elapsed;
	}

#endif
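
// Editor's note: paths to be measured bracket their work with the
// START_PAGING_BENCHMARK / END_PAGING_BENCHMARK macro pair, which feeds
// RecordBenchmarkData() when __DEMAND_PAGING_BENCHMARKS__ is defined;
// ApplyCodeFixups() below records EPagingBmFixupCodePage this way.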


//
// DDemandPagingLock
//

EXPORT_C DDemandPagingLock::DDemandPagingLock()
	: iThePager(DemandPaging::ThePager), iReservedPageCount(0), iLockedPageCount(0), iPages(0)
	{
	}


EXPORT_C TInt DDemandPagingLock::Alloc(TInt aSize)
	{
	if (iThePager)
		return iThePager->ReserveAlloc(aSize,*this);
	else
		return KErrNone;
	}


EXPORT_C void DDemandPagingLock::DoUnlock()
	{
	if (iThePager)
		iThePager->ReserveUnlock(*this);
	}


EXPORT_C void DDemandPagingLock::Free()
	{
	if (iThePager)
		iThePager->ReserveFree(*this);
	}
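
// Lifecycle sketch (editor's illustration): a DDemandPagingLock is driven as
// Alloc() -> Lock() -> Free(), which is exactly how M::PinVirtualMemory() and
// M::UnpinVirtualMemory() below use it:
//
//	DDemandPagingLock lock;
//	if (lock.Alloc(size) == KErrNone)		// reserve enough pages for 'size'
//		{
//		lock.Lock(thread, start, size);		// pin the address range
//		// ... access [start, start+size) without taking paging faults ...
//		lock.Free();						// unlocks the pages, then frees the reserve
//		}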


EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
	{
	if (DemandPaging::ThePager)
		return DemandPaging::ThePager->InstallPagingDevice(aDevice);
	else
		return KErrNotSupported;
	}


#else  // !__DEMAND_PAGING__

EXPORT_C DDemandPagingLock::DDemandPagingLock()
	: iLockedPageCount(0)
	{
	}

EXPORT_C TInt DDemandPagingLock::Alloc(TInt /*aSize*/)
	{
	return KErrNone;
	}

EXPORT_C TBool DDemandPagingLock::Lock(DThread* /*aThread*/, TLinAddr /*aStart*/, TInt /*aSize*/)
	{
	return EFalse;
	}

EXPORT_C void DDemandPagingLock::DoUnlock()
	{
	}

EXPORT_C void DDemandPagingLock::Free()
	{
	}

EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
	{
	return KErrNotSupported;
	}

#endif // __DEMAND_PAGING__

DMmuCodeSegMemory::DMmuCodeSegMemory(DEpocCodeSeg* aCodeSeg)
	: DEpocCodeSegMemory(aCodeSeg), iCodeAllocBase(KMinTInt)
	{
	}

//#define __DUMP_BLOCKMAP_INFO
DMmuCodeSegMemory::~DMmuCodeSegMemory()
	{
#ifdef __DEMAND_PAGING__
	Kern::Free(iCodeRelocTable);
	Kern::Free(iCodePageOffsets);
	Kern::Free(iDataSectionMemory);
#endif
	}

#ifdef __DEMAND_PAGING__

/**
Read and process the block map and related data.
*/
TInt DMmuCodeSegMemory::ReadBlockMap(const TCodeSegCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading block map for %C", iCodeSeg));

	if (aInfo.iCodeBlockMapEntriesSize <= 0)
		return KErrArgument;  // no block map provided

	// Get compression data
	switch (aInfo.iCompressionType)
		{
		case KFormatNotCompressed:
			iCompressionType = SRomPageInfo::ENoCompression;
			break;

		case KUidCompressionBytePair:
			{
			iCompressionType = SRomPageInfo::EBytePair;
			if (!aInfo.iCodePageOffsets)
				return KErrArgument;
			TInt size = sizeof(TInt32) * (iPageCount + 1);
			iCodePageOffsets = (TInt32*)Kern::Alloc(size);
			if (!iCodePageOffsets)
				return KErrNoMemory;
			kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size);

#ifdef __DUMP_BLOCKMAP_INFO
			Kern::Printf("CodePageOffsets:");
			for (TInt i = 0 ; i < iPageCount + 1 ; ++i)
				Kern::Printf("  %08x", iCodePageOffsets[i]);
#endif

			TInt last = 0;
			for (TInt j = 0 ; j < iPageCount + 1 ; ++j)
				{
				if (iCodePageOffsets[j] < last ||
					iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile))
					{
					__NK_ASSERT_DEBUG(0);
					return KErrCorrupt;
					}
				last = iCodePageOffsets[j];
				}
			}
			break;

		default:
			return KErrNotSupported;
		}

	// Copy block map data itself...

#ifdef __DUMP_BLOCKMAP_INFO
	Kern::Printf("Original block map");
	Kern::Printf("  block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity);
	Kern::Printf("  block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset);
	Kern::Printf("  start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress);
	Kern::Printf("  local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber);
	Kern::Printf("  entry size: %d", aInfo.iCodeBlockMapEntriesSize);
#endif

	// Find relevant paging device
	iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber;
	if (TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number"));
		return KErrArgument;
		}
	DemandPaging* pager = DemandPaging::ThePager;

	if (!pager->CodePagingDevice(iCodeLocalDrive).iInstalled)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive"));
		return KErrNotSupported;
		}
	DPagingDevice* device = pager->CodePagingDevice(iCodeLocalDrive).iDevice;

	// Set code start offset
	iCodeStartInFile = aInfo.iCodeStartInFile;
	if (iCodeStartInFile < 0)
		{
		__KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset"));
		return KErrArgument;
		}

	// Allocate buffer for block map and copy from user-side
	TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize);
	if (!buffer)
		return KErrNoMemory;
	kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize);

#ifdef __DUMP_BLOCKMAP_INFO
	Kern::Printf("  entries:");
	for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k)
		Kern::Printf("    %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock);
#endif

	// Initialise block map
	TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon,
								  buffer,
								  aInfo.iCodeBlockMapEntriesSize,
								  device->iReadUnitShift,
								  iCodeStartInFile + aInfo.iCodeLengthInFile);
	if (r != KErrNone)
		{
		Kern::Free(buffer);
		return r;
		}

#if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG)
	iBlockMap.Dump();
#endif

	return KErrNone;
	}
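
// Editor's note on the offset table: for byte-pair compressed code the
// validated iCodePageOffsets[] array holds iPageCount+1 monotonically
// non-decreasing file offsets, so the compressed size of page i is
// presumably iCodePageOffsets[i+1] - iCodePageOffsets[i]; e.g. offsets
// {0x100, 0x8e0, 0x1120} would describe two compressed pages of 0x7e0 and
// 0x840 bytes.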

/**
Read the code relocation table and import fixup table from the user side.
*/
TInt DMmuCodeSegMemory::ReadFixupTables(const TCodeSegCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading fixup tables for %C", iCodeSeg));

	iCodeRelocTableSize = aInfo.iCodeRelocTableSize;
	iImportFixupTableSize = aInfo.iImportFixupTableSize;
	iCodeDelta = aInfo.iCodeDelta;
	iDataDelta = aInfo.iDataDelta;

	// round sizes up to four-byte boundaries...
	TInt relocSize = (iCodeRelocTableSize + 3) & ~3;
	TInt fixupSize = (iImportFixupTableSize + 3) & ~3;

	// copy relocs and fixups...
	iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize);
	if (!iCodeRelocTable)
		return KErrNoMemory;
	iImportFixupTable = iCodeRelocTable + relocSize;
	kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize);
	kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize);

	return KErrNone;
	}

#endif


TInt DMmuCodeSegMemory::Create(TCodeSegCreateInfo& aInfo)
	{
	TInt r = KErrNone;
	if (!aInfo.iUseCodePaging)
		iPageCount=(iRamInfo.iCodeSize+iRamInfo.iDataSize+KPageMask)>>KPageShift;
	else
		{
#ifdef __DEMAND_PAGING__
		iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize);
		if (!iDataSectionMemory)
			return KErrNoMemory;

		iPageCount=(iRamInfo.iCodeSize+KPageMask)>>KPageShift;
		iDataPageCount=(iRamInfo.iDataSize+KPageMask)>>KPageShift;

		r = ReadBlockMap(aInfo);
		if (r != KErrNone)
			return r;

		iIsDemandPaged = ETrue;
		iCodeSeg->iAttr |= ECodeSegAttCodePaged;
#endif
		}

	iCodeSeg->iSize = (iPageCount+iDataPageCount)<<KPageShift;
	return r;
	}


TInt DMmuCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
	{
#ifdef __DEMAND_PAGING__
	if(iIsDemandPaged)
		{
		TInt r = ReadFixupTables(aInfo);
		if (r != KErrNone)
			return r;
		}
	TAny* dataSection = iDataSectionMemory;
	if(dataSection)
		{
		UNLOCK_USER_MEMORY();
		memcpy(dataSection,(TAny*)iRamInfo.iDataLoadAddr,iRamInfo.iDataSize);
		LOCK_USER_MEMORY();
		iRamInfo.iDataLoadAddr = (TLinAddr)dataSection;
		}
#endif
	return KErrNone;
	}


void DMmuCodeSegMemory::ApplyCodeFixups(TUint32* aBuffer, TLinAddr aDestAddress)
	{
	__NK_ASSERT_DEBUG(iRamInfo.iCodeRunAddr==iRamInfo.iCodeLoadAddr); // code doesn't work if this isn't true

	START_PAGING_BENCHMARK;

	TUint offset = aDestAddress - iRamInfo.iCodeRunAddr;
	__ASSERT_ALWAYS(offset < (TUint)(iRamInfo.iCodeSize + iRamInfo.iDataSize), K::Fault(K::ECodeSegBadFixupAddress));

	// Index tables are only valid for pages containing code
	if (offset >= (TUint)iRamInfo.iCodeSize)
		return;

	UNLOCK_USER_MEMORY();

	TInt page = offset >> KPageShift;

	// Relocate code

	if (iCodeRelocTableSize > 0)
		{
		TUint32* codeRelocTable32 = (TUint32*)iCodeRelocTable;
		TUint startOffset = codeRelocTable32[page];
		TUint endOffset = codeRelocTable32[page + 1];

		__KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset));
		__ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iCodeRelocTableSize,
						K::Fault(K::ECodeSegBadFixupTables));

		TUint8* codeRelocTable8 = (TUint8*)codeRelocTable32;
		const TUint16* ptr = (const TUint16*)(codeRelocTable8 + startOffset);
		const TUint16* end = (const TUint16*)(codeRelocTable8 + endOffset);

		const TUint32 codeDelta = iCodeDelta;
		const TUint32 dataDelta = iDataDelta;

		while (ptr < end)
			{
			TUint16 entry = *ptr++;

			// address of word to fix up is sum of page start and 12-bit offset
			TUint32* addr = (TUint32*)((TUint8*)aBuffer + (entry & 0x0fff));

			TUint32 word = *addr;
#ifdef _DEBUG
			TInt type = entry & 0xf000;
			__NK_ASSERT_DEBUG(type == KTextRelocType || type == KDataRelocType);
#endif
			if (entry < KDataRelocType /* => type == KTextRelocType */)
				word += codeDelta;
			else
				word += dataDelta;
			*addr = word;
			}
		}

	// Fixup imports

	if (iImportFixupTableSize > 0)
		{
		TUint32* importFixupTable32 = (TUint32*)iImportFixupTable;
		TUint startOffset = importFixupTable32[page];
		TUint endOffset = importFixupTable32[page + 1];

		__KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset));
		__ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iImportFixupTableSize,
						K::Fault(K::ECodeSegBadFixupTables));

		TUint8* importFixupTable8 = (TUint8*)importFixupTable32;
		const TUint16* ptr = (const TUint16*)(importFixupTable8 + startOffset);
		const TUint16* end = (const TUint16*)(importFixupTable8 + endOffset);

		while (ptr < end)
			{
			TUint16 offset = *ptr++;

			// get word to write into that address
			// (don't read as a single TUint32 because it may not be word-aligned)
			TUint32 wordLow = *ptr++;
			TUint32 wordHigh = *ptr++;
			TUint32 word = (wordHigh << 16) | wordLow;

			__KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word));
			*(TUint32*)((TLinAddr)aBuffer+offset) = word;
			}
		}

	LOCK_USER_MEMORY();

	END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmFixupCodePage);
	}
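
// Editor's reading of the table formats used above: both tables start with
// iPageCount+1 32-bit offsets indexed by page number, bracketing that page's
// run of 16-bit entries. A code relocation entry packs the relocation type
// into its top four bits (KTextRelocType or KDataRelocType) and a byte
// offset within the page into its low twelve bits; text relocations add
// codeDelta and data relocations add dataDelta to the word at that offset.
// An import fixup entry is a 16-bit page offset followed by the new 32-bit
// value split into low and high half-words, keeping the stream 16-bit
// aligned so no unaligned 32-bit reads are needed.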


TInt DMmuCodeSegMemory::ApplyCodeFixupsOnLoad(TUint32* aBuffer, TLinAddr aDestAddress)
	{
#ifdef __DEMAND_PAGING__
	TInt r=DemandPaging::ThePager->LockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess());
	if(r!=KErrNone)
		return r;
#endif
	ApplyCodeFixups(aBuffer,aDestAddress);
	UNLOCK_USER_MEMORY();
	CacheMaintenance::CodeChanged((TLinAddr)aBuffer, KPageSize);
	LOCK_USER_MEMORY();
#ifdef __DEMAND_PAGING__
	DemandPaging::ThePager->UnlockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess());
#endif
	return KErrNone;
	}


#ifdef __DEMAND_PAGING__

TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	aPinObject = (TVirtualPinObject*) new DDemandPagingLock;
	return aPinObject != NULL ? KErrNone : KErrNoMemory;
	}

TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
	{
	if (!DemandPaging::ThePager)
		return KErrNone;

	if (!DemandPaging::ThePager->MayBePaged(aStart, aSize))
		return KErrNone;

	DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
	TInt r = lock->Alloc(aSize);
	if (r != KErrNone)
		return r;
	lock->Lock(aThread, aStart, aSize);
	return KErrNone;
	}

TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
	{
	aPinObject = 0;

	if (!DemandPaging::ThePager)
		return KErrNone;
	if (!DemandPaging::ThePager->MayBePaged(aStart, aSize))
		return KErrNone;

	TInt r = CreateVirtualPinObject(aPinObject);
	if (r != KErrNone)
		return r;

	DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
	r = lock->Alloc(aSize);
	if (r != KErrNone)
		return r;
	lock->Lock(TheCurrentThread, aStart, aSize);
	return KErrNone;
	}

void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
	{
	DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
	if (lock)
		lock->Free();
	}

void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	DDemandPagingLock* lock = (DDemandPagingLock*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
	if (lock)
		lock->AsyncDelete();
	}
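
// Pinning sketch (editor's illustration): a typical client drives this
// interface as create -> pin -> ... -> unpin -> destroy:
//
//	TVirtualPinObject* pin;
//	if (M::CreateVirtualPinObject(pin) == KErrNone)
//		{
//		if (M::PinVirtualMemory(pin, addr, size, thread) == KErrNone)
//			{
//			// ... memory in [addr, addr+size) cannot be paged out here ...
//			M::UnpinVirtualMemory(pin);
//			}
//		M::DestroyVirtualPinObject(pin);
//		}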

#else

class TVirtualPinObject
	{
	};

TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	aPinObject = new TVirtualPinObject;
	return aPinObject != NULL ? KErrNone : KErrNoMemory;
	}

TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr, TUint, DThread*)
	{
	__ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
	(void)aPinObject;
	return KErrNone;
	}

TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr, TUint)
	{
	aPinObject = 0;
	return KErrNone;
	}

void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
	{
	__ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
	(void)aPinObject;
	}

void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
	{
	TVirtualPinObject* object = (TVirtualPinObject*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
	if (object)
		Kern::AsyncFree(object);
	}

#endif

TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
	{
	return KErrNotSupported;
	}

TInt M::PinPhysicalMemory(TPhysicalPinObject*, TLinAddr, TUint, TBool, TUint32&, TUint32*, TUint32&, TUint&, DThread*)
	{
	K::Fault(K::EPhysicalPinObjectBad);
	return KErrNone;
	}

void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
	{
	K::Fault(K::EPhysicalPinObjectBad);
	}

void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
	{
	K::Fault(K::EPhysicalPinObjectBad);
	}


//
// Kernel map and pin (Not supported on the moving or multiple memory models).
//

TInt M::CreateKernelMapObject(TKernelMapObject*&, TUint)
	{
	return KErrNotSupported;
	}


TInt M::MapAndPinMemory(TKernelMapObject*, DThread*, TLinAddr, TUint, TUint, TLinAddr&, TPhysAddr*)
	{
	return KErrNotSupported;
	}


void M::UnmapAndUnpinMemory(TKernelMapObject*)
	{
	}


void M::DestroyKernelMapObject(TKernelMapObject*&)
	{
	}


// Misc DPagingDevice methods

EXPORT_C void DPagingDevice::NotifyIdle()
	{
	// Not used on this memory model
	}

EXPORT_C void DPagingDevice::NotifyBusy()
	{
	// Not used on this memory model
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr*, TUint, TUint, TUint, TUint32)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
	return KErrNotSupported;
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr*, TUint, TUint, TUint, TUint32)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
	return KErrNotSupported;
	}

EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr*, TUint, TUint, TUint, TUint32)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
	return KErrNotSupported;
	}


//
//	Page moving methods
//

/*
 * Move a page from aOld to aNew safely, updating any references to the page
 * stored elsewhere (such as page table entries). The destination page must
 * already be allocated. If the move is successful, the source page will be
 * freed and returned to the allocator.
 *
 * @pre RAM alloc mutex must be held.
 * @pre Calling thread must be in a critical section.
 * @pre Interrupts must be enabled.
 * @pre Kernel must be unlocked.
 * @pre No fast mutex can be held.
 * @pre Call in a thread context.
 */
TInt MmuBase::MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Defrag::DoMovePage");
	__ASSERT_WITH_MESSAGE_MUTEX(MmuBase::RamAllocatorMutex, "Ram allocator mutex must be held", "Defrag::DoMovePage");
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() old=%08x",aOld));
	TInt r = KErrNotSupported;
#if defined(__CPU_X86) && defined(__MEMMODEL_MULTIPLE__)
	return r;
#endif
	aNew = KPhysAddrInvalid;
	NKern::LockSystem();
	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aOld);
	if (!pi)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page has no PageInfo"));
		r = KErrArgument;
		goto fail;
		}
	if (pi->LockCount())
		{
		__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page is locked"));
		goto fail;
		}

	switch(pi->Type())
		{
	case SPageInfo::EUnused:
		// Nothing to do - we allow this, though, in case the caller wasn't
		// actually checking the free bitmap.
		r = KErrNotFound;
		__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage(): page unused"));
		break;

	case SPageInfo::EChunk:
		{
		// It's a chunk - we need to investigate what it's used for.
		DChunk* chunk = (DChunk*)pi->Owner();
		TInt offset = pi->Offset()<<KPageShift;

		switch(chunk->iChunkType)
			{
		case EKernelData:
		case EKernelMessage:
			// The kernel data/bss/heap chunk pages are not moved as DMA may be accessing them.
			__KTRACE_OPT(KMMU, Kern::Printf("MmuBase::MovePage() fails: kernel data"));
			goto fail;

		case EKernelStack:
			// The kernel thread stack chunk.
			r = MoveKernelStackPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
			__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: k stack r%d",r));
			__NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
			goto released;

		case EKernelCode:
		case EDll:
			// The kernel code chunk, or a global user code chunk.
			r = MoveCodeChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
			__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: code chk r%d",r));
			__NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
			goto released;

		case ERamDrive:
		case EUserData:
		case EDllData:
		case EUserSelfModCode:
			// A data chunk of some description.
			r = MoveDataChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
			__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: data chk r%d",r));
			__NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
			goto released;

		case ESharedKernelSingle:
		case ESharedKernelMultiple:
		case ESharedIo:
		case ESharedKernelMirror:
			// These chunk types cannot be moved
			r = KErrNotSupported;
			__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: shared r%d",r));
			break;

		case EUserCode:
		default:
			// Unknown page type, or EUserCode.
			// EUserCode is not used in the moving model, and on the multiple model
			// it never owns any pages so shouldn't be found via SPageInfo
			__KTRACE_OPT(KMMU,Kern::Printf("Defrag::DoMovePage fails: unknown chunk type %d",chunk->iChunkType));
			Panic(EDefragUnknownChunkType);
			}
		}
		break;

	case SPageInfo::ECodeSegMemory:
		// It's a code segment memory section (multiple model only)
		r = MoveCodeSegMemoryPage((DMemModelCodeSegMemory*)pi->Owner(), pi->Offset()<<KPageShift, aOld, aNew, aBlockZoneId, aBlockRest);
		__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: codeseg r%d",r));
		__NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
		goto released;

	case SPageInfo::EPagedROM:
	case SPageInfo::EPagedCode:
	case SPageInfo::EPagedData:
	case SPageInfo::EPagedCache:
	case SPageInfo::EPagedFree:
		{// DP or RamCache page, so attempt to discard it. Added for testing purposes only.
		 // In normal use ClearDiscardableFromZone will have already removed RAM cache pages.
		r = KErrInUse;
		MmuBase& mmu = *MmuBase::TheMmu;
		RamCacheBase& ramCache = *(mmu.iRamCache);
		if (ramCache.IsPageDiscardable(*pi))
			{
			if (ramCache.DoDiscardPage(*pi, KRamZoneInvalidId, EFalse))
				{// Successfully discarded the page.
				r = KErrNone;
				}
			}
		__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: paged r%d",r));
		goto fail; // Goto fail to release the system lock.
		}

	case SPageInfo::EPageTable:
	case SPageInfo::EPageDir:
	case SPageInfo::EPtInfo:
	case SPageInfo::EInvalid:
	case SPageInfo::EFixed:
	case SPageInfo::EShadow:
		// These page types cannot be moved (or don't need to be moved)
		r = KErrNotSupported;
		__KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: PT etc r%d",r));
		break;

	default:
		// Unknown page type
		__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: unknown page type %d",pi->Type()));
		Panic(EDefragUnknownPageType);
		}

fail:
	NKern::UnlockSystem();
released:
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() returns %d",r));
	return r;
	}


TInt MmuBase::DiscardPage(TPhysAddr aAddr, TUint aBlockZoneId, TBool aBlockRest)
	{
	TInt r = KErrInUse;
	NKern::LockSystem();
	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
	if (pageInfo != NULL)
		{// Allocatable page at this address so is it a discardable one?
		if (iRamCache->IsPageDiscardable(*pageInfo))
			{
			// Discard this page and return it to the ram allocator
			if (!iRamCache->DoDiscardPage(*pageInfo, aBlockZoneId, aBlockRest))
				{// Couldn't discard the page.
				if (aBlockRest)
					{
					__KTRACE_OPT(KMMU, Kern::Printf("ClearDiscardableFromZone: page discard fail addr %x", aAddr));
					NKern::UnlockSystem();
					return KErrNoMemory;
					}
				}
			else
				{// Page discarded successfully.
				r = KErrNone;
				}
			}
		}
	NKern::UnlockSystem();
	return r;
	}

TUint MmuBase::NumberOfFreeDpPages()
	{
	TUint free = 0;
	if(iRamCache)
		{
		free = iRamCache->NumberOfFreePages();
		}
	return free;
	}


EXPORT_C TInt Epoc::MovePhysicalPage(TPhysAddr aOld, TPhysAddr& aNew, TRamDefragPageToMove aPageToMove)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::MovePhysicalPage");
	__KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() old=%08x pageToMove=%d",aOld,aPageToMove));

	switch(aPageToMove)
		{
		case ERamDefragPage_Physical:
			break;
		default:
			return KErrNotSupported;
		}

	MmuBase::Wait();
	TInt r=M::MovePage(aOld,aNew,KRamZoneInvalidId,EFalse);
	if (r!=KErrNone)
		aNew = KPhysAddrInvalid;
	MmuBase::Signal();
	__KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() returns %d",r));
	return r;
	}
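
// Defrag sketch (editor's illustration): a RAM defrag client might relocate
// the contents of one physical page like this:
//
//	TPhysAddr newAddr;
//	TInt r = Epoc::MovePhysicalPage(oldAddr, newAddr, ERamDefragPage_Physical);
//	// on success newAddr holds the page's new physical address;
//	// on failure it is set to KPhysAddrInvalid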


TInt M::RamDefragFault(TAny* aExceptionInfo)
	{
	// If the mmu has been initialised then let it try processing the fault.
	if(MmuBase::TheMmu)
		return MmuBase::TheMmu->RamDefragFault(aExceptionInfo);
	return KErrAbort;
	}


void M::RamZoneClaimed(SZone* aZone)
	{
	// Lock each page.  OK to traverse SPageInfo array as we know no unknown
	// pages are in the zone.
	SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aZone->iPhysBase);
	SPageInfo* pageInfoEnd = pageInfo + aZone->iPhysPages;
	for (; pageInfo < pageInfoEnd; ++pageInfo)
		{
		NKern::LockSystem();
		__NK_ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EUnused);
		pageInfo->Lock();
		NKern::UnlockSystem();
		}
	// For the sake of platform security we have to clear the memory. E.g. the driver
	// could assign it to a chunk visible to user side.  Set LSB so ClearPages
	// knows this is a contiguous memory region.
	Mmu::Get().ClearPages(aZone->iPhysPages, (TPhysAddr*)(aZone->iPhysBase|1));
	}