// os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp
//
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "memmodel.h"
#include "kernel/cache_maintenance.inl"
#include <kernel/cache.h>
#include <ramalloc.h>
#include <defrag.h>
#include "mm.h"
#include "mmu.h"
#include "mpager.h"
#include "mmapping.h"
#include "mobject.h"
#include "mmanager.h"
#include "mpagearray.h"


//
// SPageInfo
//

// check enough space for page infos...
__ASSERT_COMPILE((KPageInfoLinearEnd-KPageInfoLinearBase)/sizeof(SPageInfo)==(1<<(32-KPageShift)));

// check KPageInfoShift...
__ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift));

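// SafeFromPhysAddr differs from FromPhysAddr in that it first consults the
// KPageInfoMap bit array: each bit covers one page's worth of SPageInfo
// structures, so a clear bit means no page info exists for that physical
// address (for example, it is not RAM) and null is returned rather than a
// pointer into unmapped page info space.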
SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
	{
	__NK_ASSERT_DEBUG((aAddress&KPageMask)==0);
	TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift);
	TUint flags = ((TUint8*)KPageInfoMap)[index>>3];
	TUint mask = 1<<(index&7);
	if(!(flags&mask))
		return 0; // no SPageInfo for aAddress
	SPageInfo* info = FromPhysAddr(aAddress);
	if(info->iType==SPageInfo::EInvalid)
		return 0;
	return info;
	}


#ifdef _DEBUG

void SPageInfo::CheckAccess(const char* aMessage, TUint aFlags)
	{
	if(K::Initialising || NKern::Crashed())
		return;

	if((aFlags&ECheckNotAllocated) && (iType!=EUnknown))
		{
		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
		__NK_ASSERT_DEBUG(0);
		goto fail;
		}

	if((aFlags&ECheckNotUnused) && (iType==EUnused))
		{
		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
		__NK_ASSERT_DEBUG(0);
		goto fail;
		}

	if((aFlags&ECheckUnused) && (iType!=EUnused))
		{
		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
		__NK_ASSERT_DEBUG(0);
		goto fail;
		}

	if((aFlags&ECheckNotPaged) && (iPagedState!=EUnpaged))
		{
		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iPagedState=%d : %s",this,PhysAddr(),iPagedState,aMessage);
		__NK_ASSERT_DEBUG(0);
		goto fail;
		}

	if((aFlags&ECheckRamAllocLock) && !RamAllocLock::IsHeld())
		{
		Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
		__NK_ASSERT_DEBUG(0);
		goto fail;
		}

	if((aFlags&ENoCheckMmuLock) || MmuLock::IsHeld())
		return;
fail:
	Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x : %s",this,PhysAddr(),aMessage);
	Mmu::Panic(Mmu::EUnsafePageInfoAccess);
	}


void SPageInfo::Dump()
	{
	Kern::Printf("SPageInfo for page %x = %d,%d,%02x,0x%08x,0x%x,%d",PhysAddr(),iType,iPagedState,iFlags,iOwner,iIndex,iPinCount);
	}

#endif



//
// SPageTableInfo
//

// check enough space for page table infos...
__ASSERT_COMPILE((KPageTableInfoEnd-KPageTableInfoBase)/sizeof(SPageTableInfo)
					>=(KPageTableEnd-KPageTableBase)/KPageTableSize);

// check KPtBlockShift...
__ASSERT_COMPILE((sizeof(SPageTableInfo)<<KPtBlockShift)==KPageSize);


#ifdef _DEBUG

TBool SPageTableInfo::CheckPageCount()
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TPte* pt = PageTable();
	TUint realCount = 0;
	do if(*pt++) ++realCount;
	while(TLinAddr(pt)&(KPageTableMask/sizeof(TPte)*sizeof(TPte)));
	if(iPageCount==realCount)
		return true;
	Kern::Printf("CheckPageCount Failed: pt=0x%08x count=%d realCount=%d",TLinAddr(pt)-KPageTableSize,iPageCount,realCount);
	return false;
	}


void SPageTableInfo::CheckChangeUse(const char* aName)
	{
	if(K::Initialising)
		return;
	if(PageTablesLockIsHeld() && MmuLock::IsHeld())
		return;
	Kern::Printf("SPageTableInfo::CheckChangeUse failed : %s",aName);
	Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
	}


void SPageTableInfo::CheckCheckUse(const char* aName)
	{
	if(K::Initialising)
		return;
	if(PageTablesLockIsHeld() || MmuLock::IsHeld())
		return;
	Kern::Printf("SPageTableInfo::CheckCheckUse failed : %s",aName);
	Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
	}


void SPageTableInfo::CheckAccess(const char* aName)
	{
	if(K::Initialising)
		return;
	if(MmuLock::IsHeld())
		return;
	Kern::Printf("SPageTableInfo::CheckAccess failed : %s",aName);
	Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
	}


void SPageTableInfo::CheckInit(const char* aName)
	{
	if(K::Initialising)
		return;
	if(PageTablesLockIsHeld() && iType==EUnused)
		return;
	Kern::Printf("SPageTableInfo::CheckInit failed : %s",aName);
	Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
	}

#endif



//
// RamAllocLock
//

_LIT(KLitRamAlloc,"RamAlloc");
_LIT(KLitPhysMemSync,"PhysMemSync");

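// RamAllocLock is a counting lock built on iRamAllocatorMutex. The first
// Lock() in a nest resets the allocation-failure flag and (in debug builds)
// checks that free RAM has not changed since the previous Unlock(); the final
// Unlock() passes the change in free RAM and any recorded failure to
// K::CheckFreeMemoryLevel().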
void RamAllocLock::Lock()
	{
	Mmu& m = TheMmu;
	Kern::MutexWait(*m.iRamAllocatorMutex);
	if(!m.iRamAllocLockCount++)
		{
		// first lock, so setup memory fail data...
		m.iRamAllocFailed = EFalse;
		__NK_ASSERT_DEBUG(m.iRamAllocInitialFreePages==m.FreeRamInPages()); // free RAM shouldn't have changed whilst the lock wasn't held
		}
	}


void RamAllocLock::Unlock()
	{
	Mmu& m = TheMmu;
	if(--m.iRamAllocLockCount)
		{
		Kern::MutexSignal(*m.iRamAllocatorMutex);
		return;
		}
	TBool failed = m.iRamAllocFailed;
	TUint initial = m.iRamAllocInitialFreePages;
	TUint final = m.FreeRamInPages();
	m.iRamAllocInitialFreePages = final; // new baseline value
	TUint changes = K::CheckFreeMemoryLevel(initial*KPageSize,final*KPageSize,failed);
	if(changes)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("RamAllocLock::Unlock() changes=%x",changes));
		}
	Kern::MutexSignal(*m.iRamAllocatorMutex);
	}


TBool RamAllocLock::Flash()
	{
	Unlock();
	Lock();
	return true; // lock was released
	}


TBool RamAllocLock::IsHeld()
	{
	Mmu& m = TheMmu;
	return m.iRamAllocatorMutex->iCleanup.iThread == &Kern::CurrentThread() && m.iRamAllocLockCount;
	}



//
// MmuLock
//

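// MmuLock wraps a single global NFastMutex and is used throughout this file
// to guard SPageInfo, SPageTableInfo and page table updates. Flash() briefly
// releases and re-acquires the lock so that long update loops do not hold it
// for unbounded periods.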
#ifdef _DEBUG
TUint MmuLock::UnlockGuardNest =0;
TUint MmuLock::UnlockGuardFail =0;
#endif

NFastMutex MmuLock::iLock;

void MmuLock::Lock()
	{
	NKern::FMWait(&iLock);
	}

void MmuLock::Unlock()
	{
	UnlockGuardCheck();
	NKern::FMSignal(&iLock);
	}

TBool MmuLock::Flash()
	{
	UnlockGuardCheck();
	return NKern::FMFlash(&iLock);
	}

TBool MmuLock::IsHeld()
	{
	NFastMutex& m = iLock;
	return m.HeldByCurrentThread();
	}



//
// Initialisation
//

Mmu TheMmu;

void Mmu::Init1Common()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init1Common"));

	// Mmu data
	TUint pteType = PteType(ESupervisorReadWrite,true);
	iTempPteCached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalCached|EMemoryAttributeDefaultShareable),pteType);
	iTempPteUncached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable),pteType);
	iTempPteCacheMaintenance = BlankPte((TMemoryAttributes)(CacheMaintenance::TemporaryMapping()|EMemoryAttributeDefaultShareable),pteType);

	// other
	PP::MaxUserThreadStack=0x14000;			// 80K - STDLIB asks for 64K for PosixServer!!!!
	PP::UserThreadStackGuard=0x2000;		// 8K
	PP::MaxStackSpacePerProcess=0x200000;	// 2Mb
	K::SupervisorThreadStackSize=0x1000;	// 4K
	PP::SupervisorThreadStackGuard=0x1000;	// 4K
	K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
	PP::RamDriveStartAddress=0;
	PP::RamDriveRange=0;
	PP::RamDriveMaxSize=0x20000000;	// 512MB, probably will be reduced later
	K::MemModelAttributes=EMemModelTypeFlexible|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
						EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
						EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;
	}


#if 0
void Mmu::VerifyRam()
	{
	Kern::Printf("Mmu::VerifyRam() pass 1");
	RamAllocLock::Lock();

	TPhysAddr p = 0;
	do
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p);
		if(pi)
			{
			Kern::Printf("%08x %d",p,pi->Type());
			if(pi->Type()==SPageInfo::EUnused)
				{
				volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0);
				b[0] = p;
				b[1] = ~p;
				__NK_ASSERT_DEBUG(b[0]==p);
				__NK_ASSERT_DEBUG(b[1]==~p);
				UnmapTemp();
				}
			}
		p += KPageSize;
		}
	while(p);

	TBool fail = false;
	Kern::Printf("Mmu::VerifyRam() pass 2");
	do
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p);
		if(pi)
			{
			if(pi->Type()==SPageInfo::EUnused)
				{
				volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0);
				if(b[0]!=p || b[1]!=~p)
					{
					fail = true;
					Kern::Printf("%08x FAILED %x %x",p,b[0],b[1]);
					}
				UnmapTemp();
				}
			}
		p += KPageSize;
		}
	while(p);

	__NK_ASSERT_DEBUG(!fail);
	RamAllocLock::Unlock();
	}
#endif

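// Init2Common: second-phase initialisation. Creates the RAM allocator from
// the boot-time SRamInfo, marks every page in the declared RAM banks as
// unused and every page in the reserved banks as physically allocated, then
// walks the page tables and page directory left by the bootstrap to mark the
// pages they map as fixed/allocated, before creating the RamAlloc and
// PhysMemSync mutexes and the temporary mapping slots.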
void Mmu::Init2Common()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2Common"));

	// create allocator...
	const SRamInfo& info = *(const SRamInfo*)TheSuperPage().iRamBootData;
	iRamPageAllocator = DRamAllocator::New(info, iRamZones, iRamZoneCallback);

	// initialise all pages in banks as unused...
	const SRamBank* bank = info.iBanks;
	while(bank->iSize)
		{
		TUint32 base = bank->iBase;
		TUint32 size = bank->iSize;
		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found RAM bank 0x%08x size %d",base,size));
		if(base+size<=base || ((base|size)&KPageMask))
			Panic(EInvalidRamBankAtBoot);

		SPageInfo* pi = SPageInfo::FromPhysAddr(base);
		SPageInfo* piEnd = pi+(size>>KPageShift);
		while(pi<piEnd)
			(pi++)->SetUnused();
		++bank;
		}
	// step over the last bank to get to the reserved banks.
	++bank;
	// mark any reserved regions as allocated...
	while(bank->iSize)
		{
		TUint32 base = bank->iBase;
		TUint32 size = bank->iSize;
		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found reserved bank 0x%08x size %d",base,size));
		if(base+size<=base || ((base|size)&KPageMask))
			Panic(EInvalidReservedBankAtBoot);

		SPageInfo* pi = SPageInfo::FromPhysAddr(base);
		SPageInfo* piEnd = pi+(size>>KPageShift);
		while(pi<piEnd)
			(pi++)->SetPhysAlloc();
		++bank;
		}

	// Clear the initial (and only so far) page table info page so all unused
	// page table infos will be marked as unused.
	__ASSERT_COMPILE(SPageTableInfo::EUnused == 0);
	memclr((TAny*)KPageTableInfoBase, KPageSize);

	// look for page tables - assume first page table maps page tables
	TPte* pPte = (TPte*)KPageTableBase;
	TInt i;
	for(i=0; i<KChunkSize/KPageSize; ++i)
		{
		TPte pte = *pPte++;
		if(pte==KPteUnallocatedEntry)	// after boot, page tables are contiguous
			break;
		TPhysAddr ptpgPhys = Mmu::PtePhysAddr(pte,i);
		__KTRACE_OPT(KBOOT,Kern::Printf("Page Table Group %08x -> Phys %08x", KPageTableBase+i*KPageSize, ptpgPhys));
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys);
		__ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot));
		pi->SetFixed(i); // this also sets the SPageInfo::iOffset so that linear-to-physical works
		}

	// look for mapped pages
	TPde* pd = Mmu::PageDirectory(KKernelOsAsid);
	for(i=0; i<(1<<(32-KChunkShift)); ++i)
		{
		TPde pde = pd[i];
		if(pde==KPdeUnallocatedEntry)
			continue;
		TPhysAddr pdePhys = Mmu::PdePhysAddr(pde);
		TPte* pt = 0;
		if(pdePhys!=KPhysAddrInvalid)
			{
			__KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", i<<KChunkShift, pdePhys));
			}
		else
			{
			pt = Mmu::PageTableFromPde(pde);
			__KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> page table %08x", i<<KChunkShift, pt));
			__ASSERT_ALWAYS(pt,Panic(EInvalidPdeAtBoot)); // bad PDE
			}

		TInt j;
		TInt np = 0;
		for(j=0; j<KChunkSize/KPageSize; ++j)
			{
			TBool present = ETrue;	// all pages present if whole PDE mapping
			TPte pte = 0;
			if(pt)
				{
				pte = pt[j];
				present = pte!=KPteUnallocatedEntry;
				}
			if(present)
				{
				++np;
				TPhysAddr pa = pt ? Mmu::PtePhysAddr(pte,j) : (pdePhys + (j<<KPageShift));
				SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
				__KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x PA=%08x",
													(i<<KChunkShift)+(j<<KPageShift), pa));
				if(pi)	// ignore non-RAM mappings
					{
					TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed);
					// allow KErrAlreadyExists since it's possible that a page is doubly mapped
					__ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
					if(pi->Type()==SPageInfo::EUnused)
						pi->SetFixed();
					}
				}
			}
		__KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x #PTEs=%d",(i<<KChunkShift),np));
		if(pt)
			{
			SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
			pti->Boot(np);
			}
		}

	TInt r = K::MutexCreate(iRamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc);
	if(r!=KErrNone)
		Panic(ERamAllocMutexCreateFailed);
	iRamAllocLockCount = 0;
	iRamAllocInitialFreePages = FreeRamInPages();

	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::DoInit2"));

	for(i=0; i<KNumTempMappingSlots; ++i)
		iTempMap[i].Alloc(1);

	iPhysMemSyncTemp.Alloc(1);
	r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem);
	if(r!=KErrNone)
		Panic(EPhysMemSyncMutexCreateFailed);
//	VerifyRam();
	}


void Mmu::Init2FinalCommon()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon"));
	// hack, reduce free memory to <2GB...
	while(FreeRamInPages()>=0x80000000/KPageSize)
		{
		TPhysAddr dummyPage;
		TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed);
		__NK_ASSERT_ALWAYS(r==KErrNone);
		}
	// hack, reduce total RAM to <2GB...
	if(TheSuperPage().iTotalRamSize<0)
		TheSuperPage().iTotalRamSize = 0x80000000-KPageSize;

	// Save current free RAM size - there can never be more free RAM than this
	TUint maxFreePages = FreeRamInPages();
	K::MaxFreeRam = maxFreePages*KPageSize;
	if(maxFreePages < (TUint(PP::RamDriveMaxSize)>>KPageShift))
		PP::RamDriveMaxSize = maxFreePages*KPageSize;

	// update this to stop assert triggering in RamAllocLock::Lock()
	iRamAllocInitialFreePages = maxFreePages;
	}


void Mmu::Init3()
	{
	iDefrag = new Defrag;
	if (!iDefrag)
		Panic(EDefragAllocFailed);
	iDefrag->Init3(TheMmu.iRamPageAllocator);
	}

//
// Utils
//

void Mmu::Panic(TPanic aPanic)
	{
	Kern::Fault("MMU",aPanic);
	}

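// Free RAM is reported as the pages the RAM allocator holds free plus the
// pages currently sitting on the pager's free list, since the latter can be
// reclaimed on demand (see AllocRam below).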
TUint Mmu::FreeRamInPages()
	{
	return iRamPageAllocator->FreeRamInPages()+ThePager.NumberOfFreePages();
	}


TUint Mmu::TotalPhysicalRamPages()
	{
	return iRamPageAllocator->TotalPhysicalRamPages();
	}


const SRamZone* Mmu::RamZoneConfig(TRamZoneCallback& aCallback) const
	{
	aCallback = iRamZoneCallback;
	return iRamZones;
	}


void Mmu::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
	{
	iRamZones = aZones;
	iRamZoneCallback = aCallback;
	}


TInt Mmu::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
	{
	return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask);
	}


TInt Mmu::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
	{
	return iRamPageAllocator->GetZonePageCount(aId, aPageData);
	}

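// The two ZoneAllocPhysicalRam() overloads allocate physical RAM from a
// caller-supplied list of RAM zones: the first a physically contiguous
// block, the second an arbitrary set of pages. Both allocate the pages as
// EPageFixed and record them as physically allocated in their SPageInfo
// entries.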
TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aAlign));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign);
	if(r!=KErrNone)
		iRamAllocFailed = ETrue;
	else
		{
		TUint pages = MM::RoundToPageCount(aBytes);
		AllocatedPhysicalRam(aPhysAddr, pages,  (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
		}
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
	return r;
	}


TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?)", aZoneIdCount, aNumPages));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TInt r = iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed);
	if(r!=KErrNone)
		iRamAllocFailed = ETrue;
	else
		{
		PagesAllocated(aPageList, aNumPages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);

		// update page infos...
		TUint flash = 0;
		TPhysAddr* pageEnd = aPageList + aNumPages;
		MmuLock::Lock();
		TPhysAddr* page = aPageList;
		while (page < pageEnd)
			{
			MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
			TPhysAddr pagePhys = *page++;
			__NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
			SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
			}
		MmuLock::Unlock();
		}
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d",r));
	return r;
	}


TInt Mmu::RamHalFunction(TInt aFunction, TAny* a1, TAny* a2)
	{
	// This function should only be registered with hal and therefore can only
	// be invoked after the ram allocator has been created.
	__NK_ASSERT_DEBUG(iRamPageAllocator);
	return iRamPageAllocator->HalFunction(aFunction, a1, a2);
	}


void Mmu::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType)
	{
	iRamPageAllocator->ChangePageType(aPageInfo, aOldPageType, aNewPageType);
	}

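// HandlePageFault: called to resolve a fault at aFaultAddress taken at aPc
// that may be a demand-paging or page-moving fault. Any fast mutex held is
// released for the duration (which requires an XTRAP_PAGING trap to be in
// effect), the faulting address is resolved to an address space and mapping,
// and the fault is then passed either to the pager (for demand-paged memory)
// or to the memory object's manager (for a page being moved). When a trap is
// active and the fault was handled, execution resumes via trap->Exception(1)
// rather than returning to the faulting code path.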
TInt Mmu::HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo)
	{
	TRACE(("Mmu::HandlePageFault(0x%08x,0x%08x,%d)",aPc,aFaultAddress,aAccessPermissions));

	DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
	// Get the os asid of the process taking the fault, no need to open a reference
	// as it is the current thread's process so can't be freed.
	TUint faultOsAsid = ((DMemModelProcess*)thread->iNThread.iAddressSpace)->OsAsid();

	// check if any fast mutexes held...
	NFastMutex* fm = NKern::HeldFastMutex();
	TPagingExcTrap* trap = thread->iPagingExcTrap;
	if(fm)
		{
		// check there is an XTRAP_PAGING in effect...
		if(!trap)
			{
			// oops, kill system...
			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with FM Held! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc));
			Exc::Fault(aExceptionInfo);
			}

		// release the fast mutex...
		NKern::FMSignal(fm);
		}

	NKern::ThreadEnterCS();

	// work out address space for aFaultAddress...
	TUint osAsid = faultOsAsid;
	TLinAddr addr = aFaultAddress;
	if(thread->iAliasLinAddr && TUint(addr - thread->iAliasLinAddr) < TUint(KPageSize))
		{
		// Address in aliased memory...
		addr = (addr - thread->iAliasLinAddr) + thread->iAliasTarget;
		// Get the os asid of the process the thread is aliasing, no need to open
		// a reference on it as one was already opened when the alias was created.
		osAsid = thread->iAliasProcess->OsAsid();
		}
	else if(addr>=KGlobalMemoryBase)
		{
		// Address in global region, so look it up in kernel's address space...
		osAsid = KKernelOsAsid;
		}

	// NOTE, osAsid will remain valid for duration of this function because it is either
	// - The current thread's address space, which can't go away whilst the thread
	//   is running.
	// - The address space of another thread which we are aliasing memory from,
	//   and we would only do this if we have a reference on this other thread,
	//   which has a reference on its process, which should own the address space!

#ifdef __BROADCAST_CACHE_MAINTENANCE__
	TInt aliasAsid = -1;
	if (thread->iAliasLinAddr)
		{
		// If an alias is in effect, the thread will be locked to the current CPU,
		// but we need to be able to migrate between CPUs for cache maintenance.  This
		// must be dealt with by removing the alias and restoring it with a paging trap
		// handler.
		if(!trap)
			{
			// oops, kill system...
			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with thread locked to current CPU! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc));
			Exc::Fault(aExceptionInfo);
			}
		// Open a reference on the aliased process's os asid before removing the alias
		// so that the address space can't be freed while we try to access its members.
		aliasAsid = thread->iAliasProcess->TryOpenOsAsid();
		// This should never fail as until we remove the alias there will
		// always be at least one reference on the os asid.
		__NK_ASSERT_DEBUG(aliasAsid >= 0);
		thread->RemoveAlias();
		}
#endif

	// find mapping...
	TUint offsetInMapping;
	TUint mapInstanceCount;
	DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, addr, 1, offsetInMapping, mapInstanceCount);
//	TRACE(("%O mapping=0x%08x",TheCurrentThread,mapping));
	TInt r = KErrNotFound;

	if(mapping)
		{
		MmuLock::Lock();

		// check if we need to process page fault...
		if(!Mmu::CheckPteTypePermissions(mapping->PteType(),aAccessPermissions) ||
			mapInstanceCount != mapping->MapInstanceCount())
			{
			// Invalid access to the page.
			MmuLock::Unlock();
			r = KErrAbort;
			}
		else
			{
			// Should not be able to take a fault on a pinned mapping if accessing it
			// with the correct permissions.
			__NK_ASSERT_DEBUG(!mapping->IsPinned());

			// we do need to handle the fault, so determine whether it is a demand paging or page moving fault
			DMemoryObject* memory = mapping->Memory();
			if(!memory)
				MmuLock::Unlock();
			else
				{
				TUint faultIndex = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
				memory->Open();

				// This is safe as we have the instance count so can detect the mapping
				// being reused and we have a reference to the memory object so it can't
				// be deleted.
				MmuLock::Unlock();

				if(memory->IsDemandPaged())
					{
					// Let the pager handle the fault...
					r = ThePager.HandlePageFault(	aPc, aFaultAddress, faultOsAsid, faultIndex,
													aAccessPermissions, memory, mapping, mapInstanceCount,
													thread, aExceptionInfo);
					}
				else
					{// The page could be being moved so verify that with its manager.
					DMemoryManager* manager = memory->iManager;
					r = manager->HandleFault(memory, faultIndex, mapping, mapInstanceCount, aAccessPermissions);
					}
				if (r == KErrNone)
					{// alias PDE needs updating because page tables have changed...
					thread->RefreshAlias();
					}
				memory->Close();
				}
			}
		mapping->Close();
		}

	if (trap)
		{
		// restore address space (because the trap will bypass any code
		// which would have done this.)...
		DMemModelThread::RestoreAddressSpace();
		}

#ifdef __BROADCAST_CACHE_MAINTENANCE__
	// Close any reference on the aliased process's os asid before we leave the
	// critical section.
	if (aliasAsid >= 0)
		{
		thread->iAliasProcess->CloseOsAsid();
		}
#endif

	NKern::ThreadLeaveCS();  // thread will die now if CheckRealtimeThreadFault caused a panic

	// deal with XTRAP_PAGING...
	if(trap)
		{
		// re-acquire any fast mutex which was held before the page fault...
		if(fm)
			NKern::FMWait(fm);
		if (r == KErrNone)
			{
			trap->Exception(1); // return from exception trap with result '1' (value>0)
			// code doesn't continue beyond this point.
			__NK_ASSERT_DEBUG(0);
			}
		}

	return r;
	}


//
// Memory allocation
//

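// AllocRam: allocate aCount pages of the given zone page type. If the first
// attempt fails and EAllocNoPagerReclaim is not set, the pager is asked to
// give up free pages and the allocation is retried once. Any failure is
// recorded in iRamAllocFailed so RamAllocLock::Unlock() can report it.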
TInt Mmu::AllocRam(	TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType,
					TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam(?,%d,%x)",aCount,aFlags));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
#ifdef _DEBUG
	if(K::CheckForSimulatedAllocFail())
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns simulated OOM %d",KErrNoMemory));
		return KErrNoMemory;
		}
#endif
	TInt missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
	if(missing && !(aFlags&EAllocNoPagerReclaim) && ThePager.GetFreePages(missing))
		missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
	TInt r = missing ? KErrNoMemory : KErrNone;
	if(r!=KErrNone)
		iRamAllocFailed = ETrue;
	else
		PagesAllocated(aPages,aCount,aFlags);
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns %d",r));
	return r;
	}

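// FreeRam: offer every page to the pager first; a demand-paged page may be
// retained by the pager rather than freed, in which case aCount is reduced.
// Only the pages the pager does not keep are compacted to the start of
// aPages and passed on to the RAM allocator.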
void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	// update page infos...
	TPhysAddr* pages = aPages;
	TPhysAddr* pagesEnd = pages+aCount;
	TPhysAddr* pagesOut = aPages;
	MmuLock::Lock();
	TUint flash = 0;
	while(pages<pagesEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
		TPhysAddr pagePhys = *pages++;
		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		PageFreed(pi);

		// If this is an old page of a page being moved that was previously pinned
		// then make sure it is freed as discardable otherwise despite DPager::DonatePages()
		// having marked it as discardable it would be freed as movable.
		__NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1);
		if (pi->PagedState() == SPageInfo::EPagedPinnedMoved)
			aZonePageType = EPageDiscard;

		if(ThePager.PageFreed(pi)==KErrNone)
			--aCount; // pager has dealt with this page, so one less for us
		else
			{
			// All paged pages should have been dealt with by the pager above.
			__NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
			*pagesOut++ = pagePhys; // store page address for freeing later
			}
		}
	MmuLock::Unlock();

	iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType);
	}

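// AllocContiguousRam: if a request for more than KMaxFreeableContiguousPages
// fails, the whole paging cache is flushed and the allocation retried, since
// discardable pages may be fragmenting physical memory. The base address is
// passed to PagesAllocated() with bit 0 set, which appears to mark it as a
// contiguous run rather than a list of page addresses.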
TInt Mmu::AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
#ifdef _DEBUG
	if(K::CheckForSimulatedAllocFail())
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory));
		return KErrNoMemory;
		}
	// Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
	__NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim));
#endif
	TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
	if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages)
		{
		// flush paging cache and retry...
		ThePager.FlushAll();
		r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
		}
	if(r!=KErrNone)
		iRamAllocFailed = ETrue;
	else
		PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
	return r;
	}

void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeContiguousRam(0x%08x,0x%x)",aPhysAddr,aCount));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);

	TUint pageCount = aCount;

	// update page infos...
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = pi+pageCount;
	TUint flash = 0;
	MmuLock::Lock();
	while(pi<piEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
		PageFreed(pi++);
		}
	MmuLock::Unlock();

	// free pages...
	while(pageCount)
		{
		iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed);
		aPhysAddr += KPageSize;
		--pageCount;
		}
	}


TInt Mmu::AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,%d,%x)",aCount,aFlags));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	// Allocate fixed pages as physically allocated pages aren't movable or discardable.
	TInt r = AllocRam(aPages, aCount, aFlags, EPageFixed);
	if (r!=KErrNone)
		return r;

	// update page infos...
	TPhysAddr* pages = aPages;
	TPhysAddr* pagesEnd = pages+aCount;
	MmuLock::Lock();
	TUint flash = 0;
	while(pages<pagesEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
		TPhysAddr pagePhys = *pages++;
		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		pi->SetPhysAlloc();
		}
	MmuLock::Unlock();

	return KErrNone;
	}


void Mmu::FreePhysicalRam(TPhysAddr* aPages, TUint aCount)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(?,%d)",aCount));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	// update page infos...
	TPhysAddr* pages = aPages;
	TPhysAddr* pagesEnd = pages+aCount;
	MmuLock::Lock();
	TUint flash = 0;
	while(pages<pagesEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
		TPhysAddr pagePhys = *pages++;
		__NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		__ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
		__ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
		pi->SetUnused();
		}
	MmuLock::Unlock();

	iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed);
	}

TInt Mmu::AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
	TInt r = AllocContiguousRam(aPhysAddr,aCount,aAlign,aFlags);
	if (r!=KErrNone)
		return r;

	// update page infos...
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = pi+aCount;
	TUint flash = 0;
	MmuLock::Lock();
	while(pi<piEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
		pi->SetPhysAlloc();
		++pi;
		}
	MmuLock::Unlock();

	return KErrNone;
	}


void Mmu::FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(0x%08x,0x%x)",aPhysAddr,aCount));

	// update page infos...
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = pi+aCount;
	TUint flash = 0;
	MmuLock::Lock();
	while(pi<piEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
		__ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
		__ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
		pi->SetUnused();
		++pi;
		}
	MmuLock::Unlock();

	iRamPageAllocator->FreePhysicalRam(aPhysAddr, aCount << KPageShift);
	}


TInt Mmu::ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags));
	aPhysAddr &= ~KPageMask;
	TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr,(aCount << KPageShift));
	if(r!=KErrNone)
		return r;

	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);

	// update page infos...
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = pi+aCount;
	TUint flash = 0;
	MmuLock::Lock();
	while(pi<piEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
		pi->SetPhysAlloc();
		++pi;
		}
	MmuLock::Unlock();

	return KErrNone;
	}


void Mmu::AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocatedPhysicalRam(0x%08x,0x%x,%x)",aPhysAddr,aCount,aFlags));

	PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);

	// update page infos...
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
	SPageInfo* piEnd = pi+aCount;
	TUint flash = 0;
	MmuLock::Lock();
	while(pi<piEnd)
		{
		MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
		pi->SetPhysAlloc();
		++pi;
		}
	MmuLock::Unlock();
	}


//
// Misc
//

#ifdef _DEBUG
/**
Perform a page table walk to return the physical address of
the memory mapped at virtual address \a aLinAddr in the
address space \a aOsAsid.

If the page table used was not one allocated by the kernel
then the results are unpredictable and may cause a system fault.

@pre #MmuLock held.
*/
TPhysAddr Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || K::Initialising);
	return UncheckedLinearToPhysical(aLinAddr,aOsAsid);
	}
#endif


/**
Next virtual address available for allocation by TTempMapping.
This is initialised to #KTempAddr and addresses may be allocated
until they reach #KTempAddrEnd.
*/
TLinAddr Mmu::TTempMapping::iNextLinAddr = KTempAddr;


/**
Allocate the virtual address space required to map a given number of memory pages.

The size of the allocated virtual address space needs to accommodate \a aNumPages
pages of any colour. For example: if \a aNumPages == 4 and #KPageColourCount == 4,
then at least 7 pages are required.

@param aNumPages	Maximum number of pages that can be mapped into this temporary mapping.

@pre Called in single threaded context (boot) only.

@pre #iNextLinAddr points to virtual page with zero colour.
@post #iNextLinAddr points to virtual page with zero colour.
*/
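// Note: the rounding below reserves enough pages for aNumPages consecutively
// coloured pages whatever colour the first mapped page has, while keeping
// iNextLinAddr on a zero-colour boundary; e.g. with KPageColourCount == 4,
// Alloc(4) reserves (3+4+3)&~3 = 8 pages.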
void Mmu::TTempMapping::Alloc(TUint aNumPages)
	{
	__NK_ASSERT_DEBUG(aNumPages<=(KTempAddrEnd-KTempAddr)/KPageSize);

	// This runs during the boot only (single threaded context) so the access to iNextLinAddr is not guarded by any mutex.
	TLinAddr tempAddr = iNextLinAddr;
	TUint numPages = (KPageColourMask+aNumPages+KPageColourMask)&~KPageColourMask;
	iNextLinAddr = tempAddr+numPages*KPageSize;

	__NK_ASSERT_ALWAYS(iNextLinAddr<=KTempAddrEnd);

	__NK_ASSERT_DEBUG(iSize==0);
	iLinAddr = tempAddr;
	MmuLock::Lock();
	iPtePtr = Mmu::PtePtrFromLinAddr(tempAddr,KKernelOsAsid);
	__NK_ASSERT_DEBUG(iPtePtr);
	MmuLock::Unlock();
	iBlankPte = TheMmu.iTempPteCached;
	iSize = aNumPages;
	iCount = 0;

	TRACEB(("Mmu::TTempMapping::Alloc(%d) iLinAddr=0x%08x, iPtePtr=0x%08x",aNumPages,iLinAddr,iPtePtr));
	}


/**
Map a single physical page into this temporary mapping.

Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.

@param aPage		The physical page to map.
@param aColour		The required colour for the mapping.

@return				The linear address at which the page is mapped.
*/
TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour)
	{
	__NK_ASSERT_DEBUG(iSize>=1);
	__NK_ASSERT_DEBUG(iCount==0);

	TUint colour = aColour&KPageColourMask;
	TLinAddr addr = iLinAddr+(colour<<KPageShift);
	TPte* pPte = iPtePtr+colour;
	iColour = colour;

	__ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
	*pPte = (aPage&~KPageMask) | iBlankPte;
	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
	InvalidateTLBForPage(addr|KKernelOsAsid);

	iCount = 1;
	return addr;
	}

/**
Map a single physical page into this temporary mapping using the given page table entry (PTE) value.

@param aPage		The physical page to map.
@param aColour		The required colour for the mapping.
@param aBlankPte	The PTE value to use for mapping the page,
					with the physical address component equal to zero.

@return				The linear address at which the page is mapped.
*/
TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte)
	{
	__NK_ASSERT_DEBUG(iSize>=1);
	__NK_ASSERT_DEBUG(iCount==0);

	TUint colour = aColour&KPageColourMask;
	TLinAddr addr = iLinAddr+(colour<<KPageShift);
	TPte* pPte = iPtePtr+colour;
	iColour = colour;

	__ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
	*pPte = (aPage&~KPageMask) | aBlankPte;
	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
	InvalidateTLBForPage(addr|KKernelOsAsid);

	iCount = 1;
	return addr;
	}


/**
Map a number of physical pages into this temporary mapping.

Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.

@param aPages		The array of physical pages to map.
@param aCount		The number of pages to map.
@param aColour		The required colour for the first page.
					Consecutive pages will be coloured accordingly.

@return				The linear address at which the first page is mapped.
*/
TLinAddr Mmu::TTempMapping::Map(TPhysAddr* aPages, TUint aCount, TUint aColour)
	{
	__NK_ASSERT_DEBUG(iSize>=aCount);
	__NK_ASSERT_DEBUG(iCount==0);

	TUint colour = aColour&KPageColourMask;
	TLinAddr addr = iLinAddr+(colour<<KPageShift);
	TPte* pPte = iPtePtr+colour;
	iColour = colour;

	for(TUint i=0; i<aCount; ++i)
		{
		__ASSERT_DEBUG(pPte[i]==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
		pPte[i] = (aPages[i]&~KPageMask) | iBlankPte;
		CacheMaintenance::SinglePteUpdated((TLinAddr)&pPte[i]);
		InvalidateTLBForPage((addr+i*KPageSize)|KKernelOsAsid);
		}

	iCount = aCount;
	return addr;
	}


/**
Unmap all pages from this temporary mapping.

@param aIMBRequired	True if an IMB barrier is required prior to unmapping.
*/
void Mmu::TTempMapping::Unmap(TBool aIMBRequired)
	{
	__NK_ASSERT_DEBUG(iSize>=1);
	if(aIMBRequired)
		CacheMaintenance::CodeChanged(iLinAddr+iColour*KPageSize,iCount*KPageSize);
	Unmap();
	}


/**
Unmap all pages from this temporary mapping.
*/
void Mmu::TTempMapping::Unmap()
	{
	__NK_ASSERT_DEBUG(iSize>=1);

	TUint colour = iColour;
	TLinAddr addr = iLinAddr+(colour<<KPageShift);
	TPte* pPte = iPtePtr+colour;
	TUint count = iCount;

	while(count)
		{
		*pPte = KPteUnallocatedEntry;
		CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
		InvalidateTLBForPage(addr|KKernelOsAsid);
		addr += KPageSize;
		++pPte;
		--count;
		}

	iCount = 0;
	}

#ifdef __SMP__
/**
Dummy IPI to be invoked when a thread's alias pde members are updated remotely
by another thread.

@internalComponent
*/
class TAliasIPI : public TGenericIPI
	{
public:
	static void RefreshIsr(TGenericIPI*);
	void RefreshAlias();
	};


/**
Dummy isr method.
*/
void TAliasIPI::RefreshIsr(TGenericIPI*)
	{
	TRACE2(("TAliasIPI"));
	}


/**
Queue the dummy IPI on all other processors.  This ensures that DoProcessSwitch will
have completed updating iAliasPdePtr once this method returns.
*/
void TAliasIPI::RefreshAlias()
	{
	NKern::Lock();
	QueueAllOther(&RefreshIsr);
	NKern::Unlock();
	WaitCompletion();
	}


/**
Perform a dummy IPI on all the other processors to ensure that if any of them are
executing DoProcessSwitch they will see the new value of iAliasPde before they
update iAliasPdePtr, or will finish updating iAliasPdePtr before we continue.
This works as DoProcessSwitch() has interrupts disabled while reading iAliasPde
and updating iAliasPdePtr.
*/
void BroadcastAliasRefresh()
	{
	TAliasIPI ipi;
	ipi.RefreshAlias();
	}
#endif //__SMP__

/**
Remove any thread IPC aliases which use the specified page table.
This is used by the page table allocator when a page table is freed.

@pre #PageTablesLockIsHeld
*/
void Mmu::RemoveAliasesForPageTable(TPhysAddr aPageTable)
	{
	__NK_ASSERT_DEBUG(PageTablesLockIsHeld());

	MmuLock::Lock();

	SDblQue checkedList;

	TUint ptId = aPageTable>>KPageTableShift;
	while(!iAliasList.IsEmpty())
		{
		SDblQueLink* next = iAliasList.First()->Deque();
		checkedList.Add(next);
		DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
		if((thread->iAliasPde>>KPageTableShift)==ptId)
			{
			// the page table is being aliased by the thread, so remove it...
			TRACE2(("Thread %O RemoveAliasesForPageTable", thread));
			thread->iAliasPde = KPdeUnallocatedEntry;
#ifdef __SMP__ // we need to also unmap the page table in case thread is running on another core...

			// Ensure other processors see the update to iAliasPde.
			BroadcastAliasRefresh();

			*thread->iAliasPdePtr = KPdeUnallocatedEntry;

			SinglePdeUpdated(thread->iAliasPdePtr);
			__NK_ASSERT_DEBUG((thread->iAliasLinAddr&KPageMask)==0);
			// Invalidate the tlb for the page using the os asid of the process that created the alias;
			// this is safe as the os asid will be valid because the thread must be running, otherwise
			// the alias would have been removed.
			InvalidateTLBForPage(thread->iAliasLinAddr | ((DMemModelProcess*)thread->iOwningProcess)->OsAsid());
			// note, race condition with 'thread' updating its iAliasLinAddr is
			// not a problem because 'thread' will not then be accessing the aliased
			// region and will take care of invalidating the TLB.
#endif
			}
		MmuLock::Flash();
		}

	// copy checkedList back to iAliasList
	iAliasList.MoveFrom(&checkedList);

	MmuLock::Unlock();
	}

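// RefreshAlias: re-read the aliased process's page directory entry and
// rewrite this thread's alias PDE with it, then invalidate the TLB entry for
// the alias address. HandlePageFault() calls this after a fault has been
// handled, because handling the fault may have changed the page tables that
// the alias PDE mirrors.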
sl@0
  1409
void DMemModelThread::RefreshAlias()
sl@0
  1410
	{
sl@0
  1411
	if(iAliasLinAddr)
sl@0
  1412
		{
sl@0
  1413
		TRACE2(("Thread %O RefreshAlias", this));
sl@0
  1414
		// Get the os asid, this is the current thread so no need to open a reference.
sl@0
  1415
		TUint thisAsid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
sl@0
  1416
		MmuLock::Lock();
sl@0
  1417
		TInt osAsid = iAliasProcess->OsAsid();
sl@0
  1418
		TPde pde = *Mmu::PageDirectoryEntry(osAsid,iAliasTarget);
sl@0
  1419
		iAliasPde = pde;
sl@0
  1420
		*iAliasPdePtr = pde;
sl@0
  1421
		SinglePdeUpdated(iAliasPdePtr);
sl@0
  1422
		InvalidateTLBForPage(iAliasLinAddr|thisAsid);
sl@0
  1423
		MmuLock::Unlock();
sl@0
  1424
		}
sl@0
  1425
	}
sl@0
  1426
sl@0
  1427
sl@0
  1428
sl@0
  1429
//
sl@0
  1430
// Mapping/unmapping functions
sl@0
  1431
//
sl@0
  1432
sl@0
  1433
sl@0
  1434
/**
sl@0
  1435
Modify page table entries (PTEs) so they map the given memory pages.
sl@0
  1436
Entries are only updated if the current state of the corresponding page
sl@0
  1437
is RPageArray::ECommitted.
sl@0
  1438
sl@0
  1439
@param aPtePtr		Pointer into a page table for the PTE of the first page.
sl@0
  1440
@param aCount		The number of pages to modify.
sl@0
  1441
@param aPages		Pointer to the entry for the first page in a memory object's #RPageArray.
sl@0
  1442
					Each entry contains the physical address of a page together with its
sl@0
  1443
					current state (RPageArray::TState).
sl@0
  1444
@param aBlankPte	The value to use for each PTE, with the physical address component equal
sl@0
  1445
					to zero.
sl@0
  1446
sl@0
  1447
@return False, if the page table no longer maps any entries and may be freed.
sl@0
  1448
		True otherwise, to indicate that the page table is still needed.
sl@0
  1449
sl@0
  1450
@pre #MmuLock held.
sl@0
  1451
@post #MmuLock held and has not been released by this function.
sl@0
  1452
*/
sl@0
  1453
TBool Mmu::MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
sl@0
  1454
	{
sl@0
  1455
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
sl@0
  1456
	__NK_ASSERT_DEBUG(aCount);
sl@0
  1457
 	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
sl@0
  1458
sl@0
  1459
	TUint count = 0;
sl@0
  1460
	if(aCount==1)
sl@0
  1461
		{
sl@0
  1462
		// get page to map...
sl@0
  1463
		TPhysAddr pagePhys = *aPages;
sl@0
  1464
		TPte pte = *aPtePtr;
sl@0
  1465
		if(!RPageArray::TargetStateIsCommitted(pagePhys))
sl@0
  1466
			goto done; // page no longer needs mapping
sl@0
  1467
sl@0
  1468
		// clear type flags...
sl@0
  1469
		pagePhys &= ~KPageMask;
sl@0
  1470
	
sl@0
  1471
		// check nobody has already mapped the page...
sl@0
  1472
		if(pte!=KPteUnallocatedEntry)
sl@0
  1473
			{
sl@0
  1474
			// already mapped...
sl@0
  1475
#ifdef _DEBUG
sl@0
  1476
			if((pte^pagePhys)>=TPte(KPageSize))
sl@0
  1477
				{
sl@0
  1478
				// but different!
sl@0
  1479
				Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
sl@0
  1480
				__NK_ASSERT_DEBUG(0);
sl@0
  1481
				}
sl@0
  1482
#endif
sl@0
  1483
			return true; // return true to keep page table (it already had at least one page mapped)
sl@0
  1484
			}
sl@0
  1485
sl@0
  1486
		// map page...
sl@0
  1487
		pte = pagePhys|aBlankPte;
sl@0
  1488
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
sl@0
  1489
		*aPtePtr = pte;
sl@0
  1490
		count = 1;
sl@0
  1491
sl@0
  1492
		// clean cache...
sl@0
  1493
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
sl@0
  1494
		}
sl@0
  1495
	else
sl@0
  1496
		{
sl@0
  1497
		// check we are only updating a single page table...
sl@0
  1498
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
sl@0
  1499
sl@0
  1500
		// map pages...
sl@0
  1501
		TPte* pPte = aPtePtr;
sl@0
  1502
		TPte* pPteEnd = aPtePtr+aCount;
sl@0
  1503
		do
sl@0
  1504
			{
sl@0
  1505
			// map page...
sl@0
  1506
			TPhysAddr pagePhys = *aPages++;
sl@0
  1507
			TPte pte = *pPte++;
sl@0
  1508
			if(RPageArray::TargetStateIsCommitted(pagePhys))
sl@0
  1509
				{
sl@0
  1510
				// clear type flags...
sl@0
  1511
				pagePhys &= ~KPageMask;
sl@0
  1512
sl@0
  1513
				// page not being freed, so try and map it...
sl@0
  1514
				if(pte!=KPteUnallocatedEntry)
sl@0
  1515
					{
sl@0
  1516
					// already mapped...
sl@0
  1517
#ifdef _DEBUG
sl@0
  1518
					if((pte^pagePhys)>=TPte(KPageSize))
sl@0
  1519
						{
sl@0
  1520
						// but different!
sl@0
  1521
						Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
sl@0
  1522
						__NK_ASSERT_DEBUG(0);
sl@0
  1523
						}
sl@0
  1524
#endif
sl@0
  1525
					}
sl@0
  1526
				else
sl@0
  1527
					{
sl@0
  1528
					// map page...
sl@0
  1529
					pte = pagePhys|aBlankPte;
sl@0
  1530
					TRACE2(("!PTE %x=%x",pPte-1,pte));
sl@0
  1531
					pPte[-1] = pte;
sl@0
  1532
					++count;
sl@0
  1533
					}
sl@0
  1534
				}
sl@0
  1535
			}
sl@0
  1536
		while(pPte!=pPteEnd);
sl@0
  1537
sl@0
  1538
		// clean cache...
sl@0
  1539
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
sl@0
  1540
		}
sl@0
  1541
sl@0
  1542
done:
sl@0
  1543
	// update page counts...
sl@0
  1544
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
sl@0
  1545
	count = pti->IncPageCount(count);
sl@0
  1546
	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
sl@0
  1547
	__NK_ASSERT_DEBUG(pti->CheckPageCount());
sl@0
  1548
sl@0
  1549
	// see if page table needs freeing...
sl@0
  1550
	TUint keepPt = count | pti->PermanenceCount();
sl@0
  1551
sl@0
  1552
	__NK_ASSERT_DEBUG(!pti->IsDemandPaged()); // check not demand paged page table
sl@0
  1553
sl@0
  1554
	return keepPt;
sl@0
  1555
	}
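// Illustrative sketch, not part of the original source and never compiled:
// how a hypothetical caller would honour the keep/free contract returned by
// Mmu::MapPages. Real callers are the memory manager's mapping objects; the
// helper name and the page table freeing step are assumptions.
#if 0
static void ExampleMapPagesCaller(TPte* aPtePtr, TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());	// precondition of Mmu::MapPages
	TBool keepPt = TheMmu.MapPages(aPtePtr, aCount, aPages, aBlankPte);
	if(!keepPt)
		{
		// The page table maps no pages and has no permanence count, so the
		// caller may return it to the page table allocator once it is safe
		// to do so.
		}
	}
#endif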
sl@0
  1556
sl@0
  1557
sl@0
  1558
/**
sl@0
  1559
Modify a page table entry (PTE) so it maps a new page.
sl@0
  1560
The entry is only updated if the current state of the corresponding page
sl@0
  1561
is RPageArray::ECommitted or RPageArray::EMoving.
sl@0
  1562
sl@0
  1563
@param aPtePtr		Pointer into a page table for the PTE of the page.
sl@0
  1564
@param aPage		Pointer to the entry for the page in a memory object's #RPageArray.
sl@0
  1565
					The entry contains the physical address of a page together with its
sl@0
  1566
					current state (RPageArray::TState).
sl@0
  1567
@param aBlankPte	The value to use for each PTE, with the physical address component equal
sl@0
  1568
					to zero.
sl@0
  1569
sl@0
  1570
@pre #MmuLock held.
sl@0
  1571
@post #MmuLock held and has not been released by this function.
sl@0
  1572
*/
sl@0
  1573
void Mmu::RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte)
sl@0
  1574
	{
sl@0
  1575
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
sl@0
  1576
 	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
sl@0
  1577
sl@0
  1578
	// get page to remap...
sl@0
  1579
	TPhysAddr pagePhys = aPage;
sl@0
  1580
	
sl@0
  1581
	// Only remap the page if it is committed or it is being moved and
sl@0
  1582
	// no other operation has been performed on the page.
sl@0
  1583
	if(!RPageArray::TargetStateIsCommitted(pagePhys))
sl@0
  1584
		return; // page no longer needs mapping
sl@0
  1585
	
sl@0
  1586
	// Only remap the page if it is currently mapped, i.e. doesn't have an unallocated pte.
sl@0
  1587
	// The pte will only be unallocated if a new mapping is being added but it hasn't yet updated
sl@0
  1588
	// all the ptes for the pages that it maps.
sl@0
  1589
	TPte pte = *aPtePtr;
sl@0
  1590
	if (pte == KPteUnallocatedEntry)
sl@0
  1591
		return;
sl@0
  1592
	
sl@0
  1593
	// clear type flags...
sl@0
  1594
	pagePhys &= ~KPageMask;
sl@0
  1595
sl@0
  1596
	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
sl@0
  1597
	if (pi)
sl@0
  1598
		{
sl@0
  1599
		SPageInfo::TPagedState pagedState = pi->PagedState();
sl@0
  1600
		if (pagedState != SPageInfo::EUnpaged)
sl@0
  1601
			{
sl@0
  1602
			// The page is demand paged.  Only remap the page if it is pinned or is currently
sl@0
  1603
			// accessible but still maps the old physical page.
sl@0
  1604
			if (pagedState != SPageInfo::EPagedPinned &&
sl@0
  1605
				 (Mmu::IsPteInaccessible(pte) || (pte^pagePhys) < TPte(KPageSize)))
sl@0
  1606
				return;
sl@0
  1607
			if (!pi->IsDirty())
sl@0
  1608
				{
sl@0
  1609
				// Ensure that the page is mapped as read only to prevent it being marked dirty
sl@0
  1610
				// by page moving even though it has not been written to.
sl@0
  1611
				Mmu::MakePteInaccessible(aBlankPte, EFalse);
sl@0
  1612
				}
sl@0
  1613
			}
sl@0
  1614
		}
sl@0
  1615
	
sl@0
  1616
	// Map the page in the page array entry as this is always the physical
sl@0
  1617
	// page that the memory object's page should be mapped to.
sl@0
  1618
	pte = pagePhys|aBlankPte;
sl@0
  1619
	TRACE2(("!PTE %x=%x",aPtePtr,pte));
sl@0
  1620
	*aPtePtr = pte;
sl@0
  1621
	
sl@0
  1622
	// clean cache...
sl@0
  1623
	CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
sl@0
  1624
	}
sl@0
  1625
sl@0
  1626
sl@0
  1627
/**
sl@0
  1628
Modify page table entries (PTEs) so they no longer map any memory pages.
sl@0
  1629
sl@0
  1630
@param aPtePtr		Pointer into a page table for the PTE of the first page.
sl@0
  1631
@param aCount		The number of pages to modify.
sl@0
  1632
sl@0
  1633
@return False, if the page table no longer maps any entries and may be freed.
sl@0
  1634
		True otherwise, to indicate that the page table is still needed.
sl@0
  1635
sl@0
  1636
@pre #MmuLock held.
sl@0
  1637
@post #MmuLock held and has not been released by this function.
sl@0
  1638
*/
sl@0
  1639
TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount)
sl@0
  1640
	{
sl@0
  1641
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
sl@0
  1642
	__NK_ASSERT_DEBUG(aCount);
sl@0
  1643
sl@0
  1644
	TUint count = 0;
sl@0
  1645
	if(aCount==1)
sl@0
  1646
		{
sl@0
  1647
		if(*aPtePtr==KPteUnallocatedEntry)
sl@0
  1648
			return true; // page already unmapped
sl@0
  1649
sl@0
  1650
		// unmap page...
sl@0
  1651
		++count;
sl@0
  1652
		TPte pte = KPteUnallocatedEntry;
sl@0
  1653
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
sl@0
  1654
		*aPtePtr = pte;
sl@0
  1655
sl@0
  1656
		// clean cache...
sl@0
  1657
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
sl@0
  1658
		}
sl@0
  1659
	else
sl@0
  1660
		{
sl@0
  1661
		// check we are only updating a single page table...
sl@0
  1662
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
sl@0
  1663
sl@0
  1664
		// unmap pages...
sl@0
  1665
		TPte* pPte = aPtePtr;
sl@0
  1666
		TPte* pPteEnd = aPtePtr+aCount;
sl@0
  1667
		do
sl@0
  1668
			{
sl@0
  1669
			if(*pPte!=KPteUnallocatedEntry)
sl@0
  1670
				{
sl@0
  1671
				// unmap page...
sl@0
  1672
				++count;
sl@0
  1673
				TPte pte = KPteUnallocatedEntry;
sl@0
  1674
				TRACE2(("!PTE %x=%x",pPte,pte));
sl@0
  1675
				*pPte = pte;
sl@0
  1676
				}
sl@0
  1677
			}
sl@0
  1678
		while(++pPte<pPteEnd);
sl@0
  1679
sl@0
  1680
		if(!count)
sl@0
  1681
			return true; // no PTEs changed, so nothing more to do
sl@0
  1682
sl@0
  1683
		// clean cache...
sl@0
  1684
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
sl@0
  1685
		}
sl@0
  1686
sl@0
  1687
	// update page table info...
sl@0
  1688
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
sl@0
  1689
	count = pti->DecPageCount(count);
sl@0
  1690
	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
sl@0
  1691
	__NK_ASSERT_DEBUG(pti->CheckPageCount());
sl@0
  1692
sl@0
  1693
	// see if page table needs freeing...
sl@0
  1694
	TUint keepPt = count | pti->PermanenceCount();
sl@0
  1695
sl@0
  1696
	return keepPt;
sl@0
  1697
	}
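// Illustrative sketch, not part of the original source and never compiled:
// unmapping a run of pages with the overload above. Note that only the PTEs
// and their cache lines are updated by UnmapPages; any TLB invalidation for
// the unmapped region remains the caller's responsibility (an assumption
// based on the code above, which performs no TLB maintenance itself).
#if 0
static void ExampleUnmapPagesCaller(TPte* aPtePtr, TUint aCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());	// precondition of Mmu::UnmapPages
	TBool keepPt = TheMmu.UnmapPages(aPtePtr, aCount);
	// ...caller performs the required TLB maintenance and, if keepPt is false,
	// it may free the now-empty page table...
	(void)keepPt;
	}
#endif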
sl@0
  1698
sl@0
  1699
sl@0
  1700
/**
sl@0
  1701
Modify page table entries (PTEs) so they no longer map the given memory pages.
sl@0
  1702
Entries are only updated if the current state of the corresponding page
sl@0
  1703
is 'decommitted' i.e. RPageArray::TargetStateIsDecommitted returns true.
sl@0
  1704
sl@0
  1705
@param aPtePtr		Pointer into a page table for the PTE of the first page.
sl@0
  1706
@param aCount		The number of pages to modify.
sl@0
  1707
@param aPages		Pointer to the entry for the first page in a memory object's #RPageArray.
sl@0
  1708
					Each entry contains the physical address of a page together with its
sl@0
  1709
					current state (RPageArray::TState).
sl@0
  1710
sl@0
  1711
@return False, if the page table no longer maps any entries and may be freed.
sl@0
  1712
		True otherwise, to indicate that the page table is still needed.
sl@0
  1713
sl@0
  1714
@pre #MmuLock held.
sl@0
  1715
@post #MmuLock held and has not been released by this function.
sl@0
  1716
*/
sl@0
  1717
TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
sl@0
  1718
	{
sl@0
  1719
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
sl@0
  1720
	__NK_ASSERT_DEBUG(aCount);
sl@0
  1721
sl@0
  1722
	TUint count = 0;
sl@0
  1723
	if(aCount==1)
sl@0
  1724
		{
sl@0
  1725
		if(*aPtePtr==KPteUnallocatedEntry)
sl@0
  1726
			return true; // page already unmapped
sl@0
  1727
sl@0
  1728
		if(!RPageArray::TargetStateIsDecommitted(*aPages))
sl@0
  1729
			return true; // page has been reallocated
sl@0
  1730
sl@0
  1731
		// unmap page...
sl@0
  1732
		++count;
sl@0
  1733
		TPte pte = KPteUnallocatedEntry;
sl@0
  1734
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
sl@0
  1735
		*aPtePtr = pte;
sl@0
  1736
sl@0
  1737
		// clean cache...
sl@0
  1738
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
sl@0
  1739
		}
sl@0
  1740
	else
sl@0
  1741
		{
sl@0
  1742
		// check we are only updating a single page table...
sl@0
  1743
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
sl@0
  1744
sl@0
  1745
		// unmap pages...
sl@0
  1746
		TPte* pPte = aPtePtr;
sl@0
  1747
		TPte* pPteEnd = aPtePtr+aCount;
sl@0
  1748
		do
sl@0
  1749
			{
sl@0
  1750
			if(RPageArray::TargetStateIsDecommitted(*aPages++) && *pPte!=KPteUnallocatedEntry)
sl@0
  1751
				{
sl@0
  1752
				// unmap page...
sl@0
  1753
				++count;
sl@0
  1754
				TPte pte = KPteUnallocatedEntry;
sl@0
  1755
				TRACE2(("!PTE %x=%x",pPte,pte));
sl@0
  1756
				*pPte = pte;
sl@0
  1757
				}
sl@0
  1758
			}
sl@0
  1759
		while(++pPte<pPteEnd);
sl@0
  1760
sl@0
  1761
		if(!count)
sl@0
  1762
			return true; // no PTEs changed, so nothing more to do
sl@0
  1763
sl@0
  1764
		// clean cache...
sl@0
  1765
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
sl@0
  1766
		}
sl@0
  1767
sl@0
  1768
	// update page table info...
sl@0
  1769
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
sl@0
  1770
	count = pti->DecPageCount(count);
sl@0
  1771
	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
sl@0
  1772
	__NK_ASSERT_DEBUG(pti->CheckPageCount());
sl@0
  1773
sl@0
  1774
	// see if page table needs freeing...
sl@0
  1775
	TUint keepPt = count | pti->PermanenceCount();
sl@0
  1776
sl@0
  1777
	return keepPt;
sl@0
  1778
	}
sl@0
  1779
sl@0
  1780
sl@0
  1781
/**
sl@0
  1782
Modify page table entries (PTEs) so the given memory pages are not accessible.
sl@0
  1783
Entries are only updated if the current state of the corresponding page
sl@0
  1784
is RPageArray::ERestrictingNA (or RPageArray::EMoving when a single page is updated).
sl@0
  1785
sl@0
  1786
@param aPtePtr		Pointer into a page table for the PTE of the first page.
sl@0
  1787
@param aCount		The number of pages to modify.
sl@0
  1788
@param aPages		Pointer to the entry for the first page in a memory object's #RPageArray.
sl@0
  1789
					Each entry contains the physical address of a page together with its
sl@0
  1790
					current state (RPageArray::TState).
sl@0
  1791
sl@0
  1792
@pre #MmuLock held.
sl@0
  1793
@post #MmuLock held and has not been released by this function.
sl@0
  1794
*/
sl@0
  1795
void Mmu::RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
sl@0
  1796
	{
sl@0
  1797
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
sl@0
  1798
	__NK_ASSERT_DEBUG(aCount);
sl@0
  1799
sl@0
  1800
	if(aCount==1)
sl@0
  1801
		{
sl@0
  1802
		TPhysAddr page = *aPages;
sl@0
  1803
		TPte pte = *aPtePtr;
sl@0
  1804
		RPageArray::TState state = RPageArray::State(page);
sl@0
  1805
		if(state != RPageArray::ERestrictingNA && state != RPageArray::EMoving)
sl@0
  1806
			return; // page no longer needs restricting
sl@0
  1807
sl@0
  1808
		if(pte==KPteUnallocatedEntry)
sl@0
  1809
			return; // page gone
sl@0
  1810
sl@0
  1811
		// restrict page...
sl@0
  1812
		pte = Mmu::MakePteInaccessible(pte,false);
sl@0
  1813
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
sl@0
  1814
		*aPtePtr = pte;
sl@0
  1815
sl@0
  1816
		// clean cache...
sl@0
  1817
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
sl@0
  1818
		}
sl@0
  1819
	else
sl@0
  1820
		{
sl@0
  1821
		// check we are only updating a single page table...
sl@0
  1822
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
sl@0
  1823
sl@0
  1824
		// restrict pages...
sl@0
  1825
		TPte* pPte = aPtePtr;
sl@0
  1826
		TPte* pPteEnd = aPtePtr+aCount;
sl@0
  1827
		do
sl@0
  1828
			{
sl@0
  1829
			TPhysAddr page = *aPages++;
sl@0
  1830
			TPte pte = *pPte++;
sl@0
  1831
			if(RPageArray::State(page)==RPageArray::ERestrictingNA && pte!=KPteUnallocatedEntry)
sl@0
  1832
				{
sl@0
  1833
				pte = Mmu::MakePteInaccessible(pte,false);
sl@0
  1834
				TRACE2(("!PTE %x=%x",pPte-1,pte));
sl@0
  1835
				pPte[-1] = pte;
sl@0
  1836
				}
sl@0
  1837
			}
sl@0
  1838
		while(pPte<pPteEnd);
sl@0
  1839
sl@0
  1840
		// clean cache...
sl@0
  1841
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
sl@0
  1842
		}
sl@0
  1843
	}
sl@0
  1844
sl@0
  1845
sl@0
  1846
/**
sl@0
  1847
Modify page table entries (PTEs) so they map the given demand paged memory pages.
sl@0
  1848
sl@0
  1849
Entries are only updated if the current state of the corresponding page
sl@0
  1850
is RPageArray::ECommitted.
sl@0
  1851
sl@0
  1852
This function is used for demand paged memory when handling a page fault or
sl@0
  1853
memory pinning operation. It will widen the access permission of existing entries
sl@0
  1854
if required to match \a aBlankPte and will 'rejuvenate' the page table.
sl@0
  1855
sl@0
  1856
@param aPtePtr		Pointer into a page table for the PTE of the first page.
sl@0
  1857
@param aCount		The number of pages to modify.
sl@0
  1858
@param aPages		Pointer to the entry for the first page in a memory object's #RPageArray.
sl@0
  1859
					Each entry contains the physical address of a page together with its
sl@0
  1860
					current state (RPageArray::TState).
sl@0
  1861
@param aBlankPte	The value to use for each PTE, with the physical address component equal
sl@0
  1862
					to zero.
sl@0
  1863
sl@0
  1864
@return False, if the page table no longer maps any entries and may be freed.
sl@0
  1865
		True otherwise, to indicate that the page table is still needed.
sl@0
  1866
sl@0
  1867
@pre #MmuLock held.
sl@0
  1868
@post #MmuLock held on return (but may have been released and reacquired by this function).
sl@0
  1869
*/
sl@0
  1870
TBool Mmu::PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
sl@0
  1871
	{
sl@0
  1872
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
sl@0
  1873
	__NK_ASSERT_DEBUG(aCount);
sl@0
  1874
	__NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
sl@0
  1875
sl@0
  1876
	TUint count = 0;
sl@0
  1877
sl@0
  1878
	if(aCount==1)
sl@0
  1879
		{
sl@0
  1880
		// get page to map...
sl@0
  1881
		TPhysAddr page = *aPages;
sl@0
  1882
		TPte pte = *aPtePtr;
sl@0
  1883
		if(!RPageArray::TargetStateIsCommitted(page))
sl@0
  1884
			goto done; // page no longer needs mapping
sl@0
  1885
sl@0
  1886
#ifdef _DEBUG
sl@0
  1887
		if(pte!=KPteUnallocatedEntry)
sl@0
  1888
			{
sl@0
  1889
			if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
sl@0
  1890
				!Mmu::IsPteReadOnly(pte))
sl@0
  1891
				{
sl@0
  1892
				// Page has been mapped before but the physical address is different
sl@0
  1893
				// and the page hasn't been moved as it is not inaccessible.
sl@0
  1894
				Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
sl@0
  1895
				__NK_ASSERT_DEBUG(0);
sl@0
  1896
				}
sl@0
  1897
			}
sl@0
  1898
#endif
sl@0
  1899
		if(!Mmu::IsPteMoreAccessible(aBlankPte,pte))
sl@0
  1900
			return true; // return true to keep page table (it already had at least one page mapped)
sl@0
  1901
sl@0
  1902
		// remap page with new increased permissions...
sl@0
  1903
		if(pte==KPteUnallocatedEntry)
sl@0
  1904
			count = 1; // we'll be adding a new pte entry, count it
sl@0
  1905
		if(!Mmu::IsPteReadOnly(aBlankPte))
sl@0
  1906
			ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
sl@0
  1907
		pte = (page&~KPageMask)|aBlankPte;
sl@0
  1908
		TRACE2(("!PTE %x=%x",aPtePtr,pte));
sl@0
  1909
		*aPtePtr = pte;
sl@0
  1910
sl@0
  1911
		// clean cache...
sl@0
  1912
		CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
sl@0
  1913
		}
sl@0
  1914
	else
sl@0
  1915
		{
sl@0
  1916
		// check we are only updating a single page table...
sl@0
  1917
		__NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
sl@0
  1918
sl@0
  1919
		// map pages...
sl@0
  1920
		TPte* pPte = aPtePtr;
sl@0
  1921
		TPte* pPteEnd = aPtePtr+aCount;
sl@0
  1922
		do
sl@0
  1923
			{
sl@0
  1924
			// map page...
sl@0
  1925
			TPhysAddr page = *aPages++;
sl@0
  1926
			TPte pte = *pPte++;
sl@0
  1927
			if(RPageArray::TargetStateIsCommitted(page))
sl@0
  1928
				{
sl@0
  1929
#ifdef _DEBUG
sl@0
  1930
				if(pte!=KPteUnallocatedEntry)
sl@0
  1931
					{
sl@0
  1932
					if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
sl@0
  1933
						!Mmu::IsPteReadOnly(pte))
sl@0
  1934
						{
sl@0
  1935
						// Page has been mapped before but the physical address is different
sl@0
  1936
						// and the page hasn't been moved as it is not inaccessible.
sl@0
  1937
						Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
sl@0
  1938
						__NK_ASSERT_DEBUG(0);
sl@0
  1939
						}
sl@0
  1940
					}
sl@0
  1941
#endif
sl@0
  1942
				if(Mmu::IsPteMoreAccessible(aBlankPte,pte))
sl@0
  1943
					{
sl@0
  1944
					// remap page with new increased permissions...
sl@0
  1945
					if(pte==KPteUnallocatedEntry)
sl@0
  1946
						++count; // we'll be adding a new pte entry, count it
sl@0
  1947
					if(!Mmu::IsPteReadOnly(aBlankPte))
sl@0
  1948
						ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
sl@0
  1949
					pte = (page&~KPageMask)|aBlankPte;
sl@0
  1950
					TRACE2(("!PTE %x=%x",pPte-1,pte));
sl@0
  1951
					pPte[-1] = pte;
sl@0
  1952
					}
sl@0
  1953
				}
sl@0
  1954
			}
sl@0
  1955
		while(pPte!=pPteEnd);
sl@0
  1956
sl@0
  1957
		// clean cache...
sl@0
  1958
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
sl@0
  1959
		}
sl@0
  1960
sl@0
  1961
done:
sl@0
  1962
	// update page counts...
sl@0
  1963
	SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
sl@0
  1964
	count = pti->IncPageCount(count);
sl@0
  1965
	TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
sl@0
  1966
	__NK_ASSERT_DEBUG(pti->CheckPageCount());
sl@0
  1967
sl@0
  1968
	// see if page table needs freeing...
sl@0
  1969
	TUint keepPt = count | pti->PermanenceCount();
sl@0
  1970
sl@0
  1971
	// rejuvenate demand paged page tables...
sl@0
  1972
	ThePager.RejuvenatePageTable(aPtePtr);
sl@0
  1973
sl@0
  1974
	return keepPt;
sl@0
  1975
	}
sl@0
  1976
sl@0
  1977
sl@0
  1978
//
sl@0
  1979
// CodeModifier
sl@0
  1980
//
sl@0
  1981
sl@0
  1982
#ifdef __DEBUGGER_SUPPORT__
sl@0
  1983
sl@0
  1984
void DoWriteCode(TUint32* aAddress, TUint32 aValue);
sl@0
  1985
sl@0
  1986
#ifdef __SMP__
sl@0
  1987
sl@0
  1988
extern "C" void __e32_instruction_barrier();
sl@0
  1989
sl@0
  1990
class TCodeModifierBroadcast : public TGenericIPI
sl@0
  1991
	{
sl@0
  1992
public:
sl@0
  1993
	TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue);
sl@0
  1994
	static void Isr(TGenericIPI*);
sl@0
  1995
	void Go();
sl@0
  1996
public:
sl@0
  1997
	TUint32*		iAddress;
sl@0
  1998
	TUint32			iValue;
sl@0
  1999
	volatile TInt	iFlag;
sl@0
  2000
	};
sl@0
  2001
sl@0
  2002
TCodeModifierBroadcast::TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue)
sl@0
  2003
	:	iAddress(aAddress), iValue(aValue), iFlag(0)
sl@0
  2004
	{
sl@0
  2005
	}
sl@0
  2006
sl@0
  2007
void TCodeModifierBroadcast::Isr(TGenericIPI* aPtr)
sl@0
  2008
	{
sl@0
  2009
	TCodeModifierBroadcast& a = *(TCodeModifierBroadcast*)aPtr;
sl@0
  2010
	while (!__e32_atomic_load_acq32(&a.iFlag))
sl@0
  2011
		__chill();
sl@0
  2012
#ifdef __BROADCAST_CACHE_MAINTENANCE__
sl@0
  2013
	CacheMaintenance::CodeChanged((TLinAddr)a.iAddress, sizeof (TInt), CacheMaintenance::ECodeModifier);	// need to do separate Clean-D, Purge-I on each core
sl@0
  2014
#else
sl@0
  2015
	__e32_instruction_barrier();		// synchronize instruction execution
sl@0
  2016
#endif
sl@0
  2017
	}
sl@0
  2018
sl@0
  2019
void TCodeModifierBroadcast::Go()
sl@0
  2020
	{
sl@0
  2021
	NKern::Lock();
sl@0
  2022
	QueueAllOther(&Isr);
sl@0
  2023
	WaitEntry();					// wait for other cores to stop
sl@0
  2024
	DoWriteCode(iAddress, iValue);
sl@0
  2025
	iFlag = 1;
sl@0
  2026
	__e32_instruction_barrier();	// synchronize instruction execution
sl@0
  2027
	WaitCompletion();				// wait for other cores to resume
sl@0
  2028
	NKern::Unlock();
sl@0
  2029
	}
sl@0
  2030
#endif
sl@0
  2031
sl@0
  2032
/**
sl@0
  2033
@pre Calling thread must be in critical section
sl@0
  2034
@pre CodeSeg mutex held
sl@0
  2035
*/
sl@0
  2036
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
sl@0
  2037
	{
sl@0
  2038
	__ASSERT_CRITICAL;
sl@0
  2039
	Mmu& m=TheMmu;
sl@0
  2040
	RamAllocLock::Lock();
sl@0
  2041
	MmuLock::Lock();
sl@0
  2042
	__UNLOCK_GUARD_START(MmuLock);
sl@0
  2043
sl@0
  2044
	// Check aProcess is still alive by opening a reference on its os asid.
sl@0
  2045
	TInt osAsid = ((DMemModelProcess*)aProcess)->TryOpenOsAsid();
sl@0
  2046
	if (osAsid < 0)
sl@0
  2047
		{
sl@0
  2048
		__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - zombie process"));
sl@0
  2049
		__UNLOCK_GUARD_END(MmuLock);
sl@0
  2050
		MmuLock::Unlock();
sl@0
  2051
		RamAllocLock::Unlock();
sl@0
  2052
		return KErrBadDescriptor;
sl@0
  2053
		}
sl@0
  2054
sl@0
  2055
	// Find the physical address of the page that the breakpoint belongs to.
sl@0
  2056
	TPhysAddr physAddr = Mmu::LinearToPhysical(aAddress, osAsid);
sl@0
  2057
	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));
sl@0
  2058
sl@0
  2059
sl@0
  2060
	if (physAddr==KPhysAddrInvalid)
sl@0
  2061
		{
sl@0
  2062
		__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
sl@0
  2063
		__UNLOCK_GUARD_END(MmuLock);
sl@0
  2064
		MmuLock::Unlock();
sl@0
  2065
		RamAllocLock::Unlock();
sl@0
  2066
		// The os asid is no longer required.
sl@0
  2067
		((DMemModelProcess*)aProcess)->CloseOsAsid();
sl@0
  2068
		return KErrBadDescriptor;
sl@0
  2069
		}
sl@0
  2070
sl@0
  2071
	// Temporarily map the physical page.
sl@0
  2072
	TLinAddr tempAddr = m.MapTemp(physAddr&~KPageMask, aAddress>>KPageShift);
sl@0
  2073
	tempAddr |=  aAddress & KPageMask;
sl@0
  2074
	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));
sl@0
  2075
sl@0
  2076
	TInt r = KErrBadDescriptor;
sl@0
  2077
	TUint32* ptr = (TUint32*)(tempAddr&~3);
sl@0
  2078
	TUint32 oldWord;
sl@0
  2079
sl@0
  2080
	if(Kern::SafeRead(ptr,&oldWord,sizeof(oldWord))==0 // safely read the original value...
sl@0
  2081
		&& Kern::SafeWrite(ptr,&oldWord,sizeof(oldWord))==0 ) // and write it back
sl@0
  2082
		{
sl@0
  2083
		// We have successfully probed the memory by reading and writing to it
sl@0
  2084
		// so we assume it is now safe to access without generating exceptions.
sl@0
  2085
		// If this is wrong it will kill the system horribly.
sl@0
  2086
sl@0
  2087
		TUint32 newWord;
sl@0
  2088
		TUint badAlign;
sl@0
  2089
		TUint shift = (aAddress&3)*8;
sl@0
  2090
sl@0
  2091
		switch(aSize)
sl@0
  2092
			{
sl@0
  2093
		case 1: // 1 byte value
sl@0
  2094
			badAlign = 0;
sl@0
  2095
			*(TUint8*)aOldValue = oldWord>>shift;
sl@0
  2096
			newWord = (oldWord&~(0xff<<shift)) | ((aValue&0xff)<<shift);
sl@0
  2097
			break;
sl@0
  2098
sl@0
  2099
		case 2: // 2 byte value
sl@0
  2100
			badAlign = tempAddr&1;
sl@0
  2101
			if(!badAlign)
sl@0
  2102
				*(TUint16*)aOldValue = oldWord>>shift;
sl@0
  2103
			newWord = (oldWord&~(0xffff<<shift)) | ((aValue&0xffff)<<shift);
sl@0
  2104
			break;
sl@0
  2105
sl@0
  2106
		default: // 4 byte value
sl@0
  2107
			badAlign = tempAddr&3;
sl@0
  2108
			if(!badAlign)
sl@0
  2109
				*(TUint32*)aOldValue = oldWord;
sl@0
  2110
			newWord = aValue;
sl@0
  2111
			break;
sl@0
  2112
			}
sl@0
  2113
sl@0
  2114
		if(!badAlign)
sl@0
  2115
			{
sl@0
  2116
			// write the new value...
sl@0
  2117
#ifdef __SMP__
sl@0
  2118
			TCodeModifierBroadcast b(ptr, newWord);
sl@0
  2119
			b.Go();
sl@0
  2120
#else
sl@0
  2121
			DoWriteCode(ptr, newWord);
sl@0
  2122
#endif
sl@0
  2123
			r = KErrNone;
sl@0
  2124
			}
sl@0
  2125
		}
sl@0
  2126
sl@0
  2127
	__UNLOCK_GUARD_END(MmuLock);
sl@0
  2128
	m.UnmapTemp();
sl@0
  2129
	MmuLock::Unlock();
sl@0
  2130
	RamAllocLock::Unlock();
sl@0
  2131
	// The os asid is no longer required.
sl@0
  2132
	((DMemModelProcess*)aProcess)->CloseOsAsid();
sl@0
  2133
	return r;
sl@0
  2134
	}
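// Worked example (not part of the original source) of the sub-word merge
// performed above, assuming a 2 byte value is written to an address whose low
// two bits are 0x2 and the aligned word originally contains 0x11223344:
//   shift      = (aAddress&3)*8                                   = 16
//   badAlign   = tempAddr&1                                       = 0 (aligned)
//   *aOldValue = (TUint16)(oldWord>>shift)                        = 0x1122
//   newWord    = (oldWord&~(0xffff<<shift)) | ((aValue&0xffff)<<shift)
//              = 0x00003344 | ((aValue&0xffff)<<16)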
sl@0
  2135
sl@0
  2136
/**
sl@0
  2137
@pre Calling thread must be in critical section
sl@0
  2138
@pre CodeSeg mutex held
sl@0
  2139
*/
sl@0
  2140
void DoWriteCode(TUint32* aAddress, TUint32 aValue)
sl@0
  2141
	{
sl@0
  2142
	// We do not want to be interrupted by e.g. an ISR that would run the altered code before the IMB-Range completes.
sl@0
  2143
	// Therefore, copy data and clean/invalidate caches with interrupts disabled.
sl@0
  2144
	TInt irq = NKern::DisableAllInterrupts();
sl@0
  2145
	*aAddress = aValue;
sl@0
  2146
	CacheMaintenance::CodeChanged((TLinAddr)aAddress, sizeof(TUint32), CacheMaintenance::ECodeModifier);
sl@0
  2147
	NKern::RestoreInterrupts(irq);
sl@0
  2148
	}
sl@0
  2149
sl@0
  2150
#endif //__DEBUGGER_SUPPORT__
sl@0
  2151
sl@0
  2152
sl@0
  2153
sl@0
  2154
//
sl@0
  2155
// Virtual pinning
sl@0
  2156
//
sl@0
  2157
sl@0
  2158
TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
sl@0
  2159
	{
sl@0
  2160
	aPinObject = (TVirtualPinObject*)new DVirtualPinMapping;
sl@0
  2161
	return aPinObject != NULL ? KErrNone : KErrNoMemory;
sl@0
  2162
	}
sl@0
  2163
sl@0
  2164
TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
sl@0
  2165
	{
sl@0
  2166
	__ASSERT_CRITICAL;
sl@0
  2167
	TUint offsetInMapping;
sl@0
  2168
	TUint mapInstanceCount;
sl@0
  2169
	DMemoryMapping* mapping = MM::FindMappingInThread(	(DMemModelThread*)aThread, 
sl@0
  2170
														aStart, 
sl@0
  2171
														aSize, 
sl@0
  2172
														offsetInMapping, 
sl@0
  2173
														mapInstanceCount);
sl@0
  2174
	TInt r = KErrBadDescriptor;
sl@0
  2175
	if (mapping)
sl@0
  2176
		{
sl@0
  2177
		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
sl@0
  2178
		if(mapping->IsPinned())
sl@0
  2179
			{
sl@0
  2180
			// Mapping for the specified virtual address is already pinned so we don't need to
sl@0
  2181
			// do anything. Also, we can't safely pin the memory in this case
sl@0
  2182
			// anyway, as pinned mappings may move between memory objects
sl@0
  2183
			r = KErrNone;
sl@0
  2184
			}
sl@0
  2185
		else
sl@0
  2186
			{
sl@0
  2187
			MmuLock::Lock();
sl@0
  2188
			DMemoryObject* memory = mapping->Memory();
sl@0
  2189
			if (mapInstanceCount != mapping->MapInstanceCount() || 
sl@0
  2190
				!memory || !memory->IsDemandPaged())
sl@0
  2191
				{
sl@0
  2192
				// mapping has been reused, no memory, or it's not paged, so no need to pin...
sl@0
  2193
				MmuLock::Unlock();
sl@0
  2194
				r = KErrNone;
sl@0
  2195
				}
sl@0
  2196
			else
sl@0
  2197
				{
sl@0
  2198
				// paged memory needs pinning...
sl@0
  2199
				// Open a reference on the memory so it doesn't get deleted.
sl@0
  2200
				memory->Open();
sl@0
  2201
				MmuLock::Unlock();
sl@0
  2202
sl@0
  2203
				TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
sl@0
  2204
				r = ((DVirtualPinMapping*)aPinObject)->Pin(	memory, startInMemory, count, mapping->Permissions(),
sl@0
  2205
															mapping, mapInstanceCount);
sl@0
  2206
				memory->Close();
sl@0
  2207
				}
sl@0
  2208
			}
sl@0
  2209
		mapping->Close();
sl@0
  2210
		}	
sl@0
  2211
	return r;
sl@0
  2212
	}
sl@0
  2213
sl@0
  2214
TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
sl@0
  2215
	{
sl@0
  2216
	__ASSERT_CRITICAL;
sl@0
  2217
	aPinObject = 0;
sl@0
  2218
	TUint offsetInMapping;
sl@0
  2219
	TUint mapInstanceCount;
sl@0
  2220
	DMemoryMapping* mapping = MM::FindMappingInThread(	(DMemModelThread*)&Kern::CurrentThread(), 
sl@0
  2221
														aStart, 
sl@0
  2222
														aSize, 
sl@0
  2223
														offsetInMapping,
sl@0
  2224
														mapInstanceCount);
sl@0
  2225
	TInt r = KErrBadDescriptor;
sl@0
  2226
	if (mapping)
sl@0
  2227
		{
sl@0
  2228
		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
sl@0
  2229
		if(mapping->IsPinned())
sl@0
  2230
			{
sl@0
  2231
			// Mapping for the specified virtual address is already pinned so we don't need to
sl@0
  2232
			// do anything. Also, we can't safely pin the memory in this case
sl@0
  2233
			// anyway, as pinned mappings may move between memory objects
sl@0
  2234
			r = KErrNone;
sl@0
  2235
			}
sl@0
  2236
		else
sl@0
  2237
			{
sl@0
  2238
			MmuLock::Lock();
sl@0
  2239
			DMemoryObject* memory = mapping->Memory();
sl@0
  2240
			if (mapInstanceCount != mapping->MapInstanceCount() || 
sl@0
  2241
				!memory || !memory->IsDemandPaged())
sl@0
  2242
				{
sl@0
  2243
				// mapping has been reused, no memory, or it's not paged, so no need to pin...
sl@0
  2244
				MmuLock::Unlock();
sl@0
  2245
				r = KErrNone;
sl@0
  2246
				}
sl@0
  2247
			else
sl@0
  2248
				{// The memory is demand paged so create a pin object and pin it.
sl@0
  2249
				// Open a reference on the memory so it doesn't get deleted.
sl@0
  2250
				memory->Open();
sl@0
  2251
				MmuLock::Unlock();
sl@0
  2252
				r = CreateVirtualPinObject(aPinObject);
sl@0
  2253
				if (r == KErrNone)
sl@0
  2254
					{
sl@0
  2255
					TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
sl@0
  2256
					r = ((DVirtualPinMapping*)aPinObject)->Pin(	memory, startInMemory, count, mapping->Permissions(), 
sl@0
  2257
																mapping, mapInstanceCount);
sl@0
  2258
					if (r != KErrNone)
sl@0
  2259
						{// Failed to pin the memory so pin object is not required.
sl@0
  2260
						DestroyVirtualPinObject(aPinObject);
sl@0
  2261
						}
sl@0
  2262
					}
sl@0
  2263
				memory->Close();
sl@0
  2264
				}
sl@0
  2265
			}
sl@0
  2266
		mapping->Close();
sl@0
  2267
		}	
sl@0
  2268
	return r;
sl@0
  2269
	}
sl@0
  2270
sl@0
  2271
void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
sl@0
  2272
	{
sl@0
  2273
	DVirtualPinMapping* mapping = (DVirtualPinMapping*)aPinObject;
sl@0
  2274
	if (mapping->IsAttached())
sl@0
  2275
		mapping->Unpin();
sl@0
  2276
	}
sl@0
  2277
	
sl@0
  2278
void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
sl@0
  2279
	{
sl@0
  2280
	DVirtualPinMapping* mapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
sl@0
  2281
	if (mapping)
sl@0
  2282
		{
sl@0
  2283
		if (mapping->IsAttached())
sl@0
  2284
			mapping->Unpin();
sl@0
  2285
		mapping->AsyncClose();
sl@0
  2286
		}
sl@0
  2287
	}
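// Illustrative sketch, not part of the original source and never compiled:
// the expected life cycle of a virtual pin object using the M:: interface
// above. The buffer address and size are hypothetical.
#if 0
static TInt ExampleVirtualPinUse(TLinAddr aBufferAddr, TUint aBufferSize)
	{
	TVirtualPinObject* pin = NULL;
	NKern::ThreadEnterCS();
	TInt r = M::CreateVirtualPinObject(pin);
	if(r==KErrNone)
		{
		r = M::PinVirtualMemory(pin, aBufferAddr, aBufferSize, &Kern::CurrentThread());
		if(r==KErrNone)
			{
			// ...access the memory without the risk of it being paged out...
			M::UnpinVirtualMemory(pin);
			}
		M::DestroyVirtualPinObject(pin);
		}
	NKern::ThreadLeaveCS();
	return r;
	}
#endif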
sl@0
  2288
sl@0
  2289
//
sl@0
  2290
// Physical pinning
sl@0
  2291
//
sl@0
  2292
sl@0
  2293
TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
sl@0
  2294
	{
sl@0
  2295
	aPinObject = (TPhysicalPinObject*)new DPhysicalPinMapping;
sl@0
  2296
	return aPinObject != NULL ? KErrNone : KErrNoMemory;
sl@0
  2297
	}
sl@0
  2298
sl@0
  2299
TInt M::PinPhysicalMemory(TPhysicalPinObject* aPinObject, TLinAddr aStart, TUint aSize, TBool aReadOnly,
sl@0
  2300
				TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour, DThread* aThread)
sl@0
  2301
	{
sl@0
  2302
	__ASSERT_CRITICAL;
sl@0
  2303
	TUint offsetInMapping;
sl@0
  2304
	TUint mapInstanceCount;
sl@0
  2305
	DMemoryMapping* mapping = MM::FindMappingInThread(	(DMemModelThread*)aThread, 
sl@0
  2306
														aStart, 
sl@0
  2307
														aSize, 
sl@0
  2308
														offsetInMapping, 
sl@0
  2309
														mapInstanceCount);
sl@0
  2310
	TInt r = KErrBadDescriptor;
sl@0
  2311
	if (mapping)
sl@0
  2312
		{
sl@0
  2313
		TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
sl@0
  2314
sl@0
  2315
		MmuLock::Lock();
sl@0
  2316
		DMemoryObject* memory = mapping->Memory();
sl@0
  2317
		if (mapInstanceCount == mapping->MapInstanceCount() && memory)
sl@0
  2318
			{
sl@0
  2319
			memory->Open();
sl@0
  2320
			MmuLock::Unlock();
sl@0
  2321
sl@0
  2322
			TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
sl@0
  2323
			TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
sl@0
  2324
			r = ((DPhysicalPinMapping*)aPinObject)->Pin(memory, startInMemory, count, permissions);
sl@0
  2325
			if (r == KErrNone)
sl@0
  2326
				{
sl@0
  2327
				r = ((DPhysicalPinMapping*)aPinObject)->PhysAddr(0, count, aAddress, aPages);
sl@0
  2328
				if (r>=KErrNone)
sl@0
  2329
					{
sl@0
  2330
					r = KErrNone; //Do not report discontiguous memory in return value.
sl@0
  2331
					const TMappingAttributes2& mapAttr2 =
sl@0
  2332
											MM::LegacyMappingAttributes(memory->Attributes(), mapping->Permissions());
sl@0
  2333
					*(TMappingAttributes2*)&aMapAttr = mapAttr2;
sl@0
  2334
					}
sl@0
  2335
				else
sl@0
  2336
					UnpinPhysicalMemory(aPinObject);
sl@0
  2337
				}
sl@0
  2338
			memory->Close();
sl@0
  2339
			}
sl@0
  2340
		else // mapping has been reused or no memory...
sl@0
  2341
			{
sl@0
  2342
			MmuLock::Unlock();
sl@0
  2343
			}
sl@0
  2344
		mapping->Close();
sl@0
  2345
		}
sl@0
  2346
	aColour = (aStart >>KPageShift) & KPageColourMask;
sl@0
  2347
	return r;
sl@0
  2348
	}
sl@0
  2349
sl@0
  2350
void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
sl@0
  2351
	{
sl@0
  2352
	DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)aPinObject;
sl@0
  2353
	if (mapping->IsAttached())
sl@0
  2354
		mapping->Unpin();
sl@0
  2355
	}
sl@0
  2356
sl@0
  2357
void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
sl@0
  2358
	{
sl@0
  2359
	DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
sl@0
  2360
	if (mapping)
sl@0
  2361
		{
sl@0
  2362
		if (mapping->IsAttached())
sl@0
  2363
			mapping->Unpin();
sl@0
  2364
		mapping->AsyncClose();
sl@0
  2365
		}
sl@0
  2366
	}
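// Illustrative sketch, not part of the original source and never compiled:
// physically pinning a buffer to obtain the per-page physical addresses
// needed for a DMA transfer. The buffer, the sizing of the page list and the
// use made of it are hypothetical.
#if 0
static TInt ExamplePhysicalPinUse(TLinAddr aBufferAddr, TUint aBufferSize, TPhysAddr* aPageList)
	{
	TPhysicalPinObject* pin = NULL;
	NKern::ThreadEnterCS();
	TInt r = M::CreatePhysicalPinObject(pin);
	if(r==KErrNone)
		{
		TPhysAddr physAddr;
		TUint32 mapAttr;
		TUint colour;
		r = M::PinPhysicalMemory(pin, aBufferAddr, aBufferSize, EFalse,
								 physAddr, aPageList, mapAttr, colour, &Kern::CurrentThread());
		if(r==KErrNone)
			{
			// ...program the DMA controller using aPageList, mapAttr and colour...
			M::UnpinPhysicalMemory(pin);
			}
		M::DestroyPhysicalPinObject(pin);
		}
	NKern::ThreadLeaveCS();
	return r;
	}
#endif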
sl@0
  2367
sl@0
  2368
sl@0
  2369
//
sl@0
  2370
// Kernel map and pin.
sl@0
  2371
//
sl@0
  2372
sl@0
  2373
TInt M::CreateKernelMapObject(TKernelMapObject*& aMapObject, TUint aMaxReserveSize)
sl@0
  2374
	{
sl@0
  2375
	DKernelPinMapping*  pinObject = new DKernelPinMapping();
sl@0
  2376
	aMapObject = (TKernelMapObject*) pinObject;
sl@0
  2377
	if (pinObject == NULL)
sl@0
  2378
		{
sl@0
  2379
		return KErrNoMemory;
sl@0
  2380
		}
sl@0
  2381
	// Ensure we reserve enough bytes for all possible alignments of the start and 
sl@0
  2382
	// end of the region to map.
sl@0
  2383
	TUint reserveBytes = aMaxReserveSize? ((aMaxReserveSize + KPageMask) & ~KPageMask) + KPageSize : 0;
sl@0
  2384
	TInt r = pinObject->Construct(reserveBytes);
sl@0
  2385
	if (r != KErrNone)
sl@0
  2386
		{// Failed so delete the kernel mapping object.
sl@0
  2387
		pinObject->Close();
sl@0
  2388
		aMapObject = NULL;
sl@0
  2389
		}
sl@0
  2390
	return r;
sl@0
  2391
	}
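// Worked example (not part of the original source) of the reservation rounding
// above, assuming 4K pages (KPageSize==0x1000, KPageMask==0xfff) and
// aMaxReserveSize==10000 bytes:
//   ((10000 + 0xfff) & ~0xfff) = 12288 (rounded up to three whole pages)
//   + KPageSize                = 16384 (one extra page to cover any possible
//                                       alignment of the region's start and end)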
sl@0
  2392
sl@0
  2393
sl@0
  2394
TInt M::MapAndPinMemory(TKernelMapObject* aMapObject, DThread* aThread, TLinAddr aStart, 
sl@0
  2395
						TUint aSize, TUint aMapAttributes, TLinAddr& aKernelAddr, TPhysAddr* aPages)
sl@0
  2396
	{
sl@0
  2397
	__ASSERT_CRITICAL;
sl@0
  2398
	TUint offsetInMapping;
sl@0
  2399
	TUint mapInstanceCount;
sl@0
  2400
	DMemoryMapping* mapping = MM::FindMappingInThread(	(DMemModelThread*)aThread, 
sl@0
  2401
														aStart, 
sl@0
  2402
														aSize, 
sl@0
  2403
														offsetInMapping, 
sl@0
  2404
														mapInstanceCount);
sl@0
  2405
	TInt r = KErrBadDescriptor;
sl@0
  2406
	if (mapping)
sl@0
  2407
		{
sl@0
  2408
		DKernelPinMapping* kernelMap = (DKernelPinMapping*)aMapObject;
sl@0
  2409
		TInt count = (((aStart + aSize + KPageMask) & ~KPageMask) - (aStart & ~KPageMask)) >> KPageShift;
sl@0
  2410
		if (kernelMap->iReservePages && kernelMap->iReservePages < count)
sl@0
  2411
			{
sl@0
  2412
			mapping->Close();
sl@0
  2413
			return KErrArgument;
sl@0
  2414
			}
sl@0
  2415
sl@0
  2416
		MmuLock::Lock();
sl@0
  2417
		DMemoryObject* memory = mapping->Memory();
sl@0
  2418
		if (mapInstanceCount == mapping->MapInstanceCount() && memory)
sl@0
  2419
			{
sl@0
  2420
			memory->Open();
sl@0
  2421
			MmuLock::Unlock();
sl@0
  2422
sl@0
  2423
			TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
sl@0
  2424
			TBool readOnly = aMapAttributes & Kern::EKernelMap_ReadOnly;
sl@0
  2425
			TMappingPermissions permissions =  readOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
sl@0
  2426
			r = kernelMap->MapAndPin(memory, startInMemory, count, permissions);
sl@0
  2427
			if (r == KErrNone)
sl@0
  2428
				{
sl@0
  2429
				__NK_ASSERT_DEBUG(!kernelMap->IsUserMapping());
sl@0
  2430
				aKernelAddr = kernelMap->Base();
sl@0
  2431
				TPhysAddr contigAddr;	// Ignore this value as aPages will be populated 
sl@0
  2432
										// whether the memory is contiguous or not.
sl@0
  2433
				r = kernelMap->PhysAddr(0, count, contigAddr, aPages);
sl@0
  2434
				if (r>=KErrNone)
sl@0
  2435
					{
sl@0
  2436
					r = KErrNone; //Do not report discontiguous memory in return value.
sl@0
  2437
					}
sl@0
  2438
				else
sl@0
  2439
					{
sl@0
  2440
					UnmapAndUnpinMemory((TKernelMapObject*)kernelMap);
sl@0
  2441
					}
sl@0
  2442
				}
sl@0
  2443
			memory->Close();
sl@0
  2444
			}
sl@0
  2445
		else // mapping has been reused or no memory...
sl@0
  2446
			{
sl@0
  2447
			MmuLock::Unlock();
sl@0
  2448
			}
sl@0
  2449
		mapping->Close();
sl@0
  2450
		}
sl@0
  2451
	return r;
sl@0
  2452
	}
sl@0
  2453
sl@0
  2454
sl@0
  2455
void M::UnmapAndUnpinMemory(TKernelMapObject* aMapObject)
sl@0
  2456
	{
sl@0
  2457
	DKernelPinMapping* mapping = (DKernelPinMapping*)aMapObject;
sl@0
  2458
	if (mapping->IsAttached())
sl@0
  2459
		mapping->UnmapAndUnpin();
sl@0
  2460
	}
sl@0
  2461
sl@0
  2462
sl@0
  2463
void M::DestroyKernelMapObject(TKernelMapObject*& aMapObject)
sl@0
  2464
	{
sl@0
  2465
	DKernelPinMapping* mapping = (DKernelPinMapping*)__e32_atomic_swp_ord_ptr(&aMapObject, 0);
sl@0
  2466
	if (mapping)
sl@0
  2467
		{
sl@0
  2468
		if (mapping->IsAttached())
sl@0
  2469
			mapping->UnmapAndUnpin();
sl@0
  2470
		mapping->AsyncClose();
sl@0
  2471
		}
sl@0
  2472
	}
sl@0
  2473
sl@0
  2474
sl@0
  2475
//
sl@0
  2476
// Cache sync operations
sl@0
  2477
//
sl@0
  2478
sl@0
  2479
//@pre	As for MASK_THREAD_STANDARD
sl@0
  2480
void Mmu::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
sl@0
  2481
	{
sl@0
  2482
	//Jump over the pages we do not have to sync
sl@0
  2483
	aPages += aOffset>>KPageShift;
sl@0
  2484
	aOffset &=KPageMask;
sl@0
  2485
	aColour  = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
sl@0
  2486
sl@0
  2487
	//Calculate page table entry for the temporary mapping.
sl@0
  2488
	TUint pteType = PteType(ESupervisorReadWrite,true);
sl@0
  2489
	TMappingAttributes2 mapAttr2(aMapAttr);
sl@0
  2490
	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
sl@0
  2491
	
sl@0
  2492
	while (aSize) //A single pass of loop operates within page boundaries.
sl@0
  2493
		{
sl@0
  2494
		TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
sl@0
  2495
sl@0
  2496
		NKern::ThreadEnterCS();
sl@0
  2497
		Kern::MutexWait(*iPhysMemSyncMutex);
sl@0
  2498
		
sl@0
  2499
		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
sl@0
  2500
		CacheMaintenance::MakeCPUChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
sl@0
  2501
		iPhysMemSyncTemp.Unmap();
sl@0
  2502
		
sl@0
  2503
		Kern::MutexSignal(*iPhysMemSyncMutex);
sl@0
  2504
		NKern::ThreadLeaveCS();
sl@0
  2505
sl@0
  2506
		aSize-=sizeInLoopPass;  // Remaining bytes to sync
sl@0
  2507
		aOffset=0;				// In all the pages after the first, sync will always start with zero offset.
sl@0
  2508
		aPages++;	// Point to the next page
sl@0
  2509
		aColour  = (aColour+1) & KPageColourMask;
sl@0
  2510
		}
sl@0
  2511
	}
sl@0
  2512
sl@0
  2513
//@pre	As for MASK_THREAD_STANDARD
sl@0
  2514
void Mmu::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
sl@0
  2515
	{
sl@0
  2516
	//Jump over the pages we do not have to sync
sl@0
  2517
	aPages += aOffset>>KPageShift;
sl@0
  2518
	aOffset &=KPageMask;
sl@0
  2519
	aColour  = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
sl@0
  2520
sl@0
  2521
	//Calculate page table entry for the temporary mapping.
sl@0
  2522
	TUint pteType = PteType(ESupervisorReadWrite,true);
sl@0
  2523
	TMappingAttributes2 mapAttr2(aMapAttr);
sl@0
  2524
	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
sl@0
  2525
	
sl@0
  2526
	while (aSize) //A single pass of loop operates within page boundaries.
sl@0
  2527
		{
sl@0
  2528
		TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
sl@0
  2529
sl@0
  2530
		NKern::ThreadEnterCS();
sl@0
  2531
		Kern::MutexWait(*iPhysMemSyncMutex);
sl@0
  2532
		
sl@0
  2533
		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
sl@0
  2534
		CacheMaintenance::PrepareMemoryForExternalWrites(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
sl@0
  2535
		iPhysMemSyncTemp.Unmap();
sl@0
  2536
		
sl@0
  2537
		Kern::MutexSignal(*iPhysMemSyncMutex);
sl@0
  2538
		NKern::ThreadLeaveCS();
sl@0
  2539
sl@0
  2540
		aSize-=sizeInLoopPass;  // Remaining bytes to sync
sl@0
  2541
		aOffset=0;				// In all the pages after the first, sync will always start with zero offset.
sl@0
  2542
		aPages++;	// Point to the next page
sl@0
  2543
		aColour  = (aColour+1) & KPageColourMask;
sl@0
  2544
		}
sl@0
  2545
	}
sl@0
  2546
sl@0
  2547
//@pre	As for MASK_THREAD_STANDARD
sl@0
  2548
void Mmu::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
sl@0
  2549
	{
sl@0
  2550
	//Jump over the pages we do not have to sync
sl@0
  2551
	aPages += aOffset>>KPageShift;
sl@0
  2552
	aOffset &=KPageMask;
sl@0
  2553
	aColour  = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
sl@0
  2554
sl@0
  2555
	//Calculate page table entry for the temporary mapping.
sl@0
  2556
	TUint pteType = PteType(ESupervisorReadWrite,true);
sl@0
  2557
	TMappingAttributes2 mapAttr2(aMapAttr);
sl@0
  2558
	TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
sl@0
  2559
	
sl@0
  2560
	while (aSize) //A single pass of loop operates within page boundaries.
sl@0
  2561
		{
sl@0
  2562
		TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
sl@0
  2563
sl@0
  2564
		NKern::ThreadEnterCS();
sl@0
  2565
		Kern::MutexWait(*iPhysMemSyncMutex);
sl@0
  2566
		
sl@0
  2567
		TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
sl@0
  2568
		CacheMaintenance::MakeExternalChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
sl@0
  2569
		iPhysMemSyncTemp.Unmap();
sl@0
  2570
		
sl@0
  2571
		Kern::MutexSignal(*iPhysMemSyncMutex);
sl@0
  2572
		NKern::ThreadLeaveCS();
sl@0
  2573
sl@0
  2574
		aSize-=sizeInLoopPass;  // Remaining bytes to sync
sl@0
  2575
		aOffset=0;				// In all the pages after the first, sync will always start with zero offset.
sl@0
  2576
		aPages++;	// Point to the next page
sl@0
  2577
		aColour  = (aColour+1) & KPageColourMask;
sl@0
  2578
		}
sl@0
  2579
	}
sl@0
  2580
sl@0
  2581
EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
sl@0
  2582
	{
sl@0
  2583
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
sl@0
  2584
	TheMmu.SyncPhysicalMemoryBeforeDmaWrite(aPages, aColour, aOffset, aSize, aMapAttr);
sl@0
  2585
	return KErrNone;
sl@0
  2586
	}
sl@0
  2587
sl@0
  2588
EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
sl@0
  2589
	{
sl@0
  2590
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
sl@0
  2591
	TheMmu.SyncPhysicalMemoryBeforeDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
sl@0
  2592
	return KErrNone;
sl@0
  2593
	}
sl@0
  2594
sl@0
  2595
EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
sl@0
  2596
	{
sl@0
  2597
	CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
sl@0
  2598
	TheMmu.SyncPhysicalMemoryAfterDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
sl@0
  2599
	return KErrNone;
sl@0
  2600
	}
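// Illustrative sketch, not part of the original source and never compiled:
// how a driver would typically bracket DMA transfers with the exported cache
// synchronisation calls above. The page list, colour, offset, size and
// attributes are assumed to come from an earlier physical pinning operation;
// the transfer directions follow the implementations above (BeforeDmaWrite
// makes CPU changes visible before the DMA engine reads memory, while the
// DmaRead pair brackets a transfer in which the DMA engine writes memory).
#if 0
static void ExampleDmaSync(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
	{
	// Memory -> peripheral: make CPU-written data visible to the DMA engine.
	Cache::SyncPhysicalMemoryBeforeDmaWrite(aPages, aColour, aOffset, aSize, aMapAttr);
	// ...start and wait for the outgoing transfer...

	// Peripheral -> memory: prepare the region, transfer, then make the
	// externally written data visible to the CPU.
	Cache::SyncPhysicalMemoryBeforeDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
	// ...start and wait for the incoming transfer...
	Cache::SyncPhysicalMemoryAfterDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
	}
#endif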