// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\arm\xmmu.cpp
//
//

#include "arm_mem.h"
#include <mmubase.inl>
#include <ramcache.h>
#include <demand_paging.h>
#include "execs.h"
#include <defrag.h>
#include "cache_maintenance.inl"

#undef __MMU_MACHINE_CODED__
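// Note: __MMU_MACHINE_CODED__ normally selects assembler implementations of the MMU
// primitives; undefining it here forces the portable C++ versions in this file to be
// built instead (see the #ifndef __MMU_MACHINE_CODED__ block further down).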

// SECTION_PDE(perm, attr, domain, execute, global)
// PT_PDE(domain)
// LP_PTE(perm, attr, execute, global)
// SP_PTE(perm, attr, execute, global)

const TInt KPageColourShift=2;
const TInt KPageColourCount=(1<<KPageColourShift);
const TInt KPageColourMask=KPageColourCount-1;
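// Page colouring: KPageColourCount==4 means a physical page should only be mapped at
// virtual addresses whose bits 13:12 match its colour, so a VIPT cache cannot end up
// holding two aliases of the same line. Illustrative check only (not used here):
//   TBool sameColour = (((va1 ^ va2) >> KPageShift) & KPageColourMask) == 0;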


const TPde KPdPdePerm=PT_PDE(0);
const TPde KPtPdePerm=PT_PDE(0);
const TPde KShadowPdePerm=PT_PDE(0);

#if defined(__CPU_MEMORY_TYPE_REMAPPING)
// ARM1176, ARM11MPCore, ARMv7 and later
// __CPU_MEMORY_TYPE_REMAPPING means that only three bits (TEX0:C:B) in the page table define
// memory attributes. The kernel runs with a limited set of memory types: strongly ordered,
// device, normal uncached and normal WBWA. Due to the lack of a write-through mode, page tables
// are write-back, which means the cache has to be cleaned on every page/directory table update.
const TPte KPdPtePerm=				SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
const TPte KPtPtePerm=				SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
const TPte KPtInfoPtePerm=			SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
const TPte KRomPtePerm=				SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
const TPte KShadowPtePerm=			SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
const TPde KRomSectionPermissions=	SECTION_PDE(KArmV6PermRORO, EMemAttNormalCached, 0, 1, 1);
const TPte KUserCodeLoadPte=		SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 0);
const TPte KUserCodeRunPte=			SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0);
const TPte KGlobalCodeRunPte=		SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
const TPte KKernelCodeRunPte=		SP_PTE(KArmV6PermRONO, EMemAttNormalCached, 1, 1);

const TInt KNormalUncachedAttr = EMemAttNormalUncached;
const TInt KNormalCachedAttr = EMemAttNormalCached;

#else

// ARM1136
const TPte KPtInfoPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
#if defined (__CPU_WriteThroughDisabled)
const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 1, 1);
const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 0, 1, 1);
const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 0);
const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0);
const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
const TInt KKernelCodeRunPteAttr = KArmV6MemAttWBWAWBWA;
#else
const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 1, 1);
const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 0, 1, 1);
const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 0);
const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0);
const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
const TInt KKernelCodeRunPteAttr = KArmV6MemAttWTRAWTRA;
#endif


#if defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
const TInt KKernelCodeRunPtePerm = KArmV6PermRONO;
#else
const TInt KKernelCodeRunPtePerm = KArmV6PermRORO;
#endif
const TPte KKernelCodeRunPte=SP_PTE(KKernelCodeRunPtePerm, KKernelCodeRunPteAttr, 1, 1);

const TInt KNormalUncachedAttr = KArmV6MemAttNCNC;
const TInt KNormalCachedAttr = KArmV6MemAttWBWAWBWA;

#endif


extern void __FlushBtb();

#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
extern void remove_and_invalidate_page(TPte* aPte, TLinAddr aAddr, TInt aAsid);
extern void remove_and_invalidate_section(TPde* aPde, TLinAddr aAddr, TInt aAsid);
#endif


LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
	{
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
// ARM1176, ARM11 mcore, ARMv7 and later
	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),		// EKernelData
	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),		// EKernelStack
	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1),		// EKernelCode - loading
	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1),		// EDll (used for global code) - loading
	SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0),		// EUserCode - run
	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 1),		// ERamDrive
	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// EUserData
	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// EDllData
	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 1, 0),		// EUserSelfModCode
	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// ESharedKernelSingle
	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// ESharedKernelMultiple
	SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0),		// ESharedIo
	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),		// ESharedKernelMirror
	SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1),		// EKernelMessage
#else
	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),		// EKernelData
	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),		// EKernelStack
#if defined (__CPU_WriteThroughDisabled)
	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1),		// EKernelCode - loading
	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1),		// EDll (used for global code) - loading
	SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0),		// EUserCode - run
#else
	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1),		// EKernelCode - loading
	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1),		// EDll (used for global code) - loading
	SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0),		// EUserCode - run
#endif
	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 1),		// ERamDrive
	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// EUserData
	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// EDllData
	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 1, 0),		// EUserSelfModCode
	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// ESharedKernelSingle
	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// ESharedKernelMultiple
	SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0),		// ESharedIo
	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),		// ESharedKernelMirror
	SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1),		// EKernelMessage
#endif
	};
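// ChunkPtePermissions is indexed by the chunk's TChunkType; roughly, a chunk's pages
// end up mapped with something like the following (illustrative only):
//   TPte pte = physAddr | ChunkPtePermissions[chunkType];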

// The domain for each chunk is selected according to its type.
// The RamDrive lives in a separate domain, to minimise the risk
// of accidental access and corruption. User chunks may also be
// located in a separate domain (15) in DEBUG builds.
LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
	{
	PT_PDE(0),						// EKernelData
	PT_PDE(0),						// EKernelStack
	PT_PDE(0),						// EKernelCode
	PT_PDE(0),						// EDll
	PT_PDE(USER_MEMORY_DOMAIN),		// EUserCode
	PT_PDE(1),						// ERamDrive
	PT_PDE(USER_MEMORY_DOMAIN),		// EUserData
	PT_PDE(USER_MEMORY_DOMAIN),		// EDllData
	PT_PDE(USER_MEMORY_DOMAIN),		// EUserSelfModCode
	PT_PDE(USER_MEMORY_DOMAIN),		// ESharedKernelSingle
	PT_PDE(USER_MEMORY_DOMAIN),		// ESharedKernelMultiple
	PT_PDE(0),						// ESharedIo
	PT_PDE(0),						// ESharedKernelMirror
	PT_PDE(0),						// EKernelMessage
	};

// Inline functions for simple transformations
inline TLinAddr PageTableLinAddr(TInt aId)
	{
	return (KPageTableBase+(aId<<KPageTableShift));
	}

inline TPte* PageTable(TInt aId)
	{
	return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
	}

inline TPte* PageTableEntry(TInt aId, TLinAddr aAddress)
	{
	return PageTable(aId) + ((aAddress >> KPageShift) & (KChunkMask >> KPageShift));
	}

inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
	{
	return (KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
	}

inline TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
	{
	return PageDirectory(aOsAsid) + (aAddress >> KChunkShift);
	}

extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/, TInt /*aAsid*/);
extern void FlushTLBs();
extern TUint32 TTCR();

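// Page tables are 1KB each, so four of them share one 4KB RAM page (a "cluster").
// A page table id therefore combines the SPageInfo offset of the owning RAM page
// (pi->Offset()<<KPtClusterShift) with the sub-page index taken from the PDE.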
TPte* SafePageTableFromPde(TPde aPde)
	{
	if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
		if(pi)
			{
			TInt id = (pi->Offset()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
			return PageTable(id);
			}
		}
	return 0;
	}

TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
	{
	if ((TInt)(aAddress>>KChunkShift)>=(TheMmu.iLocalPdSize>>2))
		aOsAsid = 0;
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	TPte* pt = SafePageTableFromPde(pde);
	if(pt)
		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}

#ifndef _DEBUG
// inline in UREL builds...
#ifdef __ARMCC__
	__forceinline /* RVCT ignores normal inline qualifier :-( */
#else
	inline
#endif
#endif
TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
	{
	// this function only works for process local memory addresses, or for kernel memory (asid==0).
	__NK_ASSERT_DEBUG(aOsAsid==0 || (TInt)(aAddress>>KChunkShift)<(TheMmu.iLocalPdSize>>2));
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
	TPte* pt = PageTable(id);
	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}


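// Walks the page directory (and any page tables) covering aLinAddr..aLinAddr+aSize,
// handling section, small-page and large-page mappings. If aPhysicalPageList is
// supplied, every page's physical address is recorded; otherwise the walk just checks
// that the region is physically contiguous (tracked via nextPhys).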
TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
	TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr,aOsAsid);
	TPhysAddr nextPhys = physStart&~KPageMask;

	TUint32* pageList = aPhysicalPageList;

	TInt pageIndex = aLinAddr>>KPageShift;
	TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
	TInt pdeIndex = aLinAddr>>KChunkShift;
	TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1))
					? PageDirectory(aOsAsid)
					: ::InitPageDirectory;
	pdePtr += pdeIndex;
	while(pagesLeft)
		{
		pageIndex &= KChunkMask>>KPageShift;
		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
		if(pagesLeftInChunk>pagesLeft)
			pagesLeftInChunk = pagesLeft;
		pagesLeft -= pagesLeftInChunk;

		TPhysAddr phys;
		TPde pde = *pdePtr++;
		TUint pdeType = pde&KPdeTypeMask;
		if(pdeType==KArmV6PdeSection)
			{
			phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
			__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
			TInt n=pagesLeftInChunk;
			phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
			if(pageList)
				{
				TUint32* pageEnd = pageList+n;
				do
					{
					*pageList++ = phys;
					phys+=KPageSize;
					}
				while(pageList<pageEnd);
				}
			}
		else
			{
			TPte* pt = SafePageTableFromPde(pde);
			if(!pt)
				{
				__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical missing page table: PDE=%8x",pde));
				return KErrNotFound;
				}
			pt += pageIndex;
			for(;;)
				{
				TPte pte = *pt++;
				TUint pte_type = pte & KPteTypeMask;
				if (pte_type >= KArmV6PteSmallPage)
					{
					phys = (pte & KPteSmallPageAddrMask);
					__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
					phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
					if(pageList)
						*pageList++ = phys;
					if(--pagesLeftInChunk)
						continue;
					break;
					}
				if (pte_type == KArmV6PteLargePage)
					{
					--pt; // back up ptr
					TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
					phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
					__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
					TInt n=KLargeSmallPageRatio-pageOffset;
					if(n>pagesLeftInChunk)
						n = pagesLeftInChunk;
					phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
					if(pageList)
						{
						TUint32* pageEnd = pageList+n;
						do
							{
							*pageList++ = phys;
							phys+=KPageSize;
							}
						while(pageList<pageEnd);
						}
					pt += n;
					if(pagesLeftInChunk-=n)
						continue;
					break;
					}
				__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical bad PTE %8x",pte));
				return KErrNotFound;
				}
			}
		if(!pageList && nextPhys==KPhysAddrInvalid)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical not contiguous"));
			return KErrNotFound;
			}
		pageIndex = 0;
		}

	if(nextPhys==KPhysAddrInvalid)
		{
		// Memory is discontiguous...
		aPhysicalAddress = KPhysAddrInvalid;
		return 1;
		}
	else
		{
		// Memory is contiguous...
		aPhysicalAddress = physStart;
		return KErrNone;
		}
	}

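// Locking protocol: the RamAlloc mutex (MmuBase::Wait) guards the page/directory tables
// and the system lock guards the SPageInfo state; the system lock is flashed every 128
// pages (512K) to bound holdoff time. On any failure, all pages locked so far are
// released again via ReleasePagesFromDMA.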
TInt ArmMmu::PreparePagesForDMA(TLinAddr aLinAddr, TInt aSize, TInt aOsAsid, TPhysAddr* aPhysicalPageList)
//Returns the list of physical pages belonging to the specified memory space.
//Checks that these pages belong to a chunk marked as being trusted.
//Locks these pages so they cannot be moved by e.g. RAM defragmentation.
	{
	SPageInfo* pi = NULL;
	DChunk* chunk = NULL;
	TInt err = KErrNone;

	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));

	TUint32* pageList = aPhysicalPageList;
	TInt pagesInList = 0;				//The number of pages we put in the list so far

	TInt pageIndex = (aLinAddr & KChunkMask) >> KPageShift;	// Index of the page within the section
	TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;

	TInt pdeIndex = aLinAddr>>KChunkShift;


	MmuBase::Wait(); 	// RamAlloc mutex for accessing page/directory tables.
	NKern::LockSystem();// System lock for accessing SPageInfo objects.

	TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid) : ::InitPageDirectory;
	pdePtr += pdeIndex;//This points to the first pde

	while(pagesLeft)
		{
		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
		if(pagesLeftInChunk>pagesLeft)
			pagesLeftInChunk = pagesLeft;

		pagesLeft -= pagesLeftInChunk;

		TPte* pt = SafePageTableFromPde(*pdePtr++);
		if(!pt) { err = KErrNotFound; goto fail; }// Cannot get page table.

		pt += pageIndex;

		for(;pagesLeftInChunk--;)
			{
			TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
			pi = SPageInfo::SafeFromPhysAddr(phys);
			if(!pi)	{ err = KErrNotFound; goto fail; }// Invalid address

			__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
			if (chunk==NULL)
				{//This is the first page. Check the 'trusted' bit.
				if (pi->Type()!= SPageInfo::EChunk)
					{ err = KErrAccessDenied; goto fail; }// The first page does not belong to a chunk.

				chunk = (DChunk*)pi->Owner();
				if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
					{ err = KErrAccessDenied; goto fail; }// Not a trusted chunk
				}
			pi->Lock();

			*pageList++ = phys;
			if ( (++pagesInList&127) == 0) //release system lock temporarily on every 512K
				NKern::FlashSystem();
			}
		pageIndex = 0;
		}

	if (pi->Type()!= SPageInfo::EChunk)
		{ err = KErrAccessDenied; goto fail; }// The last page does not belong to a chunk.

	if (chunk && (chunk != (DChunk*)pi->Owner()))
		{ err = KErrArgument; goto fail; }//The first and the last page do not belong to the same chunk.

	NKern::UnlockSystem();
	MmuBase::Signal();
	return KErrNone;

fail:
	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
	NKern::UnlockSystem();
	MmuBase::Signal();
	ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
	return err;
	}

TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
// Unlocks physical pages.
// @param aPhysicalPageList - points to the list of physical pages that should be released.
// @param aPageCount		- the number of physical pages in the list.
	{
	NKern::LockSystem();
	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));

	while (aPageCount--)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
		if(!pi)
			{
			NKern::UnlockSystem();
			return KErrArgument;
			}
		__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
		pi->Unlock();
		}
	NKern::UnlockSystem();
	return KErrNone;
	}

TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
//
// Find the physical address corresponding to a given linear address in a specified OS
// address space. Call with system locked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	TInt pdeIndex=aLinAddr>>KChunkShift;
	TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
	TPhysAddr pa=KPhysAddrInvalid;
	if ((pde&KPdePresentMask)==KArmV6PdePageTable)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
		if (pi)
			{
			TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
			TPte* pPte=PageTable(id);
			TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift];
			if (pte & KArmV6PteSmallPage)
				{
				pa=(pte&KPteSmallPageAddrMask)+(aLinAddr&~KPteSmallPageAddrMask);
				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
				}
			else if ((pte & KArmV6PteTypeMask) == KArmV6PteLargePage)
				{
				pa=(pte&KPteLargePageAddrMask)+(aLinAddr&~KPteLargePageAddrMask);
				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
				}
			}
		}
	else if ((pde&KPdePresentMask)==KArmV6PdeSection)
		{
		pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
		__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
		}
	return pa;
	}

// permission table indexed by XN:APX:AP1:AP0
static const TInt PermissionLookup[16]=
	{													//XN:APX:AP1:AP0
	0,													//0   0   0   0  no access
	EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup,	//0   0   0   1  RW sup			execute
	EMapAttrWriteSup|EMapAttrReadUser|EMapAttrExecUser,	//0   0   1   0  supRW usrR		execute
	EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser,//0   0   1   1  supRW usrRW	execute
	0,													//0   1   0   0  reserved
	EMapAttrReadSup|EMapAttrExecSup,					//0   1   0   1  supR			execute
	EMapAttrReadUser|EMapAttrExecUser,					//0   1   1   0  supR usrR		execute
	0,													//0   1   1   1  reserved
	0,													//1   0   0   0  no access
	EMapAttrWriteSup|EMapAttrReadSup,					//1   0   0   1  RW sup
	EMapAttrWriteSup|EMapAttrReadUser,					//1   0   1   0  supRW usrR
	EMapAttrWriteUser|EMapAttrReadUser,					//1   0   1   1  supRW usrRW
	0,													//1   1   0   0  reserved
	EMapAttrReadSup,									//1   1   0   1  supR
	EMapAttrReadUser,									//1   1   1   0  supR usrR
	EMapAttrReadUser,									//1   1   1   1  supR usrR
	};
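// The lookup index packs the ARMv6 execute-never and access permission bits as
// XN:APX:AP1:AP0, e.g. index = (XN<<3)|(APX<<2)|(AP1<<1)|AP0 (illustrative).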

TInt ArmMmu::PageTableId(TLinAddr aAddr, TInt aOsAsid)
	{
	TInt id=-1;
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::PageTableId(%08x,%d)",aAddr,aOsAsid));
	TInt pdeIndex=aAddr>>KChunkShift;
	TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
	if ((pde&KArmV6PdeTypeMask)==KArmV6PdePageTable)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
		if (pi)
			id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
		}
	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
	return id;
	}

// Used only during boot for recovery of RAM drive
TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
	{
	TInt id=KErrNotFound;
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr));
	TPde* kpd=(TPde*)KPageDirectoryBase;	// kernel page directory
	TInt pdeIndex=aAddr>>KChunkShift;
	TPde pde = kpd[pdeIndex];
	if ((pde & KArmV6PdeTypeMask) == KArmV6PdePageTable)
		{
		aPtPhys = pde & KPdePageTableAddrMask;
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
		if (pi)
			{
			SPageInfo::TType type = pi->Type();
			if (type == SPageInfo::EPageTable)
				id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
			else if (type == SPageInfo::EUnused)
				id = KErrUnknown;
			}
		}
	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
	return id;
	}

TBool ArmMmu::PteIsPresent(TPte aPte)
	{
	return aPte & KArmV6PteTypeMask;
	}

TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex)
	{
	TUint32 pte_type = aPte & KArmV6PteTypeMask;
	if (pte_type == KArmV6PteLargePage)
		return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
	else if (pte_type != 0)
		return aPte & KPteSmallPageAddrMask;
	return KPhysAddrInvalid;
	}

TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr)
	{
	TPde* kpd = (TPde*)KPageDirectoryBase;	// kernel page directory
	TPde pde = kpd[aAddr>>KChunkShift];
	if ((pde & KPdePresentMask) == KArmV6PdeSection)
		return pde & KPdeSectionAddrMask;
	return KPhysAddrInvalid;
	}

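// Init1 fills in the MmuBase/Mmu layout constants, derives the cache colouring (and
// hence the aliasing granularity) from the cache type registers, and uses the TTCR
// value programmed by the bootstrap to choose the large (2GB local) or small (1GB
// local) address space configuration.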
void ArmMmu::Init1()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1"));

	// MmuBase data
	iPageSize=KPageSize;
	iPageMask=KPageMask;
	iPageShift=KPageShift;
	iChunkSize=KChunkSize;
	iChunkMask=KChunkMask;
	iChunkShift=KChunkShift;
	iPageTableSize=KPageTableSize;
	iPageTableMask=KPageTableMask;
	iPageTableShift=KPageTableShift;
	iPtClusterSize=KPtClusterSize;
	iPtClusterMask=KPtClusterMask;
	iPtClusterShift=KPtClusterShift;
	iPtBlockSize=KPtBlockSize;
	iPtBlockMask=KPtBlockMask;
	iPtBlockShift=KPtBlockShift;
	iPtGroupSize=KChunkSize/KPageTableSize;
	iPtGroupMask=iPtGroupSize-1;
	iPtGroupShift=iChunkShift-iPageTableShift;
	//TInt* iPtBlockCount;		// dynamically allocated - Init2
	//TInt* iPtGroupCount;		// dynamically allocated - Init2
	iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
	iPageTableLinBase=KPageTableBase;
	//iRamPageAllocator;		// dynamically allocated - Init2
	//iAsyncFreeList;			// dynamically allocated - Init2
	//iPageTableAllocator;		// dynamically allocated - Init2
	//iPageTableLinearAllocator;// dynamically allocated - Init2
	iPtInfoPtePerm=KPtInfoPtePerm;
	iPtPtePerm=KPtPtePerm;
	iPtPdePerm=KPtPdePerm;
	iUserCodeLoadPtePerm=KUserCodeLoadPte;
	iKernelCodePtePerm=KKernelCodeRunPte;
	iTempAddr=KTempAddr;
	iSecondTempAddr=KSecondTempAddr;
	iMapSizes=KPageSize|KLargePageSize|KChunkSize;
	iRomLinearBase = ::RomHeaderAddress;
	iRomLinearEnd = KRomLinearEnd;
	iShadowPtePerm = KShadowPtePerm;
	iShadowPdePerm = KShadowPdePerm;

	// Mmu data
	TInt total_ram=TheSuperPage().iTotalRamSize;

	// Large or small configuration?
	// This is determined by the bootstrap based on RAM size
	TUint32 ttcr=TTCR();
	__NK_ASSERT_ALWAYS(ttcr==1 || ttcr==2);
	TBool large = (ttcr==1);
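	// TTCR=N makes TTBR0 cover the bottom 2^(32-N) bytes, so 1 => 2GB of
	// process-local address space ("large") and 2 => 1GB ("small").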

	// calculate cache colouring...
	TInt iColourCount = 0;
	TInt dColourCount = 0;
	TUint32 ctr = InternalCache::TypeRegister();
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
#ifdef __CPU_ARMV6
	__NK_ASSERT_ALWAYS((ctr>>29)==0);	// check ARMv6 format
	if(ctr&0x800)
		iColourCount = 4;
	if(ctr&0x800000)
		dColourCount = 4;
#else
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
	__NK_ASSERT_ALWAYS((ctr>>29)==4);	// check ARMv7 format
	TUint l1ip = (ctr>>14)&3;			// L1 instruction cache indexing and tagging policy
	__NK_ASSERT_ALWAYS(l1ip>=2);		// check I cache is physically tagged

	TUint32 clidr = InternalCache::LevelIDRegister();
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheLevelIDRegister = %08x",clidr));
	TUint l1type = clidr&7;
	if(l1type)
		{
		if(l1type==2 || l1type==3 || l1type==4)
			{
			// we have an L1 data cache...
			TUint32 csir = InternalCache::SizeIdRegister(0,0);
			TUint sets = ((csir>>13)&0x7fff)+1;
			TUint ways = ((csir>>3)&0x3ff)+1;
			TUint lineSizeShift = (csir&7)+4;
			// assume L1 data cache is VIPT and alias checks broken and so we need data cache colouring...
			dColourCount = (sets<<lineSizeShift)>>KPageShift;
			if(l1type==4) // unified cache, so set instruction cache colour as well...
				iColourCount = (sets<<lineSizeShift)>>KPageShift;
			__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
			}

		if(l1type==1 || l1type==3)
			{
			// we have a separate L1 instruction cache...
			TUint32 csir = InternalCache::SizeIdRegister(1,0);
			TUint sets = ((csir>>13)&0x7fff)+1;
			TUint ways = ((csir>>3)&0x3ff)+1;
			TUint lineSizeShift = (csir&7)+4;
			iColourCount = (sets<<lineSizeShift)>>KPageShift;
			__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
			}
		}
	if(l1ip==3)
		{
		// PIPT cache, so no colouring restrictions...
		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is PIPT"));
		iColourCount = 0;
		}
	else
		{
		// VIPT cache...
		__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is VIPT"));
		}
#endif
	TUint colourShift = 0;
	for(TUint colourCount=Max(iColourCount,dColourCount); colourCount!=0; colourCount>>=1)
		++colourShift;
	iAliasSize=KPageSize<<colourShift;
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iAliasSize=0x%x",iAliasSize));
	iAliasMask=iAliasSize-1;
	iAliasShift=KPageShift+colourShift;

	iDecommitThreshold = CacheMaintenance::SyncAllPerformanceThresholdPages();

	iNumOsAsids=KArmV6NumAsids;
	iNumGlobalPageDirs=1;
	//iOsAsidAllocator;			// dynamically allocated - Init2
	iGlobalPdSize=KPageDirectorySize;
	iGlobalPdShift=KPageDirectoryShift;
	iAsidGroupSize=KChunkSize/KPageDirectorySize;
	iAsidGroupMask=iAsidGroupSize-1;
	iAsidGroupShift=KChunkShift-KPageDirectoryShift;
	iUserLocalBase=KUserLocalDataBase;
	iAsidInfo=(TUint32*)KAsidInfoBase;
	iPdeBase=KPageDirectoryBase;
	iPdPtePerm=KPdPtePerm;
	iPdPdePerm=KPdPdePerm;
	iRamDriveMask=0x00f00000;
	iGlobalCodePtePerm=KGlobalCodeRunPte;
#if defined(__CPU_MEMORY_TYPE_REMAPPING)
	iCacheMaintenanceTempMapAttr = CacheMaintenance::TemporaryMapping();
#else
	switch(CacheMaintenance::TemporaryMapping())
		{
		case EMemAttNormalUncached:
			iCacheMaintenanceTempMapAttr = KArmV6MemAttNCNC;
			break;
		case EMemAttNormalCached:
			iCacheMaintenanceTempMapAttr = KArmV6MemAttWBWAWBWA;
			break;
		default:
			Panic(ETempMappingFailed);
		}
#endif
	iMaxDllDataSize=Min(total_ram/2, 0x08000000);				// phys RAM/2 up to 128Mb
	iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask;	// round up to chunk size
	iMaxUserCodeSize=Min(total_ram, 0x10000000);				// phys RAM up to 256Mb
	iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask;	// round up to chunk size
	if (large)
		{
		iLocalPdSize=KPageDirectorySize/2;
		iLocalPdShift=KPageDirectoryShift-1;
		iUserSharedBase=KUserSharedDataBase2GB;
		iUserLocalEnd=iUserSharedBase-iMaxDllDataSize;
		iUserSharedEnd=KUserSharedDataEnd2GB-iMaxUserCodeSize;
		iDllDataBase=iUserLocalEnd;
		iUserCodeBase=iUserSharedEnd;
		}
	else
		{
		iLocalPdSize=KPageDirectorySize/4;
		iLocalPdShift=KPageDirectoryShift-2;
		iUserSharedBase=KUserSharedDataBase1GB;
		iUserLocalEnd=iUserSharedBase;
		iDllDataBase=KUserSharedDataEnd1GB-iMaxDllDataSize;
		iUserCodeBase=iDllDataBase-iMaxUserCodeSize;
		iUserSharedEnd=iUserCodeBase;
		}
	__KTRACE_OPT(KMMU,Kern::Printf("LPD size %08x GPD size %08x Alias size %08x",
													iLocalPdSize, iGlobalPdSize, iAliasSize));
	__KTRACE_OPT(KMMU,Kern::Printf("ULB %08x ULE %08x USB %08x USE %08x",iUserLocalBase,iUserLocalEnd,
																			iUserSharedBase,iUserSharedEnd));
	__KTRACE_OPT(KMMU,Kern::Printf("DDB %08x UCB %08x",iDllDataBase,iUserCodeBase));

	// ArmMmu data

	// other
	PP::MaxUserThreadStack=0x14000;			// 80K - STDLIB asks for 64K for PosixServer!!!!
	PP::UserThreadStackGuard=0x2000;		// 8K
	PP::MaxStackSpacePerProcess=0x200000;	// 2Mb
	K::SupervisorThreadStackSize=0x1000;	// 4K
	PP::SupervisorThreadStackGuard=0x1000;	// 4K
	K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
	PP::RamDriveStartAddress=KRamDriveStartAddress;
	PP::RamDriveRange=KRamDriveMaxSize;
	PP::RamDriveMaxSize=KRamDriveMaxSize;	// may be reduced later
	K::MemModelAttributes=EMemModelTypeMultiple|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
						EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
						EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;

	Arm::DefaultDomainAccess=KDefaultDomainAccess;

	Mmu::Init1();
	}

void ArmMmu::DoInit2()
	{
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::DoInit2"));
	iTempPte=PageTable(PageTableId(iTempAddr,0))+((iTempAddr&KChunkMask)>>KPageShift);
	iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift);
	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x",
			iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
	CreateKernelSection(KKernelSectionEnd, iAliasShift);
	CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd);
	Mmu::DoInit2();
	}

#ifndef __MMU_MACHINE_CODED__
void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
//
// Map a list of physical RAM pages into a specified page table with specified PTE permissions.
// Update the page information array.
// Call this with the system locked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
			aId, aType, aPtr, aOffset, aNumPages, aPtePerm));

	SPageTableInfo& ptinfo=iPtInfo[aId];
	ptinfo.iCount+=aNumPages;
	aOffset>>=KPageShift;
	TInt ptOffset=aOffset & KPagesInPDEMask;				// entry number in page table
	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE

	TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache.

	while(aNumPages--)
		{
		TPhysAddr pa = *aPageList++;
		if(pa==KPhysAddrInvalid)
			{
			++pPte;
			__NK_ASSERT_DEBUG(aType==SPageInfo::EInvalid);
			continue;
			}
		*pPte++ =  pa | aPtePerm;					// insert PTE
		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
		if (aType!=SPageInfo::EInvalid)
			{
			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
			if(pi)
				{
				pi->Set(aType,aPtr,aOffset);
				__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
				++aOffset;	// increment offset for next page
				}
			}
		}
	CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
	}

void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
//
// Map consecutive physical pages into a specified page table with specified PTE permissions.
// Update the page information array if RAM pages are being mapped.
// Call this with the system locked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
			aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
	SPageTableInfo& ptinfo=iPtInfo[aId];
	ptinfo.iCount+=aNumPages;
	aOffset>>=KPageShift;
	TInt ptOffset=aOffset & KPagesInPDEMask;				// entry number in page table
	TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset;		// address of first PTE

	TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache

	SPageInfo* pi;
	if(aType==SPageInfo::EInvalid)
		pi = NULL;
	else
		pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
	while(aNumPages--)
		{
		*pPte++ = aPhysAddr|aPtePerm;						// insert PTE
		aPhysAddr+=KPageSize;
		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
		if (pi)
			{
			pi->Set(aType,aPtr,aOffset);
			__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
			++aOffset;	// increment offset for next page
			++pi;
			}
		}

	CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
	}

void ArmMmu::MapVirtual(TInt aId, TInt aNumPages)
//
// Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing
// virtual address space to a chunk.  No pages are mapped.
// Call this with the system locked.
//
	{
	SPageTableInfo& ptinfo=iPtInfo[aId];
	ptinfo.iCount+=aNumPages;
	}

void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* aProcess)
//
// Replace the mapping at address aAddr in page table aId.
// Update the page information array for both the old and new pages.
// Return physical address of old page if it is now ready to be freed.
// Call this with the system locked.
// May be called with interrupts disabled, do not enable/disable them.
//
	{
	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
	TPte* pPte=PageTable(aId)+ptOffset;						// address of PTE
	TPte pte=*pPte;
	TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
						 (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );

	if (pte & KArmV6PteSmallPage)
		{
		__ASSERT_ALWAYS((pte & KPteSmallPageAddrMask) == aOldAddr, Panic(ERemapPageFailed));
		SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
		__ASSERT_DEBUG(oldpi->LockCount()==0,Panic(ERemapPageFailed));

		// remap page
		*pPte = aNewAddr | aPtePerm;					// overwrite PTE
		CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
		InvalidateTLBForPage(aAddr,asid);	// flush TLB entry

		// update new pageinfo, clear old
		SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr);
		pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
		oldpi->SetUnused();
		}
	else
		{
		Panic(ERemapPageFailed);
		}
	}

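// The walk below scans the ASID bitmap one 32-bit word at a time, treating bit 31 of
// each word as the first ASID of that word, and flashes the system lock between words
// so other threads can run.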
void ArmMmu::RemapPageByAsid(TBitMapAllocator* aOsAsids, TLinAddr aLinAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm)
//
// Replace the mapping at address aLinAddr in the relevant page table for all
// ASIDs specified in aOsAsids, but only if the currently mapped address is
// aOldAddr.
// Update the page information array for both the old and new pages.
// Call this with the system unlocked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageByAsid() linaddr=%08x oldaddr=%08x newaddr=%08x perm=%08x", aLinAddr, aOldAddr, aNewAddr, aPtePerm));

	TInt asid = -1;
	TInt lastAsid = KArmV6NumAsids - 1;
	TUint32* ptr = aOsAsids->iMap;
	NKern::LockSystem();
	do
		{
		TUint32 bits = *ptr++;
		do
			{
			++asid;
			if(bits & 0x80000000u)
				{
				// mapped in this address space, so update PTE...
				TPte* pPte = PtePtrFromLinAddr(aLinAddr, asid);
				TPte pte = *pPte;
				if ((pte&~KPageMask) == aOldAddr)
					{
					*pPte = aNewAddr | aPtePerm;
					__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x in asid %d",*pPte,pPte,asid));
					CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
					InvalidateTLBForPage(aLinAddr,asid);	// flush TLB entry
					}
				}
			}
		while(bits<<=1);
		NKern::FlashSystem();
		asid |= 31;
		}
	while(asid<lastAsid);

	// copy pageinfo attributes and mark old page unused
	SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
	SPageInfo::FromPhysAddr(aNewAddr)->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
	oldpi->SetUnused();

	NKern::UnlockSystem();
	}

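// On ARM1136 without the erratum 353494 fix, the PTE is cleared and the TLB/BTB
// invalidated by the helper remove_and_invalidate_page() (provided elsewhere);
// otherwise the PTE is cleared here and cache/TLB maintenance is done inline.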
TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
// On multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedPages instead.
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
	TInt np=0;
	TInt nf=0;
	TUint32 ng=0;
	TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
	                     (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );

	while(aNumPages--)
		{
		TPte pte=*pPte;						// get original PTE
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
		remove_and_invalidate_page(pPte, aAddr, asid);
		++pPte;
#else
		*pPte++=0;							// clear PTE
#endif

		// We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
		// these to PageUnmapped, as the page doesn't become free until it's unmapped from all
		// processes
		if (pte != KPteNotPresentEntry)
			++np;

		if (pte & KArmV6PteSmallPage)
			{
			ng |= pte;
#if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
			// Remove_and_invalidate_page will sort out cache and TLB.
			// When __CPU_ARM1136_ERRATUM_353494_FIXED, we have to do it here.
			CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
			if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
				InvalidateTLBForPage(aAddr,asid);	// flush any corresponding TLB entry
#endif
			TPhysAddr pa=pte & KPteSmallPageAddrMask;	// physical address of unmapped page
			if (aSetPagesFree)
				{
				SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
				if(iRamCache->PageUnmapped(pi))
					{
					pi->SetUnused();					// mark page as unused
					if (pi->LockCount()==0)
						{
						*aPageList++=pa;			// store in page list
						++nf;						// count free pages
						}
					}
				}
			else
				*aPageList++=pa;				// store in page list
			}
		aAddr+=KPageSize;
		}

	aNumPtes=np;
	aNumFree=nf;
	SPageTableInfo& ptinfo=iPtInfo[aId];
	TInt r=(ptinfo.iCount-=np);
	if (asid<0)
		r|=KUnmapPagesTLBFlushDeferred;

	#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
	__FlushBtb();
	#endif

	__KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
	return r;								// return number of pages remaining in this page table
	}

TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Adjust the page table reference count as if aNumPages pages were unmapped.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
// On multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedVirtual instead.
//
	{
	SPageTableInfo& ptinfo=iPtInfo[aId];
	TInt newCount = ptinfo.iCount - aNumPages;
	UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
	ptinfo.iCount = newCount;
	aNumPtes = aNumPages;
	return newCount;
	}
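// Note: iCount is rewritten with newCount (rather than the value left by UnmapPages)
// so that the reference count drops by the full aNumPages of committed virtual space,
// even where some of those pages were not actually present.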
sl@0
  1089
sl@0
  1090
TInt ArmMmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages,
sl@0
  1091
		TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
sl@0
  1092
/*
sl@0
  1093
 * Unmaps specified area at address aAddr in page table aId.
sl@0
  1094
 * Places physical addresses of not-demaned-paged unmapped pages into aPageList.
sl@0
  1095
 * Corresponding linear addresses are placed into aLAPageList.
sl@0
  1096
 * 'Old' demand-paged pages (holds invalid PE entry with physucal address) are neither unmapped nor
sl@0
  1097
 * encountered in aPageList but are still counted in aNumPtes.
sl@0
  1098
 * 
sl@0
  1099
 * This method should be called to decommit physical memory not owned by the chunk. As we do not know
sl@0
  1100
 * the origin of such memory, PtInfo could be invalid (or does't exist) so cache maintenance may not be
sl@0
  1101
 * able to obtain mapping colour. For that reason, this also returns former linear address of each page 
sl@0
  1102
 * in aPageList.   
sl@0
  1103
 *   
sl@0
  1104
 * @pre All pages are mapped within a single page table identified by aId.
sl@0
  1105
 * @pre On entry, system locked is held and is not released during the execution.
sl@0
  1106
 *
sl@0
  1107
 * @arg aId             Id of the page table that maps tha pages.
sl@0
  1108
 * @arg aAddr           Linear address of the start of the area.
sl@0
  1109
 * @arg aNumPages       The number of pages to unmap.
sl@0
  1110
 * @arg aProcess        The owning process of the mamory area to unmap.
sl@0
  1111
 * @arg aPageList       On  exit, holds the list of unmapped pages.
sl@0
  1112
 * @arg aLAPageList     On  exit, holds the list of linear addresses of unmapped pages.
sl@0
  1113
 * @arg aNumFree        On exit, holds the number of pages in aPageList.
sl@0
  1114
 * @arg aNumPtes        On exit, holds the number of unmapped pages. This includes demand-paged 'old'
sl@0
  1115
 *                      pages (with invalid page table entry still holding the address of physical page.)
sl@0
  1116
 *                      
sl@0
  1117
 * @return              The number of pages still mapped using this page table. It is orred by
sl@0
  1118
 *                      KUnmapPagesTLBFlushDeferred if TLB flush is not executed - which requires 
sl@0
  1119
 *                      the caller to do global TLB flush.
sl@0
  1120
 */ 
sl@0
  1121
    {
sl@0
  1122
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapUnownedPages() id=%d addr=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
sl@0
  1123
	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
sl@0
  1124
	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
sl@0
  1125
	TInt np=0;
sl@0
  1126
	TInt nf=0;
sl@0
  1127
	TUint32 ng=0;
sl@0
  1128
	TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
sl@0
  1129
	                     (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
sl@0
  1130
sl@0
  1131
	while(aNumPages--)
sl@0
  1132
		{
sl@0
  1133
		TPte pte=*pPte;						// get original PTE
sl@0
  1134
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  1135
		remove_and_invalidate_page(pPte, aAddr, asid);
sl@0
  1136
		++pPte;
sl@0
  1137
#else
sl@0
  1138
		*pPte++=0;							// clear PTE
sl@0
  1139
#endif
sl@0
  1140
		
sl@0
  1141
		// We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
sl@0
  1142
		// these to PageUnmapped, as the page doesn't become free until it's unmapped from all
sl@0
  1143
		// processes		
sl@0
  1144
		if (pte != KPteNotPresentEntry)
sl@0
  1145
			++np;
sl@0
  1146
		
sl@0
  1147
		if (pte & KArmV6PteSmallPage)
sl@0
  1148
			{
sl@0
  1149
			ng |= pte;
sl@0
  1150
#if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  1151
			// Remove_and_invalidate_page will sort out cache and TLB. 
sl@0
  1152
			// When __CPU_ARM1136_ERRATUM_353494_FIXED, we have to do it here.
sl@0
  1153
			CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
sl@0
  1154
			if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
sl@0
  1155
				InvalidateTLBForPage(aAddr,asid);	// flush any corresponding TLB entry
sl@0
  1156
#endif
sl@0
  1157
			TPhysAddr pa=pte & KPteSmallPageAddrMask;	// physical address of unmapped page
sl@0
  1158
	        ++nf;
sl@0
  1159
	        *aPageList++=pa;				// store physical address in page list
sl@0
  1160
	        *aLAPageList++=aAddr;			// store linear address in page list
sl@0
  1161
			}
sl@0
  1162
		aAddr+=KPageSize;
sl@0
  1163
		}
sl@0
  1164
sl@0
  1165
	aNumPtes=np;
sl@0
  1166
	aNumFree=nf;
sl@0
  1167
	SPageTableInfo& ptinfo=iPtInfo[aId];
sl@0
  1168
	TInt r=(ptinfo.iCount-=np);
sl@0
  1169
	if (asid<0)
sl@0
  1170
		r|=KUnmapPagesTLBFlushDeferred;
sl@0
  1171
sl@0
  1172
	
sl@0
  1173
	#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  1174
	__FlushBtb();
sl@0
  1175
	#endif
sl@0
  1176
sl@0
  1177
	__KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
sl@0
  1178
	return r;								// return number of pages remaining in this page table
sl@0
  1179
	}
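
// Illustrative sketch, not part of the build: a caller-side view of the value
// returned by UnmapUnownedPages() above. The count of pages still mapped by the
// page table is ORed with KUnmapPagesTLBFlushDeferred when per-page TLB
// invalidation was skipped, in which case the caller is expected to perform a
// global TLB flush itself. DecodeUnmapResult is a hypothetical helper name.
static inline TInt DecodeUnmapResult(TInt aResult, TBool& aNeedGlobalTlbFlush)
	{
	aNeedGlobalTlbFlush = (aResult & KUnmapPagesTLBFlushDeferred) != 0;
	return aResult & ~KUnmapPagesTLBFlushDeferred;	// pages still mapped by this page table
	}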
sl@0
  1180
sl@0
  1181
sl@0
  1182
TInt ArmMmu::UnmapUnownedVirtual(TInt aId, TUint32 aAddr, TInt aNumPages,
sl@0
  1183
		TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
sl@0
  1184
//
sl@0
  1185
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
sl@0
  1186
// pages into aPageList, and count of unmapped pages into aNumPtes.
sl@0
  1187
// Adjust the page table reference count as if aNumPages pages were unmapped.
sl@0
  1188
// Return number of pages still mapped using this page table.
sl@0
  1189
// Call this with the system locked.
sl@0
  1190
//
sl@0
  1191
	{
sl@0
  1192
	SPageTableInfo& ptinfo=iPtInfo[aId];
sl@0
  1193
	TInt newCount = ptinfo.iCount - aNumPages;
sl@0
  1194
	UnmapUnownedPages(aId, aAddr, aNumPages, aPageList,  aLAPageList, aNumPtes,  aNumFree,  aProcess);
sl@0
  1195
	ptinfo.iCount = newCount;
sl@0
  1196
	aNumPtes = aNumPages;	
sl@0
  1197
	return newCount;
sl@0
  1198
	}
sl@0
  1199
sl@0
  1200
void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
sl@0
  1201
//
sl@0
  1202
// Assign an allocated page table to map a given linear address with specified permissions.
sl@0
  1203
// This should be called with the system unlocked and the MMU mutex held.
sl@0
  1204
//
sl@0
  1205
	{
sl@0
  1206
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
sl@0
  1207
	TLinAddr ptLin=PageTableLinAddr(aId);
sl@0
  1208
	TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
sl@0
  1209
	TInt pdeIndex=TInt(aAddr>>KChunkShift);
sl@0
  1210
	TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
sl@0
  1211
	TInt os_asid=(TInt)aOsAsids;
sl@0
  1212
	if (TUint32(os_asid)<TUint32(iNumOsAsids))
sl@0
  1213
		{
sl@0
  1214
		// single OS ASID
sl@0
  1215
		TPde* pageDir=PageDirectory(os_asid);
sl@0
  1216
		NKern::LockSystem();
sl@0
  1217
		pageDir[pdeIndex]=ptPhys|aPdePerm;	// will blow up here if address is in global region and aOsAsid doesn't have a global PD
sl@0
  1218
		CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1219
		NKern::UnlockSystem();
sl@0
  1220
				
sl@0
  1221
		__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
sl@0
  1222
		}
sl@0
  1223
	else if (os_asid==-1 && gpd)
sl@0
  1224
		{
sl@0
  1225
		// all OS ASIDs, address in global region
sl@0
  1226
		TInt num_os_asids=iNumGlobalPageDirs;
sl@0
  1227
		const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
sl@0
  1228
		for (os_asid=0; num_os_asids; ++os_asid)
sl@0
  1229
			{
sl@0
  1230
			if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
sl@0
  1231
				{
sl@0
  1232
				// this OS ASID exists and has a global page directory
sl@0
  1233
				TPde* pageDir=PageDirectory(os_asid);
sl@0
  1234
				NKern::LockSystem();
sl@0
  1235
				pageDir[pdeIndex]=ptPhys|aPdePerm;
sl@0
  1236
				CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1237
				NKern::UnlockSystem();
sl@0
  1238
sl@0
  1239
				__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
sl@0
  1240
				--num_os_asids;
sl@0
  1241
				}
sl@0
  1242
			}
sl@0
  1243
		}
sl@0
  1244
	else
sl@0
  1245
		{
sl@0
  1246
		// selection of OS ASIDs or all OS ASIDs
sl@0
  1247
		const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
sl@0
  1248
		if (os_asid==-1)
sl@0
  1249
			pB=iOsAsidAllocator;	// 0's in positions which exist
sl@0
  1250
		TInt num_os_asids=pB->iSize-pB->iAvail;
sl@0
  1251
		for (os_asid=0; num_os_asids; ++os_asid)
sl@0
  1252
			{
sl@0
  1253
			if (pB->NotAllocated(os_asid,1))
sl@0
  1254
				continue;			// os_asid is not needed
sl@0
  1255
			TPde* pageDir=PageDirectory(os_asid);
sl@0
  1256
			NKern::LockSystem();
sl@0
  1257
			pageDir[pdeIndex]=ptPhys|aPdePerm;
sl@0
  1258
			CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1259
			NKern::UnlockSystem();
sl@0
  1260
sl@0
  1261
			__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
sl@0
  1262
			--num_os_asids;
sl@0
  1263
			}
sl@0
  1264
		}
sl@0
  1265
	}
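
// Illustrative sketch, not part of the build: DoAssignPageTable() above (and
// DoUnassignPageTable() below) overload the aOsAsids argument in three ways - a
// small non-negative integer selects a single OS ASID, -1 means "all OS ASIDs",
// and any other value is treated as a pointer to a TBitMapAllocator describing a
// set of OS ASIDs (0 bits mark the members). TAsidSelection and ClassifyOsAsids
// are hypothetical names that just make the convention explicit.
enum TAsidSelection { ESingleAsid, EAllAsids, EAsidBitmap };
static inline TAsidSelection ClassifyOsAsids(const TAny* aOsAsids, TInt aNumOsAsids)
	{
	TInt v = (TInt)aOsAsids;
	if (TUint32(v) < TUint32(aNumOsAsids))
		return ESingleAsid;		// the value is the ASID itself
	if (v == -1)
		return EAllAsids;		// apply to every OS ASID (or every global page directory)
	return EAsidBitmap;			// the value is a TBitMapAllocator*
	}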
sl@0
  1266
sl@0
  1267
void ArmMmu::RemapPageTableSingle(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, TInt aOsAsid)
sl@0
  1268
//
sl@0
  1269
// Replace a single page table mapping the specified linear address.
sl@0
  1270
// This should be called with the system locked and the MMU mutex held.
sl@0
  1271
//
sl@0
  1272
	{
sl@0
  1273
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableSingle %08x to %08x at %08x asid %d",aOld,aNew,aAddr,aOsAsid));
sl@0
  1274
	TPde* pageDir=PageDirectory(aOsAsid);
sl@0
  1275
	TInt pdeIndex=TInt(aAddr>>KChunkShift);
sl@0
  1276
	TPde pde=pageDir[pdeIndex];
sl@0
  1277
	__ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed));
sl@0
  1278
	TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
sl@0
  1279
	pageDir[pdeIndex]=newPde;	// will blow up here if address is in global region and aOsAsid doesn't have a global PD
sl@0
  1280
	CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1281
				
sl@0
  1282
	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
sl@0
  1283
	}
sl@0
  1284
sl@0
  1285
void ArmMmu::RemapPageTableGlobal(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
sl@0
  1286
//
sl@0
  1287
// Replace a global page table mapping the specified linear address.
sl@0
  1288
// This should be called with the system locked and the MMU mutex held.
sl@0
  1289
//
sl@0
  1290
	{
sl@0
  1291
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableGlobal %08x to %08x at %08x",aOld,aNew,aAddr));
sl@0
  1292
	TInt pdeIndex=TInt(aAddr>>KChunkShift);
sl@0
  1293
	TInt num_os_asids=iNumGlobalPageDirs;
sl@0
  1294
	const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
sl@0
  1295
	for (TInt os_asid=0; num_os_asids; ++os_asid)
sl@0
  1296
		{
sl@0
  1297
		if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
sl@0
  1298
			{
sl@0
  1299
			// this OS ASID exists and has a global page directory
sl@0
  1300
			TPde* pageDir=PageDirectory(os_asid);
sl@0
  1301
			TPde pde=pageDir[pdeIndex];
sl@0
  1302
			if ((pde & KPdePageTableAddrMask) == aOld)
sl@0
  1303
				{
sl@0
  1304
				TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
sl@0
  1305
				pageDir[pdeIndex]=newPde;
sl@0
  1306
				CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1307
sl@0
  1308
				__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
sl@0
  1309
				}
sl@0
  1310
			--num_os_asids;
sl@0
  1311
			}
sl@0
  1312
		if ((os_asid&31)==31)
sl@0
  1313
			NKern::FlashSystem();
sl@0
  1314
		}
sl@0
  1315
	}
sl@0
  1316
sl@0
  1317
void ArmMmu::RemapPageTableMultiple(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, const TAny* aOsAsids)
sl@0
  1318
//
sl@0
  1319
// Replace multiple page table mappings of the specified linear address.
sl@0
  1320
// This should be called with the system locked and the MMU mutex held.
sl@0
  1321
//
sl@0
  1322
	{
sl@0
  1323
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableMultiple %08x to %08x at %08x asids %08x",aOld,aNew,aAddr,aOsAsids));
sl@0
  1324
	TInt pdeIndex=TInt(aAddr>>KChunkShift);
sl@0
  1325
	const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
sl@0
  1326
	if ((TInt)aOsAsids==-1)
sl@0
  1327
		pB=iOsAsidAllocator;	// 0's in positions which exist
sl@0
  1328
	
sl@0
  1329
	TInt asid = -1;
sl@0
  1330
	TInt lastAsid = KArmV6NumAsids - 1;
sl@0
  1331
	const TUint32* ptr = pB->iMap;
sl@0
  1332
	do
sl@0
  1333
		{
sl@0
  1334
		TUint32 bits = *ptr++;
sl@0
  1335
		do
sl@0
  1336
			{
sl@0
  1337
			++asid;
sl@0
  1338
			if ((bits & 0x80000000u) == 0)
sl@0
  1339
				{
sl@0
  1340
				// mapped in this address space - bitmap is inverted
sl@0
  1341
				TPde* pageDir=PageDirectory(asid);
sl@0
  1342
				TPde pde=pageDir[pdeIndex];
sl@0
  1343
				if ((pde & KPdePageTableAddrMask) == aOld)
sl@0
  1344
					{
sl@0
  1345
					TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
sl@0
  1346
					pageDir[pdeIndex]=newPde;
sl@0
  1347
					CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1348
sl@0
  1349
					__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
sl@0
  1350
					}
sl@0
  1351
				}
sl@0
  1352
			}
sl@0
  1353
		while(bits<<=1);
sl@0
  1354
		NKern::FlashSystem();
sl@0
  1355
		asid |= 31;
sl@0
  1356
		}
sl@0
  1357
	while(asid<lastAsid);
sl@0
  1358
	}
sl@0
  1359
sl@0
  1360
void ArmMmu::RemapPageTableAliases(TPhysAddr aOld, TPhysAddr aNew)
sl@0
  1361
//
sl@0
  1362
// Replace aliases of the specified page table.
sl@0
  1363
// This should be called with the system locked and the MMU mutex held.
sl@0
  1364
//
sl@0
  1365
	{
sl@0
  1366
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableAliases %08x to %08x",aOld,aNew));
sl@0
  1367
	SDblQue checkedList;
sl@0
  1368
	SDblQueLink* next;
sl@0
  1369
sl@0
  1370
	while(!iAliasList.IsEmpty())
sl@0
  1371
		{
sl@0
  1372
		next = iAliasList.First()->Deque();
sl@0
  1373
		checkedList.Add(next);
sl@0
  1374
		DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
sl@0
  1375
		TPde pde = thread->iAliasPde;
sl@0
  1376
		if ((pde & ~KPageMask) == aOld)
sl@0
  1377
			{
sl@0
  1378
			// a page table in this page is being aliased by the thread, so update it...
sl@0
  1379
			thread->iAliasPde = (pde & KPageMask) | aNew;
sl@0
  1380
			}
sl@0
  1381
		NKern::FlashSystem();
sl@0
  1382
		}
sl@0
  1383
sl@0
  1384
	// copy checkedList back to iAliasList
sl@0
  1385
	iAliasList.MoveFrom(&checkedList);
sl@0
  1386
	}
sl@0
  1387
sl@0
  1388
void ArmMmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids)
sl@0
  1389
//
sl@0
  1390
// Unassign a now-empty page table currently mapping the specified linear address.
sl@0
  1391
// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
sl@0
  1392
// This should be called with the system unlocked and the MMU mutex held.
sl@0
  1393
//
sl@0
  1394
	{
sl@0
  1395
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids));
sl@0
  1396
	TInt pdeIndex=TInt(aAddr>>KChunkShift);
sl@0
  1397
	TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
sl@0
  1398
	TInt os_asid=(TInt)aOsAsids;
sl@0
  1399
	TUint pde=0;
sl@0
  1400
sl@0
  1401
	SDblQue checkedList;
sl@0
  1402
	SDblQueLink* next;
sl@0
  1403
sl@0
  1404
	if (TUint32(os_asid)<TUint32(iNumOsAsids))
sl@0
  1405
		{
sl@0
  1406
		// single OS ASID
sl@0
  1407
		TPde* pageDir=PageDirectory(os_asid);
sl@0
  1408
		NKern::LockSystem();
sl@0
  1409
		pde = pageDir[pdeIndex];
sl@0
  1410
		pageDir[pdeIndex]=0;
sl@0
  1411
		CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1412
		__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
sl@0
  1413
sl@0
  1414
		// remove any aliases of the page table...
sl@0
  1415
		TUint ptId = pde>>KPageTableShift;
sl@0
  1416
		while(!iAliasList.IsEmpty())
sl@0
  1417
			{
sl@0
  1418
			next = iAliasList.First()->Deque();
sl@0
  1419
			checkedList.Add(next);
sl@0
  1420
			DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
sl@0
  1421
			if(thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId)
sl@0
  1422
				{
sl@0
  1423
				// the page table is being aliased by the thread, so remove it...
sl@0
  1424
				thread->iAliasPde = 0;
sl@0
  1425
				}
sl@0
  1426
			NKern::FlashSystem();
sl@0
  1427
			}
sl@0
  1428
		}
sl@0
  1429
	else if (os_asid==-1 && gpd)
sl@0
  1430
		{
sl@0
  1431
		// all OS ASIDs, address in global region
sl@0
  1432
		TInt num_os_asids=iNumGlobalPageDirs;
sl@0
  1433
		const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
sl@0
  1434
		for (os_asid=0; num_os_asids; ++os_asid)
sl@0
  1435
			{
sl@0
  1436
			if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
sl@0
  1437
				{
sl@0
  1438
				// this OS ASID exists and has a global page directory
sl@0
  1439
				TPde* pageDir=PageDirectory(os_asid);
sl@0
  1440
				NKern::LockSystem();
sl@0
  1441
				pageDir[pdeIndex]=0;
sl@0
  1442
				CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1443
				NKern::UnlockSystem();
sl@0
  1444
				
sl@0
  1445
				__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
sl@0
  1446
				--num_os_asids;
sl@0
  1447
				}
sl@0
  1448
			}
sl@0
  1449
		// we don't need to look for aliases in this case, because these aren't
sl@0
  1450
		// created for page tables in the global region.
sl@0
  1451
		NKern::LockSystem();
sl@0
  1452
		}
sl@0
  1453
	else
sl@0
  1454
		{
sl@0
  1455
		// selection of OS ASIDs or all OS ASIDs
sl@0
  1456
		const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
sl@0
  1457
		if (os_asid==-1)
sl@0
  1458
			pB=iOsAsidAllocator;	// 0's in positions which exist
sl@0
  1459
		TInt num_os_asids=pB->iSize-pB->iAvail;
sl@0
  1460
		for (os_asid=0; num_os_asids; ++os_asid)
sl@0
  1461
			{
sl@0
  1462
			if (pB->NotAllocated(os_asid,1))
sl@0
  1463
				continue;			// os_asid is not needed
sl@0
  1464
			TPde* pageDir=PageDirectory(os_asid);
sl@0
  1465
			NKern::LockSystem();
sl@0
  1466
			pde = pageDir[pdeIndex];
sl@0
  1467
			pageDir[pdeIndex]=0;
sl@0
  1468
			CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1469
			NKern::UnlockSystem();
sl@0
  1470
			
sl@0
  1471
			__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
sl@0
  1472
			--num_os_asids;
sl@0
  1473
			}
sl@0
  1474
sl@0
  1475
		// remove any aliases of the page table...
sl@0
  1476
		TUint ptId = pde>>KPageTableShift;
sl@0
  1477
		NKern::LockSystem();
sl@0
  1478
		while(!iAliasList.IsEmpty())
sl@0
  1479
			{
sl@0
  1480
			next = iAliasList.First()->Deque();
sl@0
  1481
			checkedList.Add(next);
sl@0
  1482
			DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
sl@0
  1483
			if((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1))
sl@0
  1484
				{
sl@0
  1485
				// the page table is being aliased by the thread, so remove it...
sl@0
  1486
				thread->iAliasPde = 0;
sl@0
  1487
				}
sl@0
  1488
			NKern::FlashSystem();
sl@0
  1489
			}
sl@0
  1490
		}
sl@0
  1491
sl@0
  1492
	// copy checkedList back to iAliasList
sl@0
  1493
	iAliasList.MoveFrom(&checkedList);
sl@0
  1494
sl@0
  1495
	NKern::UnlockSystem();
sl@0
  1496
	}
sl@0
  1497
#endif
sl@0
  1498
sl@0
  1499
// Initialise page table at physical address aXptPhys to be used as page table aXptId
sl@0
  1500
// to expand the virtual address range used for mapping page tables. Map the page table
sl@0
  1501
// at aPhysAddr as page table aId using the expanded range.
sl@0
  1502
// Assign aXptPhys to kernel's Page Directory.
sl@0
  1503
// Called with system unlocked and MMU mutex held.
sl@0
  1504
void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
sl@0
  1505
	{
sl@0
  1506
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
sl@0
  1507
						aXptId, aXptPhys, aId, aPhysAddr));
sl@0
  1508
	
sl@0
  1509
	// put in a temporary mapping for aXptPhys
sl@0
  1510
	// make it noncacheable
sl@0
  1511
	TPhysAddr pa=aXptPhys&~KPageMask;
sl@0
  1512
	*iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
sl@0
  1513
	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
sl@0
  1514
	
sl@0
  1515
	// clear XPT
sl@0
  1516
	TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask));
sl@0
  1517
	memclr(xpt, KPageTableSize);
sl@0
  1518
sl@0
  1519
	// aXptPhys and aPhysAddr must in fact be in the same physical page
sl@0
  1520
	__ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)<TUint32(KPageSize), MM::Panic(MM::EBootstrapPageTableBadAddr));
sl@0
  1521
sl@0
  1522
	// so only need one mapping
sl@0
  1523
	xpt[(aXptId>>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm;
sl@0
  1524
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)xpt, KPageTableSize);
sl@0
  1525
sl@0
  1526
	// remove temporary mapping
sl@0
  1527
	*iTempPte=0;
sl@0
  1528
	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
sl@0
  1529
	
sl@0
  1530
	InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
sl@0
  1531
sl@0
  1532
	// initialise PtInfo...
sl@0
  1533
	TLinAddr xptAddr = PageTableLinAddr(aXptId);
sl@0
  1534
	iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);
sl@0
  1535
sl@0
  1536
	// map xpt...
sl@0
  1537
	TInt pdeIndex=TInt(xptAddr>>KChunkShift);
sl@0
  1538
	TPde* pageDir=PageDirectory(0);
sl@0
  1539
	NKern::LockSystem();
sl@0
  1540
	pageDir[pdeIndex]=aXptPhys|KPtPdePerm;
sl@0
  1541
	CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
sl@0
  1542
	
sl@0
  1543
	NKern::UnlockSystem();				
sl@0
  1544
	}
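
// Illustrative sketch, not part of the build: ARM page tables are 1KB, so four of
// them share one 4KB RAM page (hence the aXptId>>KPtClusterShift above), and one
// page table maps 256 small pages (hence the &KPagesInPDEMask). The index written
// into the expansion page table (XPT) for a given page table id is therefore just
// the following arithmetic; the local constants are assumptions for the example.
static inline TInt XptIndexForPageTable(TInt aPageTableId)
	{
	const TInt KPtsPerPageShift = 2;	// assumed: 4 x 1KB page tables per 4KB page
	const TInt KPtesPerPtMask = 0xff;	// assumed: 256 PTEs per page table
	return (aPageTableId >> KPtsPerPageShift) & KPtesPerPtMask;
	}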
sl@0
  1545
sl@0
  1546
// Edit the self-mapping entry in page table aId, mapped at aTempMap, to
sl@0
  1547
// change the physical address from aOld to aNew. Used when moving page
sl@0
  1548
// tables which were created by BootstrapPageTable.
sl@0
  1549
// Called with system locked and MMU mutex held.
sl@0
  1550
void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
sl@0
  1551
	{
sl@0
  1552
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x",
sl@0
  1553
						aId, aTempMap, aOld, aNew));
sl@0
  1554
	
sl@0
  1555
	// find correct page table inside the page
sl@0
  1556
	TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift));
sl@0
  1557
	// find the pte in that page table
sl@0
  1558
	xpt += (aId>>KPtClusterShift)&KPagesInPDEMask;
sl@0
  1559
sl@0
  1560
	// switch the mapping
sl@0
  1561
	__ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed));
sl@0
  1562
	*xpt = aNew | KPtPtePerm;
sl@0
  1563
	// mapped with MapTemp, and thus not mapped as a PTE - have to do real cache clean.
sl@0
  1564
	CacheMaintenance::SinglePteUpdated((TLinAddr)xpt);
sl@0
  1565
	}
sl@0
  1566
sl@0
  1567
TInt ArmMmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages)
sl@0
  1568
	{
sl@0
  1569
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
sl@0
  1570
	TInt r=0;
sl@0
  1571
	TInt nlocal=iLocalPdSize>>KPageShift;
sl@0
  1572
	aNumPages=aSeparateGlobal ? KPageDirectorySize/KPageSize : nlocal;
sl@0
  1573
	__KTRACE_OPT(KMMU,Kern::Printf("nlocal=%d, aNumPages=%d",nlocal,aNumPages));
sl@0
  1574
	if (aNumPages>1)
sl@0
  1575
		{
sl@0
  1576
		TInt align=aSeparateGlobal ? KPageDirectoryShift : KPageDirectoryShift-1;
sl@0
  1577
		r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, EPageFixed, align);
sl@0
  1578
		}
sl@0
  1579
	else
sl@0
  1580
		r=AllocRamPages(&aPhysAddr,1, EPageFixed);
sl@0
  1581
	__KTRACE_OPT(KMMU,Kern::Printf("r=%d, phys=%08x",r,aPhysAddr));
sl@0
  1582
	if (r!=KErrNone)
sl@0
  1583
		return r;
sl@0
  1584
#ifdef BTRACE_KERNEL_MEMORY
sl@0
  1585
	BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, aNumPages<<KPageShift);
sl@0
  1586
	Epoc::KernelMiscPages += aNumPages;
sl@0
  1587
#endif
sl@0
  1588
	SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
sl@0
  1589
	NKern::LockSystem();
sl@0
  1590
	TInt i;
sl@0
  1591
	for (i=0; i<aNumPages; ++i)
sl@0
  1592
		pi[i].SetPageDir(aOsAsid,i);
sl@0
  1593
	NKern::UnlockSystem();
sl@0
  1594
	return KErrNone;
sl@0
  1595
	}
sl@0
  1596
sl@0
  1597
inline void CopyPdes(TPde* aDest, const TPde* aSrc, TLinAddr aBase, TLinAddr aEnd)
sl@0
  1598
	{
sl@0
  1599
	memcpy(aDest+(aBase>>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
sl@0
  1600
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
sl@0
  1601
	}
sl@0
  1602
sl@0
  1603
inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd)
sl@0
  1604
	{
sl@0
  1605
	memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
sl@0
  1606
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
sl@0
  1607
	}
sl@0
  1608
sl@0
  1609
void ArmMmu::InitPageDirectory(TInt aOsAsid, TBool aSeparateGlobal)
sl@0
  1610
	{
sl@0
  1611
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::InitPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
sl@0
  1612
	TPde* newpd=PageDirectory(aOsAsid);	// new page directory
sl@0
  1613
	memclr(newpd, iLocalPdSize);		// clear local page directory
sl@0
  1614
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)newpd, iLocalPdSize);
sl@0
  1615
	if (aSeparateGlobal)
sl@0
  1616
		{
sl@0
  1617
		const TPde* kpd=(const TPde*)KPageDirectoryBase;	// kernel page directory
sl@0
  1618
		if (iLocalPdSize==KPageSize)
sl@0
  1619
			ZeroPdes(newpd, KUserSharedDataEnd1GB, KUserSharedDataEnd2GB);
sl@0
  1620
		ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress);	// don't copy RAM drive
sl@0
  1621
		CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd);		// copy ROM + user global
sl@0
  1622
		CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000);			// copy kernel mappings
sl@0
  1623
		}
sl@0
  1624
	}
sl@0
  1625
sl@0
  1626
void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex)
sl@0
  1627
	{
sl@0
  1628
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex));
sl@0
  1629
	TPte* pte=PageTable(aId);
sl@0
  1630
	memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte));
sl@0
  1631
	CacheMaintenance::MultiplePtesUpdated((TLinAddr)(pte+aFirstIndex), KPageTableSize-aFirstIndex*sizeof(TPte));
sl@0
  1632
	}
sl@0
  1633
sl@0
  1634
void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm)
sl@0
  1635
	{
sl@0
  1636
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d",
sl@0
  1637
												aOsAsid, aAddr, aPdePerm, aNumPdes));
sl@0
  1638
	TInt ix=aAddr>>KChunkShift;
sl@0
  1639
	TPde* pPde=PageDirectory(aOsAsid)+ix;
sl@0
  1640
	TLinAddr firstPde = (TLinAddr)pPde; //Will need this to clean page table memory region in cache
sl@0
  1641
sl@0
  1642
	TPde* pPdeEnd=pPde+aNumPdes;
sl@0
  1643
	NKern::LockSystem();
sl@0
  1644
	for (; pPde<pPdeEnd; ++pPde)
sl@0
  1645
		{
sl@0
  1646
		TPde pde=*pPde;
sl@0
  1647
		if (pde)
sl@0
  1648
			*pPde = (pde&KPdePageTableAddrMask)|aPdePerm;
sl@0
  1649
		}
sl@0
  1650
	CacheMaintenance::MultiplePtesUpdated(firstPde, aNumPdes*sizeof(TPde));
sl@0
  1651
	FlushTLBs();
sl@0
  1652
	NKern::UnlockSystem();
sl@0
  1653
	}
sl@0
  1654
sl@0
  1655
void ArmMmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
sl@0
  1656
	{
sl@0
  1657
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
sl@0
  1658
												aId, aPageOffset, aNumPages, aPtePerm));
sl@0
  1659
	TPte* pPte=PageTable(aId)+aPageOffset;
sl@0
  1660
	TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table memory region in cache
sl@0
  1661
sl@0
  1662
	TPde* pPteEnd=pPte+aNumPages;
sl@0
  1663
	NKern::LockSystem();
sl@0
  1664
	for (; pPte<pPteEnd; ++pPte)
sl@0
  1665
		{
sl@0
  1666
		TPte pte=*pPte;
sl@0
  1667
		if (pte)
sl@0
  1668
			*pPte = (pte&KPteSmallPageAddrMask)|aPtePerm;
sl@0
  1669
		}
sl@0
  1670
	CacheMaintenance::MultiplePtesUpdated(firstPte, aNumPages*sizeof(TPte));
sl@0
  1671
	FlushTLBs();
sl@0
  1672
	NKern::UnlockSystem();
sl@0
  1673
	}
sl@0
  1674
sl@0
  1675
void ArmMmu::ClearRamDrive(TLinAddr aStart)
sl@0
  1676
	{
sl@0
  1677
	// clear the page directory entries corresponding to the RAM drive
sl@0
  1678
	TPde* kpd=(TPde*)KPageDirectoryBase;	// kernel page directory
sl@0
  1679
	ZeroPdes(kpd, aStart, KRamDriveEndAddress);
sl@0
  1680
	}
sl@0
  1681
sl@0
  1682
TPde ArmMmu::PdePermissions(TChunkType aChunkType, TBool aRO)
sl@0
  1683
	{
sl@0
  1684
//	if (aChunkType==EUserData && aRO)
sl@0
  1685
//		return KPdePtePresent|KPdePteUser;
sl@0
  1686
	return ChunkPdePermissions[aChunkType];
sl@0
  1687
	}
sl@0
  1688
sl@0
  1689
TPte ArmMmu::PtePermissions(TChunkType aChunkType)
sl@0
  1690
	{
sl@0
  1691
	return ChunkPtePermissions[aChunkType];
sl@0
  1692
	}
sl@0
  1693
sl@0
  1694
// Set up a page table (specified by aId) to map a 1Mb section of ROM containing aRomAddr
sl@0
  1695
// using ROM at aOrigPhys.
sl@0
  1696
void ArmMmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
sl@0
  1697
	{
sl@0
  1698
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
sl@0
  1699
		aId, aRomAddr, aOrigPhys));
sl@0
  1700
	TPte* ppte = PageTable(aId);
sl@0
  1701
	TLinAddr firstPte = (TLinAddr)ppte; //Will need this to clean page table memory region in cache
sl@0
  1702
sl@0
  1703
	TPte* ppte_End = ppte + KChunkSize/KPageSize;
sl@0
  1704
	TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
sl@0
  1705
	for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
sl@0
  1706
		*ppte = phys | KRomPtePerm;
sl@0
  1707
	CacheMaintenance::MultiplePtesUpdated(firstPte, sizeof(TPte)*KChunkSize/KPageSize);
sl@0
  1708
	}
sl@0
  1709
sl@0
  1710
// Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
sl@0
  1711
// It is assumed the shadow page (aShadowPhys) is not yet mapped, therefore any mapping colour is OK.
sl@0
  1712
void ArmMmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
sl@0
  1713
	{
sl@0
  1714
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
sl@0
  1715
		aShadowPhys, aRomAddr));
sl@0
  1716
sl@0
  1717
	// put in a temporary mapping for aShadowPhys
sl@0
  1718
	// make it noncacheable
sl@0
  1719
	*iTempPte = aShadowPhys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
sl@0
  1720
	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
sl@0
  1721
sl@0
  1722
	// copy contents of ROM
sl@0
  1723
	wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
sl@0
  1724
	//Temp address is uncached. No need to clean cache, just flush write buffer
sl@0
  1725
	CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, KPageSize, EMapAttrBufferedC);
sl@0
  1726
	
sl@0
  1727
	// remove temporary mapping
sl@0
  1728
	*iTempPte=0;
sl@0
  1729
	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
sl@0
  1730
	InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
sl@0
  1731
	}
sl@0
  1732
sl@0
  1733
// Assign a shadow page table to replace a ROM section mapping
sl@0
  1734
// Enter and return with system locked
sl@0
  1735
void ArmMmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
sl@0
  1736
	{
sl@0
  1737
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
sl@0
  1738
		aId, aRomAddr));
sl@0
  1739
	TLinAddr ptLin=PageTableLinAddr(aId);
sl@0
  1740
	TPhysAddr ptPhys=LinearToPhysical(ptLin, 0);
sl@0
  1741
	TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
sl@0
  1742
	TPde newpde = ptPhys | KShadowPdePerm;
sl@0
  1743
	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
sl@0
  1744
	TInt irq=NKern::DisableAllInterrupts();
sl@0
  1745
	*ppde = newpde;		// map in the page table
sl@0
  1746
	CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
sl@0
  1747
	
sl@0
  1748
	FlushTLBs();	// flush both TLBs (no need to flush cache yet)
sl@0
  1749
	NKern::RestoreInterrupts(irq);
sl@0
  1750
	}
sl@0
  1751
sl@0
  1752
void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
sl@0
  1753
	{
sl@0
  1754
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
sl@0
  1755
	TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
sl@0
  1756
	TPte newpte = aOrigPhys | KRomPtePerm;
sl@0
  1757
	__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
sl@0
  1758
	TInt irq=NKern::DisableAllInterrupts();
sl@0
  1759
	*ppte = newpte;
sl@0
  1760
	CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
sl@0
  1761
	
sl@0
  1762
	InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);
sl@0
  1763
	#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  1764
	__FlushBtb();
sl@0
  1765
	#endif
sl@0
  1766
sl@0
  1767
	CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
sl@0
  1768
	CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
sl@0
  1769
	NKern::RestoreInterrupts(irq);
sl@0
  1770
	}
sl@0
  1771
sl@0
  1772
TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys)
sl@0
  1773
	{
sl@0
  1774
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys));
sl@0
  1775
	TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
sl@0
  1776
	TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions;
sl@0
  1777
	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
sl@0
  1778
	TInt irq=NKern::DisableAllInterrupts();
sl@0
  1779
	*ppde = newpde;			// revert to section mapping
sl@0
  1780
	CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
sl@0
  1781
	
sl@0
  1782
	FlushTLBs();			// flush both TLBs
sl@0
  1783
	NKern::RestoreInterrupts(irq);
sl@0
  1784
	return KErrNone;
sl@0
  1785
	}
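
// Illustrative sketch, not part of the build: the shadow-ROM routines above are
// used together roughly as follows (the real callers live in the generic memory
// model code and also handle allocation, locking and error paths):
//   1. InitShadowPageTable(id, romAddr, origPhys) builds a page table covering the
//      1MB ROM section, with every PTE still pointing at the original ROM.
//   2. InitShadowPage(shadowPhys, romAddr) copies the original ROM page contents
//      into the freshly allocated shadow page.
//   3. AssignShadowPageTable(id, romAddr) swaps the ROM section mapping for the
//      new page table, after which the PTE for romAddr is pointed at the shadow page.
//   4. On removal, DoUnmapShadowPage() restores the original PTE and
//      UnassignShadowPageTable() reverts the PDE to the plain ROM section mapping.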
sl@0
  1786
sl@0
  1787
sl@0
  1788
#if defined(__CPU_MEMORY_TYPE_REMAPPING)	// arm1176, arm11mcore, armv7, ...
sl@0
  1789
/**
sl@0
  1790
Shadow pages on platforms with remapping (mpcore, 1176, cortex...) are not writable.
sl@0
  1791
This will map the region into writable memory first.
sl@0
  1792
@pre No Fast Mutex held
sl@0
  1793
*/
sl@0
  1794
TInt ArmMmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
sl@0
  1795
	{
sl@0
  1796
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength));
sl@0
  1797
sl@0
  1798
	// Check that destination is ROM
sl@0
  1799
	if (aDest<iRomLinearBase || (aDest+aLength) > iRomLinearEnd)
sl@0
  1800
		{
sl@0
  1801
		__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: Destination not entirely in ROM"));
sl@0
  1802
		return KErrArgument;
sl@0
  1803
		}
sl@0
  1804
	// do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us)
sl@0
  1805
	MmuBase::Wait();
sl@0
  1806
sl@0
  1807
sl@0
  1808
	TInt r = KErrNone;
sl@0
  1809
	while (aLength)
sl@0
  1810
		{
sl@0
  1811
		// Calculate the memory size to copy in this iteration. At most a single page region is copied per iteration
sl@0
  1812
		TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask));
sl@0
  1813
sl@0
  1814
		// Get physical address
sl@0
  1815
		TPhysAddr	physAddr = LinearToPhysical(aDest&~iPageMask, 0);
sl@0
  1816
		if (KPhysAddrInvalid==physAddr)
sl@0
  1817
			{
sl@0
  1818
			r = KErrArgument;
sl@0
  1819
			break;
sl@0
  1820
			}
sl@0
  1821
		
sl@0
  1822
		//check whether it is shadowed rom
sl@0
  1823
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
sl@0
  1824
		if (pi==0 || pi->Type()!=SPageInfo::EShadow)
sl@0
  1825
			{
sl@0
  1826
			__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: No shadow page at this address"));
sl@0
  1827
			r = KErrArgument;
sl@0
  1828
			break;
sl@0
  1829
			}
sl@0
  1830
sl@0
  1831
		//Temporarily map into writable memory and copy data. RamAllocator DMutex is required
sl@0
  1832
		TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask);
sl@0
  1833
		__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize));
sl@0
  1834
		memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize);  //Kernel-to-Kernel copy is presumed
sl@0
  1835
		UnmapTemp();
sl@0
  1836
sl@0
  1837
		//Update variables for the next loop/page
sl@0
  1838
		aDest+=copySize;
sl@0
  1839
		aSrc+=copySize;
sl@0
  1840
		aLength-=copySize;
sl@0
  1841
		}
sl@0
  1842
	MmuBase::Signal();
sl@0
  1843
	return r;
sl@0
  1844
	}
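
// Illustrative sketch, not part of the build: the loop above splits the copy at
// page boundaries so each iteration writes into exactly one destination page.
// The same chunking isolated as a standalone helper; ForEachPageChunk and the
// 4KB page size are assumptions for the example.
static void ForEachPageChunk(TLinAddr aDest, TUint32 aLength, void (*aCopyChunk)(TLinAddr aChunkDest, TUint32 aChunkSize))
	{
	const TUint32 KPageSz = 0x1000;						// assumed 4KB pages
	const TUint32 KPageMsk = KPageSz - 1;
	while (aLength)
		{
		TUint32 copySize = KPageSz - (aDest & KPageMsk);	// room left in this page
		if (copySize > aLength)
			copySize = aLength;
		aCopyChunk(aDest, copySize);					// copy one page (or less) at a time
		aDest += copySize;
		aLength -= copySize;
		}
	}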
sl@0
  1845
#endif
sl@0
  1846
sl@0
  1847
void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
sl@0
  1848
	{
sl@0
  1849
#if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7 and later
sl@0
  1850
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage not required with MEMORY_TYPE_REMAPPING"));
sl@0
  1851
#else
sl@0
  1852
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
sl@0
  1853
		aId, aRomAddr));
sl@0
  1854
	TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
sl@0
  1855
	TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePerm;
sl@0
  1856
	__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
sl@0
  1857
	*ppte = newpte;
sl@0
  1858
	CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
sl@0
  1859
	InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);
sl@0
  1860
#endif	
sl@0
  1861
	}
sl@0
  1862
sl@0
  1863
/** Replaces large page(64K) entry in page table with small page(4K) entries.*/
sl@0
  1864
void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr)
sl@0
  1865
	{
sl@0
  1866
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr));
sl@0
  1867
	
sl@0
  1868
	TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
sl@0
  1869
	TPte* pte = PageTable(aId);
sl@0
  1870
	if ((pte[pteIndex] & KArmV6PteTypeMask) == KArmV6PteLargePage)
sl@0
  1871
		{
sl@0
  1872
		__KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages"));
sl@0
  1873
		pteIndex &= ~0xf;
sl@0
  1874
		TPte source = pte[pteIndex];
sl@0
  1875
		source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source);
sl@0
  1876
		pte += pteIndex;
sl@0
  1877
		for (TInt entry=0; entry<16; entry++)
sl@0
  1878
			{
sl@0
  1879
			pte[entry] = source | (entry<<12);
sl@0
  1880
			}
sl@0
  1881
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)pte, 16*sizeof(TPte));
sl@0
  1882
		FlushTLBs();
sl@0
  1883
		}
sl@0
  1884
	}
sl@0
  1885
sl@0
  1886
void ArmMmu::FlushShadow(TLinAddr aRomAddr)
sl@0
  1887
	{
sl@0
  1888
	CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
sl@0
  1889
	CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
sl@0
  1890
	InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);		// remove all TLB references to original ROM page
sl@0
  1891
	}
sl@0
  1892
sl@0
  1893
sl@0
  1894
#if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7
sl@0
  1895
/**
sl@0
  1896
Calculates page directory/table entries for memory type described in aMapAttr.
sl@0
  1897
Global, small page (4KB) mapping is assumed.
sl@0
  1898
(All magic numbers come from ARM page table descriptions.)
sl@0
  1899
@param aMapAttr On entry, holds a description (memory type, access permissions, ...) of the memory.
sl@0
  1900
				It is made up of TMappingAttributes constants or a TMappingAttributes2 object. If TMappingAttributes,
sl@0
  1901
				it may be altered on exit to hold the actual cache attributes & access permissions.
sl@0
  1902
@param aPde		On exit, holds the 1st level descriptor (page directory entry)
sl@0
  1903
				for the given type of memory, with base address set to 0.
sl@0
  1904
@param aPte		On exit, holds the small-page entry (4K) for the 2nd level descriptor
sl@0
  1905
				for the given type of memory, with base address set to 0.
sl@0
  1906
@return KErrNotSupported 	If memory described in aMapAttr is not supported
sl@0
  1907
		KErrNone			Otherwise
sl@0
  1908
*/
sl@0
  1909
TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
sl@0
  1910
	{
sl@0
  1911
	__KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
sl@0
  1912
sl@0
  1913
	TMappingAttributes2& memory = (TMappingAttributes2&)aMapAttr;
sl@0
  1914
sl@0
  1915
	if(memory.ObjectType2())
sl@0
  1916
		{
sl@0
  1917
//---------Memory described by TMappingAttributes2 object-----------------
sl@0
  1918
		aPde = 	KArmV6PdePageTable	|
sl@0
  1919
				(memory.Parity() ? KArmV6PdeECCEnable : 0);
sl@0
  1920
#if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
sl@0
  1921
		if(!memory.Shared() && (memory.Type() == EMemAttDevice ))
sl@0
  1922
		{
sl@0
  1923
			aMapAttr ^= EMapAttrBufferedNC;
sl@0
  1924
			aMapAttr |= EMapAttrFullyBlocking;
sl@0
  1925
			// Clear EMemAttDevice
sl@0
  1926
			aMapAttr ^= (EMemAttDevice << 26);
sl@0
  1927
			aMapAttr |= (EMemAttStronglyOrdered << 26);
sl@0
  1928
		}
sl@0
  1929
#endif
sl@0
  1930
		aPte =	KArmV6PteSmallPage										|
sl@0
  1931
				KArmV6PteAP0											|	// AP0 bit always 1
sl@0
  1932
				((memory.Type()&3)<<2) | ((memory.Type()&4)<<4)			|	// memory type
sl@0
  1933
				(memory.Executable() ? 0			: KArmV6PteSmallXN)	|	// eXecuteNever bit
sl@0
  1934
#if defined	(__CPU_USE_SHARED_MEMORY)
sl@0
  1935
				KArmV6PteS 												|	// Memory is always shared.
sl@0
  1936
#else
sl@0
  1937
				(memory.Shared()	  ? KArmV6PteS	: 0) 				|	// Shared bit
sl@0
  1938
#endif				
sl@0
  1939
				(memory.Writable()	  ? 0			: KArmV6PteAPX)		|	// APX = !Writable
sl@0
  1940
				(memory.UserAccess() ? KArmV6PteAP1: 0);					// AP1 = UserAccess
sl@0
  1941
		// aMapAttr remains the same
sl@0
  1942
		}
sl@0
  1943
	else
sl@0
  1944
		{
sl@0
  1945
//---------Memory described by TMappingAttributes bitmask-----------------
sl@0
  1946
#if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
sl@0
  1947
		if(((aMapAttr & EMapAttrL1CacheMask) == EMapAttrBufferedNC) && !(aMapAttr & EMapAttrShared))
sl@0
  1948
		{
sl@0
  1949
			// Clear EMapAttrBufferedNC attribute
sl@0
  1950
			aMapAttr ^= EMapAttrBufferedNC;
sl@0
  1951
			aMapAttr |= EMapAttrFullyBlocking;
sl@0
  1952
		}
sl@0
  1953
#endif
sl@0
  1954
		//	1.	Calculate TEX0:C:B bits in page table and actual cache attributes.
sl@0
  1955
		//		Only L1 cache attribute from aMapAttr matters. Outer (L2) cache policy will be the same as inner one.
sl@0
  1956
		TUint l1cache=aMapAttr & EMapAttrL1CacheMask; // Inner cache attributes. May change to actual value.
sl@0
  1957
		TUint l2cache;	// Will hold actual L2 cache attributes (in terms of TMappingAttributes constants)
sl@0
  1958
		TUint tex0_c_b; // Will hold TEX[0]:C:B value in page table
sl@0
  1959
sl@0
  1960
		switch (l1cache)
sl@0
  1961
			{
sl@0
  1962
			case EMapAttrFullyBlocking:
sl@0
  1963
				tex0_c_b = EMemAttStronglyOrdered;
sl@0
  1964
				l2cache = EMapAttrL2Uncached;
sl@0
  1965
				break;
sl@0
  1966
			case EMapAttrBufferedNC:
sl@0
  1967
				tex0_c_b = EMemAttDevice;
sl@0
  1968
				l2cache = EMapAttrL2Uncached;
sl@0
  1969
				break;
sl@0
  1970
			case EMapAttrBufferedC:
sl@0
  1971
			case EMapAttrL1Uncached:
sl@0
  1972
			case EMapAttrCachedWTRA:
sl@0
  1973
			case EMapAttrCachedWTWA:
sl@0
  1974
				tex0_c_b = EMemAttNormalUncached;
sl@0
  1975
				l1cache = EMapAttrBufferedC;
sl@0
  1976
				l2cache = EMapAttrL2Uncached;
sl@0
  1977
				break;
sl@0
  1978
			case EMapAttrCachedWBRA:
sl@0
  1979
			case EMapAttrCachedWBWA:
sl@0
  1980
			case EMapAttrL1CachedMax:
sl@0
  1981
				tex0_c_b = EMemAttNormalCached;
sl@0
  1982
				l1cache = EMapAttrCachedWBWA;
sl@0
  1983
				l2cache = EMapAttrL2CachedWBWA;
sl@0
  1984
				break;
sl@0
  1985
			default:
sl@0
  1986
				return KErrNotSupported;
sl@0
  1987
			}
sl@0
  1988
sl@0
  1989
		//	2.	Step 2 has been removed :)
sl@0
  1990
sl@0
  1991
		//	3.	Calculate access permissions (apx:ap bits in page table + eXecute it)
sl@0
  1992
		TUint read=aMapAttr & EMapAttrReadMask;
sl@0
  1993
		TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
sl@0
  1994
		TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
sl@0
  1995
sl@0
  1996
		read|=exec; 		// User/Sup execute access requires User/Sup read access.
sl@0
  1997
		if (exec) exec = 1; // There is a single eXecute bit in page table. Set to one if User or Sup exec is required.
sl@0
  1998
sl@0
  1999
		TUint apxap=0;
sl@0
  2000
		if (write==0) 		// no write required
sl@0
  2001
			{
sl@0
  2002
			if 		(read>=4)	apxap=KArmV6PermRORO;		// user read required
sl@0
  2003
			else if (read==1) 	apxap=KArmV6PermRONO;		// supervisor read required
sl@0
  2004
			else 				return KErrNotSupported;	// no read required
sl@0
  2005
			}
sl@0
  2006
		else if (write<4)	// supervisor write required
sl@0
  2007
			{
sl@0
  2008
			if (read<4) 		apxap=KArmV6PermRWNO;		// user read not required
sl@0
  2009
			else 				return KErrNotSupported;	// user read required 
sl@0
  2010
			}
sl@0
  2011
		else				// user & supervisor writes required
sl@0
  2012
			{
sl@0
  2013
			apxap=KArmV6PermRWRW;		
sl@0
  2014
			}
sl@0
  2015
	
sl@0
  2016
		//	4.	Calculate page-table-entry for the 1st level (aka page directory) descriptor 
sl@0
  2017
		aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable;
sl@0
  2018
sl@0
  2019
		//	5.	Calculate small-page-entry for the 2nd level (aka page table) descriptor 
sl@0
  2020
		aPte=SP_PTE(apxap, tex0_c_b, exec, 1);	// always global
sl@0
  2021
		if (aMapAttr&EMapAttrShared)
sl@0
  2022
			aPte |= KArmV6PteS;
sl@0
  2023
	
sl@0
  2024
		//	6.	Fix aMapAttr to hold the actual values for access permission & cache attributes
sl@0
  2025
		TUint xnapxap=((aPte<<3)&8)|((aPte>>7)&4)|((aPte>>4)&3);
sl@0
  2026
		aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask);
sl@0
  2027
		aMapAttr |= PermissionLookup[xnapxap]; 	// Set actual access permissions
sl@0
  2028
		aMapAttr |= l1cache;					// Set actual inner cache attributes
sl@0
  2029
		aMapAttr |= l2cache;					// Set actual outer cache attributes
sl@0
  2030
		}
sl@0
  2031
sl@0
  2032
	__KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x", 	aMapAttr, aPde, aPte));
sl@0
  2033
	return KErrNone;
sl@0
  2034
	}
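
// Illustrative sketch, not part of the build: with memory type remapping the
// small-page descriptor carries the three-bit memory type as TEX[0]:C:B, i.e.
// PTE bits 6, 3 and 2. The expression ((memory.Type()&3)<<2)|((memory.Type()&4)<<4)
// above does exactly that packing; the same arithmetic spelt out (MemTypeToTex0CB
// is a hypothetical helper name):
static inline TUint32 MemTypeToTex0CB(TUint32 aMemType)	// aMemType in 0..7
	{
	TUint32 cb   = (aMemType & 3) << 2;	// type bits 1:0 -> C:B (PTE bits 3:2)
	TUint32 tex0 = (aMemType & 4) << 4;	// type bit 2    -> TEX[0] (PTE bit 6)
	return tex0 | cb;
	}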
sl@0
  2035
sl@0
  2036
#else //ARMv6 (arm1136)
sl@0
  2037
sl@0
  2038
const TUint FBLK=(EMapAttrFullyBlocking>>12);
sl@0
  2039
const TUint BFNC=(EMapAttrBufferedNC>>12);
sl@0
  2040
//const TUint BUFC=(EMapAttrBufferedC>>12);
sl@0
  2041
const TUint L1UN=(EMapAttrL1Uncached>>12);
sl@0
  2042
const TUint WTRA=(EMapAttrCachedWTRA>>12);
sl@0
  2043
//const TUint WTWA=(EMapAttrCachedWTWA>>12);
sl@0
  2044
const TUint WBRA=(EMapAttrCachedWBRA>>12);
sl@0
  2045
const TUint WBWA=(EMapAttrCachedWBWA>>12);
sl@0
  2046
const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
sl@0
  2047
//const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
sl@0
  2048
//const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
sl@0
  2049
const TUint AWBW=(EMapAttrAltCacheWBWA>>12);
sl@0
  2050
const TUint MAXC=(EMapAttrL1CachedMax>>12);
sl@0
  2051
sl@0
  2052
const TUint L2UN=(EMapAttrL2Uncached>>16);
sl@0
  2053
sl@0
  2054
const TUint8 UNS=0xffu;	// Unsupported attribute
sl@0
  2055
sl@0
  2056
//Maps L1 & L2 cache attributes into TEX[4:2]:CB[1:0]
sl@0
  2057
//ARMv6 doesn't do WTWA so we use WTRA instead
sl@0
  2058
sl@0
  2059
#if !defined(__CPU_ARM1136_ERRATUM_399234_FIXED)
sl@0
  2060
// L1 Write-Through mode is outlawed, L1WT acts as L1UN.
sl@0
  2061
static const TUint8 CBTEX[40]=
sl@0
  2062
	{            // L1CACHE:
sl@0
  2063
//  FBLK  BFNC  BUFC  L1UN  WTRA  WTWA  WBRA  WBWA 	  L2CACHE:
sl@0
  2064
	0x00, 0x01, 0x01, 0x04, 0x04, 0x04, 0x13, 0x11,	//NC
sl@0
  2065
	0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19,	//WTRA
sl@0
  2066
	0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19,	//WTWA
sl@0
  2067
	0x00, 0x01, 0x01, 0x1c, 0x1c, 0x1c, 0x1f, 0x1d,	//WBRA
sl@0
  2068
	0x00, 0x01, 0x01, 0x14, 0x14, 0x14, 0x17, 0x15	//WBWA
sl@0
  2069
	};
sl@0
  2070
#else
sl@0
  2071
static const TUint8 CBTEX[40]=
sl@0
  2072
	{            // L1CACHE:
sl@0
  2073
//  FBLK  BFNC  BUFC  L1UN  WTRA  WTWA  WBRA  WBWA 	  L2CACHE:
sl@0
  2074
	0x00, 0x01, 0x01, 0x04, 0x12, 0x12, 0x13, 0x11,	//NC
sl@0
  2075
	0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19,	//WTRA
sl@0
  2076
	0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19,	//WTWA
sl@0
  2077
	0x00, 0x01, 0x01, 0x1c, 0x1e, 0x1e, 0x1f, 0x1d,	//WBRA
sl@0
  2078
	0x00, 0x01, 0x01, 0x14, 0x16, 0x16, 0x17, 0x15	//WBWA
sl@0
  2079
	};
sl@0
  2080
#endif
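
// Illustrative sketch, not part of the build: CBTEX above is a 5x8 lookup table -
// the L2 cache attribute (0..4) selects the row and the L1 attribute (0..7) the
// column, so the TEX:CB encoding for a pair of attributes is read as
// CBTEX[(l2cache<<3)|l1cache], exactly as done in PdePtePermissions() below.
// LookupCbTex is a hypothetical wrapper that just names that indexing.
static inline TUint8 LookupCbTex(TUint aL1Cache, TUint aL2Cache)
	{
	return CBTEX[(aL2Cache << 3) | aL1Cache];
	}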
sl@0
  2081
sl@0
  2082
//Maps TEX[4:2]:CB[1:0] value into L1 cache attributes
sl@0
  2083
static const TUint8 L1Actual[32]=
sl@0
  2084
	{
sl@0
  2085
//CB 00		 01		 10		 11		//TEX
sl@0
  2086
	FBLK,	BFNC,	WTRA,	WBRA,	//000
sl@0
  2087
	L1UN,  	UNS,  	UNS, 	WBWA,	//001
sl@0
  2088
	BFNC,	UNS,	UNS,  	UNS,	//010
sl@0
  2089
	UNS,	UNS,	UNS,	UNS,	//011
sl@0
  2090
	L1UN, 	WBWA, 	WTRA, 	WBRA,	//100
sl@0
  2091
	L1UN, 	WBWA, 	WTRA, 	WBRA,	//101
sl@0
  2092
	L1UN, 	WBWA, 	WTRA, 	WBRA,	//110
sl@0
  2093
	L1UN, 	WBWA, 	WTRA, 	WBRA	//111
sl@0
  2094
	};
sl@0
  2095
sl@0
  2096
//Maps TEX[4:2]:CB[1:0] value into L2 cache attributes
sl@0
  2097
static const TUint8 L2Actual[32]=
sl@0
  2098
	{
sl@0
  2099
//CB 00		 01		 10		 11		//TEX
sl@0
  2100
	L2UN,	L2UN,	WTRA,	WBRA,	//000
sl@0
  2101
	L2UN,	UNS,	UNS,	WBWA,	//001
sl@0
  2102
	L2UN,	UNS,	UNS,	UNS,	//010
sl@0
  2103
	UNS,	UNS,	UNS,	UNS,	//011
sl@0
  2104
	L2UN,	L2UN,	L2UN,	L2UN,	//100
sl@0
  2105
	WBWA,	WBWA,	WBWA,	WBWA,	//101
sl@0
  2106
	WTRA,	WTRA,	WTRA,	WTRA,	//110
sl@0
  2107
	WBRA,	WBRA,	WBRA,	WBRA	//111
sl@0
  2108
	};
sl@0
  2109
sl@0
  2110
TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
sl@0
  2111
	{
sl@0
  2112
	__KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
sl@0
  2113
sl@0
  2114
	TUint read=aMapAttr & EMapAttrReadMask;
sl@0
  2115
	TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
sl@0
  2116
	TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
sl@0
  2117
	TUint l1cache=(aMapAttr & EMapAttrL1CacheMask)>>12;
sl@0
  2118
	TUint l2cache=(aMapAttr & EMapAttrL2CacheMask)>>16;
sl@0
  2119
	if (l1cache==MAXC) l1cache=WBRA;	// map max cache to WBRA
sl@0
  2120
	if (l1cache>AWBW)
sl@0
  2121
		return KErrNotSupported;		// undefined attribute
sl@0
  2122
	if (l1cache>=AWTR) l1cache-=4;		// no alternate cache, so use normal cache
sl@0
  2123
	if (l1cache<L1UN) l2cache=0;		// for blocking/device, don't cache L2
sl@0
  2124
	if (l2cache==MAXC) l2cache=WBRA;	// map max cache to WBRA
sl@0
  2125
	if (l2cache>WBWA)
sl@0
  2126
		return KErrNotSupported;		// undefined attribute
sl@0
  2127
	if (l2cache) l2cache-=(WTRA-1);		// l2cache now in range 0-4
sl@0
  2128
	aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable;
sl@0
  2129
sl@0
  2130
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  2131
	// if broken 1136, can't have supervisor only code
sl@0
  2132
	if (exec)
sl@0
  2133
		exec = TUint(EMapAttrExecUser>>8);
sl@0
  2134
#endif
sl@0
  2135
sl@0
  2136
	// if any execute access, must have read=execute
sl@0
  2137
	if (exec)
sl@0
  2138
		(void)(read>=exec || (read=exec)!=0), exec=1;
sl@0
  2139
sl@0
  2140
	// l1cache between 0 and 7, l2cache between 0 and 4; look up CBTEX
sl@0
  2141
	TUint cbtex=CBTEX[(l2cache<<3)|l1cache];
sl@0
  2142
sl@0
  2143
	// work out apx:ap
sl@0
  2144
	TUint apxap;
sl@0
  2145
	if (write==0)
sl@0
  2146
		apxap=(read>=4)?KArmV6PermRORO:(read?KArmV6PermRONO:KArmV6PermNONO);
sl@0
  2147
	else if (write<4)
sl@0
  2148
		apxap=(read>=4)?KArmV6PermRWRO:KArmV6PermRWNO;
sl@0
  2149
	else
sl@0
  2150
		apxap=KArmV6PermRWRW;
sl@0
  2151
	TPte pte=SP_PTE(apxap, cbtex, exec, 1);	// always global
sl@0
  2152
	if (aMapAttr&EMapAttrShared)
sl@0
  2153
		pte |= KArmV6PteS;
sl@0
  2154
sl@0
  2155
	// Translate back to get actual map attributes
sl@0
  2156
	TUint xnapxap=((pte<<3)&8)|((pte>>7)&4)|((pte>>4)&3);
sl@0
  2157
	cbtex=((pte>>4)&0x1c)|((pte>>2)&3);  // = TEX[4:2]::CB[1:0]
sl@0
  2158
	aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask);
sl@0
  2159
	aMapAttr |= PermissionLookup[xnapxap];
sl@0
  2160
	aMapAttr |= (L1Actual[cbtex]<<12);
sl@0
  2161
	aMapAttr |= (L2Actual[cbtex]<<16);
sl@0
  2162
	aPte=pte;
sl@0
  2163
	__KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x",
sl@0
  2164
								aMapAttr, aPde, aPte));
sl@0
  2165
	return KErrNone;
sl@0
  2166
	}
sl@0
  2167
#endif
sl@0
  2168
sl@0
  2169
void ArmMmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
sl@0
  2170
//
sl@0
  2171
// Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
sl@0
  2172
// Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
sl@0
  2173
// Assume any page tables required are already assigned.
sl@0
  2174
// aLinAddr, aPhysAddr, aSize must be page-aligned.
sl@0
  2175
//
sl@0
  2176
	{
sl@0
  2177
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
sl@0
  2178
	__KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
sl@0
  2179
	TPde pt_pde=aPdePerm;
sl@0
  2180
	TPte sp_pte=aPtePerm;
sl@0
  2181
	TPde section_pde=SECTION_PDE_FROM_PDEPTE(pt_pde, sp_pte);
sl@0
  2182
	TPte lp_pte=LP_PTE_FROM_SP_PTE(sp_pte);
sl@0
  2183
	TLinAddr la=aLinAddr;
sl@0
  2184
	TPhysAddr pa=aPhysAddr;
sl@0
  2185
	TInt remain=aSize;
sl@0
  2186
	while (remain)
sl@0
  2187
		{
sl@0
  2188
		if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
sl@0
  2189
			{
sl@0
  2190
			// use sections - ASSUMES ADDRESS IS IN GLOBAL REGION
sl@0
  2191
			TInt npdes=remain>>KChunkShift;
sl@0
  2192
			const TBitMapAllocator& b=*iOsAsidAllocator;
sl@0
  2193
			TInt num_os_asids=iNumGlobalPageDirs;
sl@0
  2194
			TInt os_asid=0;
sl@0
  2195
			for (; num_os_asids; ++os_asid)
sl@0
  2196
				{
sl@0
  2197
				if (b.NotAllocated(os_asid,1) || (iAsidInfo[os_asid]&1)==0)
sl@0
  2198
					continue;			// os_asid is not needed
sl@0
  2199
				TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift);
sl@0
  2200
				TPde* p_pde_E=p_pde+npdes;
sl@0
  2201
				TPde pde=pa|section_pde;
sl@0
  2202
				TLinAddr firstPde = (TLinAddr)p_pde; //Will need this to clean page table memory region from cache
sl@0
  2203
sl@0
  2204
				NKern::LockSystem();
sl@0
  2205
				for (; p_pde < p_pde_E; pde+=KChunkSize)
sl@0
  2206
					{
sl@0
  2207
					__ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
sl@0
  2208
					__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
sl@0
  2209
					*p_pde++=pde;
sl@0
  2210
					}
sl@0
  2211
				CacheMaintenance::MultiplePtesUpdated(firstPde, (TUint)p_pde-firstPde);
sl@0
  2212
				NKern::UnlockSystem();
sl@0
  2213
				--num_os_asids;
sl@0
  2214
				}
sl@0
  2215
			npdes<<=KChunkShift;
sl@0
  2216
			la+=npdes, pa+=npdes, remain-=npdes;
sl@0
  2217
			continue;
sl@0
  2218
			}
sl@0
  2219
		TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
sl@0
  2220
		TPte pa_mask=~KPageMask;
sl@0
  2221
		TPte pte_perm=sp_pte;
sl@0
  2222
		if (aMapShift>=KLargePageShift && block_size>=KLargePageSize)
sl@0
  2223
			{
sl@0
  2224
			if ((la & KLargePageMask)==0)
sl@0
  2225
				{
sl@0
  2226
				// use 64K large pages
sl@0
  2227
				pa_mask=~KLargePageMask;
sl@0
  2228
				pte_perm=lp_pte;
sl@0
  2229
				}
sl@0
  2230
			else
sl@0
  2231
				block_size = Min(remain, KLargePageSize-(la&KLargePageMask));
sl@0
  2232
			}
sl@0
  2233
		block_size &= pa_mask;
sl@0
  2234
sl@0
  2235
		// use pages (large or small)
sl@0
  2236
		TInt id=PageTableId(la, 0);
sl@0
  2237
		__ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
sl@0
  2238
		TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift);
sl@0
  2239
		TPte* p_pte_E=p_pte + (block_size>>KPageShift);
sl@0
  2240
		SPageTableInfo& ptinfo=iPtInfo[id];
sl@0
  2241
		TLinAddr firstPte = (TLinAddr)p_pte; //Will need this to clean page table memory region from cache
sl@0
  2242
		
sl@0
  2243
		NKern::LockSystem();
sl@0
  2244
		for (; p_pte < p_pte_E; pa+=KPageSize)
sl@0
  2245
			{
sl@0
  2246
			__ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
sl@0
  2247
			TPte pte = (pa & pa_mask) | pte_perm;
sl@0
  2248
			__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
sl@0
  2249
			*p_pte++=pte;
sl@0
  2250
			++ptinfo.iCount;
sl@0
  2251
			NKern::FlashSystem();
sl@0
  2252
			}
sl@0
  2253
		CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)p_pte-firstPte);
sl@0
  2254
		NKern::UnlockSystem();
sl@0
  2255
		la+=block_size, remain-=block_size;
sl@0
  2256
		}
sl@0
  2257
	}
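
// Illustrative sketch, not part of the build: Map() above always picks the largest
// mapping granularity that the current alignment, the remaining length and
// aMapShift permit - a 1MB section (global region only), a 64KB large page, or a
// 4KB small page. The core of that decision isolated as a hypothetical helper,
// with the sizes stated as assumptions:
static inline TUint ChooseMappingSize(TLinAddr aLin, TUint aRemain, TInt aMapShift)
	{
	const TUint KSection = 0x100000;	// assumed 1MB section size
	const TUint KLarge   = 0x10000;		// assumed 64KB large page size
	const TUint KSmall   = 0x1000;		// assumed 4KB small page size
	if (aMapShift >= 20 && (aLin & (KSection - 1)) == 0 && aRemain >= KSection)
		return KSection;
	if (aMapShift >= 16 && (aLin & (KLarge - 1)) == 0 && aRemain >= KLarge)
		return KLarge;
	return KSmall;
	}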
sl@0
  2258
sl@0
  2259
void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize)
sl@0
  2260
//
sl@0
  2261
// Remove all mappings in the specified range of addresses.
sl@0
  2262
// Assumes there are only global mappings involved.
sl@0
  2263
// Don't free page tables.
sl@0
  2264
// aLinAddr, aSize must be page-aligned.
sl@0
  2265
//
sl@0
  2266
	{
sl@0
  2267
	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
sl@0
  2268
	TLinAddr a=aLinAddr;
sl@0
  2269
	TLinAddr end=a+aSize;
sl@0
  2270
	__KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
sl@0
  2271
	NKern::LockSystem();
sl@0
  2272
	while(a!=end)
sl@0
  2273
		{
sl@0
  2274
		TInt pdeIndex=a>>KChunkShift;
sl@0
  2275
		TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
sl@0
  2276
		TInt to_do = Min(TInt(end-a), TInt(next-a))>>KPageShift;
sl@0
  2277
		__KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
sl@0
  2278
		TPde pde=::InitPageDirectory[pdeIndex];
sl@0
  2279
		if ( (pde&KArmV6PdeTypeMask)==KArmV6PdeSection )
sl@0
  2280
			{
sl@0
  2281
			__ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
sl@0
  2282
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  2283
			remove_and_invalidate_section(::InitPageDirectory + pdeIndex, a, KERNEL_MAPPING);
sl@0
  2284
#else
sl@0
  2285
			::InitPageDirectory[pdeIndex]=0;
sl@0
  2286
			CacheMaintenance::SinglePteUpdated(TLinAddr(::InitPageDirectory + pdeIndex));
sl@0
  2287
			InvalidateTLBForPage(a, KERNEL_MAPPING);		// ASID irrelevant since global
sl@0
  2288
#endif
sl@0
  2289
			a=next;
sl@0
  2290
			NKern::FlashSystem();
sl@0
  2291
			continue;
sl@0
  2292
			}
sl@0
  2293
		TInt ptid=PageTableId(a,0);
sl@0
  2294
		SPageTableInfo& ptinfo=iPtInfo[ptid];
sl@0
  2295
		if (ptid>=0)
sl@0
  2296
			{
sl@0
  2297
			TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift);
sl@0
  2298
			TPte* ppte_End=ppte+to_do;
sl@0
  2299
			for (; ppte<ppte_End; ++ppte, a+=KPageSize)
sl@0
  2300
				{
sl@0
  2301
				if (*ppte & KArmV6PteSmallPage)
sl@0
  2302
					{
sl@0
  2303
					--ptinfo.iCount;
sl@0
  2304
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  2305
					remove_and_invalidate_page(ppte, a, KERNEL_MAPPING);
sl@0
  2306
#else
sl@0
  2307
					*ppte=0;
sl@0
  2308
					CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
sl@0
  2309
					InvalidateTLBForPage(a, KERNEL_MAPPING);
sl@0
  2310
#endif
sl@0
  2311
					}
sl@0
  2312
				else if ((*ppte & KArmV6PteTypeMask) == KArmV6PteLargePage)
sl@0
  2313
					{
sl@0
  2314
					__ASSERT_DEBUG(!(a&KLargePageMask), MM::Panic(MM::EUnmapBadAlignment));
sl@0
  2315
					ptinfo.iCount-=KLargeSmallPageRatio;
sl@0
  2316
#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  2317
					remove_and_invalidate_page(ppte, a, KERNEL_MAPPING);
sl@0
  2318
#else
sl@0
  2319
					memclr(ppte, KLargeSmallPageRatio*sizeof(TPte));
sl@0
  2320
					CacheMaintenance::MultiplePtesUpdated((TLinAddr)ppte, KLargeSmallPageRatio*sizeof(TPte));
sl@0
  2321
					InvalidateTLBForPage(a, KERNEL_MAPPING);
sl@0
  2322
#endif
sl@0
  2323
					a+=(KLargePageSize-KPageSize);
sl@0
  2324
					ppte+=(KLargeSmallPageRatio-1);
sl@0
  2325
					}
sl@0
  2326
				NKern::FlashSystem();
sl@0
  2327
				}
sl@0
  2328
			}
sl@0
  2329
		else
sl@0
  2330
			a += (to_do<<KPageShift);
sl@0
  2331
		}
sl@0
  2332
	NKern::UnlockSystem();
sl@0
  2333
	#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
sl@0
  2334
	__FlushBtb();
sl@0
  2335
	#endif
sl@0
  2336
	}
sl@0
  2337
sl@0
  2338
sl@0
  2339
void ArmMmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
sl@0
  2340
	{
sl@0
  2341
	//map the pages at a temporary address, clear them and unmap
sl@0
  2342
	__ASSERT_MUTEX(RamAllocatorMutex);
sl@0
  2343
	while (--aNumPages >= 0)
sl@0
  2344
		{
sl@0
  2345
		TPhysAddr pa;
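		// If bit 0 of aPageList is set, the 'list' is really the physical address of a
		// contiguous run of pages (advanced by one page per iteration); otherwise it points
		// to an array of individual page addresses.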
sl@0
  2346
		if((TInt)aPageList&1)
sl@0
  2347
			{
sl@0
  2348
			pa = (TPhysAddr)aPageList&~1;
sl@0
  2349
			*(TPhysAddr*)&aPageList += iPageSize;
sl@0
  2350
			}
sl@0
  2351
		else
sl@0
  2352
			pa = *aPageList++;
sl@0
  2353
		
sl@0
  2354
		*iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
sl@0
  2355
		CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
sl@0
  2356
		InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
sl@0
  2357
		memset((TAny*)iTempAddr, aClearByte, iPageSize);
sl@0
  2358
		// This temporary mapping is noncached => No need to flush cache here.
sl@0
  2359
		// Still, we have to make sure that write buffer(s) are drained.
sl@0
  2360
		CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, iPageSize, EMapAttrBufferedC);
sl@0
  2361
		}
sl@0
  2362
	*iTempPte=0;
sl@0
  2363
	CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
sl@0
  2364
	InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
sl@0
  2365
	}
sl@0
  2366
sl@0
  2367
sl@0
  2368
/**
sl@0
  2369
Create a temporary mapping of one or more contiguous physical pages.
sl@0
  2370
Fully cached memory attributes apply.
sl@0
  2371
The RamAllocatorMutex must be held before this function is called and not released
sl@0
  2372
until after UnmapTemp has been called.
sl@0
  2373
sl@0
  2374
@param aPage	The physical address of the pages to be mapped.
sl@0
  2375
@param aLinAddr The linear address of any existing location where the page is mapped.
sl@0
  2376
				If the page isn't already mapped elsewhere as a cacheable page then
sl@0
  2377
				this value is irrelevant. (It is used for page colouring.)
sl@0
  2378
@param aPages	Number of pages to map.
sl@0
  2379
sl@0
  2380
@return The linear address of where the pages have been mapped.
sl@0
  2381
*/
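// Illustrative use only (the names below are hypothetical, not taken from an actual caller);
// with the RamAllocatorMutex held:
//     TLinAddr va = ::TheMmu.MapTemp(physAddr, existingLinAddr, 1);
//     memcpy((TAny*)va, sourceData, KPageSize);
//     ::TheMmu.UnmapTemp();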
sl@0
  2382
TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages)
sl@0
  2383
	{
sl@0
  2384
	__ASSERT_MUTEX(RamAllocatorMutex);
sl@0
  2385
	__ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
sl@0
  2386
	iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
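	// (The colour is VA bits 13:12 of the existing mapping; reusing it for the temporary
	// mapping avoids creating a differently-coloured virtual alias in the VIPT caches.)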
sl@0
  2387
	iTempMapCount = aPages;
sl@0
  2388
	if (aPages==1)
sl@0
  2389
		{
sl@0
  2390
		iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
sl@0
  2391
		CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor));
sl@0
  2392
		}
sl@0
  2393
	else
sl@0
  2394
		{
sl@0
  2395
		__ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
sl@0
  2396
		for (TInt i=0; i<aPages; i++)
sl@0
  2397
			iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);	
sl@0
  2398
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte));
sl@0
  2399
		}
sl@0
  2400
	return iTempAddr+(iTempMapColor<<KPageShift);
sl@0
  2401
	}
sl@0
  2402
sl@0
  2403
/**
sl@0
  2404
Create a temporary mapping of one or more contiguous physical pages.
sl@0
  2405
Memory attributes as specified by aMemType apply.
sl@0
  2406
@see ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages) for other details.
sl@0
  2407
*/
sl@0
  2408
TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages, TMemoryType aMemType)
sl@0
  2409
	{
sl@0
  2410
	__ASSERT_MUTEX(RamAllocatorMutex);
sl@0
  2411
	__ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
sl@0
  2412
	iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
sl@0
  2413
	iTempMapCount = aPages;
sl@0
  2414
	TUint pte = SP_PTE(KArmV6PermRWNO, aMemType, 0, 1);
sl@0
  2415
	if (aPages==1)
sl@0
  2416
		{
sl@0
  2417
		iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, pte, 0, 1);
sl@0
  2418
		CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor));
sl@0
  2419
		}
sl@0
  2420
	else
sl@0
  2421
		{
sl@0
  2422
		__ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
sl@0
  2423
		for (TInt i=0; i<aPages; i++)
sl@0
  2424
			iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, pte, 0, 1);	
sl@0
  2425
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte));
sl@0
  2426
		}
sl@0
  2427
	return iTempAddr+(iTempMapColor<<KPageShift);
sl@0
  2428
	}
sl@0
  2429
sl@0
  2430
/**
sl@0
  2431
Create a temporary mapping of one or more contiguous physical pages, distinct from
sl@0
  2432
that created by MapTemp.
sl@0
  2433
The RamAllocatorMutex must be held before this function is called and not released
sl@0
  2434
until after UnmapSecondTemp has been called.
sl@0
  2435
sl@0
  2436
@param aPage	The physical address of the pages to be mapped.
sl@0
  2437
@param aLinAddr The linear address of any existing location where the page is mapped.
sl@0
  2438
				If the page isn't already mapped elsewhere as a cacheable page then
sl@0
  2439
				this value is irrelevant. (It is used for page colouring.)
sl@0
  2440
@param aPages	Number of pages to map.
sl@0
  2441
sl@0
  2442
@return The linear address of where the pages have been mapped.
sl@0
  2443
*/
sl@0
  2444
TLinAddr ArmMmu::MapSecondTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages)
sl@0
  2445
	{
sl@0
  2446
	__ASSERT_MUTEX(RamAllocatorMutex);
sl@0
  2447
	__ASSERT_DEBUG(!*iSecondTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
sl@0
  2448
	iSecondTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
sl@0
  2449
	iSecondTempMapCount = aPages;
sl@0
  2450
	if (aPages==1)
sl@0
  2451
		{
sl@0
  2452
		iSecondTempPte[iSecondTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
sl@0
  2453
		CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor));
sl@0
  2454
		}
sl@0
  2455
	else
sl@0
  2456
		{
sl@0
  2457
		__ASSERT_DEBUG(iSecondTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
sl@0
  2458
		for (TInt i=0; i<aPages; i++)
sl@0
  2459
			iSecondTempPte[iSecondTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);	
sl@0
  2460
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor), aPages*sizeof(TPte));
sl@0
  2461
		}
sl@0
  2462
	return iSecondTempAddr+(iSecondTempMapColor<<KPageShift);
sl@0
  2463
	}
sl@0
  2464
sl@0
  2465
/**
sl@0
  2466
Remove the temporary mapping created with MapTemp.
sl@0
  2467
*/
sl@0
  2468
void ArmMmu::UnmapTemp()
sl@0
  2469
	{
sl@0
  2470
	__ASSERT_MUTEX(RamAllocatorMutex);
sl@0
  2471
	for (TInt i=0; i<iTempMapCount; i++)
sl@0
  2472
		{
sl@0
  2473
		iTempPte[iTempMapColor+i] = 0;
sl@0
  2474
		CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor+i));
sl@0
  2475
		InvalidateTLBForPage(iTempAddr+((iTempMapColor+i)<<KPageShift), KERNEL_MAPPING);
sl@0
  2476
		}
sl@0
  2477
	}
sl@0
  2478
sl@0
  2479
/**
sl@0
  2480
Remove the temporary mapping created with MapSecondTemp.
sl@0
  2481
*/
sl@0
  2482
void ArmMmu::UnmapSecondTemp()
sl@0
  2483
	{
sl@0
  2484
	__ASSERT_MUTEX(RamAllocatorMutex);
sl@0
  2485
	for (TInt i=0; i<iSecondTempMapCount; i++)
sl@0
  2486
		{
sl@0
  2487
		iSecondTempPte[iSecondTempMapColor+i] = 0;
sl@0
  2488
		CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor+i));
sl@0
  2489
		InvalidateTLBForPage(iSecondTempAddr+((iSecondTempMapColor+i)<<KPageShift), KERNEL_MAPPING);
sl@0
  2490
		}
sl@0
  2491
	}
sl@0
  2492
sl@0
  2493
sl@0
  2494
TBool ArmMmu::ValidateLocalIpcAddress(TLinAddr aAddr,TInt aSize,TBool aWrite)
sl@0
  2495
	{
sl@0
  2496
	__NK_ASSERT_DEBUG(aSize<=KChunkSize);
sl@0
  2497
	TLinAddr end = aAddr+aSize-1;
sl@0
  2498
	if(end<aAddr)
sl@0
  2499
		end = ~0u;
sl@0
  2500
sl@0
  2501
	if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize) || TUint(end^KIPCAlias)<TUint(KChunkSize))
sl@0
  2502
		{
sl@0
  2503
		// local address is in alias region.
sl@0
  2504
		// remove alias...
sl@0
  2505
		NKern::LockSystem();
sl@0
  2506
		((DMemModelThread*)TheCurrentThread)->RemoveAlias();
sl@0
  2507
		NKern::UnlockSystem();
sl@0
  2508
		// access memory, which will cause an exception...
sl@0
  2509
		if(!(TUint(aAddr^KIPCAlias)<TUint(KChunkSize)))
sl@0
  2510
			aAddr = end;
sl@0
  2511
		InvalidateTLBForPage(aAddr,((DMemModelProcess*)TheCurrentThread->iOwningProcess)->iOsAsid);
sl@0
  2512
		if(aWrite)
sl@0
  2513
			*(volatile TUint8*)aAddr = 0;
sl@0
  2514
		else
sl@0
  2515
			aWrite = *(volatile TUint8*)aAddr;
sl@0
  2516
		// can't get here
sl@0
  2517
		__NK_ASSERT_DEBUG(0);
sl@0
  2518
		}
sl@0
  2519
sl@0
  2520
	TUint32 local_mask;
sl@0
  2521
	DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
sl@0
  2522
	if(aWrite)
sl@0
  2523
		local_mask = process->iAddressCheckMaskW;
sl@0
  2524
	else
sl@0
  2525
		local_mask = process->iAddressCheckMaskR;
sl@0
  2526
	TUint32 mask = 2<<(end>>27);
sl@0
  2527
	mask -= 1<<(aAddr>>27);
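	// mask now has one bit set for every 128MB (1<<27) region touched by [aAddr,end];
	// all of them must be present in the process's address check mask.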
sl@0
  2528
	if((local_mask&mask)!=mask)
sl@0
  2529
		return EFalse;
sl@0
  2530
sl@0
  2531
	if(!aWrite)
sl@0
  2532
		return ETrue; // reads are ok
sl@0
  2533
sl@0
  2534
	// writes need further checking...
sl@0
  2535
	TLinAddr userCodeStart = iUserCodeBase;
sl@0
  2536
	TLinAddr userCodeEnd = userCodeStart+iMaxUserCodeSize;
sl@0
  2537
	if(end>=userCodeStart && aAddr<userCodeEnd)
sl@0
  2538
		return EFalse; // trying to write to user code area
sl@0
  2539
sl@0
  2540
	return ETrue;
sl@0
  2541
	}
sl@0
  2542
sl@0
  2543
TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TInt aPerm, TLinAddr& aAliasAddr, TInt& aAliasSize)
sl@0
  2544
//
sl@0
  2545
// Set up an alias mapping starting at address aAddr in specified process.
sl@0
  2546
// Check permissions aPerm.
sl@0
  2547
// Enter and return with system locked.
sl@0
  2548
// Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
sl@0
  2549
//
sl@0
  2550
	{
sl@0
  2551
	__KTRACE_OPT(KMMU2,Kern::Printf("Thread %O Alias %08x+%x Process %O perm %x",this,aAddr,aSize,aProcess,aPerm));
sl@0
  2552
	__ASSERT_SYSTEM_LOCK
sl@0
  2553
sl@0
  2554
	if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize))
sl@0
  2555
		return KErrBadDescriptor; // prevent access to alias region
sl@0
  2556
sl@0
  2557
	ArmMmu& m=::TheMmu;
sl@0
  2558
sl@0
  2559
	// check if memory is in region which is safe to access with supervisor permissions...
sl@0
  2560
	TBool okForSupervisorAccess = aPerm&(EMapAttrReadSup|EMapAttrWriteSup) ? 1 : 0;
sl@0
  2561
	if(!okForSupervisorAccess)
sl@0
  2562
		{
sl@0
  2563
		TInt shift = aAddr>>27;
sl@0
  2564
		if(!(aPerm&EMapAttrWriteUser))
sl@0
  2565
			{
sl@0
  2566
			// reading with user permissions...
sl@0
  2567
			okForSupervisorAccess = (aProcess->iAddressCheckMaskR>>shift)&1;
sl@0
  2568
			}
sl@0
  2569
		else
sl@0
  2570
			{
sl@0
  2571
			// writing with user permissions...
sl@0
  2572
			okForSupervisorAccess = (aProcess->iAddressCheckMaskW>>shift)&1;
sl@0
  2573
			if(okForSupervisorAccess)
sl@0
  2574
				{
sl@0
  2575
				// check for user code, because this is supervisor r/w and so
sl@0
  2576
				// is not safe to write to with supervisor permissions.
sl@0
  2577
				if(TUint(aAddr-m.iUserCodeBase)<TUint(m.iMaxUserCodeSize))
sl@0
  2578
					return KErrBadDescriptor; // prevent write to this...
sl@0
  2579
				}
sl@0
  2580
			}
sl@0
  2581
		}
sl@0
  2582
sl@0
  2583
	TInt pdeIndex = aAddr>>KChunkShift;
sl@0
  2584
	if(pdeIndex>=(m.iLocalPdSize>>2))
sl@0
  2585
		{
sl@0
  2586
		// address is in global section, don't bother aliasing it...
sl@0
  2587
		if(iAliasLinAddr)
sl@0
  2588
			RemoveAlias();
sl@0
  2589
		aAliasAddr = aAddr;
sl@0
  2590
		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
sl@0
  2591
		aAliasSize = aSize<maxSize ? aSize : maxSize;
sl@0
  2592
		__KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() abandoned as memory is globally mapped"));
sl@0
  2593
		return okForSupervisorAccess;
sl@0
  2594
		}
sl@0
  2595
sl@0
  2596
	TInt asid = aProcess->iOsAsid;
sl@0
  2597
	TPde* pd = PageDirectory(asid);
sl@0
  2598
	TPde pde = pd[pdeIndex];
sl@0
  2599
	if ((TPhysAddr)(pde&~KPageMask) == AliasRemapOld)
sl@0
  2600
		pde = AliasRemapNew|(pde&KPageMask);
sl@0
  2601
	pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain);
sl@0
  2602
	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
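	// The alias maps the whole 1MB chunk containing aAddr at KIPCAlias by copying the target
	// process's PDE (re-assigned to the IPC alias domain); aliasAddr is the page-aligned
	// address of aAddr's page within that aliased chunk.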
sl@0
  2603
	if(pde==iAliasPde && iAliasLinAddr)
sl@0
  2604
		{
sl@0
  2605
		// pde already aliased, so just update linear address...
sl@0
  2606
		iAliasLinAddr = aliasAddr;
sl@0
  2607
		}
sl@0
  2608
	else
sl@0
  2609
		{
sl@0
  2610
		// alias PDE changed...
sl@0
  2611
		iAliasPde = pde;
sl@0
  2612
		iAliasOsAsid = asid;
sl@0
  2613
		if(!iAliasLinAddr)
sl@0
  2614
			{
sl@0
  2615
			ArmMmu::UnlockAlias();
sl@0
  2616
			::TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
sl@0
  2617
			}
sl@0
  2618
		iAliasLinAddr = aliasAddr;
sl@0
  2619
		*iAliasPdePtr = pde;
sl@0
  2620
		CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
sl@0
  2621
		}
sl@0
  2622
sl@0
  2623
	__KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
sl@0
  2624
	InvalidateTLBForPage(aliasAddr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
sl@0
  2625
	TInt offset = aAddr&KPageMask;
sl@0
  2626
	aAliasAddr = aliasAddr | offset;
sl@0
  2627
	TInt maxSize = KPageSize - offset;
sl@0
  2628
	aAliasSize = aSize<maxSize ? aSize : maxSize;
sl@0
  2629
	iAliasTarget = aAddr & ~KPageMask;
sl@0
  2630
	return okForSupervisorAccess;
sl@0
  2631
	}
sl@0
  2632
sl@0
  2633
void DMemModelThread::RemoveAlias()
sl@0
  2634
//
sl@0
  2635
// Remove alias mapping (if present)
sl@0
  2636
// Enter and return with system locked.
sl@0
  2637
//
sl@0
  2638
	{
sl@0
  2639
	__KTRACE_OPT(KMMU2,Kern::Printf("Thread %O RemoveAlias", this));
sl@0
  2640
	__ASSERT_SYSTEM_LOCK
sl@0
  2641
	TLinAddr addr = iAliasLinAddr;
sl@0
  2642
	if(addr)
sl@0
  2643
		{
sl@0
  2644
		ArmMmu::LockAlias();
sl@0
  2645
		iAliasLinAddr = 0;
sl@0
  2646
		iAliasPde = 0;
sl@0
  2647
		*iAliasPdePtr = 0;
sl@0
  2648
		CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
sl@0
  2649
		InvalidateTLBForPage(addr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
sl@0
  2650
		iAliasLink.Deque();
sl@0
  2651
		}
sl@0
  2652
	}
sl@0
  2653
sl@0
  2654
/*
sl@0
  2655
 * Performs cache maintenance for a physical page that is going to be reused.
sl@0
  2656
 * Fully cached attributes are assumed. 
sl@0
  2657
 */
sl@0
  2658
void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr a)
sl@0
  2659
	{
sl@0
  2660
	// purge a single page from the cache following decommit
sl@0
  2661
	ArmMmu& m=::TheMmu;
sl@0
  2662
	TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
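	// Use the page's old colour (derived from its offset within its owning object) so the
	// temporary maintenance mapping is congruent with the address it was previously mapped at.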
sl@0
  2663
	TPte& pte=m.iTempPte[colour];
sl@0
  2664
	TLinAddr va=m.iTempAddr+(colour<<KPageShift);
sl@0
  2665
	pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
sl@0
  2666
	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
sl@0
  2667
sl@0
  2668
	CacheMaintenance::PageToReuse(va,EMemAttNormalCached, a);
sl@0
  2669
sl@0
  2670
	pte=0;
sl@0
  2671
	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
sl@0
  2672
	InvalidateTLBForPage(va,KERNEL_MAPPING);
sl@0
  2673
	}
sl@0
  2674
sl@0
  2675
void ArmMmu::CacheMaintenanceOnDecommit(const TPhysAddr* al, TInt n)
sl@0
  2676
	{
sl@0
  2677
	// purge a list of pages from the cache following decommit
sl@0
  2678
	while (--n>=0)
sl@0
  2679
		ArmMmu::CacheMaintenanceOnDecommit(*al++);
sl@0
  2680
	}
sl@0
  2681
sl@0
  2682
/*
sl@0
  2683
 * Performs cache maintenance to preserve the contents of a physical page that is going to be reused. 
sl@0
  2684
 */
sl@0
  2685
void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr a, TUint aMapAttr)
sl@0
  2686
	{
sl@0
  2687
	// preserve the contents of a single page and purge it from the cache before reuse
sl@0
  2688
	ArmMmu& m=::TheMmu;
sl@0
  2689
	TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
sl@0
  2690
	TPte& pte=m.iTempPte[colour];
sl@0
  2691
	TLinAddr va=m.iTempAddr+(colour<<KPageShift);
sl@0
  2692
	pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
sl@0
  2693
	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
sl@0
  2694
sl@0
  2695
	CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize,aMapAttr);
sl@0
  2696
sl@0
  2697
	pte=0;
sl@0
  2698
	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
sl@0
  2699
	InvalidateTLBForPage(va,KERNEL_MAPPING);
sl@0
  2700
	}
sl@0
  2701
sl@0
  2702
void ArmMmu::CacheMaintenanceOnPreserve(const TPhysAddr* al, TInt n, TUint aMapAttr)
sl@0
  2703
	{
sl@0
  2704
	// preserve the contents of a list of pages and purge them from the cache before reuse
sl@0
  2705
	while (--n>=0)
sl@0
  2706
		ArmMmu::CacheMaintenanceOnPreserve(*al++, aMapAttr);
sl@0
  2707
	}
sl@0
  2708
sl@0
  2709
/*
sl@0
  2710
 * Performs cache maintenance of physical memory that has been decommitted and has to be preserved.
sl@0
  2711
 * Call this method for physical pages whose page info has not been updated (or that have no page info at all).
sl@0
  2712
 * @arg aPhysAddr	The address of contiguous physical memory to be preserved.
sl@0
  2713
 * @arg aSize		The size of the region
sl@0
  2714
 * @arg aLinAddr 	Former linear address of the region. As noted above, the physical memory has
sl@0
  2715
 * 					already been unmapped from this linear address.
sl@0
  2716
 * @arg aMapAttr 	Mapping attributes of the region when it was mapped in aLinAddr.
sl@0
  2717
 * @pre MMU mutex is held.  
sl@0
  2718
 */
sl@0
  2719
void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr aPhysAddr, TInt aSize, TLinAddr aLinAddr, TUint aMapAttr )
sl@0
  2720
	{
sl@0
  2721
	__NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
sl@0
  2722
	__NK_ASSERT_DEBUG((aSize&KPageMask)==0);
sl@0
  2723
	__NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
sl@0
  2724
sl@0
  2725
	TPhysAddr pa = aPhysAddr;
sl@0
  2726
	TInt size = aSize;
sl@0
  2727
	TInt colour = (aLinAddr>>KPageShift)&KPageColourMask;
sl@0
  2728
	TPte* pte = &(iTempPte[colour]);
sl@0
  2729
	while (size)
sl@0
  2730
		{
sl@0
  2731
		pte=&(iTempPte[colour]);
sl@0
  2732
		TLinAddr va=iTempAddr+(colour<<KPageShift);
sl@0
  2733
		*pte=pa|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
sl@0
  2734
		CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
sl@0
  2735
		CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize,aMapAttr);
sl@0
  2736
sl@0
  2737
		*pte=0;
sl@0
  2738
		CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
sl@0
  2739
		InvalidateTLBForPage(va,KERNEL_MAPPING);
sl@0
  2740
sl@0
  2741
		colour = (colour+1)&KPageColourMask;
sl@0
  2742
		pa += KPageSize;
sl@0
  2743
		size -=KPageSize;
sl@0
  2744
		}
sl@0
  2745
	}
sl@0
  2746
sl@0
  2747
TInt ArmMmu::UnlockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
sl@0
  2748
	{
sl@0
  2749
	TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
sl@0
  2750
	TInt page = aLinAddr>>KPageShift;
sl@0
  2751
	NKern::LockSystem();
sl@0
  2752
	for(;;)
sl@0
  2753
		{
sl@0
  2754
		TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
sl@0
  2755
		TPte* pt = SafePageTableFromPde(*pd++);
sl@0
  2756
		TInt pteIndex = page&(KChunkMask>>KPageShift);
sl@0
  2757
		if(!pt)
sl@0
  2758
			{
sl@0
  2759
			// whole page table has gone, so skip all pages in it...
sl@0
  2760
			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
sl@0
  2761
			aNumPages -= pagesInPt;
sl@0
  2762
			page += pagesInPt;
sl@0
  2763
			if(aNumPages>0)
sl@0
  2764
				continue;
sl@0
  2765
			NKern::UnlockSystem();
sl@0
  2766
			return KErrNone;
sl@0
  2767
			}
sl@0
  2768
		pt += pteIndex;
sl@0
  2769
		do
sl@0
  2770
			{
sl@0
  2771
			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
sl@0
  2772
			if(pagesInPt>aNumPages)
sl@0
  2773
				pagesInPt = aNumPages;
sl@0
  2774
			if(pagesInPt>KMaxPages)
sl@0
  2775
				pagesInPt = KMaxPages;
sl@0
  2776
sl@0
  2777
			aNumPages -= pagesInPt;
sl@0
  2778
			page += pagesInPt;
sl@0
  2779
sl@0
  2780
			do
sl@0
  2781
				{
sl@0
  2782
				TPte pte = *pt++;
sl@0
  2783
				if(pte) // pte may be null if page has already been unlocked and reclaimed by system
sl@0
  2784
					iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
sl@0
  2785
				}
sl@0
  2786
			while(--pagesInPt);
sl@0
  2787
sl@0
  2788
			if(!aNumPages)
sl@0
  2789
				{
sl@0
  2790
				NKern::UnlockSystem();
sl@0
  2791
				return KErrNone;
sl@0
  2792
				}
sl@0
  2793
sl@0
  2794
			pteIndex = page&(KChunkMask>>KPageShift);
sl@0
  2795
			}
sl@0
  2796
		while(!NKern::FlashSystem() && pteIndex);
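		// Stay in the inner loop while the system lock was not contended and we are still
		// within the same page table; otherwise re-derive the page directory and page table
		// pointers, which may have changed while the lock was released.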
sl@0
  2797
		}
sl@0
  2798
	}
sl@0
  2799
sl@0
  2800
sl@0
  2801
TInt ArmMmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
sl@0
  2802
	{
sl@0
  2803
	TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
sl@0
  2804
	TInt page = aLinAddr>>KPageShift;
sl@0
  2805
	NKern::LockSystem();
sl@0
  2806
	for(;;)
sl@0
  2807
		{
sl@0
  2808
		TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
sl@0
  2809
		TPte* pt = SafePageTableFromPde(*pd++);
sl@0
  2810
		TInt pteIndex = page&(KChunkMask>>KPageShift);
sl@0
  2811
		if(!pt)
sl@0
  2812
			goto not_found;
sl@0
  2813
		pt += pteIndex;
sl@0
  2814
		do
sl@0
  2815
			{
sl@0
  2816
			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
sl@0
  2817
			if(pagesInPt>aNumPages)
sl@0
  2818
				pagesInPt = aNumPages;
sl@0
  2819
			if(pagesInPt>KMaxPages)
sl@0
  2820
				pagesInPt = KMaxPages;
sl@0
  2821
sl@0
  2822
			aNumPages -= pagesInPt;
sl@0
  2823
			page += pagesInPt;
sl@0
  2824
sl@0
  2825
			do
sl@0
  2826
				{
sl@0
  2827
				TPte pte = *pt++;
sl@0
  2828
				if(pte==0)
sl@0
  2829
					goto not_found;
sl@0
  2830
				if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
sl@0
  2831
					goto not_found;
sl@0
  2832
				}
sl@0
  2833
			while(--pagesInPt);
sl@0
  2834
sl@0
  2835
			if(!aNumPages)
sl@0
  2836
				{
sl@0
  2837
				NKern::UnlockSystem();
sl@0
  2838
				return KErrNone;
sl@0
  2839
				}
sl@0
  2840
sl@0
  2841
			pteIndex = page&(KChunkMask>>KPageShift);
sl@0
  2842
			}
sl@0
  2843
		while(!NKern::FlashSystem() && pteIndex);
sl@0
  2844
		}
sl@0
  2845
not_found:
sl@0
  2846
	NKern::UnlockSystem();
sl@0
  2847
	return KErrNotFound;
sl@0
  2848
	}
sl@0
  2849
sl@0
  2850
sl@0
  2851
void RamCache::SetFree(SPageInfo* aPageInfo)
sl@0
  2852
	{
sl@0
  2853
	ArmMmu& m=::TheMmu;
sl@0
  2854
	// Make a page free
sl@0
  2855
	SPageInfo::TType type = aPageInfo->Type();
sl@0
  2856
	if(type==SPageInfo::EPagedCache)
sl@0
  2857
		{
sl@0
  2858
		TInt offset = aPageInfo->Offset()<<KPageShift;
sl@0
  2859
		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
sl@0
  2860
		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
sl@0
  2861
		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
sl@0
  2862
		TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
sl@0
  2863
		TPte* pt = PtePtrFromLinAddr(lin,asid);
sl@0
  2864
		TPhysAddr phys = (*pt)&~KPageMask;
sl@0
  2865
		*pt = KPteNotPresentEntry;
sl@0
  2866
		CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0
  2867
		InvalidateTLBForPage(lin,asid);
sl@0
  2868
		m.CacheMaintenanceOnDecommit(phys);
sl@0
  2869
sl@0
  2870
		// actually decommit it from chunk...
sl@0
  2871
		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
sl@0
  2872
		SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
sl@0
  2873
		if(!--ptinfo.iCount)
sl@0
  2874
			{
sl@0
  2875
			chunk->iPageTables[offset>>KChunkShift] = 0xffff;
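			// 0xffff marks 'no page table assigned' for this chunk offset.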
sl@0
  2876
			NKern::UnlockSystem();
sl@0
  2877
			((ArmMmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
sl@0
  2878
			((ArmMmu*)iMmu)->FreePageTable(ptid);
sl@0
  2879
			NKern::LockSystem();
sl@0
  2880
			}
sl@0
  2881
		}
sl@0
  2882
	else
sl@0
  2883
		{
sl@0
  2884
		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
sl@0
  2885
		Panic(EUnexpectedPageType);
sl@0
  2886
		}
sl@0
  2887
	}
sl@0
  2888
sl@0
  2889
sl@0
  2890
//
sl@0
  2891
// MemModelDemandPaging
sl@0
  2892
//
sl@0
  2893
sl@0
  2894
class MemModelDemandPaging : public DemandPaging
sl@0
  2895
	{
sl@0
  2896
public:
sl@0
  2897
	// From RamCacheBase
sl@0
  2898
	virtual void Init2();
sl@0
  2899
	virtual TInt Init3();
sl@0
  2900
	virtual TBool PageUnmapped(SPageInfo* aPageInfo);
sl@0
  2901
	// From DemandPaging
sl@0
  2902
	virtual TInt Fault(TAny* aExceptionInfo);
sl@0
  2903
	virtual void SetOld(SPageInfo* aPageInfo);
sl@0
  2904
	virtual void SetFree(SPageInfo* aPageInfo);
sl@0
  2905
	virtual void NotifyPageFree(TPhysAddr aPage);
sl@0
  2906
	virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
sl@0
  2907
	virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
sl@0
  2908
	virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
sl@0
  2909
	virtual TInt PageState(TLinAddr aAddr);
sl@0
  2910
	virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
sl@0
  2911
	// New
sl@0
  2912
	inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
sl@0
  2913
	void InitRomPaging();
sl@0
  2914
	void InitCodePaging();
sl@0
  2915
	TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid);
sl@0
  2916
	TInt PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory);
sl@0
  2917
public:
sl@0
  2918
	// use of the following members is protected by the system lock...
sl@0
  2919
	TPte* iPurgePte;			// PTE used for temporary mappings during cache purge operations
sl@0
  2920
	TLinAddr iPurgeAddr;		// address corresponding to iPurgePte
sl@0
  2921
	};
sl@0
  2922
sl@0
  2923
extern void MakeGlobalPTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr);
sl@0
  2924
extern void MakePTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr, TInt aAsid);
sl@0
  2925
sl@0
  2926
//
sl@0
  2927
// MemModelDemandPaging
sl@0
  2928
//
sl@0
  2929
sl@0
  2930
sl@0
  2931
DemandPaging* DemandPaging::New()
sl@0
  2932
	{
sl@0
  2933
	return new MemModelDemandPaging();
sl@0
  2934
	}
sl@0
  2935
sl@0
  2936
sl@0
  2937
void MemModelDemandPaging::Init2()
sl@0
  2938
	{
sl@0
  2939
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2"));
sl@0
  2940
	DemandPaging::Init2();
sl@0
  2941
sl@0
  2942
	iPurgeAddr = KDemandPagingTempAddr;
sl@0
  2943
	iPurgePte = PtePtrFromLinAddr(iPurgeAddr);
sl@0
  2944
sl@0
  2945
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init2"));
sl@0
  2946
	}
sl@0
  2947
sl@0
  2948
sl@0
  2949
void MemModelDemandPaging::AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)
sl@0
  2950
	{
sl@0
  2951
	aReq.iLoadAddr = iTempPages + aReqId * KPageSize * KPageColourCount;
sl@0
  2952
	aReq.iLoadPte = PtePtrFromLinAddr(aReq.iLoadAddr);
sl@0
  2953
	}
sl@0
  2954
sl@0
  2955
sl@0
  2956
TInt MemModelDemandPaging::Init3()
sl@0
  2957
	{
sl@0
  2958
	TInt r=DemandPaging::Init3();
sl@0
  2959
	if(r!=KErrNone)
sl@0
  2960
		return r;
sl@0
  2961
	
sl@0
  2962
	// Create a region for mapping pages during page in
sl@0
  2963
	DPlatChunkHw* chunk;
sl@0
  2964
	TInt chunkSize = (KMaxPagingDevices * KPagingRequestsPerDevice + 1) * KPageColourCount * KPageSize;
sl@0
  2965
	DPlatChunkHw::DoNew(chunk, KPhysAddrInvalid, chunkSize, EMapAttrSupRw|EMapAttrFullyBlocking);
sl@0
  2966
	if(!chunk)
sl@0
  2967
		Panic(EInitialiseFailed);
sl@0
  2968
	TInt colourMask = KPageColourMask << KPageShift;
sl@0
  2969
	iTempPages = (chunk->iLinAddr + colourMask) & ~colourMask;
sl@0
  2970
sl@0
  2971
	if(RomPagingRequested())
sl@0
  2972
		InitRomPaging();
sl@0
  2973
sl@0
  2974
	if (CodePagingRequested())
sl@0
  2975
		InitCodePaging();
sl@0
  2976
sl@0
  2977
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init3"));
sl@0
  2978
	return KErrNone;
sl@0
  2979
	}
sl@0
  2980
	
sl@0
  2981
void MemModelDemandPaging::InitRomPaging()
sl@0
  2982
	{
sl@0
  2983
	// Make page tables for demand paged part of ROM...
sl@0
  2984
	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("MemModelDemandPaging::Init3 making page tables for paged ROM"));
sl@0
  2985
	TLinAddr lin = iRomPagedLinearBase&~KChunkMask; // first chunk with paged ROM in
sl@0
  2986
	TLinAddr linEnd = iRomLinearBase+iRomSize;
sl@0
  2987
	while(lin<linEnd)
sl@0
  2988
		{
sl@0
  2989
		// Get a Page Table
sl@0
  2990
		TInt ptid = Mmu().PageTableId(lin,0);
sl@0
  2991
		if(ptid<0)
sl@0
  2992
			{
sl@0
  2993
			MmuBase::Wait();
sl@0
  2994
			ptid = Mmu().AllocPageTable();
sl@0
  2995
			MmuBase::Signal();
sl@0
  2996
			__NK_ASSERT_DEBUG(ptid>=0);
sl@0
  2997
			Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift);
sl@0
  2998
			}
sl@0
  2999
sl@0
  3000
		// Get new page table addresses
sl@0
  3001
		TPte* pt = PageTable(ptid);
sl@0
  3002
		TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt,0);
sl@0
  3003
sl@0
  3004
		// Pointer to page directory entry
sl@0
  3005
		TPde* ppde = ::InitPageDirectory + (lin>>KChunkShift);
sl@0
  3006
sl@0
  3007
		// Fill in Page Table
sl@0
  3008
		TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift));
sl@0
  3009
		pt += (lin&KChunkMask)>>KPageShift;
sl@0
  3010
		TLinAddr firstPte = (TLinAddr)pt; // Will need this to clean page table memory region from cache
sl@0
  3011
sl@0
  3012
		do
sl@0
  3013
			{
sl@0
  3014
			if(lin<iRomPagedLinearBase)
sl@0
  3015
				*pt++ = Mmu().LinearToPhysical(lin,0) | KRomPtePerm;
sl@0
  3016
			else
sl@0
  3017
				{
sl@0
  3018
				MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
sl@0
  3019
				++pt;
sl@0
  3020
				}
sl@0
  3021
			lin += KPageSize;
sl@0
  3022
			}
sl@0
  3023
		while(pt<ptEnd && lin<=linEnd);
sl@0
  3024
sl@0
  3025
		CacheMaintenance::MultiplePtesUpdated((TLinAddr)firstPte, (TUint)pt-firstPte);
sl@0
  3026
sl@0
  3027
		// Add new Page Table to the Page Directory
sl@0
  3028
		TPde newpde = ptPhys | KShadowPdePerm;
sl@0
  3029
		__KTRACE_OPT2(KPAGING,KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
sl@0
  3030
		TInt irq=NKern::DisableAllInterrupts();
sl@0
  3031
		*ppde = newpde;
sl@0
  3032
		CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
sl@0
  3033
		FlushTLBs();
sl@0
  3034
		NKern::RestoreInterrupts(irq);
sl@0
  3035
		}
sl@0
  3036
	}
sl@0
  3037
sl@0
  3038
sl@0
  3039
void MemModelDemandPaging::InitCodePaging()
sl@0
  3040
	{
sl@0
  3041
	// Initialise code paging info
sl@0
  3042
	iCodeLinearBase = Mmu().iUserCodeBase;
sl@0
  3043
	iCodeSize = Mmu().iMaxUserCodeSize;
sl@0
  3044
	}
sl@0
  3045
sl@0
  3046
sl@0
  3047
/**
sl@0
  3048
@return ETrue when the unmapped page should be freed, EFalse otherwise
sl@0
  3049
*/
sl@0
  3050
TBool MemModelDemandPaging::PageUnmapped(SPageInfo* aPageInfo)
sl@0
  3051
	{
sl@0
  3052
	SPageInfo::TType type = aPageInfo->Type();
sl@0
  3053
sl@0
  3054
	// Only have to deal with cache pages - pages containing code don't get returned to the system
sl@0
  3055
	// when they are decommitted from an individual process, only when the code segment is destroyed	
sl@0
  3056
	if(type!=SPageInfo::EPagedCache)
sl@0
  3057
		{
sl@0
  3058
		__NK_ASSERT_DEBUG(type!=SPageInfo::EPagedCode); // shouldn't happen
sl@0
  3059
		__NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet
sl@0
  3060
		return ETrue;
sl@0
  3061
		}
sl@0
  3062
sl@0
  3063
	RemovePage(aPageInfo);
sl@0
  3064
	AddAsFreePage(aPageInfo);
sl@0
  3065
	// Return false to stop DMemModelChunk::DoDecommit from freeing this page
sl@0
  3066
	return EFalse; 
sl@0
  3067
	}
sl@0
  3068
sl@0
  3069
sl@0
  3070
void DoSetCodeOld(SPageInfo* aPageInfo, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
sl@0
  3071
	{
sl@0
  3072
	NThread* currentThread = NKern::CurrentThread(); 
sl@0
  3073
	aPageInfo->SetModifier(currentThread);
sl@0
  3074
	// scan all address spaces...
sl@0
  3075
	TInt asid = -1;
sl@0
  3076
	TInt lastAsid = KArmV6NumAsids-1;
sl@0
  3077
	TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
sl@0
  3078
	do
sl@0
  3079
		{
sl@0
  3080
		TUint32 bits = *ptr++;
sl@0
  3081
		do
sl@0
  3082
			{
sl@0
  3083
			++asid;
sl@0
  3084
			if(bits&0x80000000u)
sl@0
  3085
				{
sl@0
  3086
				// codeseg is mapped in this address space, so update PTE...
sl@0
  3087
				TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
sl@0
  3088
				TPte pte = *pt;
sl@0
  3089
				if(pte&KPtePresentMask)
sl@0
  3090
					{
sl@0
  3091
					__NK_ASSERT_DEBUG((pte&~KPageMask) == aPageInfo->PhysAddr());
sl@0
  3092
					MakePTEInaccessible(pt, pte&~KPtePresentMask, aLinAddr, asid);
sl@0
  3093
					}
sl@0
  3094
				}
sl@0
  3095
			}
sl@0
  3096
		while(bits<<=1);
sl@0
  3097
		if(NKern::FlashSystem() && aPageInfo->CheckModified(currentThread))
sl@0
  3098
			return; // page was modified by another thread
sl@0
  3099
		asid |= 31;
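		// Round asid up to the last ASID covered by this 32-bit bitmap word; the ++asid at
		// the top of the loop then moves on to the first ASID of the next word.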
sl@0
  3100
		}
sl@0
  3101
	while(asid<lastAsid);
sl@0
  3102
	}
sl@0
  3103
sl@0
  3104
sl@0
  3105
void MemModelDemandPaging::SetOld(SPageInfo* aPageInfo)
sl@0
  3106
	{
sl@0
  3107
	__ASSERT_SYSTEM_LOCK;
sl@0
  3108
	__NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedOld);
sl@0
  3109
sl@0
  3110
	SPageInfo::TType type = aPageInfo->Type();
sl@0
  3111
sl@0
  3112
	if(type==SPageInfo::EPagedROM)
sl@0
  3113
		{
sl@0
  3114
		// get linear address of page...
sl@0
  3115
		TInt offset = aPageInfo->Offset()<<KPageShift;
sl@0
  3116
		__NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
sl@0
  3117
sl@0
  3118
		// make page inaccessible...
sl@0
  3119
		TLinAddr lin = iRomLinearBase+offset;
sl@0
  3120
		TPte* pt = PtePtrFromLinAddr(lin);
sl@0
  3121
		MakeGlobalPTEInaccessible(pt, *pt&~KPtePresentMask, lin);
sl@0
  3122
		}
sl@0
  3123
	else if(type==SPageInfo::EPagedCode)
sl@0
  3124
		{
sl@0
  3125
		START_PAGING_BENCHMARK;
sl@0
  3126
sl@0
  3127
		// get linear address of page...
sl@0
  3128
		TInt offset = aPageInfo->Offset()<<KPageShift;
sl@0
  3129
		__NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
sl@0
  3130
		TLinAddr lin = iCodeLinearBase+offset;
sl@0
  3131
			
sl@0
  3132
		// get CodeSegMemory...
sl@0
  3133
		DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
sl@0
  3134
		__NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);
sl@0
  3135
sl@0
  3136
#ifdef _DEBUG
sl@0
  3137
		TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
sl@0
  3138
		__NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
sl@0
  3139
#endif
sl@0
  3140
sl@0
  3141
		// make page inaccessible...
sl@0
  3142
		DoSetCodeOld(aPageInfo,codeSegMemory,lin);
sl@0
  3143
		
sl@0
  3144
		END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld);
sl@0
  3145
		}
sl@0
  3146
	else if(type==SPageInfo::EPagedCache)
sl@0
  3147
		{
sl@0
  3148
		// leave page accessible
sl@0
  3149
		}
sl@0
  3150
	else if(type!=SPageInfo::EPagedFree)
sl@0
  3151
		{
sl@0
  3152
		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type()));
sl@0
  3153
		Panic(EUnexpectedPageType);
sl@0
  3154
		}
sl@0
  3155
	NKern::FlashSystem();
sl@0
  3156
	}
sl@0
  3157
sl@0
  3158
sl@0
  3159
void DoSetCodeFree(SPageInfo* aPageInfo, TPhysAddr aPhysAddr, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
sl@0
  3160
	{
sl@0
  3161
	NThread* currentThread = NKern::CurrentThread();
sl@0
  3162
	aPageInfo->SetModifier(currentThread);
sl@0
  3163
	// scan all address spaces...
sl@0
  3164
	TInt asid = -1;
sl@0
  3165
	TInt lastAsid = KArmV6NumAsids-1;
sl@0
  3166
	TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
sl@0
  3167
	do
sl@0
  3168
		{
sl@0
  3169
		TUint32 bits = *ptr++;
sl@0
  3170
		do
sl@0
  3171
			{
sl@0
  3172
			++asid;
sl@0
  3173
			if(bits&0x80000000u)
sl@0
  3174
				{
sl@0
  3175
				// codeseg is mapped in this address space, so update PTE...
sl@0
  3176
				TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
sl@0
  3177
				TPte pte = *pt;
sl@0
  3178
				if (pte!=KPteNotPresentEntry && (pte&~KPageMask) == aPhysAddr)
sl@0
  3179
					MakePTEInaccessible(pt, KPteNotPresentEntry, aLinAddr, asid);
sl@0
  3180
				}
sl@0
  3181
			}
sl@0
  3182
		while(bits<<=1);
sl@0
  3183
		if(NKern::FlashSystem())
sl@0
  3184
			{
sl@0
  3185
			// nobody else should modify page!
sl@0
  3186
			__NK_ASSERT_DEBUG(!aPageInfo->CheckModified(currentThread));
sl@0
  3187
			}
sl@0
  3188
		asid |= 31;
sl@0
  3189
		}
sl@0
  3190
	while(asid<lastAsid);
sl@0
  3191
	}
sl@0
  3192
sl@0
  3193
sl@0
  3194
void MemModelDemandPaging::SetFree(SPageInfo* aPageInfo)
sl@0
  3195
	{
sl@0
  3196
	__ASSERT_SYSTEM_LOCK;
sl@0
  3197
	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
sl@0
  3198
	__NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedDead);
sl@0
  3199
	if(aPageInfo->LockCount())
sl@0
  3200
		Panic(ERamPageLocked);
sl@0
  3201
sl@0
  3202
	SPageInfo::TType type = aPageInfo->Type();
sl@0
  3203
	TPhysAddr phys = aPageInfo->PhysAddr();
sl@0
  3204
sl@0
  3205
	if(type==SPageInfo::EPagedROM)
sl@0
  3206
		{
sl@0
  3207
		// get linear address of page...
sl@0
  3208
		TInt offset = aPageInfo->Offset()<<KPageShift;
sl@0
  3209
		__NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
sl@0
  3210
		TLinAddr lin = iRomLinearBase+offset;
sl@0
  3211
sl@0
  3212
		// unmap it...
sl@0
  3213
		TPte* pt = PtePtrFromLinAddr(lin);
sl@0
  3214
		MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
sl@0
  3215
sl@0
  3216
#ifdef BTRACE_PAGING
sl@0
  3217
		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutROM,phys,lin);
sl@0
  3218
#endif
sl@0
  3219
		}
sl@0
  3220
	else if(type==SPageInfo::EPagedCode)
sl@0
  3221
		{
sl@0
  3222
		START_PAGING_BENCHMARK;
sl@0
  3223
		
sl@0
  3224
		// get linear address of page...
sl@0
  3225
		TInt offset = aPageInfo->Offset()<<KPageShift;
sl@0
  3226
		__NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
sl@0
  3227
		TLinAddr lin = iCodeLinearBase+offset;
sl@0
  3228
sl@0
  3229
		// get CodeSegMemory...
sl@0
  3230
		// NOTE, this cannot die because we hold the RamAlloc mutex, and the CodeSegMemory
sl@0
  3231
		// destructor also needs this mutex to do its cleanup...
sl@0
  3232
		DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
sl@0
  3233
		__NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);
sl@0
  3234
sl@0
  3235
		// remove page from CodeSegMemory (must come before System Lock is released)...
sl@0
  3236
		TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
sl@0
  3237
		__NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
sl@0
  3238
		codeSegMemory->iPages[pageNumber] = KPhysAddrInvalid;
sl@0
  3239
		
sl@0
  3240
		// unmap page from all processes it's mapped into...
sl@0
  3241
		DoSetCodeFree(aPageInfo,phys,codeSegMemory,lin);
sl@0
  3242
sl@0
  3243
		END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree);
sl@0
  3244
#ifdef BTRACE_PAGING
sl@0
  3245
		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCode,phys,lin);
sl@0
  3246
#endif
sl@0
  3247
		}
sl@0
  3248
	else if(type==SPageInfo::EPagedCache)
sl@0
  3249
		{
sl@0
  3250
		// get linear address of page...
sl@0
  3251
		TInt offset = aPageInfo->Offset()<<KPageShift;
sl@0
  3252
		DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
sl@0
  3253
		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
sl@0
  3254
		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
sl@0
  3255
sl@0
  3256
		// unmap it...
sl@0
  3257
		TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
sl@0
  3258
		TPte* pt = PtePtrFromLinAddr(lin,asid);
sl@0
  3259
		*pt = KPteNotPresentEntry;
sl@0
  3260
		CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0
  3261
sl@0
  3262
		InvalidateTLBForPage(lin,asid);
sl@0
  3263
sl@0
  3264
		// actually decommit it from chunk...
sl@0
  3265
		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
sl@0
  3266
		SPageTableInfo& ptinfo=Mmu().iPtInfo[ptid];
sl@0
  3267
		if(!--ptinfo.iCount)
sl@0
  3268
			{
sl@0
  3269
			chunk->iPageTables[offset>>KChunkShift] = 0xffff;
sl@0
  3270
			NKern::UnlockSystem();
sl@0
  3271
			Mmu().DoUnassignPageTable(lin, (TAny*)asid);
sl@0
  3272
			Mmu().FreePageTable(ptid);
sl@0
  3273
			NKern::LockSystem();
sl@0
  3274
			}
sl@0
  3275
sl@0
  3276
#ifdef BTRACE_PAGING
sl@0
  3277
		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin);
sl@0
  3278
#endif
sl@0
  3279
		}
sl@0
  3280
	else if(type==SPageInfo::EPagedFree)
sl@0
  3281
		{
sl@0
  3282
		// already free...
sl@0
  3283
#ifdef BTRACE_PAGING
sl@0
  3284
		BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys);
sl@0
  3285
#endif
sl@0
  3286
		// fall through to cache purge code because cache may not have been
sl@0
  3287
		// cleaned for this page if PageUnmapped was called
sl@0
  3288
		}
sl@0
  3289
	else
sl@0
  3290
		{
sl@0
  3291
		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
sl@0
  3292
		Panic(EUnexpectedPageType);
sl@0
  3293
		return;
sl@0
  3294
		}
sl@0
  3295
sl@0
  3296
	NKern::UnlockSystem();
sl@0
  3297
sl@0
  3298
	// purge cache for page...
sl@0
  3299
	TInt colour = aPageInfo->Offset()&KPageColourMask;
sl@0
  3300
	TPte& pte=iPurgePte[colour];
sl@0
  3301
	TLinAddr va=iPurgeAddr+(colour<<KPageShift);
sl@0
  3302
	pte=phys|SP_PTE(KArmV6PermRWNO, TheMmu.iCacheMaintenanceTempMapAttr, 1, 1);
sl@0
  3303
	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
sl@0
  3304
sl@0
  3305
	CacheMaintenance::PageToReuse(va,EMemAttNormalCached, KPhysAddrInvalid);
sl@0
  3306
sl@0
  3307
	pte=0;
sl@0
  3308
	CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
sl@0
  3309
	InvalidateTLBForPage(va,KERNEL_MAPPING);
sl@0
  3310
sl@0
  3311
	NKern::LockSystem();
sl@0
  3312
	}
sl@0
  3313
sl@0
  3314
sl@0
  3315
void MemModelDemandPaging::NotifyPageFree(TPhysAddr aPage)
sl@0
  3316
	{
sl@0
  3317
	__KTRACE_OPT(KPAGING, Kern::Printf("MemModelDemandPaging::NotifyPageFree %08x", aPage));
sl@0
  3318
	__ASSERT_SYSTEM_LOCK;
sl@0
  3319
sl@0
  3320
	SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aPage);
sl@0
  3321
	__ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EPagedCode, MM::Panic(MM::EUnexpectedPageType));
sl@0
  3322
	RemovePage(pageInfo);
sl@0
  3323
	SetFree(pageInfo);
sl@0
  3324
	AddAsFreePage(pageInfo);
sl@0
  3325
	}
sl@0
  3326
sl@0
  3327
sl@0
  3328
TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo)
sl@0
  3329
	{
sl@0
  3330
	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
sl@0
  3331
sl@0
  3332
	// Get faulting address
sl@0
  3333
	TLinAddr faultAddress = exc.iFaultAddress;
sl@0
  3334
	if(exc.iExcCode==EArmExceptionDataAbort)
sl@0
  3335
		{
sl@0
  3336
		// Let writes take an exception rather than page in any memory...
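		// (FSR bit 11 is the WnR flag: set when the aborted access was a write.)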
sl@0
  3337
		if(exc.iFaultStatus&(1<<11))
sl@0
  3338
			return KErrUnknown;
sl@0
  3339
		}
sl@0
  3340
	else if (exc.iExcCode != EArmExceptionPrefetchAbort)
sl@0
  3341
		return KErrUnknown; // Not prefetch or data abort
sl@0
  3342
	
sl@0
  3343
	// Only handle page translation faults
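	// (FSR status bits FS[4]:FS[3:0] -- bit 10 and bits 3:0 -- equal to 0b00111 encode a
	// translation fault on a page.)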
sl@0
  3344
	if((exc.iFaultStatus & 0x40f) != 0x7)
sl@0
  3345
		return KErrUnknown;
sl@0
  3346
sl@0
  3347
	DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
sl@0
  3348
sl@0
  3349
	// check which region the fault occurred in...
sl@0
  3350
	TInt asid = 0; // asid != 0 => code paging fault
sl@0
  3351
	if(TUint(faultAddress-iRomPagedLinearBase)<iRomPagedSize)
sl@0
  3352
		{
sl@0
  3353
		// in ROM
sl@0
  3354
		}
sl@0
  3355
	else if(TUint(faultAddress-iCodeLinearBase)<iCodeSize)
sl@0
  3356
		{
sl@0
  3357
		// in code
sl@0
  3358
		asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;
sl@0
  3359
		}
sl@0
  3360
	else if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
sl@0
  3361
		{
sl@0
  3362
		// in aliased memory
sl@0
  3363
		faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
sl@0
  3364
		if(TUint(faultAddress-iCodeLinearBase)>=iCodeSize)
sl@0
  3365
			return KErrUnknown; // not in alias of code
sl@0
  3366
		asid = thread->iAliasOsAsid;
sl@0
  3367
		__NK_ASSERT_DEBUG(asid != 0);
sl@0
  3368
		}
sl@0
  3369
	else
sl@0
  3370
		return KErrUnknown; // Not in pageable region
sl@0
  3371
sl@0
  3372
	// Check if thread holds fast mutex and claim system lock
sl@0
  3373
	NFastMutex* fm = NKern::HeldFastMutex();
sl@0
  3374
	TPagingExcTrap* trap = thread->iPagingExcTrap;
sl@0
  3375
	if(!fm)
sl@0
  3376
		NKern::LockSystem();
sl@0
  3377
	else
sl@0
  3378
		{
sl@0
  3379
		if(!trap || fm!=&TheScheduler.iLock)
sl@0
  3380
			{
sl@0
  3381
			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
sl@0
  3382
			Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes
sl@0
  3383
			}
sl@0
  3384
		// restore address space on multiple memory model (because the trap will
sl@0
  3385
		// bypass any code which would have done this.)...
sl@0
  3386
		DMemModelThread::RestoreAddressSpace();
sl@0
  3387
sl@0
  3388
		// Current thread already has the system lock...
sl@0
  3389
		NKern::FlashSystem(); // Let someone else have a go with the system lock.
sl@0
  3390
		}
sl@0
  3391
sl@0
  3392
	// System locked here
sl@0
  3393
sl@0
  3394
	TInt r = KErrNone;	
sl@0
  3395
	if(thread->IsRealtime())
sl@0
  3396
		r = CheckRealtimeThreadFault(thread, aExceptionInfo);
sl@0
  3397
	if (r == KErrNone)
sl@0
  3398
		r = HandleFault(exc, faultAddress, asid);
sl@0
  3399
	
sl@0
  3400
	// Restore system lock state
sl@0
  3401
	if (fm != NKern::HeldFastMutex())
sl@0
  3402
		{
sl@0
  3403
		if (fm)
sl@0
  3404
			NKern::LockSystem();
sl@0
  3405
		else
sl@0
  3406
			NKern::UnlockSystem();
sl@0
  3407
		}
sl@0
  3408
	
sl@0
  3409
	// Deal with XTRAP_PAGING
sl@0
  3410
	if(r == KErrNone && trap)
sl@0
  3411
		{
sl@0
  3412
		trap->Exception(1); // Return from exception trap with result '1' (value>0)
sl@0
  3413
		// code doesn't continue beyond this point.
sl@0
  3414
		}
sl@0
  3415
sl@0
  3416
	return r;
sl@0
  3417
	}
sl@0
  3418
sl@0
  3419
sl@0
  3420
sl@0
  3421
TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid)
sl@0
  3422
	{
sl@0
  3423
	++iEventInfo.iPageFaultCount;
sl@0
  3424
sl@0
  3425
	// get page table entry...
sl@0
  3426
	TPte* pt = SafePtePtrFromLinAddr(aFaultAddress, aAsid);
sl@0
  3427
	if(!pt)
sl@0
  3428
		return KErrNotFound;
sl@0
  3429
	TPte pte = *pt;
sl@0
  3430
sl@0
  3431
	// Do what is required to make page accessible...
sl@0
  3432
sl@0
  3433
	if(pte&KPtePresentMask)
sl@0
  3434
		{
sl@0
  3435
		// PTE is present, so assume it has already been dealt with
sl@0
  3436
#ifdef BTRACE_PAGING
sl@0
  3437
		BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15);
sl@0
  3438
#endif
sl@0
  3439
		return KErrNone;
sl@0
  3440
		}
sl@0
  3441
sl@0
  3442
	if(pte!=KPteNotPresentEntry)
sl@0
  3443
		{
sl@0
  3444
		// PTE already has a page
sl@0
  3445
		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte);
sl@0
  3446
		if(pageInfo->State()==SPageInfo::EStatePagedDead)
sl@0
  3447
			{
sl@0
  3448
			// page currently being unmapped, so do that here...
sl@0
  3449
			MakePTEInaccessible(pt, KPteNotPresentEntry, aFaultAddress, aAsid);
sl@0
  3450
			}
sl@0
  3451
		else
sl@0
  3452
			{
sl@0
  3453
			// page just needs making young again...
sl@0
  3454
			*pt = TPte(pte|KArmV6PteSmallPage); // Update page table
sl@0
  3455
			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0
  3456
			Rejuvenate(pageInfo);
sl@0
  3457
#ifdef BTRACE_PAGING
sl@0
  3458
			BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15);
sl@0
  3459
#endif
sl@0
  3460
			return KErrNone;
sl@0
  3461
			}
sl@0
  3462
		}
sl@0
  3463
sl@0
  3464
	// PTE not present, so page it in...
sl@0
  3465
	// check if fault in a CodeSeg...
sl@0
  3466
	DMemModelCodeSegMemory* codeSegMemory = NULL;
sl@0
  3467
	if (!aAsid)
sl@0
  3468
		NKern::ThreadEnterCS();
sl@0
  3469
	else
sl@0
  3470
		{
sl@0
  3471
		// find CodeSeg...
sl@0
  3472
		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress);
sl@0
  3473
		if (!codeSeg)
sl@0
  3474
			return KErrNotFound;
sl@0
  3475
		codeSegMemory = codeSeg->Memory();
sl@0
  3476
		if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged || codeSegMemory->iOsAsids->NotFree(aAsid, 1))
sl@0
  3477
			return KErrNotFound;
sl@0
  3478
	
sl@0
  3479
		// check if it's paged in but not yet mapped into this process...			
sl@0
  3480
		TInt pageNumber = (aFaultAddress - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
sl@0
  3481
		TPhysAddr page = codeSegMemory->iPages[pageNumber];
sl@0
  3482
		if (page != KPhysAddrInvalid)
sl@0
  3483
			{
sl@0
  3484
			// map it into this process...
sl@0
  3485
			SPageInfo* pageInfo = SPageInfo::FromPhysAddr(page);
sl@0
  3486
			__NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
sl@0
  3487
			*pt = page | (codeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
sl@0
  3488
			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0
  3489
			Rejuvenate(pageInfo);
sl@0
  3490
#ifdef BTRACE_PAGING
sl@0
  3491
			BTraceContext8(BTrace::EPaging,BTrace::EPagingMapCode,page,aFaultAddress);
sl@0
  3492
#endif
sl@0
  3493
			return KErrNone;
sl@0
  3494
			}
sl@0
  3495
sl@0
  3496
		// open reference on CodeSegMemory
sl@0
  3497
		NKern::ThreadEnterCS();
sl@0
  3498
#ifdef _DEBUG
sl@0
  3499
		TInt r = 
sl@0
  3500
#endif
sl@0
  3501
				 codeSegMemory->Open();
sl@0
  3502
		__NK_ASSERT_DEBUG(r==KErrNone);
sl@0
  3503
		NKern::FlashSystem();
sl@0
  3504
		}		
sl@0
  3505
sl@0
  3506
#ifdef BTRACE_PAGING
sl@0
  3507
	BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15);
sl@0
  3508
#endif
sl@0
  3509
	TInt r = PageIn(aFaultAddress, aAsid, codeSegMemory);
sl@0
  3510
sl@0
  3511
	NKern::UnlockSystem();
sl@0
  3512
sl@0
  3513
	if(codeSegMemory)
sl@0
  3514
		codeSegMemory->Close();
sl@0
  3515
sl@0
  3516
	NKern::ThreadLeaveCS();
sl@0
  3517
	
sl@0
  3518
	return r;
sl@0
  3519
	}
sl@0
  3520
sl@0
  3521
sl@0
  3522
TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory)
sl@0
  3523
	{
sl@0
  3524
	// Get a request object - this may block until one is available
sl@0
  3525
	DPagingRequest* req = AcquireRequestObject();
sl@0
  3526
	
sl@0
  3527
	// Get page table entry
sl@0
  3528
	TPte* pt = SafePtePtrFromLinAddr(aAddress, aAsid);
sl@0
  3529
sl@0
  3530
	// Check page is still required...
sl@0
  3531
	if(!pt || *pt!=KPteNotPresentEntry)
sl@0
  3532
		{
sl@0
  3533
#ifdef BTRACE_PAGING
sl@0
  3534
		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
sl@0
  3535
#endif
sl@0
  3536
		ReleaseRequestObject(req);
sl@0
  3537
		return pt ? KErrNone : KErrNotFound;
sl@0
  3538
		}
sl@0
  3539
sl@0
  3540
	++iEventInfo.iPageInReadCount;
sl@0
  3541
sl@0
  3542
	// Get a free page
sl@0
  3543
	SPageInfo* pageInfo = AllocateNewPage();
sl@0
  3544
	__NK_ASSERT_DEBUG(pageInfo);
sl@0
  3545
sl@0
  3546
	// Get physical address of free page
sl@0
  3547
	TPhysAddr phys = pageInfo->PhysAddr();
sl@0
  3548
	__NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
sl@0
  3549
sl@0
  3550
	// Temporarily map free page
sl@0
  3551
	TInt colour = (aAddress>>KPageShift)&KPageColourMask;
sl@0
  3552
	__NK_ASSERT_DEBUG((req->iLoadAddr & (KPageColourMask << KPageShift)) == 0);
sl@0
  3553
	req->iLoadAddr |= colour << KPageShift;
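	// The load address is offset by the final mapping's page colour so the page is read in
	// and cleaned through a virtual address congruent with where it will eventually be mapped.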
sl@0
  3554
	TLinAddr loadAddr = req->iLoadAddr;
sl@0
  3555
	pt = req->iLoadPte+colour;
sl@0
  3556
//	*pt = phys | SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTWAWTWA, 0, 1);
sl@0
  3557
	*pt = phys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
sl@0
  3558
	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0
  3559
sl@0
  3560
	// Read page from backing store
sl@0
  3561
	aAddress &= ~KPageMask;	
sl@0
  3562
	NKern::UnlockSystem();
sl@0
  3563
sl@0
  3564
	TInt r;
sl@0
  3565
	if (!aCodeSegMemory)
sl@0
  3566
		r = ReadRomPage(req, aAddress);
sl@0
  3567
	else
sl@0
  3568
		{
sl@0
  3569
		r = ReadCodePage(req, aCodeSegMemory, aAddress);
sl@0
  3570
		if (r == KErrNone)
sl@0
  3571
			aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress);
sl@0
  3572
		}
sl@0
  3573
	if(r!=KErrNone)
sl@0
  3574
		Panic(EPageInFailed);
sl@0
  3575
	
sl@0
  3576
	// make caches consistent...
sl@0
  3577
//	Cache::IMB_Range(loadAddr, KPageSize);
sl@0
  3578
	*pt = phys | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
sl@0
  3579
	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0
  3580
	InvalidateTLBForPage(loadAddr,KERNEL_MAPPING);
sl@0
  3581
	CacheMaintenance::CodeChanged(loadAddr, KPageSize, CacheMaintenance::ECPUUncached);
sl@0
  3582
sl@0
  3583
	NKern::LockSystem();
sl@0
  3584
sl@0
  3585
	// Invalidate temporary mapping
sl@0
  3586
	MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, loadAddr);
sl@0
  3587
sl@0
  3588
	// Release request object now we're finished with it
sl@0
  3589
	req->iLoadAddr &= ~(KPageColourMask << KPageShift);
sl@0
  3590
	ReleaseRequestObject(req);
sl@0
  3591
	
sl@0
  3592
	// Get page table entry
sl@0
  3593
	pt = SafePtePtrFromLinAddr(aAddress, aAsid);
sl@0
  3594
sl@0
  3595
	// Check page still needs updating
sl@0
  3596
	TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry;
sl@0
  3597
	if(aCodeSegMemory)
sl@0
  3598
		notNeeded |= aCodeSegMemory->iOsAsids->NotFree(aAsid, 1);
sl@0
  3599
	if(notNeeded)
sl@0
  3600
		{
sl@0
  3601
		// We don't need the new page after all, so put it on the active list as a free page
sl@0
  3602
		__KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)"));
sl@0
  3603
#ifdef BTRACE_PAGING
sl@0
  3604
		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
sl@0
  3605
#endif
sl@0
  3606
		AddAsFreePage(pageInfo);
sl@0
  3607
		return pt ? KErrNone : KErrNotFound;
sl@0
  3608
		}
sl@0
  3609
sl@0
  3610
	// Update page info
sl@0
  3611
	if (!aCodeSegMemory)
sl@0
  3612
		pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift);
sl@0
  3613
	else
sl@0
  3614
		{
sl@0
  3615
		// Check if page has been paged in and mapped into another process while we were waiting
sl@0
  3616
		TInt pageNumber = (aAddress - aCodeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
sl@0
  3617
		TPhysAddr page = aCodeSegMemory->iPages[pageNumber];
sl@0
  3618
		if (page != KPhysAddrInvalid)
sl@0
  3619
			{
sl@0
  3620
			// don't need page we've just paged in...
sl@0
  3621
			AddAsFreePage(pageInfo);
sl@0
  3622
sl@0
  3623
			// map existing page into this process...
sl@0
  3624
			pageInfo = SPageInfo::FromPhysAddr(page);
sl@0
  3625
			__NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
sl@0
  3626
			*pt = page | (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
sl@0
  3627
			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0
  3628
#ifdef BTRACE_PAGING
sl@0
  3629
			BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
sl@0
  3630
#endif
sl@0
  3631
			Rejuvenate(pageInfo);
sl@0
  3632
			return KErrNone;
sl@0
  3633
			}
sl@0
  3634
		aCodeSegMemory->iPages[pageNumber] = phys;
sl@0
  3635
		
sl@0
  3636
		pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift);
sl@0
  3637
		}
sl@0
  3638
sl@0
  3639
	// Map page into final location	
sl@0
  3640
	*pt = phys | (aCodeSegMemory ? (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte) : KRomPtePerm);
sl@0
  3641
	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
sl@0
  3642
#ifdef BTRACE_PAGING
sl@0
  3643
	TInt subCat = aCodeSegMemory ? BTrace::EPagingPageInCode : BTrace::EPagingPageInROM;
sl@0
  3644
	BTraceContext8(BTrace::EPaging,subCat,phys,aAddress);
sl@0
  3645
#endif
sl@0
  3646
sl@0
  3647
	AddAsYoungest(pageInfo);
sl@0
  3648
	BalanceAges();
sl@0
  3649
sl@0
  3650
	return KErrNone;
sl@0
  3651
	}

inline TUint8 ReadByte(TLinAddr aAddress)
	{ return *(volatile TUint8*)aAddress; }


TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)
	{
	TInt r = KErrBadDescriptor;
	XTRAPD(exc,XT_DEFAULT,
		if (!aProcess)
			{
			XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage););
			r = KErrNone;
			}
		else
			{
			DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
		retry:
			TInt pagingFault;
			XTRAP_PAGING_START(pagingFault);
			CHECK_PAGING_SAFE;
			// make alias of page in this process
			TLinAddr alias_src;
			TInt alias_size;
			TInt aliasResult = t.Alias(aPage, (DMemModelProcess*)aProcess, 1, EMapAttrReadUser, alias_src, alias_size);
			if (aliasResult>=0)
				{
				// ensure page to be locked is mapped in, by reading from it...
				ReadByte(alias_src);
				r = KErrNone;
				}
			XTRAP_PAGING_END;
			t.RemoveAlias();
			if(pagingFault>0)
				goto retry;
			}
		); // end of XTRAPD
	if(exc)
		return KErrBadDescriptor;
	return r;
	}
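
// Illustrative sketch (not part of the original source): a caller that needs a
// whole user buffer to be resident before taking the system lock could walk it
// page by page with EnsurePagePresent(). 'pager', 'bufStart', 'bufSize' and
// 'process' are hypothetical names for this sketch.
//
//	TLinAddr addr = bufStart & ~KPageMask;
//	TLinAddr end = bufStart + bufSize;
//	TInt r = KErrNone;
//	for (; addr < end && r == KErrNone; addr += KPageSize)
//		r = pager->EnsurePagePresent(addr, process);	// faults the page in if necessary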

TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess)
	{
	TInt asid = 0;
	if (aProcess)
		asid = ((DMemModelProcess*)aProcess)->iOsAsid;
	return Mmu().LinearToPhysical(aPage, asid);
	}


TInt MemModelDemandPaging::PageState(TLinAddr aAddr)
	{
	DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
	TInt asid = 0;
	TPte* ptePtr = 0;
	TPte pte = 0;
	TInt r = 0;
	SPageInfo* pageInfo = NULL;

	NKern::LockSystem();

	DMemModelCodeSegMemory* codeSegMemory = 0;
	if(TUint(aAddr-iRomPagedLinearBase)<iRomPagedSize)
		r |= EPageStateInRom;
	else if (TUint(aAddr-iCodeLinearBase)<iCodeSize)
		{
		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aAddr);
		if(codeSeg)
			codeSegMemory = codeSeg->Memory();
		asid = process->iOsAsid;
		if (codeSegMemory && codeSegMemory->iOsAsids->NotAllocated(asid, 1))
			{
			r |= EPageStateInRamCode;
			if (codeSegMemory->iIsDemandPaged)
				r |= EPageStatePaged;
			}
		if(process->iCodeChunk)
			r |= EPageStateCodeChunkPresent;
		}

	ptePtr = SafePtePtrFromLinAddr(aAddr,asid);
	if (!ptePtr)
		goto done;
	r |= EPageStatePageTablePresent;
	pte = *ptePtr;
	if (pte == KPteNotPresentEntry)
		goto done;
	r |= EPageStatePtePresent;
	if (pte & KPtePresentMask)
		r |= EPageStatePteValid;

	pageInfo = SPageInfo::FromPhysAddr(pte);
	r |= pageInfo->Type();
	r |= pageInfo->State()<<8;

	if (codeSegMemory && codeSegMemory->iPages)
		{
		TPhysAddr phys = pte & ~KPageMask;
		TInt pageNumber = (aAddr - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
		if (codeSegMemory->iPages[pageNumber] == phys)
			r |= EPageStatePhysAddrPresent;
		}

done:
	NKern::UnlockSystem();
	return r;
	}
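
// Illustrative sketch (not part of the original source): PageState() packs
// several facts into one TInt, so a debug-time caller would test individual
// flags rather than compare the whole value. 'pager' and 'addr' are
// hypothetical names.
//
//	TInt state = pager->PageState(addr);
//	TBool pageable = state & EPageStatePaged;		// code segment is demand paged
//	TBool mapped   = state & EPageStatePteValid;	// PTE is currently valid
//	TBool inRom    = state & EPageStateInRom;		// address lies in the paged ROM range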

TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)
	{
	// Don't check mutex order for reads from the global area, except for the paged part of ROM
	TBool rangeInGlobalArea = aStartAddr >= KRomLinearBase;
	TBool rangeInPagedRom = iRomPagedLinearBase != 0 && aStartAddr < (iRomLinearBase + iRomSize) && (aStartAddr + aLength) > iRomPagedLinearBase;
	return !rangeInGlobalArea || rangeInPagedRom;
	}
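
// Illustrative sketch (not part of the original source): two hypothetical
// calls showing the intent of the test above, assuming ROM paging is enabled
// and the start of ROM is in the unpaged part. Reads from the unpaged global
// area cannot take a paging fault, so they skip the mutex-order check; reads
// overlapping the paged part of ROM can fault and therefore need it.
//
//	NeedsMutexOrderCheck(KRomLinearBase, 4);		// EFalse: unpaged global area
//	NeedsMutexOrderCheck(iRomPagedLinearBase, 4);	// ETrue: may demand-page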

EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
	{
	MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager;
	if(pager)
		{
		ArmMmu& m = pager->Mmu();
		TLinAddr end = aStart+aSize;

		if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) ||
			(aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase))
			return pager->ReserveLock(aThread,aStart,aSize,*this);
		}
	return EFalse;
	}
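
// Illustrative sketch (not part of the original source): the usual pattern for
// a driver that must access a client's pageable buffer without risking a
// paging fault at an awkward point. Alloc() and Unlock() are assumed from the
// wider DDemandPagingLock API; the buffer and thread variables are
// hypothetical.
//
//	DDemandPagingLock lock;
//	if (lock.Alloc(aBufferSize) == KErrNone)
//		{
//		lock.Lock(aClientThread, aBufferAddr, aBufferSize);	// pins any pageable pages in the range
//		// ... access the buffer ...
//		lock.Unlock();
//		}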

void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
//
// Mark the page at aOffset in aChunk read-only to prevent it being
// modified while defrag is in progress. Save the required information
// to allow the fault handler to deal with this.
// Call this with the system unlocked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset));

	TInt ptid = aChunk->iPageTables[aOffset>>KChunkShift];
	if(ptid == 0xffff)
		Panic(EDefragDisablePageFailed);

	NKern::LockSystem();
	TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift);
	TPte pte = *pPte;
	if ((pte & KArmV6PteSmallPage) != KArmV6PteSmallPage
			|| SP_PTE_PERM_GET(pte) != (TUint)KArmV6PermRWRW)
		Panic(EDefragDisablePageFailed);

	iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset;
	if (aChunk->iOwningProcess)
		iDisabledAddrAsid = ((DMemModelProcess*)(aChunk->iOwningProcess))->iOsAsid;
	else
		iDisabledAddrAsid = iDisabledAddr<KRomLinearBase ? UNKNOWN_MAPPING : KERNEL_MAPPING;
	iDisabledPte = pPte;
	iDisabledOldVal = pte;

	*pPte = SP_PTE_PERM_SET(pte, KArmV6PermRORO);
	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
	InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
	NKern::UnlockSystem();
	}
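
// Illustrative note (not part of the original source, simplified):
// DisablePageModification() and RamDefragFault() below work as a pair. Defrag
// marks the page read-only before moving its contents; a write that races
// with the move takes a permission fault, which RamDefragFault() resolves by
// writing back the saved PTE (iDisabledOldVal), invalidating the TLB entry and
// clearing the iDisabled* state so the faulting write can be retried. A
// hypothetical call site looks roughly like:
//
//	DisablePageModification(chunk, offset);	// page becomes read-only
//	// ... copy the page contents to the new physical frame ...
//	// a concurrent write -> permission fault -> RamDefragFault() restores the PTE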

TInt ArmMmu::RamDefragFault(TAny* aExceptionInfo)
	{
	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;

	// Get faulting address
	TLinAddr faultAddress;
	if(exc.iExcCode==EArmExceptionDataAbort)
		{
		faultAddress = exc.iFaultAddress;
		// Defrag can only cause writes to fault on the multiple memory model
		// (DFSR bit 11 is set for write accesses)
		if(!(exc.iFaultStatus&(1<<11)))
			return KErrUnknown;
		}
	else
		return KErrUnknown; // Not a data abort

	// Only handle page permission faults (fault status 0b01111)
	if((exc.iFaultStatus & 0x40f) != 0xf)
		return KErrUnknown;

	DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
	TInt asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;

	TBool aliased = EFalse;
	if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
		{
		// Fault is in aliased memory; translate back to the aliased process's address
		aliased = ETrue;
		faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
		asid = thread->iAliasOsAsid;
		__NK_ASSERT_DEBUG(asid != 0);
		}

	// Take system lock if not already held
	NFastMutex* fm = NKern::HeldFastMutex();
	if(!fm)
		NKern::LockSystem();
	else if(fm!=&TheScheduler.iLock)
		{
		__KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
		Panic(EDefragFaultWhilstFMHeld); // Not allowed to hold mutexes
		}

	TInt r = KErrUnknown;

	// Check if write access to the page has already been restored and retry if so
	TPte* pt = SafePtePtrFromLinAddr(faultAddress, asid);
	if(!pt)
		{
		r = KErrNotFound;
		goto leave;
		}
	if (SP_PTE_PERM_GET(*pt) == (TUint)KArmV6PermRWRW)
		{
		r = KErrNone;
		goto leave;
		}

	// Check if the fault occurred in the page we are moving
	if (	   iDisabledPte
			&& TUint(faultAddress - iDisabledAddr) < TUint(KPageSize)
			&& (iDisabledAddrAsid < 0 || asid == iDisabledAddrAsid) )
		{
		// Restore access to the page
		*iDisabledPte = iDisabledOldVal;
		CacheMaintenance::SinglePteUpdated((TLinAddr)iDisabledPte);
		InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
		if (aliased)
			InvalidateTLBForPage(exc.iFaultAddress, ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid);
		iDisabledAddr = 0;
		iDisabledAddrAsid = -1;
		iDisabledPte = NULL;
		iDisabledOldVal = 0;
		r = KErrNone;
		}

leave:
	// Restore system lock state
	if (!fm)
		NKern::UnlockSystem();

	return r;
	}