os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/arm/xmmu.cpp
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include "arm_mem.h"
#include "mm.h"
#include "mmu.h"
#include "mpager.h"

#include "cache_maintenance.inl"
#include "execs.h"


#ifdef BROADCAST_TLB_MAINTENANCE
class TTLBIPI : public TGenericIPI
	{
public:
	TTLBIPI();
	static void InvalidateIsr(TGenericIPI*);
	static void WaitAndInvalidateIsr(TGenericIPI*);
	void AddArg(TLinAddr aArg);
public:
	volatile TInt	iFlag;
	TLinAddr		iArg;
	};

TTLBIPI::TTLBIPI()
	:	iFlag(0), iArg(0)
	{
	}

void TTLBIPI::InvalidateIsr(TGenericIPI* aPtr)
	{
	TRACE2(("TLBInv"));
	TTLBIPI& a = *(TTLBIPI*)aPtr;
	TLinAddr arg = a.iArg;
	if (arg==0)
		LocalInvalidateTLB();
	else if (arg<256)
		LocalInvalidateTLBForAsid(arg);
	else
		LocalInvalidateTLBForPage(arg);
	}

void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aPtr)
	{
	TRACE2(("TLBWtInv"));
	TTLBIPI& a = *(TTLBIPI*)aPtr;
	while (!a.iFlag)
		{ __chill(); }
	InvalidateIsr(aPtr);
	}

void TTLBIPI::AddArg(TLinAddr aArg)
	{
	iArg = aArg;
	NKern::Lock();
	InvalidateIsr(this);
	QueueAllOther(&InvalidateIsr);
	NKern::Unlock();
	WaitCompletion();
	}

void BroadcastInvalidateTLB(TLinAddr aLinAddrAndAsid)
	{
	TTLBIPI ipi;
	ipi.AddArg(aLinAddrAndAsid);
	}
#endif	// BROADCAST_TLB_MAINTENANCE
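
// Note on the TTLBIPI argument encoding used above: 0 invalidates the entire TLB,
// a value below 256 is treated as an ASID (invalidate every entry for that address
// space), and anything else is taken to be a page address with the ASID in its low
// bits. So a hypothetical caller wanting to broadcast invalidation of a single page
// would pass something like (linAddr & ~KPageMask) | asid.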

//
// Functions for class Mmu
//

/**
Return the physical address of the memory mapped by a Page Table Entry (PTE).

@param aPte			The value contained in the PTE.
@param aPteIndex	The index of the PTE within its page table.
*/
TPhysAddr Mmu::PtePhysAddr(TPte aPte, TUint aPteIndex)
	{
	if(aPte&KArmV6PteSmallPage)
		return aPte & KPteSmallPageAddrMask;
	if(aPte&KArmV6PteLargePage)
		return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
	return KPhysAddrInvalid;
	}
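
// For reference: ARM large pages are 64KB and their PTE is replicated over 16
// consecutive page table entries, so masking (aPteIndex << KPageShift) with
// KLargePageMask selects which 4KB page within the large page this particular
// entry corresponds to.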


/**
Return the virtual address of the page table referenced by the given
Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a
page table then the null-pointer is returned.

If the page table was not one allocated by the kernel then the
results are unpredictable and may cause a system fault.

@pre #MmuLock held.
*/
TPte* Mmu::PageTableFromPde(TPde aPde)
	{
	if((aPde&KPdePresentMask)==KArmV6PdePageTable)
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(aPde);
		return (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(aPde&(KPageMask&~KPageTableMask)));
		}
	return 0;
	}


/**
Perform the action of #PageTableFromPde but without the possibility of
a system fault caused by the page table not being one allocated by the kernel.

@pre #MmuLock held.
*/
TPte* Mmu::SafePageTableFromPde(TPde aPde)
	{
	if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde&~KPageMask);
		if(pi)
			return (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(aPde&(KPageMask&~KPageTableMask)));
		}
	return 0;
	}


/**
Return the base physical address of the section referenced by the given
Page Directory Entry (PDE) \a aPde. If the PDE doesn't refer to a
section then KPhysAddrInvalid is returned.

@pre #MmuLock held.
*/
TPhysAddr Mmu::SectionBaseFromPde(TPde aPde)
	{
	if(PdeMapsSection(aPde))
		return aPde&KPdeSectionAddrMask;
	return KPhysAddrInvalid;
	}


/**
Return a pointer to the Page Table Entry (PTE) which maps the
virtual address \a aAddress in the address space \a aOsAsid.

If no page table exists or it was not one allocated by the kernel
then the results are unpredictable and may cause a system fault.

@pre #MmuLock held.
*/
TPte* Mmu::PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pt = (TPte*)(KPageTableBase+(pi->Index()<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}


/**
Perform the action of #PtePtrFromLinAddr but without the possibility
of a system fault. If the page table is not present or not one
allocated by the kernel then the null-pointer is returned.

@pre #MmuLock held.
*/
TPte* Mmu::SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid)
	{
	TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
	TPte* pt = SafePageTableFromPde(pde);
	if(pt)
		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
	return pt;
	}
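
// A minimal sketch (illustrative only, assuming MmuLock is held) of how the helpers
// above combine to translate a linear address, much as Mmu::UncheckedLinearToPhysical
// does below:
//
//     TPte* pt = SafePtePtrFromLinAddr(addr, osAsid);
//     TUint index = (addr >> KPageShift) & (KChunkMask >> KPageShift);
//     TPhysAddr phys = pt ? PtePhysAddr(*pt, index) : KPhysAddrInvalid;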


/**
Return the physical address for the page table whose virtual
address is \a aPt.

If the page table was not one allocated by the kernel then the
results are unpredictable and may cause a system fault.

@pre #MmuLock held.
*/
TPhysAddr Mmu::PageTablePhysAddr(TPte* aPt)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld() || PageTablesLockIsHeld());

	TInt pdeIndex = ((TLinAddr)aPt)>>KChunkShift;
	TPde pde = PageDirectory(KKernelOsAsid)[pdeIndex];
	__NK_ASSERT_DEBUG((pde&KPdePresentMask)==KArmV6PdePageTable);

	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
	TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
	TPte pte = pPte[(((TLinAddr)aPt)&KChunkMask)>>KPageShift];
	__NK_ASSERT_DEBUG(pte & KArmV6PteSmallPage);

	return (pte&KPteSmallPageAddrMask)|(((TLinAddr)aPt)&(KPageMask&~KPageTableMask));
	}


/**
Perform a page table walk to return the physical address of
the memory mapped at virtual address \a aLinAddr in the
address space \a aOsAsid.

If the page table used was not one allocated by the kernel
then the results are unpredictable and may cause a system fault.

Use of this function should be avoided; use Mmu::LinearToPhysical instead,
which contains debug assertions for its preconditions.

@pre #MmuLock held.
*/
TPhysAddr Mmu::UncheckedLinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
	{
	TRACE2(("Mmu::UncheckedLinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
	TInt pdeIndex = aLinAddr>>KChunkShift;
	TPde pde = PageDirectory(aOsAsid)[pdeIndex];
	if ((pde&KPdePresentMask)==KArmV6PdePageTable)
		{
		SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
		TPte* pPte = (TPte*)(KPageTableBase+(pi->Index(true)<<KPageShift)+(pde&(KPageMask&~KPageTableMask)));
		TPte pte = pPte[(aLinAddr&KChunkMask)>>KPageShift];
		if (pte & KArmV6PteSmallPage)
			{
			TPhysAddr pa=(pte&KPteSmallPageAddrMask)|(aLinAddr&~KPteSmallPageAddrMask);
			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
			return pa;
			}
		else if (pte & KArmV6PteLargePage)
			{
			TPhysAddr pa=(pte&KPteLargePageAddrMask)|(aLinAddr&~KPteLargePageAddrMask);
			__KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
			return pa;
			}
		}
	else if ((pde&KPdePresentMask)==KArmV6PdeSection)
		{
		TPhysAddr pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
		__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
		return pa;
		}
	return KPhysAddrInvalid;
	}


extern TUint32 TTCR();
extern TUint32 CPUID(TInt /*aRegNum*/);


void Mmu::Init1()
	{
	TRACEB(("Mmu::Init1"));

	// check local/global page directory split is correct...
	__NK_ASSERT_ALWAYS(TTCR()==1);
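	// (a TTCR value of 1 means TTBR0 translates the bottom 2GB of the address space,
	// which holds the per-process local mappings, while TTBR1 translates the top 2GB
	// of global mappings)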

	// check cache type is supported and consistent with compile time macros...
	TInt iColourCount = 0;
	TInt dColourCount = 0;
	TUint32 ctr = InternalCache::TypeRegister();
	TRACEB(("CacheTypeRegister = %08x",ctr));
#ifdef __CPU_ARMV6
	__NK_ASSERT_ALWAYS((ctr>>29)==0);	// check ARMv6 format
	if(ctr&0x800)
		iColourCount = 4;
	if(ctr&0x800000)
		dColourCount = 4;
#else
	__NK_ASSERT_ALWAYS((ctr>>29)==4);	// check ARMv7 format
	TUint l1ip = (ctr>>14)&3;			// L1 instruction cache indexing and tagging policy
	__NK_ASSERT_ALWAYS(l1ip>=2);		// check I cache is physically tagged

	TUint32 clidr = InternalCache::LevelIDRegister();
	TRACEB(("CacheLevelIDRegister = %08x",clidr));
	TUint l1type = clidr&7;
	if(l1type)
		{
		if(l1type==2 || l1type==3 || l1type==4)
			{
			// we have an L1 data cache...
			TUint32 csir = InternalCache::SizeIdRegister(0,0);
			TUint sets = ((csir>>13)&0x7fff)+1;
			TUint ways = ((csir>>3)&0x3ff)+1;
			TUint lineSizeShift = (csir&7)+4;
			// assume L1 data cache is VIPT and alias checks broken and so we need data cache colouring...
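			// (number of colours = bytes indexed per way / page size; e.g. a
			// hypothetical 32KB 4-way cache with 64-byte lines has 128 sets,
			// giving (128<<6)>>12 = 2 page colours)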
			dColourCount = (sets<<lineSizeShift)>>KPageShift;
			if(l1type==4) // unified cache, so set instruction cache colour as well...
				iColourCount = (sets<<lineSizeShift)>>KPageShift;
			TRACEB(("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
			}

		if(l1type==1 || l1type==3)
			{
			// we have a separate L1 instruction cache...
			TUint32 csir = InternalCache::SizeIdRegister(1,0);
			TUint sets = ((csir>>13)&0x7fff)+1;
			TUint ways = ((csir>>3)&0x3ff)+1;
			TUint lineSizeShift = (csir&7)+4;
			iColourCount = (sets<<lineSizeShift)>>KPageShift;
			TRACEB(("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
			}
		}
	if(l1ip==3)
		{
		// PIPT cache, so no colouring restrictions...
		TRACEB(("L1ICache is PIPT"));
		iColourCount = 0;
		}
	else
		{
		// VIPT cache...
		TRACEB(("L1ICache is VIPT"));
		}
#endif
	TRACEB(("page colouring counts I=%d, D=%d",iColourCount,dColourCount));
	__NK_ASSERT_ALWAYS(iColourCount<=KPageColourCount);
	__NK_ASSERT_ALWAYS(dColourCount<=KPageColourCount);
	#ifndef __CPU_I_CACHE_HAS_COLOUR
	__NK_ASSERT_ALWAYS(iColourCount==0);
	#endif
	#ifndef __CPU_D_CACHE_HAS_COLOUR
	__NK_ASSERT_ALWAYS(dColourCount==0);
	#endif
	#ifndef __CPU_CACHE_HAS_COLOUR
	__NK_ASSERT_ALWAYS(iColourCount==0);
	__NK_ASSERT_ALWAYS(dColourCount==0);
	#endif

	// check MMU attributes match our assumptions...
	if(((CPUID(-1)>>16)&0xf)==0xf) // if have new CPUID format....
		{
		TUint mmfr1 = CPUID(5);
		TRACEB(("mmfr1 = %08x",mmfr1));
		#ifdef __CPU_NEEDS_BTAC_FLUSH_AFTER_ASID_CHANGE
			__NK_ASSERT_ALWAYS(((mmfr1>>28)&0xf)==1); // Branch Predictor needs invalidating after ASID change
		#else
			__NK_ASSERT_ALWAYS(((mmfr1>>28)&0xf)>=2); // Branch Predictor doesn't need invalidating after ASID change
		#endif

		TUint mmfr2 = CPUID(6);
		TRACEB(("mmfr2 = %08x",mmfr2));
		__NK_ASSERT_ALWAYS(((mmfr2>>20)&0xf)>=2); // check Mem Barrier instructions are supported in CP15

		TUint mmfr3 = CPUID(7);
		TRACEB(("mmfr3 = %08x",mmfr3));
		(void)mmfr3;

		#if defined(__SMP__) && !defined(__CPU_ARM11MP__)
			__NK_ASSERT_ALWAYS(((mmfr3>>12)&0xf)>=2); // check Maintenance Broadcast is for all cache and TLB operations
		#endif
		#ifdef __CPU_SUPPORTS_PAGE_TABLE_WALK_TO_L1_CACHE
			__NK_ASSERT_ALWAYS(((mmfr3>>20)&0xf)>=1); // check Coherent Walk for page tables
		#endif
		}

	Arm::DefaultDomainAccess = KDefaultDomainAccess;

#ifdef __SMP__
	TInt i;
	for (i=0; i<KMaxCpus; ++i)
		{
		TSubScheduler& ss = TheSubSchedulers[i];
		TLinAddr a = KIPCAlias + (i<<KChunkShift);
		ss.i_AliasLinAddr = (TAny*)a;
		ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
		}
#endif

	Init1Common();
	}

void Mmu::Init2()
	{
	TRACEB(("Mmu::Init2"));

	Init2Common();
	}

DMemoryObject* ExceptionStacks;

void Mmu::Init2Final()
	{
	TRACEB(("Mmu::Init2Final"));

	Init2FinalCommon();

	// initialise memory object for exception stacks...
	TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
	TMemoryAttributes memAttr = EMemoryAttributeStandard;
	TUint size = 4*2*KPageSize; // 4 exception stacks each of one guard page and one mapped page
	size |= 1; // lower bit of size is set if region to be claimed contains gaps
	TInt r = MM::InitFixedKernelMemory(ExceptionStacks, KExcptStacksLinearBase, KExcptStacksLinearEnd, size, EMemoryObjectUnpaged, EMemoryCreateNoWipe, memAttr, mapFlags);
	__NK_ASSERT_ALWAYS(r==KErrNone);
	}


/**
Return the page directory entry (PDE) value to use when mapping page tables intended
to map memory with the given attributes.
The returned value has a zero physical address component, so a page table's physical
address can be simply ORed in.
*/
TPde Mmu::BlankPde(TMemoryAttributes aAttributes)
	{
	TPde pde = KArmV6PdePageTable;
	if(aAttributes&EMemoryAttributeUseECC)
		pde |= 1<<9;

	TRACE2(("Mmu::BlankPde(%x) returns 0x%x",aAttributes,pde));
	return pde;
	}


/**
Return the page directory entry (PDE) value to use when creating a section mapping for memory
with the given attributes and #TPteType.
The returned value has a zero physical address component, so the section's physical address
can be simply ORed in.
*/
TPde Mmu::BlankSectionPde(TMemoryAttributes aAttributes, TUint aPteType)
	{
	// reuse existing functions rather than duplicating the logic
	TPde pde = BlankPde(aAttributes);
	TPte pte = BlankPte(aAttributes, aPteType);
	return PageToSectionEntry(pte, pde);
	}


/**
Return the page table entry (PTE) to use when mapping memory pages
with the given attributes and #TPteType.
This value has a zero physical address component, so a page's physical
address can be simply ORed in.
*/
TPte Mmu::BlankPte(TMemoryAttributes aAttributes, TUint aPteType)
	{
	TUint attr = CanonicalMemoryAttributes(aAttributes);

	// common PTE setup...
	TPte pte = KArmV6PteSmallPage|KArmV6PteAP0;
	if(aPteType&EPteTypeUserAccess)
		pte |= KArmV6PteAP1;					// AP1 = user access
	if((aPteType&EPteTypeWritable)==false)
		pte |= KArmV6PteAP2;					// AP2 = !writable
	if(attr&EMemoryAttributeShareable)
		pte |= KArmV6PteS;
	if((aPteType&EPteTypeGlobal)==false)
		pte |= KArmV6PteNG;
	if((aPteType&EPteTypeExecutable)==false)
		pte |= KArmV6PteSmallXN;

	#if defined(__CPU_MEMORY_TYPE_REMAPPING)

		// other PTE bits...
		if(pte&KArmV6PteSmallXN)
			pte |= KArmV6PteSmallTEX1;	// TEX1 is a copy of the XN bit

		// process memory type...
		TUint type = attr&EMemoryAttributeTypeMask;
		pte |= ((type&3)<<2) | ((type&4)<<4);

	#else

		// other PTE bits...
		if((pte&(KArmV6PteAP2|KArmV6PteAP1))==(KArmV6PteAP2|KArmV6PteAP1))
			pte &= ~KArmV6PteAP0;		// clear AP0 if user r/o

		// process memory type...
		TUint texcb;
		switch((TMemoryType)(attr&EMemoryAttributeTypeMask))
			{
		case EMemAttStronglyOrdered:
			texcb = KArmV6MemAttSO;
			break;
		case EMemAttDevice:
			if(attr&EMemoryAttributeShareable)
				texcb = KArmV6MemAttSD;
			else
				texcb = KArmV6MemAttSD;	// should be KArmV6MemAttNSD? (but this made H4 go bang)
			break;
		case EMemAttNormalUncached:
			texcb = KArmV6MemAttNCNC;
			break;
		case EMemAttNormalCached:
			texcb = KArmV6MemAttWBWAWBWA;
			break;
		default:
			__NK_ASSERT_ALWAYS(0);		// undefined memory type
			texcb = KArmV6MemAttSO;
			break;
			}
		pte |= ((texcb&0x1c)<<4) | ((texcb&0x03)<<2);

	#endif

	TRACE2(("Mmu::BlankPte(%x,%x) returns 0x%x",aAttributes,aPteType,pte));
	return pte;
	}
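
// Usage sketch (hypothetical values): because the blank entries above have a zero
// physical address component, callers typically just OR in the physical address,
// e.g.
//     TPde pde = pageTablePhys | Mmu::BlankPde(attr);
//     TPte pte = pagePhys | Mmu::BlankPte(attr, pteType);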


/**
Calculate PDE and PTE which represent a page table mapping for an existing
section mapping.

@param[in] aPde The PDE for the existing section mapping.
@param[out] aPde A PDE for a page table mapping, with physical address == 0.

@return The PTE value for the first entry in the page table.
*/
TPte Mmu::SectionToPageEntry(TPde& aPde)
	{
	TPde pde = aPde;

	// calculate new PTE...
	TPte pte = pde&0xc; // copy CB bits
	if(pde&KArmV6PdeSectionXN)
		pte |= KArmV6PteSmallXN; // copy XN bit
	pte |= (pde&(0xff<<10))>>6; // copy NG, S, APX, TEX, AP bits
	pte |= KArmV6PteSmallPage;

	// calculate new PDE...
	pde &= 0x3e0;	// keep IMP and DOMAIN
	pde |= KArmV6PdePageTable;

	aPde = pde;
	return pte;
	}


/**
Calculate a PDE entry which represents a section mapping for an existing
page table mapping.

@pre The existing page table contains mappings for a chunk sized and
	 aligned contiguous region.

@param aPte A PTE from the existing page table.
@param aPde The existing PDE for the page table mappings.
			(Physical address portion is ignored.)

@return A PDE entry value for a section mapping.
*/
TPde Mmu::PageToSectionEntry(TPte aPte, TPde aPde)
	{
	TPde pde = aPde&0x3e0;	// keep IMP and DOMAIN
	pde |= aPte&(KPdeSectionAddrMask|0xc); // copy address and CB bits
	if(aPte&KArmV6PteSmallXN)
		pde |= KArmV6PdeSectionXN; // copy XN bit
	pde |= (aPte&(0xff<<4))<<6;  // copy NG, S, APX, TEX, AP bits
	pde |= KArmV6PdeSection;
	return pde;
	}
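
// The shifts of 6 in the two conversion helpers above reflect the ARMv6 descriptor
// layouts: the NG, S, APX, TEX and AP fields occupy bits 10..17 of a section PDE but
// bits 4..11 of a small-page PTE, while the C and B bits sit at bits 2..3 in both.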


/**
Transform the specified memory attributes into the canonical form relevant to
the platform the code is running on. This applies defaults and overrides to
the attributes to return what should be used with the MMU.
*/
TMemoryAttributes Mmu::CanonicalMemoryAttributes(TMemoryAttributes aAttr)
	{
	TUint attr = aAttr;
	if(attr&EMemoryAttributeDefaultShareable)
		{
		// sharing not specified, use default...
#if defined	(__CPU_USE_SHARED_MEMORY)
		attr |= EMemoryAttributeShareable;
#else
		attr &= ~EMemoryAttributeShareable;
#endif
		}

#if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
	if((attr&(EMemoryAttributeShareable|EMemoryAttributeTypeMask))==EMemoryAttributeDevice)
		{
		// make unshared device memory into shared strongly ordered memory...
		attr ^= EMemoryAttributeShareable;
		attr ^= EMemoryAttributeDevice^EMemoryAttributeStronglyOrdered;
		}
#endif

#if	defined(__SMP__) || defined(__CPU_FORCE_SHARED_MEMORY_IF_CACHED)
	TMemoryType type = (TMemoryType)(attr&KMemoryTypeMask);
	if(CacheMaintenance::IsCached(type))
		{
		// force cached memory to be shared memory on SMP systems...
		attr |= EMemoryAttributeShareable;
		}
#endif

	return (TMemoryAttributes)(attr&EMemoryAttributeMask);
	}

/**
Method called to initialise RAM pages when they are allocated for a new use.
This performs any cache synchronisation required to remove old entries
and also wipes the contents of the memory (if requested via \a aFlags).

@param aPageList	Pointer to a list of physical addresses for the RAM pages,
					or, if the least significant bit of this value is set, then
					the rest of the value is the physical address of a contiguous
					region of RAM pages being allocated.

@param aCount		The number of pages.

@param aFlags		A set of flag values from #TRamAllocFlags which indicate
					the memory type the pages will be used for and whether
					the contents should be wiped.

@param aReallocate	True if the RAM pages have previously been allocated and are
					being reinitialised, e.g. by DMemoryManager::ReAllocDecommitted.
					False to indicate that these pages have been newly allocated
					(are in the SPageInfo::EUnused state).

@pre #RamAllocLock held.
*/
void Mmu::PagesAllocated(TPhysAddr* aPageList, TUint aCount, TRamAllocFlags aFlags, TBool aReallocate)
	{
	TRACE2(("Mmu::PagesAllocated(0x%08x,%d,0x%x,%d)",aPageList, aCount, aFlags, (bool)aReallocate));
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());

	TBool wipe = !(aFlags&EAllocNoWipe); // do we need to wipe page contents?
	TUint8 wipeByte = (aFlags&EAllocUseCustomWipeByte) ? (aFlags>>EAllocWipeByteShift)&0xff : 0x03; // value to wipe memory with

	// process each page in turn...
	while(aCount--)
		{
		// get physical address of next page...
		TPhysAddr pagePhys;
		if((TPhysAddr)aPageList&1)
			{
			// aPageList is actually the physical address to use...
			pagePhys = (TPhysAddr)aPageList&~1;
			*(TPhysAddr*)&aPageList += KPageSize;
			}
		else
			pagePhys = *aPageList++;
		__NK_ASSERT_DEBUG((pagePhys&KPageMask)==0);

		// get info about page...
		SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
		TMemoryType oldType = (TMemoryType)(pi->Flags(true)&KMemoryTypeMask);
		TBool oldTypeNormal = CacheMaintenance::IsNormal(oldType);

		TRACE2(("Mmu::PagesAllocated page=0x%08x, oldType=%d, wipe=%d, colour=%d",pagePhys,oldType,wipe,pi->Index(true)&KPageColourMask));
		if(wipe || oldTypeNormal)
			{
			// work out temporary mapping values...
			TUint colour = pi->Index(true)&KPageColourMask;
			TLinAddr tempLinAddr = iTempMap[0].iLinAddr+colour*KPageSize;
			TPte* tempPte = iTempMap[0].iPtePtr+colour;

			if(oldTypeNormal)
				{
				// cache maintenance required. Prepare temporary mapping.
				*tempPte = pagePhys | iTempPteCacheMaintenance;
				CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
				InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

				// will hold additional arguments in CacheMaintenance::PageToReuse call
				TInt pageToReuseMask = 0;

				// check if old and new mappings are the same. (Wiping needs temporary
				// mapping which may not be the same as the old and new mapping.)
				TMemoryType newType = (TMemoryType)(aFlags&KMemoryTypeMask); // memory type that pages will be used for
				if (!wipe && (newType==oldType))
					pageToReuseMask |= CacheMaintenance::EOldAndNewMappingMatch;

				MmuLock::Lock();

				// decide whether to trigger maintenance of entire cache(s).
				if(CacheMaintenance::IsPageToReuseThresholdReached(iCacheInvalidatePageCount))
					{
					// enough pages to make it worth triggering maintenance of entire cache(s)
					pageToReuseMask |= CacheMaintenance::EThresholdReached;
					++iCacheInvalidateCounter;
					iCacheInvalidatePageCount = 0; // all pages will be partially synced
					}

				if(CacheMaintenance::IsCached(oldType) && !aReallocate)
					{
					if(pi->CacheInvalidateCounter()==(TUint32)iCacheInvalidateCounter)
						{
						// one less unused page in the L1 cache...
						__NK_ASSERT_DEBUG(iCacheInvalidatePageCount);
						--iCacheInvalidatePageCount;
						}
					else
						{
						// our page has already been partially maintained in cache
						// by a previous PageToReuse call.
						pageToReuseMask |= CacheMaintenance::EPageHasBeenPartiallySynced;
						}
					}

				MmuLock::Unlock();

				TBool pageRemovedFromCache = CacheMaintenance::PageToReuse(tempLinAddr, oldType, pagePhys, pageToReuseMask);
				if(pageRemovedFromCache && !aReallocate)
					pi->SetUncached();
				}

			if(wipe)
				{
				// We need an uncached normal temporary mapping to wipe. Change it if necessary,
				// or, in the case of !oldTypeNormal, it is not configured yet.
				if (!oldTypeNormal || (CacheMaintenance::TemporaryMapping()!=EMemAttNormalUncached))
					{
					*tempPte = pagePhys | iTempPteUncached;
					CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
					InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
					}
				// wipe contents of memory...
				memset((TAny*)tempLinAddr, wipeByte, KPageSize);
				CacheMaintenance::PageToReuse(tempLinAddr, EMemAttNormalUncached, pagePhys);
				}

			// invalidate temporary mapping...
			*tempPte = KPteUnallocatedEntry;
			CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
			InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);
			}

		// indicate page has been allocated...
		if(!aReallocate)
			pi->SetAllocated();

		// loop round for next page...
		} // end of while(aCount--)
	}


/**
Method called to update the state of a RAM page when it is freed.
This sets the page state to SPageInfo::EUnused.

@param aPageInfo	The page information structure for the RAM page.

@pre #MmuLock held.
*/
void Mmu::PageFreed(SPageInfo* aPageInfo)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	if(aPageInfo->Type()==SPageInfo::EUnused)
		return;

	aPageInfo->SetUnused();

	TMemoryType type = (TMemoryType)(aPageInfo->Flags()&KMemoryTypeMask);
	if(CacheMaintenance::IsCached(type))
		{
		// another unused page with L1 cache entries...
		aPageInfo->SetCacheInvalidateCounter(iCacheInvalidateCounter);
		++iCacheInvalidatePageCount;
		}

	TRACE2(("Mmu::PageFreed page=0x%08x type=%d colour=%d",aPageInfo->PhysAddr(),aPageInfo->Flags()&KMemoryTypeMask,aPageInfo->Index()&KPageColourMask));
	}
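
// (PageFreed and PagesAllocated cooperate through these counters: each freed page
// that may still have L1 cache entries is stamped with the current value of
// iCacheInvalidateCounter and bumps iCacheInvalidatePageCount; once PagesAllocated
// sees the reuse threshold reached it requests whole-cache maintenance and advances
// the counter, so pages stamped with an older value are treated as already
// partially synced.)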

/**
Remove the contents of RAM pages from any memory caches.

@param aPages		Pointer to a list of physical addresses for the RAM pages,
					or, if the least significant bit of this value is set, then
					the rest of the value is the physical address of a contiguous
					region of RAM pages.

@param aCount		The number of pages.

@param aAttributes	The memory attributes of the pages.

@param aColour 		The colour for the first page;
					consecutive pages will be coloured accordingly.
					Only #KPageColourShift least significant bits are used,
					therefore an index into a memory object's memory can be
					used for this value.
*/
void Mmu::CleanAndInvalidatePages(TPhysAddr* aPages, TUint aCount, TMemoryAttributes aAttributes, TUint aColour)
	{
	TMemoryType type = (TMemoryType)(aAttributes&EMemoryAttributeTypeMask);

	if(!CacheMaintenance::IsNormal(type))
		{
		TRACE2(("Mmu::CleanAndInvalidatePages - nothing to do"));
		return;
		}

	RamAllocLock::Lock();

	while(aCount--)
		{
		TPhysAddr pagePhys = *aPages++;
		TRACE2(("Mmu::CleanAndInvalidatePages 0x%08x",pagePhys));

		// work out temporary mapping values...
		aColour &= KPageColourMask;
		TLinAddr tempLinAddr = iTempMap[0].iLinAddr+aColour*KPageSize;
		TPte* tempPte = iTempMap[0].iPtePtr+aColour;
		++aColour;

		// temporarily map page...
		*tempPte = pagePhys | iTempPteCacheMaintenance;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		// preserve memory content and remove from cache...
		CacheMaintenance::PageToPreserveAndReuse(tempLinAddr, type, pagePhys);

		// invalidate temporary mapping...
		*tempPte = KPteUnallocatedEntry;
		CacheMaintenance::SinglePteUpdated((TLinAddr)tempPte);
		InvalidateTLBForPage(tempLinAddr|KKernelOsAsid);

		RamAllocLock::Flash();
		}
	RamAllocLock::Unlock();
	}


extern void UnlockIPCAlias();
extern void LockIPCAlias();


TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TLinAddr& aAliasAddr, TUint& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in specified process.
// Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
	{
	TRACE2(("Thread %O Alias %08x+%x Process %O",this,aAddr,aSize,aProcess));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false
	// If there is an existing alias it should be on the same process otherwise
	// the os asid reference may be leaked.
	__NK_ASSERT_DEBUG(!iAliasLinAddr || aProcess == iAliasProcess);

	if(TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
		return KErrBadDescriptor; // prevent access to alias region

	// Grab the mmu lock before opening a reference on os asid so that this thread
	// is in an implicit critical section and therefore can't leak the reference by
	// dying before iAliasLinAddr is set.
	MmuLock::Lock();

	TInt osAsid;
	if (!iAliasLinAddr)
		{// There isn't any existing alias.
		// Open a reference on aProcess's os asid so that it is not freed and/or reused
		// while we are aliasing an address belonging to it.
		osAsid = aProcess->TryOpenOsAsid();
		if (osAsid < 0)
			{// Couldn't open os asid so aProcess is no longer running.
			MmuLock::Unlock();
			return KErrBadDescriptor;
			}
		}
	else
		{
		// Just read the os asid of the process being aliased; we already have a reference on it.
		osAsid = aProcess->OsAsid();
		}

	// Now we have the os asid, check access to kernel memory.
	if(aAddr >= KUserMemoryLimit && osAsid != (TUint)KKernelOsAsid)
		{
		NKern::ThreadEnterCS();
		MmuLock::Unlock();
		if (!iAliasLinAddr)
			{// Close the new reference as RemoveAlias won't, because iAliasLinAddr is not set.
			aProcess->AsyncCloseOsAsid();	// Asynchronous close as this method should be quick.
			}
		NKern::ThreadLeaveCS();
		return KErrBadDescriptor; // prevent access to supervisor only memory
		}

	// Now we know all accesses to global memory are safe so check if aAddr is global.
	if(aAddr >= KGlobalMemoryBase)
		{
		// address is in global section, don't bother aliasing it...
		if (!iAliasLinAddr)
			{// Close the new reference as not required.
			NKern::ThreadEnterCS();
			MmuLock::Unlock();
			aProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
			NKern::ThreadLeaveCS();
			}
		else
			{// Remove the existing alias as it is not required.
			DoRemoveAlias(iAliasLinAddr);	// Releases mmulock.
			}
		aAliasAddr = aAddr;
		TInt maxSize = KChunkSize-(aAddr&KChunkMask);
		aAliasSize = aSize<maxSize ? aSize : maxSize;
		TRACE2(("DMemModelThread::Alias() abandoned as memory is globally mapped"));
		return KErrNone;
		}

	TPde* pd = Mmu::PageDirectory(osAsid);
	TInt pdeIndex = aAddr>>KChunkShift;
	TPde pde = pd[pdeIndex];
	pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain);	// change domain for PDE
	// Get the os asid; this is the current thread's process so no reference is needed.
	TUint32 local_asid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
#ifdef __SMP__
	TLinAddr aliasAddr;
#else
	TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
#endif
	if(pde==iAliasPde && iAliasLinAddr)
		{
		// pde already aliased, so just update linear address...
#ifdef __SMP__
		__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
		aliasAddr = iAliasLinAddr & ~KChunkMask;
		aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
		iAliasLinAddr = aliasAddr;
		}
	else
		{
		// alias PDE changed...
		if(!iAliasLinAddr)
			{
			UnlockIPCAlias();
			TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
#ifdef __SMP__
			__NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
			iCpuRestoreCookie = NKern::FreezeCpu();	// temporarily lock current thread to this processor
#endif
			}
		iAliasPde = pde;
		iAliasProcess = aProcess;
#ifdef __SMP__
		TSubScheduler& ss = SubScheduler();		// OK since we are locked to this CPU
		aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
		iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (local_asid << KPageDirectoryShift));
#endif
		iAliasLinAddr = aliasAddr;
		*iAliasPdePtr = pde;
		SinglePdeUpdated(iAliasPdePtr);
		}

	TRACE2(("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
	LocalInvalidateTLBForPage(aliasAddr | local_asid);
	TInt offset = aAddr&KPageMask;
	aAliasAddr = aliasAddr | offset;
	TInt maxSize = KPageSize - offset;
	aAliasSize = aSize<maxSize ? aSize : maxSize;
	iAliasTarget = aAddr & ~KPageMask;

	MmuLock::Unlock();

	return KErrNone;
	}


void DMemModelThread::RemoveAlias()
//
// Remove alias mapping (if present)
//
	{
	TRACE2(("Thread %O RemoveAlias", this));
	__NK_ASSERT_DEBUG(this==TheCurrentThread); // many bad things can happen if false

	TLinAddr addr = iAliasLinAddr;
	if(addr)
		{
		MmuLock::Lock();

		DoRemoveAlias(addr);	// Unlocks mmulock.
		}
	}


/**
Remove the alias mapping.

@pre MmuLock held.
@post MmuLock released.
*/
void DMemModelThread::DoRemoveAlias(TLinAddr aAddr)
	{
	LockIPCAlias();
	iAliasLinAddr = 0;
	iAliasPde = KPdeUnallocatedEntry;
	*iAliasPdePtr = KPdeUnallocatedEntry;
	SinglePdeUpdated(iAliasPdePtr);
	__NK_ASSERT_DEBUG((aAddr&KPageMask)==0);
	// Invalidate the tlb using the os asid; no need to open a reference as this
	// is the current thread's process os asid.
	LocalInvalidateTLBForPage(aAddr | ((DMemModelProcess*)iOwningProcess)->OsAsid());
	iAliasLink.Deque();
#ifdef __SMP__
	__NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
	NKern::EndFreezeCpu(iCpuRestoreCookie);
	iCpuRestoreCookie = -1;
#endif

	// Must close the os asid while in a critical section to prevent it being
	// leaked.  However, we can't hold the mmu lock so we have to enter an
	// explicit critical section. It is ok to release the mmu lock as the
	// iAliasLinAddr and iAliasProcess members are only ever updated by the
	// current thread.
	NKern::ThreadEnterCS();
	MmuLock::Unlock();
	iAliasProcess->AsyncCloseOsAsid(); // Asynchronous close as this method should be quick.
	NKern::ThreadLeaveCS();
	}
  1028
sl@0
  1029
sl@0
  1030
TInt M::DemandPagingFault(TAny* aExceptionInfo)
sl@0
  1031
	{
sl@0
  1032
	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
sl@0
  1033
sl@0
  1034
	// permissions required by faulting memory access...
sl@0
  1035
	TUint accessPermissions = EUser; // we only allow paging of user memory
sl@0
  1036
sl@0
  1037
	// get faulting address...
sl@0
  1038
	TLinAddr faultAddress = exc.iFaultAddress;
sl@0
  1039
	if(exc.iExcCode==EArmExceptionPrefetchAbort)
sl@0
  1040
		{
sl@0
  1041
		// fault trying to read code to execute...
sl@0
  1042
		accessPermissions |= EExecute;
sl@0
  1043
		}
sl@0
  1044
	else if(exc.iExcCode!=EArmExceptionDataAbort)
sl@0
  1045
		return KErrUnknown; // not prefetch or data abort
sl@0
  1046
sl@0
  1047
	// check fault type...
sl@0
  1048
	if((exc.iFaultStatus&0x405) != 5 && (exc.iFaultStatus&0x40f) != 4)
sl@0
  1049
		return KErrUnknown; // not translation, permission or instruction cache maintenance fault.
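	// (the masks above test the FSR status bits: translation faults (0b00101/0b00111)
	// and permission faults (0b01101/0b01111) all give (FSR & 0x405) == 5, while an
	// instruction cache maintenance fault (0b00100) gives (FSR & 0x40f) == 4; the
	// FSR bit 11 tested next is the WnR flag, set when the access was a write)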

	// check access type...
	if(exc.iFaultStatus&(1<<11))
		accessPermissions |= EReadWrite;

	// let TheMmu handle the fault...
	return TheMmu.HandlePageFault(exc.iR15, faultAddress, accessPermissions, aExceptionInfo);
	}