os/kernelhwsrv/kernel/eka/memmodel/epoc/moving/arm/xmmu.cpp
changeset 0 bde4ae8d615e
     1.1 --- /dev/null	Thu Jan 01 00:00:00 1970 +0000
     1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/moving/arm/xmmu.cpp	Fri Jun 15 03:10:57 2012 +0200
     1.3 @@ -0,0 +1,2798 @@
     1.4 +// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
     1.5 +// All rights reserved.
     1.6 +// This component and the accompanying materials are made available
     1.7 +// under the terms of the License "Eclipse Public License v1.0"
     1.8 +// which accompanies this distribution, and is available
     1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
    1.10 +//
    1.11 +// Initial Contributors:
    1.12 +// Nokia Corporation - initial contribution.
    1.13 +//
    1.14 +// Contributors:
    1.15 +//
    1.16 +// Description:
    1.17 +// e32\memmodel\epoc\moving\arm\xmmu.cpp
    1.18 +// 
    1.19 +//
    1.20 +
    1.21 +#include "arm_mem.h"
    1.22 +#include <mmubase.inl>
    1.23 +#include <ramcache.h>
    1.24 +#include <demand_paging.h>
    1.25 +#include "execs.h"
    1.26 +#include <defrag.h>
    1.27 +#include "cache_maintenance.h"
    1.28 +
    1.29 +
    1.30 +extern void FlushTLBs();
    1.31 +
    1.32 +#if defined(__CPU_SA1__)
    1.33 +const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWB, EDomainClient);
    1.34 +const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
    1.35 +const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KArmV45MemAttBuf);	// page tables not cached
    1.36 +const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KArmV45MemAttWB);	// ROM is cached, read-only for everyone
    1.37 +const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KArmV45MemAttWB);	// shadowed ROM is cached, supervisor writeable
    1.38 +
    1.39 +#elif defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
    1.40 +const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWB, EDomainClient);
    1.41 +const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
    1.42 +const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KArmV45MemAttWB);	// page tables cached (write-through)
    1.43 +const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KArmV45MemAttWB);	// ROM is cached, read-only for everyone
    1.44 +const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KArmV45MemAttWB);	// shadowed ROM is cached, supervisor writeable
    1.45 +
    1.46 +#elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
    1.47 +const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWT, EDomainClient);
    1.48 +const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
    1.49 +const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KArmV45MemAttWT);	// page tables cached write through
    1.50 +const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KArmV45MemAttWT);	// ROM is cached, read-only for everyone
    1.51 +const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KArmV45MemAttWT);	// shadowed ROM is cached, supervisor writeable
    1.52 +
    1.53 +#elif defined(__CPU_XSCALE__)
    1.54 +	#ifdef __CPU_XSCALE_MANZANO__
    1.55 +const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KXScaleMemAttWTRA_WBWA, EDomainClient);
    1.56 +const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
    1.57 +const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KXScaleMemAttWTRA_WBWA);	// page tables write-through cached
    1.58 +const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KXScaleMemAttWTRA_WBWA);	// ROM is cached, read-only for everyone
    1.59 +const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KXScaleMemAttWTRA_WBWA);	// shadowed ROM is cached, supervisor writeable
    1.60 +	#else
    1.61 +const TPde KRomSectionPermissions	=	SECTION_PDE(KArmV45PermRORO, KXScaleMemAttWTRA, EDomainClient);
    1.62 +const TPde KShadowPdePerm			=	PT_PDE(EDomainClient);
    1.63 +const TPte KPtPtePerm				=	SP_PTE(KArmV45PermRWNO, KXScaleMemAttWTRA);	// page tables write-through cached
    1.64 +const TPte KRomPtePermissions		=	SP_PTE(KArmV45PermRORO, KXScaleMemAttWTRA);	// ROM is cached, read-only for everyone
    1.65 +const TPte KShadowPtePerm			=	SP_PTE(KArmV45PermRWRO, KXScaleMemAttWTRA);	// shadowed ROM is cached, supervisor writeable
    1.66 +	#endif
    1.67 +#endif
    1.68 +
    1.69 +const TPte KPtInfoPtePerm = KPtPtePerm;
    1.70 +const TPde KPtPdePerm = PT_PDE(EDomainClient);
    1.71 +
    1.72 +// Permissions for each chunk type
    1.73 +enum TPTEProperties
    1.74 +	{
    1.75 +	ESupRo	=	SP_PTE(KArmV45PermRORO, KDefaultCaching),
    1.76 +	ESupRw	=	SP_PTE(KArmV45PermRWNO, KDefaultCaching),
    1.77 +	EUserRo	=	SP_PTE(KArmV45PermRWRO, KDefaultCaching),
    1.78 +	EUserRw	=	SP_PTE(KArmV45PermRWRW, KDefaultCaching)
    1.79 +	};
    1.80 +
    1.81 +LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
    1.82 +	{
    1.83 +	PT_PDE(EDomainClient),		// EKernelData
    1.84 +	PT_PDE(EDomainClient),		// EKernelStack
    1.85 +	PT_PDE(EDomainClient),		// EKernelCode
    1.86 +	PT_PDE(EDomainClient),		// EDll
    1.87 +	PT_PDE(EDomainClient),		// EUserCode - user/ro & sup/rw everywhere
    1.88 +	PT_PDE(EDomainClient),		// ERamDrive - sup/rw accessed by domain change
    1.89 +
     1.90 +	// User data and self-modifying code are sup/rw with no user access when at home; user/rw & sup/rw when running.
     1.91 +	// Note: the ARM MMU architecture prevents implementation of user read-only data.
    1.92 +	PT_PDE(EDomainClient),		// EUserData
    1.93 +	PT_PDE(EDomainClient),		// EDllData
    1.94 +	PT_PDE(EDomainClient),		// EUserSelfModCode
    1.95 +	PT_PDE(EDomainClient),		// ESharedKernelSingle
    1.96 +	PT_PDE(EDomainClient),		// ESharedKernelMultiple
    1.97 +	PT_PDE(EDomainClient),		// ESharedIo
    1.98 +	PT_PDE(EDomainClient),		// ESharedKernelMirror (unused in this memory model)
    1.99 +	PT_PDE(EDomainClient),		// EKernelMessage
   1.100 +	};
   1.101 +
   1.102 +const TPde KUserDataRunningPermissions = PT_PDE(EDomainVarUserRun);
   1.103 +
   1.104 +LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
   1.105 +	{
   1.106 +	ESupRw,					// EKernelData
   1.107 +	ESupRw,					// EKernelStack
   1.108 +	ESupRw,					// EKernelCode
   1.109 +	EUserRo,				// EDll
   1.110 +	EUserRo,				// EUserCode
   1.111 +	ESupRw,					// ERamDrive
   1.112 +	ESupRw,					// EUserData
   1.113 +	ESupRw,					// EDllData
   1.114 +	ESupRw,					// EUserSelfModCode
   1.115 +	ESupRw,					// ESharedKernelSingle
   1.116 +	ESupRw,					// ESharedKernelMultiple
   1.117 +	ESupRw,					// ESharedIo
   1.118 +	ESupRw,					// ESharedKernelMirror (unused in this memory model)
   1.119 +	ESupRw,					// EKernelMessage
   1.120 +	};
   1.121 +
   1.122 +const TPte KUserCodeLoadPte = (TPte)EUserRo;
   1.123 +const TPte KKernelCodeRunPte = (TPte)ESupRw;
   1.124 +
   1.125 +// Inline functions for simple transformations
   1.126 +inline TLinAddr PageTableLinAddr(TInt aId)
   1.127 +	{
   1.128 +	return KPageTableBase + (aId<<KPageTableShift);
   1.129 +	}
   1.130 +
   1.131 +inline TPte* PageTable(TInt aId)
   1.132 +	{
   1.133 +	return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
   1.134 +	}
   1.135 +
   1.136 +inline TPde* PageDirectoryEntry(TLinAddr aLinAddr)
   1.137 +	{
   1.138 +	return PageDirectory + (aLinAddr>>KChunkShift);
   1.139 +	}
   1.140 +
   1.141 +inline TBool IsPageTable(TPde aPde)
   1.142 +	{
   1.143 +	return ((aPde&KPdeTypeMask)==KArmV45PdePageTable);
   1.144 +	}
   1.145 +
   1.146 +inline TBool IsSectionDescriptor(TPde aPde)
   1.147 +	{
   1.148 +	return ((aPde&KPdeTypeMask)==KArmV45PdeSection);
   1.149 +	}
   1.150 +
   1.151 +inline TBool IsPresent(TPte aPte)
   1.152 +	{
   1.153 +	return (aPte&KPtePresentMask);
   1.154 +	}
   1.155 +
   1.156 +inline TPhysAddr PageTablePhysAddr(TPde aPde)
   1.157 +	{
   1.158 +	return aPde & KPdePageTableAddrMask;
   1.159 +	}
   1.160 +
   1.161 +inline TPhysAddr PhysAddrFromSectionDescriptor(TPde aPde)
   1.162 +	{
   1.163 +	return aPde & KPdeSectionAddrMask;
   1.164 +	}
   1.165 +
   1.166 +extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/);
   1.167 +
   1.168 +void Mmu::SetupInitialPageInfo(SPageInfo* aPageInfo, TLinAddr aChunkAddr, TInt aPdeIndex)
   1.169 +	{
   1.170 +	__ASSERT_ALWAYS(aChunkAddr==0 || aChunkAddr>=KRamDriveEndAddress, Panic(EBadInitialPageAddr));
   1.171 +	TLinAddr addr = aChunkAddr + (aPdeIndex<<KPageShift);
   1.172 +	if (aPageInfo->Type()!=SPageInfo::EUnused)
   1.173 +		return;	// already set (page table)
   1.174 +	if (addr == KPageTableInfoBase)
   1.175 +		{
   1.176 +		aPageInfo->SetPtInfo(0);
   1.177 +		aPageInfo->Lock();
   1.178 +		}
   1.179 +	else if (addr>=KPageDirectoryBase && addr<(KPageDirectoryBase+KPageDirectorySize))
   1.180 +		{
   1.181 +		aPageInfo->SetPageDir(0,aPdeIndex);
   1.182 +		aPageInfo->Lock();
   1.183 +		}
   1.184 +	else
   1.185 +		aPageInfo->SetFixed();
   1.186 +	}
   1.187 +
   1.188 +void Mmu::SetupInitialPageTableInfo(TInt aId, TLinAddr aChunkAddr, TInt aNumPtes)
   1.189 +	{
   1.190 +	__ASSERT_ALWAYS(aChunkAddr==0 || aChunkAddr>=KRamDriveEndAddress, Panic(EBadInitialPageAddr));
   1.191 +	SPageTableInfo& pti=PtInfo(aId);
   1.192 +	pti.iCount=aNumPtes;
   1.193 +	pti.SetGlobal(aChunkAddr>>KChunkShift);
   1.194 +	}
   1.195 +
   1.196 +TInt Mmu::GetPageTableId(TLinAddr aAddr)
   1.197 +	{
   1.198 +	TInt id=-1;
   1.199 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::PageTableId(%08x)",aAddr));
   1.200 +	TInt pdeIndex=aAddr>>KChunkShift;
   1.201 +	TPde pde = PageDirectory[pdeIndex];
   1.202 +	if (IsPageTable(pde))
   1.203 +		{
   1.204 +		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
   1.205 +		if (pi)
   1.206 +			id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
   1.207 +		}
   1.208 +	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
   1.209 +	return id;
   1.210 +	}
   1.211 +
   1.212 +// Used only during boot for recovery of RAM drive
   1.213 +TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
   1.214 +	{
   1.215 +	TInt id=KErrNotFound;
   1.216 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr));
   1.217 +	TInt pdeIndex=aAddr>>KChunkShift;
   1.218 +	TPde pde = PageDirectory[pdeIndex];
   1.219 +	if (IsPageTable(pde))
   1.220 +		{
   1.221 +		aPtPhys = pde & KPdePageTableAddrMask;
   1.222 +		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
   1.223 +		if (pi)
   1.224 +			{
   1.225 +			SPageInfo::TType type = pi->Type();
   1.226 +			if (type == SPageInfo::EPageTable)
   1.227 +				id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
   1.228 +			else if (type == SPageInfo::EUnused)
   1.229 +				id = KErrUnknown;
   1.230 +			}
   1.231 +		}
   1.232 +	__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
   1.233 +	return id;
   1.234 +	}
   1.235 +
   1.236 +TBool ArmMmu::PteIsPresent(TPte aPte)
   1.237 +	{
   1.238 +	return aPte & KPtePresentMask;
   1.239 +	}
   1.240 +
   1.241 +TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex)
   1.242 +	{
   1.243 +	TUint pte_type = aPte & KPteTypeMask;
   1.244 +	if (pte_type == KArmV45PteLargePage)
   1.245 +		return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
   1.246 +	else if (pte_type != 0)
   1.247 +		return aPte & KPteSmallPageAddrMask;
   1.248 +	return KPhysAddrInvalid;
   1.249 +	}
   1.250 +
   1.251 +TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr)
   1.252 +	{
   1.253 +	TPde pde = PageDirectory[aAddr>>KChunkShift];
   1.254 +	if (IsSectionDescriptor(pde))
   1.255 +		return PhysAddrFromSectionDescriptor(pde);
   1.256 +	return KPhysAddrInvalid;
   1.257 +	}
   1.258 +
   1.259 +TPte* SafePageTableFromPde(TPde aPde)
   1.260 +	{
   1.261 +	if((aPde&KPdeTypeMask)==KArmV45PdePageTable)
   1.262 +		{
   1.263 +		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
   1.264 +		if(pi)
   1.265 +			{
   1.266 +			TInt id = (pi->Offset()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
   1.267 +			return PageTable(id);
   1.268 +			}
   1.269 +		}
   1.270 +	return 0;
   1.271 +	}
   1.272 +
   1.273 +TPte* SafePtePtrFromLinAddr(TLinAddr aAddress)
   1.274 +	{
   1.275 +	TPde pde = PageDirectory[aAddress>>KChunkShift];
   1.276 +	TPte* pt = SafePageTableFromPde(pde);
   1.277 +	if(pt)
   1.278 +		pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
   1.279 +	return pt;
   1.280 +	}
   1.281 +
   1.282 +#ifdef __ARMCC__
   1.283 +	__forceinline /* RVCT ignores normal inline qualifier :-( */
   1.284 +#else
   1.285 +	inline
   1.286 +#endif
   1.287 +TPte* PtePtrFromLinAddr(TLinAddr aAddress)
   1.288 +	{
   1.289 +	TPde pde = PageDirectory[aAddress>>KChunkShift];
   1.290 +	SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
   1.291 +	TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
   1.292 +	TPte* pt = PageTable(id);
   1.293 +	pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
   1.294 +	return pt;
   1.295 +	}
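// A minimal sketch of the address arithmetic the helpers above rely on, assuming the
// usual ARMv4/v5 constants in this model (KChunkShift=20, KPageShift=12, and 1K coarse
// page tables, so four page tables share one 4K page and KPtClusterShift=2):
//
//	TLinAddr addr = 0x64123456;
//	TInt pdeIndex = addr >> KChunkShift;                                // 0x641 - which 1MB chunk
//	TInt pteIndex = (addr >> KPageShift) & (KChunkMask >> KPageShift);  // 0x23  - which 4K page in that chunk
//	// The page table id is recovered from the PDE: the SPageInfo of the 4K page holding
//	// the table gives the cluster (pi->Offset()), and the PDE selects which of the four
//	// 1K tables inside that page is meant ((pde>>KPageTableShift)&KPtClusterMask).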
   1.296 +
   1.297 +
   1.298 +TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
   1.299 +	{
   1.300 +	TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr);
   1.301 +	TPhysAddr nextPhys = physStart&~KPageMask;
   1.302 +
   1.303 +	TUint32* pageList = aPhysicalPageList;
   1.304 +
   1.305 +	TInt pageIndex = aLinAddr>>KPageShift;
   1.306 +	TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
   1.307 +	TPde* pdePtr = &PageDirectory[aLinAddr>>KChunkShift];
   1.308 +
   1.309 +	while(pagesLeft)
   1.310 +		{
   1.311 +		pageIndex &= KChunkMask>>KPageShift;
   1.312 +		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
   1.313 +		if(pagesLeftInChunk>pagesLeft)
   1.314 +			pagesLeftInChunk = pagesLeft;
   1.315 +		pagesLeft -= pagesLeftInChunk;
   1.316 +
   1.317 +		TPhysAddr phys;
   1.318 +		TPde pde = *pdePtr++;
   1.319 +		TUint pdeType = pde&KPdeTypeMask;
   1.320 +		if(pdeType==KArmV45PdeSection)
   1.321 +			{
   1.322 +			phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
   1.323 +			__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
   1.324 +			TInt n=pagesLeftInChunk;
   1.325 +			phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
   1.326 +			if(pageList)
   1.327 +				{
   1.328 +				TUint32* pageEnd = pageList+n;
   1.329 +				do
   1.330 +					{
   1.331 +					*pageList++ = phys;
   1.332 +					phys+=KPageSize;
   1.333 +					}
   1.334 +				while(pageList<pageEnd);
   1.335 +				}
   1.336 +			}
   1.337 +		else
   1.338 +			{
   1.339 +			TPte* pt = SafePageTableFromPde(pde);
   1.340 +			if(!pt)
   1.341 +				{
   1.342 +				__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical missing page table: PDE=%8x",pde));
   1.343 +				return KErrNotFound;
   1.344 +				}
   1.345 +			pt += pageIndex;
   1.346 +			for(;;)
   1.347 +				{
   1.348 +				TPte pte = *pt++;
   1.349 +				TUint pte_type = pte & KPteTypeMask;
   1.350 +				if (pte_type >= KArmV45PteSmallPage)
   1.351 +					{
   1.352 +					phys = (pte & KPteSmallPageAddrMask);
   1.353 +					__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
   1.354 +					phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
   1.355 +					if(pageList)
   1.356 +						*pageList++ = phys;
   1.357 +					if(--pagesLeftInChunk)
   1.358 +						continue;
   1.359 +					break;
   1.360 +					}
   1.361 +				if (pte_type == KArmV45PteLargePage)
   1.362 +					{
   1.363 +					--pt; // back up ptr
   1.364 +					TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
   1.365 +					phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
   1.366 +					__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
   1.367 +					TInt n=KLargeSmallPageRatio-pageOffset;
   1.368 +					if(n>pagesLeftInChunk)
   1.369 +						n = pagesLeftInChunk;
   1.370 +					phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
   1.371 +					if(pageList)
   1.372 +						{
   1.373 +						TUint32* pageEnd = pageList+n;
   1.374 +						do
   1.375 +							{
   1.376 +							*pageList++ = phys;
   1.377 +							phys+=KPageSize;
   1.378 +							}
   1.379 +						while(pageList<pageEnd);
   1.380 +						}
   1.381 +					pt += n;
   1.382 +					if(pagesLeftInChunk-=n)
   1.383 +						continue;
   1.384 +					break;
   1.385 +					}
   1.386 +				__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical bad PTE %8x",pte));
   1.387 +				return KErrNotFound;
   1.388 +				}
   1.389 +			}
   1.390 +		if(!pageList && nextPhys==KPhysAddrInvalid)
   1.391 +			{
   1.392 +			__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical not contiguous"));
   1.393 +			return KErrNotFound;
   1.394 +			}
   1.395 +		pageIndex = 0;
   1.396 +		}
   1.397 +
   1.398 +	if(nextPhys==KPhysAddrInvalid)
   1.399 +		{
   1.400 +		// Memory is discontiguous...
   1.401 +		aPhysicalAddress = KPhysAddrInvalid;
   1.402 +		return 1;
   1.403 +		}
   1.404 +	else
   1.405 +		{
   1.406 +		// Memory is contiguous...
   1.407 +		aPhysicalAddress = physStart;
   1.408 +		return KErrNone;
   1.409 +		}
   1.410 +	}
   1.411 +
   1.412 +TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr)
   1.413 +	{
   1.414 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x)",aLinAddr));
   1.415 +	TPhysAddr phys = KPhysAddrInvalid;
   1.416 +	TPde pde = PageDirectory[aLinAddr>>KChunkShift];
   1.417 +	if (IsPageTable(pde))
   1.418 +		{
   1.419 +		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
   1.420 +		if (pi)
   1.421 +			{
   1.422 +			TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
   1.423 +			TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
   1.424 +			TPte pte = PageTable(id)[pteIndex];
   1.425 +			TUint pte_type = pte & KPteTypeMask;
   1.426 +			if (pte_type == KArmV45PteLargePage)
   1.427 +				{
   1.428 +				phys = (pte & KPteLargePageAddrMask) + (aLinAddr & KLargePageMask);
   1.429 +				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with 64K page - returning %08x", phys));
   1.430 +				}
   1.431 +			else if (pte_type != 0)
   1.432 +				{
   1.433 +				phys = (pte & KPteSmallPageAddrMask) + (aLinAddr & KPageMask);
   1.434 +				__KTRACE_OPT(KMMU,Kern::Printf("Mapped with 4K page - returning %08x", phys));
   1.435 +				}
   1.436 +			}
   1.437 +		}
   1.438 +	else if (IsSectionDescriptor(pde))
   1.439 +		{
   1.440 +		phys = (pde & KPdeSectionAddrMask) + (aLinAddr & KChunkMask);
   1.441 +		__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x", phys));
   1.442 +		}
   1.443 +	else
   1.444 +		{
   1.445 +		__KTRACE_OPT(KMMU,Kern::Printf("Address invalid"));
   1.446 +		}
   1.447 +	return phys;
   1.448 +	}
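// Worked example of the translation above (illustrative values): for a section-mapped
// address, phys = (pde & KPdeSectionAddrMask) + (aLinAddr & KChunkMask), e.g.
// pde = 0xC1200C1E, aLinAddr = 0x64034ABC  ->  0xC1200000 + 0x034ABC = 0xC1234ABC.
// For a 4K small page only the bottom 12 bits of the linear address are kept
// (aLinAddr & KPageMask), and for a 64K large page the bottom 16 (aLinAddr & KLargePageMask).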
   1.449 +
   1.450 +TInt ArmMmu::PreparePagesForDMA(TLinAddr aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
   1.451 +//Returns the list of physical pages belonging to the specified memory space.
   1.452 +//Checks these pages belong to a chunk marked as being trusted. 
    1.453 +//Locks these pages so they cannot be moved by e.g. RAM defragmentation.
   1.454 +	{
   1.455 +	SPageInfo* pi = NULL;
   1.456 +	DChunk* chunk = NULL;
   1.457 +	TInt err = KErrNone;
   1.458 +	
    1.459 +	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA %08x+%08x",aLinAddr,aSize));
   1.460 +
   1.461 +	TUint32* pageList = aPhysicalPageList;
   1.462 +	TInt pagesInList = 0;				//The number of pages we put in the list so far
   1.463 +	
   1.464 +	TInt pageIndex = (aLinAddr & KChunkMask) >> KPageShift;	// Index of the page within the section
   1.465 +	TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
   1.466 +
   1.467 +	MmuBase::Wait(); 	// RamAlloc Mutex for accessing page/directory tables.
    1.468 +	NKern::LockSystem();// System lock for accessing SPageInfo objects.
   1.469 +	
   1.470 +	TPde* pdePtr = PageDirectory + (aLinAddr>>KChunkShift);
   1.471 +	
   1.472 +	while(pagesLeft)
   1.473 +		{
   1.474 +		TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
   1.475 +		if(pagesLeftInChunk>pagesLeft)
   1.476 +			pagesLeftInChunk = pagesLeft;
   1.477 +		
   1.478 +		pagesLeft -= pagesLeftInChunk;
   1.479 +
   1.480 +		TPte* pt = SafePageTableFromPde(*pdePtr++);
   1.481 +		if(!pt) { err = KErrNotFound; goto fail; }// Cannot get page table.
   1.482 +
   1.483 +		pt += pageIndex;
   1.484 +
   1.485 +		for(;pagesLeftInChunk--;)
   1.486 +			{
   1.487 +			TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
   1.488 +			pi =  SPageInfo::SafeFromPhysAddr(phys);
   1.489 +			if(!pi)	{ err = KErrNotFound; goto fail; }// Invalid address
   1.490 +
   1.491 +			__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
   1.492 +			if (chunk==NULL)
   1.493 +				{//This is the first page. Check 'trusted' bit.
   1.494 +				if (pi->Type()!= SPageInfo::EChunk)
    1.495 +					{ err = KErrAccessDenied; goto fail; }// The first page does not belong to a chunk.
   1.496 +
   1.497 +				chunk = (DChunk*)pi->Owner();
   1.498 +				if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
   1.499 +					{ err = KErrAccessDenied; goto fail; } // Not a trusted chunk
   1.500 +				}
   1.501 +			pi->Lock();
   1.502 +
   1.503 +			*pageList++ = phys;
   1.504 +			if ( (++pagesInList&127) == 0) //release system lock temporarily on every 512K
   1.505 +				NKern::FlashSystem();
   1.506 +			}
   1.507 +		pageIndex = 0;
   1.508 +		}
   1.509 +
   1.510 +	if (pi->Type()!= SPageInfo::EChunk)
    1.511 +		{ err = KErrAccessDenied; goto fail; }// The last page does not belong to a chunk.
   1.512 +
   1.513 +	if (chunk && (chunk != (DChunk*)pi->Owner()))
   1.514 +		{ err = KErrArgument; goto fail; }//The first & the last page do not belong to the same chunk.
   1.515 +
   1.516 +	NKern::UnlockSystem();
   1.517 +	MmuBase::Signal();
   1.518 +	return KErrNone;
   1.519 +
   1.520 +fail:
   1.521 +	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
   1.522 +	NKern::UnlockSystem();
   1.523 +	MmuBase::Signal();
   1.524 +	ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
   1.525 +	return err;
   1.526 +	}
   1.527 +
   1.528 +TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
   1.529 +// Unlocks physical pages.
   1.530 +// @param aPhysicalPageList - points to the list of physical pages that should be released.
   1.531 +// @param aPageCount		- the number of physical pages in the list.
   1.532 +	{
   1.533 +	NKern::LockSystem();
   1.534 +	__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));
   1.535 +
   1.536 +	while (aPageCount--)
   1.537 +		{
   1.538 +		SPageInfo* pi =  SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
   1.539 +		if(!pi)
   1.540 +			{
   1.541 +			NKern::UnlockSystem();
   1.542 +			return KErrArgument;
   1.543 +			}
   1.544 +		__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
   1.545 +		pi->Unlock();
   1.546 +		}
   1.547 +	NKern::UnlockSystem();
   1.548 +	return KErrNone;
   1.549 +	}
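// A minimal driver-side usage sketch for the two routines above (hypothetical names;
// the buffer is assumed to live in a chunk created with ETrustedChunk set):
//
//	TPhysAddr pages[KMaxDmaPages];                 // caller-allocated, one entry per 4K page
//	TInt r = m.PreparePagesForDMA(bufAddr, bufSize, pages);   // 'm' is the ArmMmu instance
//	if (r == KErrNone)
//		{
//		// ... program the DMA controller with the physical addresses in 'pages' ...
//		m.ReleasePagesFromDMA(pages, ((bufAddr & KPageMask) + bufSize + KPageMask) >> KPageShift);
//		}
//	// On failure PreparePagesForDMA has already released the pages it managed to lock.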
   1.550 +
   1.551 +
   1.552 +void ArmMmu::Init1()
   1.553 +	{
   1.554 +	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1"));
   1.555 +
   1.556 +	// MmuBase data
   1.557 +	iPageSize=KPageSize;
   1.558 +	iPageMask=KPageMask;
   1.559 +	iPageShift=KPageShift;
   1.560 +	iChunkSize=KChunkSize;
   1.561 +	iChunkMask=KChunkMask;
   1.562 +	iChunkShift=KChunkShift;
   1.563 +	iPageTableSize=KPageTableSize;
   1.564 +	iPageTableMask=KPageTableMask;
   1.565 +	iPageTableShift=KPageTableShift;
   1.566 +	iPtClusterSize=KPtClusterSize;
   1.567 +	iPtClusterMask=KPtClusterMask;
   1.568 +	iPtClusterShift=KPtClusterShift;
   1.569 +	iPtBlockSize=KPtBlockSize;
   1.570 +	iPtBlockMask=KPtBlockMask;
   1.571 +	iPtBlockShift=KPtBlockShift;
   1.572 +	iPtGroupSize=KChunkSize/KPageTableSize;
   1.573 +	iPtGroupMask=iPtGroupSize-1;
   1.574 +	iPtGroupShift=iChunkShift-iPageTableShift;
   1.575 +	//TInt* iPtBlockCount;		// dynamically allocated - Init2
   1.576 +	//TInt* iPtGroupCount;		// dynamically allocated - Init2
   1.577 +	iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
   1.578 +	iPageTableLinBase=KPageTableBase;
   1.579 +	//iRamPageAllocator;		// dynamically allocated - Init2
   1.580 +	//iAsyncFreeList;			// dynamically allocated - Init2
   1.581 +	//iPageTableAllocator;		// dynamically allocated - Init2
   1.582 +	//iPageTableLinearAllocator;// dynamically allocated - Init2
   1.583 +	iPtInfoPtePerm=KPtInfoPtePerm;
   1.584 +	iPtPtePerm=KPtPtePerm;
   1.585 +	iPtPdePerm=KPtPdePerm;
   1.586 +	iTempAddr=KTempAddr;
   1.587 +	iSecondTempAddr=KSecondTempAddr;
   1.588 +	iMapSizes=KPageSize|KLargePageSize|KChunkSize;
   1.589 +	iRomLinearBase = ::RomHeaderAddress;
   1.590 +	iRomLinearEnd = KRomLinearEnd;
   1.591 +	iShadowPtePerm = KShadowPtePerm;
   1.592 +	iShadowPdePerm = KShadowPdePerm;
   1.593 +
   1.594 +	// Mmu data
   1.595 +	TInt total_ram=TheSuperPage().iTotalRamSize;
   1.596 +
   1.597 +#if defined(__HAS_EXTERNAL_CACHE__) 
    1.598 +	//L2 cache on ARMv5 is always in write-back mode => must always be purged
   1.599 +	iDecommitThreshold = CacheMaintenance::SyncAllPerformanceThresholdPages();
   1.600 +#else
   1.601 +	iDecommitThreshold = 0; ///no cache consistency issues on decommit
   1.602 +#endif
   1.603 +
   1.604 +	iDataSectionBase = KDataSectionBase;
   1.605 +	iDataSectionEnd = KDataSectionEnd;
   1.606 +	iMaxDllDataSize=Min(total_ram/2, 0x08000000);					// phys RAM/2 up to 128Mb
   1.607 +	iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask;		// round up to chunk size
   1.608 +	iMaxUserCodeSize=Min(total_ram, 0x10000000);					// phys RAM up to 256Mb
   1.609 +	iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask;		// round up to chunk size
   1.610 +	iMaxKernelCodeSize=Min(total_ram/2, 0x04000000);				// phys RAM/2 up to 64Mb
   1.611 +	iMaxKernelCodeSize=(iMaxKernelCodeSize+iChunkMask)&~iChunkMask;	// round up to chunk size
   1.612 +	iPdeBase=KPageDirectoryBase;
   1.613 +	iUserCodeLoadPtePerm=KUserCodeLoadPte;
   1.614 +	iKernelCodePtePerm=KKernelCodeRunPte;
   1.615 +	iDllDataBase = KDataSectionEnd - iMaxDllDataSize;
   1.616 +	iUserCodeBase = KPageInfoLinearBase - iMaxUserCodeSize;
   1.617 +	iKernelCodeBase = iUserCodeBase - iMaxKernelCodeSize;
   1.618 +
   1.619 +	__KTRACE_OPT(KMMU,Kern::Printf("DDS %08x UCS %08x KCS %08x", iMaxDllDataSize, iMaxUserCodeSize, iMaxKernelCodeSize));
   1.620 +	__KTRACE_OPT(KMMU,Kern::Printf("DDB %08x KCB %08x UCB %08x RLB %08x", iDllDataBase, iKernelCodeBase, iUserCodeBase, iRomLinearBase));
   1.621 +
   1.622 +	// ArmMmu data
   1.623 +
   1.624 +	// other
   1.625 +	PP::MaxUserThreadStack=0x14000;			// 80K - STDLIB asks for 64K for PosixServer!!!!
   1.626 +	PP::UserThreadStackGuard=0x2000;		// 8K
   1.627 +	PP::MaxStackSpacePerProcess=0x200000;	// 2Mb
   1.628 +	K::SupervisorThreadStackSize=0x1000;	// 4K
   1.629 +	PP::SupervisorThreadStackGuard=0x1000;	// 4K
   1.630 +	K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
   1.631 +	PP::RamDriveStartAddress=KRamDriveStartAddress;
   1.632 +	PP::RamDriveRange=KRamDriveMaxSize;
   1.633 +	PP::RamDriveMaxSize=KRamDriveMaxSize;	// may be reduced later
   1.634 +
   1.635 +	__KTRACE_OPT(KBOOT,Kern::Printf("K::MaxMemCopyInOneGo=0x%x",K::MaxMemCopyInOneGo));
   1.636 +	K::MemModelAttributes=EMemModelTypeMoving|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
   1.637 +						EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSupportFixed|
   1.638 +						EMemModelAttrSvKernProt|EMemModelAttrIPCKernProt;
   1.639 +
   1.640 +	Arm::DefaultDomainAccess=KDefaultDomainAccess;
   1.641 +
   1.642 +	// Domains 0-3 are preallocated
   1.643 +	// 0=Variable user running, 1=Client, 2=Page tables, 3=RAM drive
   1.644 +	Domains=(~(0xffffffffu<<ENumDomains))&0xfffffff0u;
   1.645 +
   1.646 +	iMaxPageTables = 1<<(32-KChunkShift);		// possibly reduced when RAM size known
   1.647 +
   1.648 +	Mmu::Init1();
   1.649 +	}
   1.650 +
   1.651 +void ArmMmu::DoInit2()
   1.652 +	{
   1.653 +	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::DoInit2"));
   1.654 +	iTempPte=PageTable(GetPageTableId(iTempAddr))+((iTempAddr&KChunkMask)>>KPageShift);
   1.655 +	iSecondTempPte=PageTable(GetPageTableId(iSecondTempAddr))+((iSecondTempAddr&KChunkMask)>>KPageShift);
   1.656 +	__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x", iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
   1.657 +	CreateKernelSection(iKernelCodeBase, KPageShift);
   1.658 +	iHomePdeMap=(TUint32*)Kern::AllocZ(-KSuperPageLinAddr>>KChunkShift<<2);
   1.659 +	iHomePdeMap=(TUint32*)((TUint32)iHomePdeMap-(KSuperPageLinAddr>>KChunkShift<<2)); //adjust the pointer so it's indexed by address>>20
   1.660 +#if defined(__CPU_WRITE_BACK_CACHE)
   1.661 +#if defined(__CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH)
   1.662 +	if (InternalCache::Info[KCacheInfoD].iLineLength == 32)
   1.663 +		iCopyPageFn = &::CopyPageForRemap32;
   1.664 +	else if (InternalCache::Info[KCacheInfoD].iLineLength == 16)
   1.665 +		iCopyPageFn = &::CopyPageForRemap16;
   1.666 +	else
   1.667 +		Panic(ENoCopyPageFunction);		
   1.668 +#else
   1.669 +#error Write-back cache without single entry dcache flush is not supported
   1.670 +#endif
    1.671 +#else // !__CPU_WRITE_BACK_CACHE
   1.672 +	iCopyPageFn = &::CopyPageForRemapWT;
   1.673 +#endif
   1.674 +	Mmu::DoInit2();
   1.675 +	}
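// The iHomePdeMap bias above in numbers (illustrative only; KSuperPageLinAddr is platform
// defined, 0xC0000000 is just an example): the table gets one word per megabyte from
// KSuperPageLinAddr up to 4GB, i.e. (0x100000000 - 0xC0000000) >> 20 = 0x400 words, and
// subtracting (0xC0000000 >> 20) << 2 = 0x3000 bytes from the pointer makes
// iHomePdeMap[addr >> KChunkShift] hit the first word of the allocation when addr == KSuperPageLinAddr.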
   1.676 +
   1.677 +#ifndef __MMU_MACHINE_CODED__
   1.678 +void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
   1.679 +//
   1.680 +// Map a list of physical RAM pages into a specified page table with specified PTE permissions.
   1.681 +// Update the page information array.
   1.682 +// Call this with the system locked.
   1.683 +//
   1.684 +	{
   1.685 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
   1.686 +			aId, aType, aPtr, aOffset, aNumPages, aPtePerm));
   1.687 +
   1.688 +	SPageTableInfo& ptinfo=iPtInfo[aId];
   1.689 +	ptinfo.iCount+=aNumPages;
   1.690 +	aOffset>>=KPageShift;
   1.691 +	TInt ptOffset=aOffset & KPagesInPDEMask;				// entry number in page table
   1.692 +	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
   1.693 +	while(aNumPages--)
   1.694 +		{
   1.695 +		TPhysAddr pa = *aPageList++;
   1.696 +		*pPte++ =  pa | aPtePerm;					// insert PTE
   1.697 +		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
   1.698 +		if (aType!=SPageInfo::EInvalid)
   1.699 +			{
   1.700 +			SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
   1.701 +			if(pi)
   1.702 +				{
   1.703 +				pi->Set(aType,aPtr,aOffset);
   1.704 +				__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
   1.705 +				++aOffset;	// increment offset for next page
   1.706 +				}
   1.707 +			}
   1.708 +		}
   1.709 +	__DRAIN_WRITE_BUFFER;
   1.710 +	}
   1.711 +
   1.712 +void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
   1.713 +//
   1.714 +// Map consecutive physical pages into a specified page table with specified PTE permissions.
   1.715 +// Update the page information array if RAM pages are being mapped.
   1.716 +// Call this with the system locked.
   1.717 +//
   1.718 +	{
   1.719 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
   1.720 +			aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
   1.721 +	SPageTableInfo& ptinfo=iPtInfo[aId];
   1.722 +	ptinfo.iCount+=aNumPages;
   1.723 +	aOffset>>=KPageShift;
   1.724 +	TInt ptOffset=aOffset & KPagesInPDEMask;				// entry number in page table
   1.725 +	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
   1.726 +	SPageInfo* pi;
   1.727 +	if(aType==SPageInfo::EInvalid)
   1.728 +		pi = NULL;
   1.729 +	else
   1.730 +		pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
   1.731 +	while(aNumPages--)
   1.732 +		{
   1.733 +		*pPte++ = aPhysAddr|aPtePerm;						// insert PTE
   1.734 +		aPhysAddr+=KPageSize;
   1.735 +		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
   1.736 +		if (pi)
   1.737 +			{
   1.738 +			pi->Set(aType,aPtr,aOffset);
   1.739 +			__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
   1.740 +			++aOffset;	// increment offset for next page
   1.741 +			++pi;
   1.742 +			}
   1.743 +		}
   1.744 +	__DRAIN_WRITE_BUFFER;
   1.745 +	}
   1.746 +
   1.747 +void ArmMmu::MapVirtual(TInt aId, TInt aNumPages)
   1.748 +//
   1.749 +// Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing
   1.750 +// virtual address space to a chunk.  No pages are mapped.
   1.751 +// Call this with the system locked.
   1.752 +//
   1.753 +	{
   1.754 +	SPageTableInfo& ptinfo=iPtInfo[aId];
   1.755 +	ptinfo.iCount+=aNumPages;
   1.756 +	}
   1.757 +
   1.758 +void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* /*aProcess*/)
   1.759 +//
   1.760 +// Replace the mapping at address aAddr in page table aId.
   1.761 +// Update the page information array for both the old and new pages.
   1.762 +// Call this with the system locked.
   1.763 +//
   1.764 +	{
   1.765 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPage() id=%d addr=%08x old=%08x new=%08x perm=%08x", aId, aAddr, aOldAddr, aNewAddr, aPtePerm));
   1.766 +
   1.767 +	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
   1.768 +	TPte* pPte=PageTable(aId)+ptOffset;						// address of PTE
   1.769 +	TPte pte=*pPte;
   1.770 +
   1.771 +	TUint pageType = (pte & KPteTypeMask);
   1.772 +	if (pageType == KArmPteSmallPage || pageType == 0)
   1.773 +		{
   1.774 +		__ASSERT_ALWAYS((pte & KPteSmallPageAddrMask) == aOldAddr || pte==KPteNotPresentEntry, Panic(ERemapPageFailed));
   1.775 +		SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
   1.776 +		__ASSERT_DEBUG(oldpi->LockCount()==0,Panic(ERemapPageFailed));
   1.777 +
   1.778 +		// remap page
   1.779 +		*pPte = aNewAddr | aPtePerm;					// overwrite PTE
   1.780 +		__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",*pPte,pPte));
   1.781 +		__DRAIN_WRITE_BUFFER;
   1.782 +		InvalidateTLBForPage(aAddr);		// flush any corresponding TLB entry
   1.783 +
   1.784 +		// update new pageinfo, clear old
   1.785 +		SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr);
   1.786 +		pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
   1.787 +		oldpi->SetUnused();
   1.788 +		}
   1.789 +	else
   1.790 +		{
   1.791 +		__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPage() called on a non-4K page!"));
   1.792 +		Panic(ERemapPageFailed);
   1.793 +		}
   1.794 +	}
   1.795 +
   1.796 +void ArmMmu::RemapKernelPage(TInt aId, TLinAddr aSrc, TLinAddr aDest, TPhysAddr aNewPhys, TPte aPtePerm)
   1.797 +//
    1.798 +// Replace the mapping at address aSrc in page table aId.
   1.799 +// Called with the system locked.
   1.800 +// MUST NOT INVOKE ANY TRACING - or do anything else that might touch the kernel heap
   1.801 +// We are depending on this not reintroducing any of the cache lines we previously
   1.802 +// invalidated.
   1.803 +//
   1.804 +	{
   1.805 +	TInt ptOffset=(aSrc&KChunkMask)>>KPageShift;			// entry number in page table
   1.806 +	TPte* pPte=PageTable(aId)+ptOffset;						// address of PTE
   1.807 +
   1.808 +	TInt irq = NKern::DisableAllInterrupts();
   1.809 +	CopyPageForRemap(aDest, aSrc);
   1.810 +	*pPte = aNewPhys | aPtePerm;					// overwrite PTE
   1.811 +	__DRAIN_WRITE_BUFFER;
   1.812 +	InvalidateTLBForPage(aSrc);		// flush any corresponding TLB entry
   1.813 +	NKern::RestoreInterrupts(irq);
   1.814 +	}
   1.815 +
   1.816 +TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess*)
   1.817 +//
   1.818 +// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
   1.819 +// pages into aPageList, and count of unmapped pages into aNumPtes.
   1.820 +// Return number of pages still mapped using this page table.
   1.821 +// Call this with the system locked.
   1.822 +// @param aId 			Identifies Page Table to unmap PTEs(Page Table Entries) from.
    1.823 +// @param aAddr 		Base virtual address of the region to unmap. It (indirectly) specifies the first PTE in this Page Table to unmap.
   1.824 +// @param aNumPages 	The number of consecutive PTEs to unmap.
   1.825 +// @param aPageList 	Points to pre-allocated array. On return, it is filled in with the list of physical addresses of the unmapped 4K
   1.826 +//						memory blocks.
    1.827 +// @param aSetPagesFree	If true, pages are placed in the free state and only the pages actually freed are added
   1.828 +//						to aPageList.
   1.829 +// @param aNumPtes		On return, indicates how many PTEs are unmapped.
    1.830 +// @param aNumFree		On return, holds the number of freed 4K memory blocks. Not updated if aSetPagesFree is false.
   1.831 +// @return 				The number of PTEs still mapped in this Page Table (aId).
   1.832 +	{
   1.833 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
   1.834 +	TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;			// entry number in page table
   1.835 +	TPte* pPte=PageTable(aId)+ptOffset;						// address of first PTE
   1.836 +	TInt np=0;
   1.837 +	TInt nf=0;
   1.838 +	while(aNumPages--)
   1.839 +		{
   1.840 +		TPte pte=*pPte;							// get original PTE
   1.841 +		*pPte++=0;								// clear PTE
   1.842 +		TUint pageType = (pte & KPteTypeMask);
   1.843 +		if (pageType == KArmPteSmallPage)
   1.844 +			InvalidateTLBForPage(aAddr);		// flush any corresponding TLB entry
   1.845 +		if (pageType == KArmPteSmallPage || (pageType == 0 && pte != KPteNotPresentEntry))
   1.846 +			{
   1.847 +			++np;								// count unmapped pages
   1.848 +			TPhysAddr pa=pte & KPteSmallPageAddrMask;	// physical address of unmapped page
   1.849 +			if (aSetPagesFree)
   1.850 +				{
   1.851 +				SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
   1.852 +				__NK_ASSERT_DEBUG(pageType == KArmPteSmallPage ||
   1.853 +								  (pi->Type()==SPageInfo::EPagedCode && pi->State()==SPageInfo::EStatePagedOld));
   1.854 +				if(iRamCache->PageUnmapped(pi))
   1.855 +					{
   1.856 +					pi->SetUnused();					// mark page as unused
   1.857 +					if (pi->LockCount()==0)
   1.858 +						{
   1.859 +						*aPageList++=pa;			// store in page list
   1.860 +						++nf;						// count free pages
   1.861 +						}
   1.862 +					}
   1.863 +				}
   1.864 +			else
   1.865 +				*aPageList++=pa;				// store in page list
   1.866 +			}
   1.867 +		aAddr+=KPageSize;
   1.868 +		}
   1.869 +	aNumPtes=np;
   1.870 +	aNumFree=nf;
   1.871 +	SPageTableInfo& ptinfo=iPtInfo[aId];
   1.872 +	TInt r=(ptinfo.iCount-=np);
   1.873 +	__DRAIN_WRITE_BUFFER;
   1.874 +	__KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
   1.875 +	return r;								// return number of pages remaining in this page table
   1.876 +	}
   1.877 +#endif
   1.878 +
   1.879 +TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
   1.880 +//
   1.881 +// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
   1.882 +// pages into aPageList, and count of unmapped pages into aNumPtes.
   1.883 +// Adjust the page table reference count as if aNumPages pages were unmapped.
   1.884 +// Return number of pages still mapped using this page table.
   1.885 +// Call this with the system locked.
   1.886 +//
   1.887 +	{
   1.888 +	SPageTableInfo& ptinfo=iPtInfo[aId];
   1.889 +	TInt newCount = ptinfo.iCount - aNumPages;
   1.890 +	UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
   1.891 +	ptinfo.iCount = newCount;
   1.892 +	aNumPtes = aNumPages;
   1.893 +	return newCount;
   1.894 +	}
   1.895 +   
   1.896 +
   1.897 +#ifndef __MMU_MACHINE_CODED__
   1.898 +void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm)
   1.899 +//
   1.900 +// Assign an allocated page table to map a given linear address with specified permissions.
   1.901 +// This should be called with the system locked and the MMU mutex held.
   1.902 +//
   1.903 +	{
   1.904 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x",aId,aAddr,aPdePerm));
   1.905 +	TLinAddr ptLin=PageTableLinAddr(aId);
   1.906 +	TPhysAddr ptPhys=LinearToPhysical(ptLin);
   1.907 +	TInt pdeIndex=TInt(aAddr>>KChunkShift);
   1.908 +	PageDirectory[pdeIndex]=ptPhys|aPdePerm;
   1.909 +	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", ptPhys|aPdePerm, PageDirectory+pdeIndex));
   1.910 +	__DRAIN_WRITE_BUFFER;
   1.911 +	}
   1.912 +
   1.913 +void ArmMmu::RemapPageTable(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
   1.914 +//
   1.915 +// Replace a page table mapping the specified linear address.
   1.916 +// This should be called with the system locked and the MMU mutex held.
   1.917 +//
   1.918 +	{
   1.919 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTable %08x to %08x at %08x",aOld,aNew,aAddr));
   1.920 +	TInt pdeIndex=TInt(aAddr>>KChunkShift);
   1.921 +	TPde pde=PageDirectory[pdeIndex];
   1.922 +	__ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed));
   1.923 +	TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
   1.924 +	PageDirectory[pdeIndex]=newPde;
   1.925 +	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newPde, PageDirectory+pdeIndex));
   1.926 +	__DRAIN_WRITE_BUFFER;
   1.927 +	}
   1.928 +
   1.929 +void ArmMmu::DoUnassignPageTable(TLinAddr aAddr)
   1.930 +//
   1.931 +// Unassign a now-empty page table currently mapping the specified linear address.
   1.932 +// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
   1.933 +// This should be called with the system locked and the MMU mutex held.
   1.934 +//
   1.935 +	{
   1.936 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x",aAddr));
   1.937 +	TInt pdeIndex=TInt(aAddr>>KChunkShift);
   1.938 +	PageDirectory[pdeIndex]=0;
   1.939 +	__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x", PageDirectory+pdeIndex));
   1.940 +	__DRAIN_WRITE_BUFFER;
   1.941 +	}
   1.942 +#endif
   1.943 +
   1.944 +// Initialise page table at physical address aXptPhys to be used as page table aXptId
   1.945 +// to expand the virtual address range used for mapping page tables. Map the page table
   1.946 +// at aPhysAddr as page table aId using the expanded range.
   1.947 +// Assign aXptPhys to kernel's Page Directory.
   1.948 +// Called with system unlocked and MMU mutex held.
   1.949 +void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
   1.950 +	{
   1.951 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
   1.952 +						aXptId, aXptPhys, aId, aPhysAddr));
   1.953 +	
   1.954 +	// put in a temporary mapping for aXptPhys
   1.955 +	// make it noncacheable
   1.956 +	TPhysAddr pa=aXptPhys&~KPageMask;
   1.957 +	*iTempPte = pa | SP_PTE(KArmV45PermRWNO, KMemAttNC);
   1.958 +	__DRAIN_WRITE_BUFFER;
   1.959 +
   1.960 +	// clear XPT
   1.961 +	TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask));
   1.962 +	memclr(xpt, KPageTableSize);
   1.963 +
   1.964 +	// must in fact have aXptPhys and aPhysAddr in same physical page
   1.965 +	__ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)<TUint32(KPageSize), MM::Panic(MM::EBootstrapPageTableBadAddr));
   1.966 +
   1.967 +	// so only need one mapping
   1.968 +	xpt[(aXptId>>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm;
   1.969 +
   1.970 +	// remove temporary mapping
   1.971 +	*iTempPte=0;
   1.972 +	__DRAIN_WRITE_BUFFER;
   1.973 +	InvalidateTLBForPage(iTempAddr);
   1.974 +
   1.975 +	// initialise PtInfo...
   1.976 +	TLinAddr xptAddr = PageTableLinAddr(aXptId);
   1.977 +	iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);
   1.978 +
   1.979 +	// map xpt...
   1.980 +	TInt pdeIndex=TInt(xptAddr>>KChunkShift);
   1.981 +	NKern::LockSystem();
   1.982 +	PageDirectory[pdeIndex]=aXptPhys|KPtPdePerm;
   1.983 +	__DRAIN_WRITE_BUFFER;
   1.984 +	NKern::UnlockSystem();				
   1.985 +	}
   1.986 +
   1.987 +// Edit the self-mapping entry in page table aId, mapped at aTempMap, to
   1.988 +// change the physical address from aOld to aNew. Used when moving page
   1.989 +// tables which were created by BootstrapPageTable.
   1.990 +// Called with system locked and MMU mutex held.
   1.991 +void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
   1.992 +	{
   1.993 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x",
   1.994 +						aId, aTempMap, aOld, aNew));
   1.995 +	
   1.996 +	// find correct page table inside the page
   1.997 +	TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift));
   1.998 +	// find the pte in that page table
   1.999 +	xpt += (aId>>KPtClusterShift)&KPagesInPDEMask;
  1.1000 +
  1.1001 +	// switch the mapping
  1.1002 +	__ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed));
  1.1003 +	*xpt = aNew | KPtPtePerm;
  1.1004 +
  1.1005 +	// invalidate the TLB entry for the self-mapping page table
  1.1006 +	// the PDE has not yet been changed, but since we hold the
  1.1007 +	// system lock, nothing should bring this back into the TLB.
  1.1008 +	InvalidateTLBForPage(PageTableLinAddr(aId));
  1.1009 +	}
  1.1010 +
  1.1011 +// Set up a page table (specified by aId) to map a 1Mb section of ROM containing aRomAddr
  1.1012 +// using ROM at aOrigPhys.
  1.1013 +void ArmMmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
  1.1014 +	{
  1.1015 +	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
  1.1016 +		aId, aRomAddr, aOrigPhys));
  1.1017 +	TPte* ppte = PageTable(aId);
  1.1018 +	TPte* ppte_End = ppte + KChunkSize/KPageSize;
  1.1019 +	TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
  1.1020 +	for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
  1.1021 +		*ppte = phys | KRomPtePermissions;
  1.1022 +	__DRAIN_WRITE_BUFFER;
  1.1023 +	}
  1.1024 +
  1.1025 +// Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
  1.1026 +void ArmMmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
  1.1027 +	{
  1.1028 +	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
  1.1029 +		aShadowPhys, aRomAddr));
  1.1030 +
  1.1031 +	// put in a temporary mapping for aShadowPhys
  1.1032 +	// make it noncacheable
  1.1033 +	*iTempPte = aShadowPhys | SP_PTE(KArmV45PermRWNO, KMemAttNC);
  1.1034 +	__DRAIN_WRITE_BUFFER;
  1.1035 +
  1.1036 +	// copy contents of ROM
  1.1037 +	wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
  1.1038 +	__DRAIN_WRITE_BUFFER;	// make sure contents are written to memory
  1.1039 +
  1.1040 +	// remove temporary mapping
  1.1041 +	*iTempPte=0;
  1.1042 +	__DRAIN_WRITE_BUFFER;
  1.1043 +	InvalidateTLBForPage(iTempAddr);
  1.1044 +	}
  1.1045 +
  1.1046 +// Assign a shadow page table to replace a ROM section mapping
  1.1047 +// Enter and return with system locked
  1.1048 +void ArmMmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
  1.1049 +	{
  1.1050 +	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
  1.1051 +		aId, aRomAddr));
  1.1052 +	TLinAddr ptLin=PageTableLinAddr(aId);
  1.1053 +	TPhysAddr ptPhys=LinearToPhysical(ptLin);
  1.1054 +	TPde* ppde = PageDirectory + (aRomAddr>>KChunkShift);
  1.1055 +	TPde newpde = ptPhys | KShadowPdePerm;
  1.1056 +	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
  1.1057 +	TInt irq=NKern::DisableAllInterrupts();
  1.1058 +	*ppde = newpde;		// map in the page table
  1.1059 +	__DRAIN_WRITE_BUFFER;	// make sure new PDE written to main memory
  1.1060 +	FlushTLBs();	// flush both TLBs (no need to flush cache yet)
  1.1061 +	NKern::RestoreInterrupts(irq);
  1.1062 +	}
  1.1063 +
  1.1064 +void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
  1.1065 +	{
  1.1066 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
  1.1067 +	TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
  1.1068 +	TPte newpte = aOrigPhys | KRomPtePermissions;
  1.1069 +	__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
  1.1070 +	TInt irq=NKern::DisableAllInterrupts();
  1.1071 +	*ppte = newpte;
  1.1072 +	__DRAIN_WRITE_BUFFER;
  1.1073 +	InvalidateTLBForPage(aRomAddr);
  1.1074 +	SyncCodeMappings();
  1.1075 +	CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
  1.1076 +	CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
  1.1077 +	NKern::RestoreInterrupts(irq);
  1.1078 +	}
  1.1079 +
  1.1080 +TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys)
  1.1081 +	{
  1.1082 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys));
  1.1083 +	TPde* ppde = PageDirectory + (aRomAddr>>KChunkShift);
  1.1084 +	TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions;
  1.1085 +	__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
  1.1086 +	TInt irq=NKern::DisableAllInterrupts();
  1.1087 +	*ppde = newpde;			// revert to section mapping
  1.1088 +	__DRAIN_WRITE_BUFFER;	// make sure new PDE written to main memory
  1.1089 +	FlushTLBs();			// flush both TLBs
  1.1090 +	NKern::RestoreInterrupts(irq);
  1.1091 +	return KErrNone;
  1.1092 +	}
  1.1093 +
  1.1094 +void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
  1.1095 +	{
  1.1096 +	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
  1.1097 +		aId, aRomAddr));
  1.1098 +	TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
  1.1099 +	TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePermissions;
  1.1100 +	__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
  1.1101 +	*ppte = newpte;
  1.1102 +	__DRAIN_WRITE_BUFFER;
  1.1103 +	InvalidateTLBForPage(aRomAddr);
  1.1104 +	}
  1.1105 +
  1.1106 +void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr)
  1.1107 +	{
  1.1108 +	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr));
  1.1109 +	
  1.1110 +	TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
  1.1111 +	TPte* pte = PageTable(aId);
  1.1112 +	if ((pte[pteIndex] & KPteTypeMask) == KArmV45PteLargePage)
  1.1113 +		{
  1.1114 +		__KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages"));
  1.1115 +		pteIndex &= ~0xf;
  1.1116 +		TPte source = pte[pteIndex];
  1.1117 +		source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source);
  1.1118 +		pte += pteIndex;
  1.1119 +		for (TInt entry=0; entry<16; entry++)
  1.1120 +			{
  1.1121 +			pte[entry] = source | (entry<<12);
  1.1122 +			}
  1.1123 +		FlushTLBs();
  1.1124 +		}
  1.1125 +	}
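// Example of the conversion above (illustrative addresses): a 64K large-page PTE with
// base 0xABC10000 is replaced by sixteen 4K PTEs 0xABC10000|perm, 0xABC11000|perm, ...,
// 0xABC1F000|perm, i.e. source | (entry<<12) for entry = 0..15. The mapping itself is
// unchanged; only its granularity drops to 4K so individual pages can then be remapped
// or unmapped separately.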
  1.1126 +
  1.1127 +void ArmMmu::FlushShadow(TLinAddr aRomAddr)
  1.1128 +	{
  1.1129 +	CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
  1.1130 +	CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
  1.1131 +	InvalidateTLBForPage(aRomAddr);		// remove all TLB references to original ROM page
  1.1132 +	SyncCodeMappings();
  1.1133 +	}
  1.1134 +
  1.1135 +
  1.1136 +inline void ZeroPdes(TLinAddr aBase, TLinAddr aEnd)
  1.1137 +	{
  1.1138 +	memclr(PageDirectory+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
  1.1139 +	}
  1.1140 +
  1.1141 +void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex)
  1.1142 +	{
  1.1143 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex));
  1.1144 +	TPte* pte=PageTable(aId);
  1.1145 +	memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte));
  1.1146 +	__DRAIN_WRITE_BUFFER;
  1.1147 +	}
  1.1148 +
  1.1149 +void ArmMmu::ClearRamDrive(TLinAddr aStart)
  1.1150 +	{
  1.1151 +	// clear the page directory entries corresponding to the RAM drive
  1.1152 +	ZeroPdes(aStart, KRamDriveEndAddress);
  1.1153 +	__DRAIN_WRITE_BUFFER;
  1.1154 +	}
  1.1155 +
  1.1156 +void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TUint aChunkSize, TPde aPdePerm)
  1.1157 +	{
  1.1158 +	__KTRACE_OPT(KMMU,Kern::Printf("ApplyTopLevelPermissions at %x",aAddr));
  1.1159 +	TInt pdeIndex=aAddr>>KChunkShift;
  1.1160 +	TInt numPdes=(aChunkSize+KChunkMask)>>KChunkShift;
  1.1161 +	TPde* pPde=PageDirectory+pdeIndex;
  1.1162 +	while(numPdes--)
  1.1163 +		{
  1.1164 +		*pPde=(*pPde)?((*pPde & KPdePageTableAddrMask)|aPdePerm):0;
  1.1165 +		pPde++;
  1.1166 +		}
  1.1167 +	__DRAIN_WRITE_BUFFER;
  1.1168 +	}
  1.1169 +
  1.1170 +void ArmMmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
  1.1171 +	{
  1.1172 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
  1.1173 +												aId, aPageOffset, aNumPages, aPtePerm));
  1.1174 +	TPte* pPte=PageTable(aId)+aPageOffset;
   1.1175 +	TPte* pPteEnd=pPte+aNumPages;
  1.1176 +	NKern::LockSystem();
  1.1177 +	for (; pPte<pPteEnd; ++pPte)
  1.1178 +		{
  1.1179 +		TPte pte=*pPte;
  1.1180 +		if (pte)
  1.1181 +			*pPte = (pte&KPteSmallPageAddrMask)|aPtePerm;
  1.1182 +		}
  1.1183 +	NKern::UnlockSystem();
  1.1184 +	FlushTLBs();
  1.1185 +	__DRAIN_WRITE_BUFFER;
  1.1186 +	}
  1.1187 +
  1.1188 +void ArmMmu::MoveChunk(TLinAddr aInitAddr, TUint aSize, TLinAddr aFinalAddr, TPde aPdePerm)
  1.1189 +	{
  1.1190 +	__KTRACE_OPT(KMMU,Kern::Printf("MoveChunk at %08x to %08x size %08x PdePerm %08x",
  1.1191 +		aInitAddr, aFinalAddr, aSize, aPdePerm));
  1.1192 +	TInt numPdes=(aSize+KChunkMask)>>KChunkShift;
  1.1193 +	TInt iS=aInitAddr>>KChunkShift;
  1.1194 +	TInt iD=aFinalAddr>>KChunkShift;
  1.1195 +	TPde* pS=PageDirectory+iS;
  1.1196 +	TPde* pD=PageDirectory+iD;
  1.1197 +	while(numPdes--)
  1.1198 +		{
  1.1199 +		*pD++=(*pS)?((*pS & KPdePageTableAddrMask)|aPdePerm):0;
  1.1200 +		*pS++=KPdeNotPresentEntry;
  1.1201 +		}
  1.1202 +	__DRAIN_WRITE_BUFFER;
  1.1203 +	}
  1.1204 +
  1.1205 +void ArmMmu::MoveChunk(TLinAddr aInitAddr, TLinAddr aFinalAddr, TInt aNumPdes)
  1.1206 +//
  1.1207 +// Move a block of PDEs without changing permissions. Must work with overlapping initial and final
  1.1208 +// regions. Call this with kernel locked.
  1.1209 +//
  1.1210 +	{
  1.1211 +	__KTRACE_OPT(KMMU,Kern::Printf("MoveChunk at %08x to %08x numPdes %d", aInitAddr, aFinalAddr, aNumPdes));
  1.1212 +	if (aInitAddr==aFinalAddr || aNumPdes==0)
  1.1213 +		return;
  1.1214 +	TInt iS=aInitAddr>>KChunkShift;
  1.1215 +	TInt iD=aFinalAddr>>KChunkShift;
  1.1216 +	TBool forwardOverlap=(iS<iD && iD-iS<aNumPdes);
  1.1217 +	TBool backwardOverlap=(iS>iD && iS-iD<aNumPdes);
  1.1218 +	TInt iC=backwardOverlap?(iD+aNumPdes):iS;	// first index to clear
  1.1219 +	TInt iZ=forwardOverlap?iD:(iS+aNumPdes);	// last index to clear + 1
  1.1220 +	TPde* pS=PageDirectory+iS;
  1.1221 +	TPde* pD=PageDirectory+iD;
  1.1222 +	__KTRACE_OPT(KMMU,Kern::Printf("backwardOverlap=%d, forwardOverlap=%d",backwardOverlap,forwardOverlap));
  1.1223 +	__KTRACE_OPT(KMMU,Kern::Printf("first clear %03x, last clear %03x",iC,iZ));
  1.1224 +	wordmove(pD,pS,aNumPdes<<2);				// move PDEs
  1.1225 +	pD=PageDirectory+iC;						// pointer to first PDE to clear
  1.1226 +	iZ-=iC;										// number of PDEs to clear
  1.1227 +	memclr(pD, iZ<<2);							// clear PDEs
  1.1228 +	__DRAIN_WRITE_BUFFER;
  1.1229 +	}
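// The overlap handling above, worked through with illustrative indices: moving 4 PDEs
// from iS=10 to iD=12 overlaps forwards, so after the wordmove the stale entries to
// clear are [iC,iZ) = [10,12); moving 4 PDEs from iS=12 to iD=10 overlaps backwards,
// and the stale range is [iD+4,iS+4) = [14,16). With no overlap the whole source
// range [iS,iS+aNumPdes) is cleared.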
  1.1230 +
  1.1231 +TPde ArmMmu::PdePermissions(TChunkType aChunkType, TInt aChunkState)
  1.1232 +	{
  1.1233 +	if ((aChunkType==EUserData || aChunkType==EDllData || aChunkType==EUserSelfModCode
  1.1234 +		|| aChunkType==ESharedKernelSingle || aChunkType==ESharedKernelMultiple || aChunkType==ESharedIo)
  1.1235 +		&& aChunkState!=0)
  1.1236 +		return KUserDataRunningPermissions;
  1.1237 +	return ChunkPdePermissions[aChunkType];
  1.1238 +	}
  1.1239 +
  1.1240 +TPte ArmMmu::PtePermissions(TChunkType aChunkType)
  1.1241 +	{
  1.1242 +	return ChunkPtePermissions[aChunkType];
  1.1243 +	}
  1.1244 +
  1.1245 +const TUint FBLK=(EMapAttrFullyBlocking>>12);
  1.1246 +const TUint BFNC=(EMapAttrBufferedNC>>12);
  1.1247 +const TUint BUFC=(EMapAttrBufferedC>>12);
  1.1248 +const TUint L1UN=(EMapAttrL1Uncached>>12);
  1.1249 +const TUint WTRA=(EMapAttrCachedWTRA>>12);
  1.1250 +const TUint WTWA=(EMapAttrCachedWTWA>>12);
  1.1251 +const TUint WBRA=(EMapAttrCachedWBRA>>12);
  1.1252 +const TUint WBWA=(EMapAttrCachedWBWA>>12);
  1.1253 +const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
  1.1254 +const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
  1.1255 +const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
  1.1256 +const TUint AWBW=(EMapAttrAltCacheWBWA>>12);
  1.1257 +const TUint MAXC=(EMapAttrL1CachedMax>>12);
  1.1258 +
  1.1259 +const TUint L2UN=(EMapAttrL2Uncached>>12);
  1.1260 +
  1.1261 +const TUint16 UNS=0xffffu;	// Unsupported attribute
  1.1262 +const TUint16 SPE=0xfffeu;	// Special processing required
  1.1263 +
  1.1264 +#if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
  1.1265 +// Original definition of C B
  1.1266 +static const TUint16 CacheBuffAttributes[16]=
  1.1267 +	{0x00,0x00,0x04,0x04,0x0C,0x0C,0x0C,0x0C, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x0C};
  1.1268 +static const TUint8 CacheBuffActual[16]=
  1.1269 +	{FBLK,FBLK,BUFC,BUFC,WTRA,WTRA,WTRA,WTRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WTRA};
  1.1270 +
  1.1271 +#elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
  1.1272 +// Newer definition of C B
  1.1273 +static const TUint16 CacheBuffAttributes[16]=
  1.1274 +	{0x00,0x00,0x04,0x04,0x08,0x08,0x0C,0x0C, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x0C};
  1.1275 +static const TUint8 CacheBuffActual[16]=
  1.1276 +	{FBLK,FBLK,BUFC,BUFC,WTRA,WTRA,WBRA,WBRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBRA};
  1.1277 +
  1.1278 +#elif defined(__CPU_SA1__)
  1.1279 +// Special definition of C B
  1.1280 +static const TUint16 CacheBuffAttributes[16]=
  1.1281 +	{0x00,0x00,0x04,0x04,0x04,0x04,0x0C,0x0C,0x04,0x04,0x08,0x08, UNS, UNS, UNS,0x0C};
  1.1282 +static const TUint8 CacheBuffActual[16]=
  1.1283 +	{FBLK,FBLK,BUFC,BUFC,BUFC,BUFC,WBRA,WBRA,FBLK,FBLK,AWBR,AWBR,FBLK,FBLK,FBLK,WBRA};
  1.1284 +
  1.1285 +#elif defined(__CPU_XSCALE__)
  1.1286 +#ifdef __CPU_XSCALE_MANZANO__
  1.1287 +#ifdef __HAS_EXTERNAL_CACHE__
  1.1288 +// ***MANZANO with L2 cache****** //
  1.1289 +
  1.1290 +// Specifies TEX:CB bits for different L1/L2 cache attributes
  1.1291 +//  ...876543210
  1.1292 +//  ...TEX..CB..
  1.1293 +static const TUint16 CacheBuffAttributes[80]=
  1.1294 +	{									// L1CACHE:
  1.1295 +//  FBLK  BFNC  BUFC   L1UN   WTRA   WTWA   WBRA   WBWA  AWTR AWTW AWBR AWBW UNS UNS UNS MAXC    L2CACHE:
  1.1296 +	0x00, 0x44, 0x40,  0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c,  //NC
  1.1297 +	0x00, 0x44, 0x40,  0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c,  //WTRA
  1.1298 +	0x00, 0x44, 0x40,  0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c,  //WTWA
  1.1299 +	0x00, 0x44, 0x40, 0x140, 0x148, 0x148, 0x14c, 0x14c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x14c,  //WBRA
  1.1300 +	0x00, 0x44, 0x40, 0x140, 0x148, 0x148, 0x14c, 0x14c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x14c,  //WBWA
  1.1301 +   	};
  1.1302 +
  1.1303 +extern TUint MiniCacheConfig();
  1.1304 +// Converts page table attributes (TEX:CB) into the appropriate cache attributes.
  1.1305 +TInt CacheAttributesActual(TUint& cacheL1, TUint& cacheL2, TUint cbatt)
  1.1306 +	{
  1.1307 +	switch (cbatt)
  1.1308 +		{
  1.1309 +		case 0: 	cacheL1 = FBLK; cacheL2 = L2UN; return KErrNone;
  1.1310 +		case 0x40: 	cacheL1 = L1UN; cacheL2 = L2UN; return KErrNone;
  1.1311 +		case 0x44: 	cacheL1 = BFNC; cacheL2 = L2UN; return KErrNone;
  1.1312 +		case 0x48: 	cacheL1 = MiniCacheConfig(); cacheL2 = L2UN; return KErrNone;
  1.1313 +		case 0x108: cacheL1 = WTRA; cacheL2 = L2UN; return KErrNone;
  1.1314 +		case 0x10c: cacheL1 = WBRA; cacheL2 = L2UN; return KErrNone;
  1.1315 +		case 0x140: cacheL1 = L1UN; cacheL2 = WBWA; return KErrNone;
  1.1316 +		case 0x148: cacheL1 = WTRA; cacheL2 = WBWA; return KErrNone;
  1.1317 +		case 0x14c: cacheL1 = WBRA; cacheL2 = WBWA; return KErrNone;
  1.1318 +		}
  1.1319 +	return KErrNotSupported;
  1.1320 +	}
  1.1321 +#else //__HAS_EXTERNAL_CACHE__
  1.1322 +// ***MANZANO without L2 cache****** //
  1.1323 +
  1.1324 +static const TUint16 CacheBuffAttributes[16]=
  1.1325 +//  FBLK BFNC BUFC L1UN WTRA  WTWA  WBRA   WBWA -----------AltCache--------  MAXC 
  1.1326 +   {0x00,0x44,0x40,0x40,0x148,0x148,0x14C,0x14C,SPE,SPE,SPE,SPE,UNS,UNS,UNS,0x14C};
  1.1327 +static const TUint8 CacheBuffActual[16]=
  1.1328 +	{FBLK,BFNC,BUFC,BUFC,WTRA,WTRA,WBRA,WBRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBRA};
  1.1329 +#endif //__HAS_EXTERNAL_CACHE__
  1.1330 +
  1.1331 +#else 
  1.1332 +// ***XSCALE that is not MANZANO (no L2 cache)****** //
  1.1333 +
  1.1334 +// X C B
  1.1335 +static const TUint16 CacheBuffAttributes[16]=
  1.1336 +	{0x00,0x44,0x04,0x04,0x08,0x08,0x0C,0x4C,SPE,SPE,SPE,SPE,UNS,UNS,UNS,0x4C};
  1.1337 +static const TUint8 CacheBuffActual[16]=
  1.1338 +	{FBLK,BFNC,BUFC,BUFC,WTRA,WTRA,WBRA,WBWA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBWA};
  1.1339 +#endif
  1.1340 +
  1.1341 +// ***Common code for all XSCALE cores*** //
  1.1342 +
  1.1343 +extern TUint MiniCacheConfig();
  1.1344 +void ProcessSpecialCacheAttr(TUint& cache, TUint& cbatt)
  1.1345 +	{
  1.1346 +	// If writeback requested, give writeback or writethrough
  1.1347 +	// If writethrough requested, give writethrough or uncached
  1.1348 +	// Give other allocation policy if necessary.
  1.1349 +	TUint mccfg=MiniCacheConfig();
  1.1350 +	__KTRACE_OPT(KMMU,Kern::Printf("MiniCacheConfig: %x",mccfg));
  1.1351 +
  1.1352 +	if (cache<AWBR && mccfg>=AWBR)	// asked for WT but cache is set for WB
  1.1353 +		{
  1.1354 +		cache=BUFC;					// so give uncached, buffered, coalescing
  1.1355 +		#if defined (__CPU_XSCALE_MANZANO__)
  1.1356 +		cbatt=0x40;
  1.1357 +		#else
  1.1358 +		cbatt=0x04;
  1.1359 +		#endif
  1.1360 +		}
  1.1361 +	else
  1.1362 +		{
  1.1363 +		cache=mccfg;	// give whatever minicache is configured for
  1.1364 +		cbatt=0x48;		// minicache attributes
  1.1365 +		}
  1.1366 +	}
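// Illustrative note (editorial addition, not part of the original source): when the
// requested attribute is one of the "alternative" cache policies the table marks as
// SPE, the code above consults MiniCacheConfig(). A write-through request (AWTR/AWTW)
// made while the minicache is configured write-back (AWBR/AWBW) is downgraded to
// uncached-buffered (BUFC) rather than silently given write-back semantics; otherwise
// the page takes whatever policy the minicache is configured for, with cbatt=0x48
// selecting the minicache TEX:CB encoding.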
  1.1367 +#endif
  1.1368 +
  1.1369 +static const TUint8 ActualReadPrivilegeLevel[4]={4,1,4,4};		// RORO,RWNO,RWRO,RWRW
  1.1370 +static const TUint8 ActualWritePrivilegeLevel[4]={0,1,1,4};	// RORO,RWNO,RWRO,RWRW
  1.1371 +
  1.1372 +/** Calculates CB attributes for the page table and sets the actual cache attributes. */
  1.1373 +TInt GetCacheAttr(TUint& cacheL1, TUint& cacheL2, TUint& cbatt)
  1.1374 +	{
  1.1375 +	TInt r = KErrNone;
  1.1376 +	// Scale down L2 to 0-4 : NC, WTRA, WTWA, WBRA, WBWA
  1.1377 +#if defined (__CPU_XSCALE_MANZANO__) && defined(__HAS_EXTERNAL_CACHE__)
  1.1378 +	if      (cacheL2 == MAXC) cacheL2 = WBWA-3;			//	Scale down L2 cache attributes...
  1.1379 +	else if (cacheL2 > WBWA)  return KErrNotSupported;	//	... to 0-4 for...
  1.1380 +	else if (cacheL2 < WTRA)  cacheL2 = L2UN;			//	... L2UN to WBWA 
  1.1381 +	else					  cacheL2-=3;				//
  1.1382 +#else
  1.1383 +	cacheL2 = 0; // Either there is no L2 cache, or the L2 cache attributes are simply a copy of the L1 cache attributes.
  1.1384 +#endif
  1.1385 +
  1.1386 +	// Get CB page attributes. (On some platforms, TEX bits are included as well.)
  1.1387 +	cbatt = CacheBuffAttributes[cacheL1 + (cacheL2<<4)];
  1.1388 +	__KTRACE_OPT(KMMU,Kern::Printf("GetCacheAttr, table returned:%x",cbatt));
  1.1389 +
  1.1390 +#if defined(__CPU_XSCALE__)
  1.1391 +	//Check if altDCache/LLR cache attributes are defined
  1.1392 +	if (cbatt == SPE)
  1.1393 +		{
  1.1394 +		cacheL2 = 0; //Not L2 cached in such case
  1.1395 +		ProcessSpecialCacheAttr(cacheL1,cbatt);
  1.1396 +		__KTRACE_OPT(KMMU,Kern::Printf("GetCacheAttr, spec case returned:%x",cbatt));
  1.1397 +		}
  1.1398 +#endif
  1.1399 +
  1.1400 +	if(cbatt == UNS)
  1.1401 +		return KErrNotSupported;
  1.1402 +	
  1.1403 +	// Got the CB page attributes. Now find out what the actual cache attributes are.
  1.1404 +#if defined(__CPU_XSCALE_MANZANO__) && defined(__HAS_EXTERNAL_CACHE__)
  1.1405 +	r = CacheAttributesActual(cacheL1, cacheL2, cbatt);
  1.1406 +#else
  1.1407 +	cacheL1 = CacheBuffActual[cacheL1];
  1.1408 +#if defined(__HAS_EXTERNAL_CACHE__)
  1.1409 +	cacheL2 = cacheL1;
  1.1410 +#else
  1.1411 +	cacheL2 = 0;
  1.1412 +#endif	
  1.1413 +#endif
  1.1414 +	return r;
  1.1415 +	}
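// Illustrative note (editorial addition, not part of the original source): on Manzano
// with an external cache the 80-entry table above is indexed as
// CacheBuffAttributes[cacheL1 + (cacheL2<<4)] after L2 has been rescaled to 0..4.
// For example, a request for L1 WBRA (6) with L2 WBWA (rescaled to 4) selects row 4,
// column 6, i.e. TEX:CB value 0x14c, and CacheAttributesActual() then reports
// L1=WBRA, L2=WBWA back to the caller.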
  1.1416 +
  1.1417 +TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
  1.1418 +	{
  1.1419 +	__KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
  1.1420 +	TUint read=aMapAttr & EMapAttrReadMask;
  1.1421 +	TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
  1.1422 +	TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
  1.1423 +
  1.1424 +	// if execute access is greater than read, adjust read (since there are no separate execute permissions on ARM)
  1.1425 +	if (exec>read)
  1.1426 +		read=exec;
  1.1427 +	TUint ap;
  1.1428 +	if (write==0)
  1.1429 +		{
  1.1430 +		// read-only
  1.1431 +		if (read>=4)
  1.1432 +			ap=KArmV45PermRORO;			// user and supervisor read-only
  1.1433 +		else
  1.1434 +			ap=KArmV45PermRWNO;			// supervisor r/w user no access (since no RO/NO access is available)
  1.1435 +		}
  1.1436 +	else if (write<4)
  1.1437 +		{
  1.1438 +		// only supervisor can write
  1.1439 +		if (read>=4)
  1.1440 +			ap=KArmV45PermRWRO;			// supervisor r/w user r/o
  1.1441 +		else
  1.1442 +			ap=KArmV45PermRWNO;			// supervisor r/w user no access
  1.1443 +		}
  1.1444 +	else
  1.1445 +		ap=KArmV45PermRWRW;				// supervisor r/w user r/w
  1.1446 +	read=ActualReadPrivilegeLevel[ap];
  1.1447 +	write=ActualWritePrivilegeLevel[ap];
  1.1448 +#ifndef __CPU_USE_MMU_TEX_FIELD
  1.1449 +	ap|=(ap<<2);
  1.1450 +	ap|=(ap<<4);						// replicate permissions in all four subpages
  1.1451 +#endif
  1.1452 +	ap<<=4;								// shift access permissions into correct position for PTE
  1.1453 +	ap|=KArmPteSmallPage;				// add in mandatory small page bits
  1.1454 +
  1.1455 +	// Get CB attributes for the page table and the actual cache attributes
  1.1456 +	TUint cbatt;
  1.1457 +	TUint cacheL1=(aMapAttr & EMapAttrL1CacheMask)>>12;
  1.1458 +	TUint cacheL2=(aMapAttr & EMapAttrL2CacheMask)>>16;
  1.1459 +	TInt r = GetCacheAttr(cacheL1, cacheL2, cbatt);
  1.1460 +
  1.1461 +	if (r==KErrNone)
  1.1462 +		{
  1.1463 +		aPde=PT_PDE(EDomainClient);
  1.1464 +		aPte=ap|cbatt;
  1.1465 +		aMapAttr=read|(write<<4)|(read<<8)|(cacheL1<<12)|(cacheL2<<16);
  1.1466 +		}
  1.1467 +	__KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, r=%d, mapattr=%08x, pde=%08x, pte=%08x",
  1.1468 +								r,aMapAttr,aPde,aPte));
  1.1469 +	return r;
  1.1470 +	}
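// Illustrative note (editorial addition, not part of the original source): a mapping
// attribute requesting user read (read>=4) and user write (write>=4) makes the code
// above choose ap=KArmV45PermRWRW, while a supervisor-only read/write request
// (read<4, write nonzero but <4) yields KArmV45PermRWNO. The chosen AP value is then
// replicated into all four subpage fields of the small-page PTE (unless the CPU uses
// the TEX field), and aMapAttr is rewritten to report the permissions and cache
// policy actually granted.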
  1.1471 +
  1.1472 +void ArmMmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
  1.1473 +//
  1.1474 +// Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
  1.1475 +// Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
  1.1476 +// Assume any page tables required are already assigned.
  1.1477 +// aLinAddr, aPhysAddr, aSize must be page-aligned.
  1.1478 +//
  1.1479 +	{
  1.1480 +	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
  1.1481 +	__KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
  1.1482 +	TPde pt_pde=aPdePerm;
  1.1483 +	TPte sp_pte=aPtePerm;
  1.1484 +	TPde section_pde=SECTION_PDE_FROM_PDEPTE(pt_pde, sp_pte);
  1.1485 +	TPte lp_pte=LP_PTE_FROM_SP_PTE(sp_pte);
  1.1486 +	TLinAddr la=aLinAddr;
  1.1487 +	TPhysAddr pa=aPhysAddr;
  1.1488 +	TInt remain=aSize;
  1.1489 +	while (remain)
  1.1490 +		{
  1.1491 +		if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
  1.1492 +			{
  1.1493 +			// use sections
  1.1494 +			TInt npdes = remain>>KChunkShift;
  1.1495 +			TPde* p_pde = PageDirectory + (la>>KChunkShift);
  1.1496 +			TPde* p_pde_E = p_pde + npdes;
  1.1497 +			TPde pde = pa|section_pde;
  1.1498 +			NKern::LockSystem();
  1.1499 +			for (; p_pde < p_pde_E; pde+=KChunkSize)
  1.1500 +				{
  1.1501 +				__ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
  1.1502 +				__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
  1.1503 +				*p_pde++=pde;
  1.1504 +				}
  1.1505 +			NKern::UnlockSystem();
  1.1506 +			npdes<<=KChunkShift;
  1.1507 +			la+=npdes, pa+=npdes, remain-=npdes;
  1.1508 +			continue;
  1.1509 +			}
  1.1510 +		TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
  1.1511 +		TPte pa_mask=~KPageMask;
  1.1512 +		TPte pte_perm=sp_pte;
  1.1513 +		if (aMapShift>=KLargePageShift && block_size>=KLargePageSize)
  1.1514 +			{
  1.1515 +			if ((la & KLargePageMask)==0)
  1.1516 +				{
  1.1517 +				// use 64K large pages
  1.1518 +				pa_mask=~KLargePageMask;
  1.1519 +				pte_perm=lp_pte;
  1.1520 +				}
  1.1521 +			else
  1.1522 +				block_size = Min(remain, KLargePageSize-(la&KLargePageMask));
  1.1523 +			}
  1.1524 +		block_size &= pa_mask;
  1.1525 +
  1.1526 +		// use pages (large or small)
  1.1527 +		TInt id = PageTableId(la);
  1.1528 +		__ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
  1.1529 +		TPte* p_pte = PageTable(id) + ((la&KChunkMask)>>KPageShift);
  1.1530 +		TPte* p_pte_E = p_pte + (block_size>>KPageShift);
  1.1531 +		SPageTableInfo& ptinfo = iPtInfo[id];
  1.1532 +		NKern::LockSystem();
  1.1533 +		for (; p_pte < p_pte_E; pa+=KPageSize)
  1.1534 +			{
  1.1535 +			__ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
  1.1536 +			TPte pte = (pa & pa_mask) | pte_perm;
  1.1537 +			__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
  1.1538 +			*p_pte++=pte;
  1.1539 +			++ptinfo.iCount;
  1.1540 +			NKern::FlashSystem();
  1.1541 +			}
  1.1542 +		NKern::UnlockSystem();
  1.1543 +		la+=block_size, remain-=block_size;
  1.1544 +		}
  1.1545 +	}
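// Illustrative note (editorial addition, not part of the original source): with
// page-aligned inputs and aMapShift>=KChunkShift, Map() above uses section PDEs for
// any 1MB-aligned portion, large-page PTEs for 64KB-aligned portions (one PTE is
// written per 4KB page, all sharing the 64KB-masked physical base), and small-page
// PTEs for the remainder; the PTE loop flashes the system lock after each write so
// preemption is not held off for the whole region.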
  1.1546 +
  1.1547 +void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize)
  1.1548 +//
  1.1549 +// Remove all mappings in the specified range of addresses.
  1.1550 +// Assumes there are only global mappings involved.
  1.1551 +// Don't free page tables.
  1.1552 +// aLinAddr, aSize must be page-aligned.
  1.1553 +//
  1.1554 +	{
  1.1555 +	__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
  1.1556 +	TLinAddr a=aLinAddr;
  1.1557 +	TLinAddr end=a+aSize;
  1.1558 +	__KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
  1.1559 +	NKern::LockSystem();
  1.1560 +	while(a!=end)
  1.1561 +		{
  1.1562 +		TInt pdeIndex=a>>KChunkShift;
  1.1563 +		TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
  1.1564 +		TInt to_do = Min(TInt(end-a), TInt(next-a))>>KPageShift;
  1.1565 +		__KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
  1.1566 +		TPde pde = PageDirectory[pdeIndex];
  1.1567 +		if ( (pde&KPdePresentMask)==KArmV45PdeSection )
  1.1568 +			{
  1.1569 +			__ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
  1.1570 +			PageDirectory[pdeIndex]=0;
  1.1571 +			InvalidateTLBForPage(a);
  1.1572 +			a=next;
  1.1573 +			NKern::FlashSystem();
  1.1574 +			continue;
  1.1575 +			}
  1.1576 +		TInt ptid = GetPageTableId(a);
  1.1577 +		if (ptid>=0)
  1.1578 +			{
  1.1579 +			SPageTableInfo& ptinfo=iPtInfo[ptid];	// only index iPtInfo once ptid is known to be valid
  1.1580 +			TPte* ppte = PageTable(ptid) + ((a&KChunkMask)>>KPageShift);
  1.1581 +			TPte* ppte_End = ppte + to_do;
  1.1582 +			for (; ppte<ppte_End; ++ppte, a+=KPageSize)
  1.1583 +				{
  1.1584 +				TUint pte_type = *ppte & KPteTypeMask;
  1.1585 +				if (pte_type && pte_type != KArmV45PteLargePage)
  1.1586 +					{
  1.1587 +					--ptinfo.iCount;
  1.1588 +					*ppte=0;
  1.1589 +					InvalidateTLBForPage(a);
  1.1590 +					}
  1.1591 +				else if (pte_type)
  1.1592 +					{
  1.1593 +					__ASSERT_DEBUG(!(a&KLargePageMask), MM::Panic(MM::EUnmapBadAlignment));
  1.1594 +					ptinfo.iCount-=KLargeSmallPageRatio;
  1.1595 +					memclr(ppte, KLargeSmallPageRatio*sizeof(TPte));
  1.1596 +					InvalidateTLBForPage(a);
  1.1597 +					a+=(KLargePageSize-KPageSize);
  1.1598 +					ppte+=(KLargeSmallPageRatio-1);
  1.1599 +					}
  1.1600 +				NKern::FlashSystem();
  1.1601 +				}
  1.1602 +			}
  1.1603 +		else
  1.1604 +			a += (to_do<<KPageShift);
  1.1605 +		}
  1.1606 +	NKern::UnlockSystem();
  1.1607 +	}
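// Illustrative note (editorial addition, not part of the original source): a 64KB
// large page occupies KLargeSmallPageRatio consecutive small-page PTE slots, which
// is why the large-page branch in Unmap() clears that many entries with memclr and
// reduces the page table's use count by the same ratio rather than by one.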
  1.1608 +
  1.1609 +TInt ArmMmu::AllocDomain()
  1.1610 +	{
  1.1611 +	NKern::FMWait(&DomainLock);
  1.1612 +	TInt r=-1;
  1.1613 +	if (Domains)
  1.1614 +		{
  1.1615 +		r=__e32_find_ls1_32(Domains);
  1.1616 +		Domains &= ~(1<<r);
  1.1617 +		}
  1.1618 +	NKern::FMSignal(&DomainLock);
  1.1619 +	return r;
  1.1620 +	}
  1.1621 +
  1.1622 +void ArmMmu::FreeDomain(TInt aDomain)
  1.1623 +	{
  1.1624 +	__ASSERT_ALWAYS(aDomain>=0 && aDomain<ENumDomains, MM::Panic(MM::EFreeInvalidDomain));
  1.1625 +	TUint32 m=1<<aDomain;
  1.1626 +	NKern::FMWait(&DomainLock);
  1.1627 +	__ASSERT_ALWAYS(!(Domains&m), MM::Panic(MM::EFreeDomainNotAllocated));
  1.1628 +	Domains|=m;
  1.1629 +	NKern::FMSignal(&DomainLock);
  1.1630 +	}
  1.1631 +
  1.1632 +void ArmMmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
  1.1633 +	{
  1.1634 +	//map the pages at a temporary address, clear them and unmap
  1.1635 +	__ASSERT_MUTEX(RamAllocatorMutex);
  1.1636 +	while (--aNumPages >= 0)
  1.1637 +		{
  1.1638 +		TPhysAddr pa;
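		// Editorial note: bit 0 of aPageList doubles as a flag - when it is set, aPageList is
		// not a pointer to a list of page addresses but the physical address of a contiguous
		// run, and it is simply advanced by one page size per iteration.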
  1.1639 +		if((TInt)aPageList&1)
  1.1640 +			{
  1.1641 +			pa = (TPhysAddr)aPageList&~1;
  1.1642 +			*(TPhysAddr*)&aPageList += iPageSize;
  1.1643 +			}
  1.1644 +		else
  1.1645 +			pa = *aPageList++;
  1.1646 +		*iTempPte = pa | SP_PTE(KArmV45PermRWNO, KMemAttBuf);
  1.1647 +		__DRAIN_WRITE_BUFFER;
  1.1648 +		InvalidateTLBForPage(iTempAddr);
  1.1649 +		memset((TAny*)iTempAddr, aClearByte, iPageSize);
  1.1650 +		}
  1.1651 +	*iTempPte=0;
  1.1652 +	__DRAIN_WRITE_BUFFER;
  1.1653 +	InvalidateTLBForPage(iTempAddr);
  1.1654 +	}
  1.1655 +
  1.1656 +TLinAddr DoMapTemp(TPhysAddr aPage, TBool aCached, TLinAddr aTempAddr, TPte* aTempPte)
  1.1657 +	{
  1.1658 +	__ASSERT_DEBUG(!*aTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
  1.1659 +	*aTempPte = (aPage&~KPageMask) | SP_PTE(KArmV45PermRWNO, aCached?KDefaultCaching:KMemAttBuf);
  1.1660 +	__DRAIN_WRITE_BUFFER;
  1.1661 +	return aTempAddr;
  1.1662 +	}
  1.1663 +
  1.1664 +/**
  1.1665 +Create a temporary mapping of a physical page.
  1.1666 +The RamAllocatorMutex must be held before this function is called and not released
  1.1667 +until after UnmapTemp has been called.
  1.1668 +
  1.1669 +@param aPage	The physical address of the page to be mapped.
  1.1670 +@param aCached	Whether to map the page cached or not.
  1.1671 +
  1.1672 +@return The linear address of where the page has been mapped.
  1.1673 +*/
  1.1674 +TLinAddr ArmMmu::MapTemp(TPhysAddr aPage, TBool aCached)
  1.1675 +	{
  1.1676 +	__ASSERT_MUTEX(RamAllocatorMutex);
  1.1677 +	return DoMapTemp(aPage, aCached, iTempAddr, iTempPte);
  1.1678 +	}
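// Usage sketch (editorial addition, not part of the original source; physAddr and the
// ArmMmu instance 'm' are placeholders). The caller must already hold RamAllocatorMutex:
//   TLinAddr va = m.MapTemp(physAddr, ETrue);	// map the page with the default caching
//   // ... read or write the page through va ...
//   m.UnmapTemp();								// remove the temporary mapping again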
  1.1679 +
  1.1680 +/**
  1.1681 +Create a temporary mapping of a physical page, distinct from that created by MapTemp.
  1.1682 +The RamAllocatorMutex must be held before this function is called and not released
  1.1683 +until after UnmapSecondTemp has been called.
  1.1684 +
  1.1685 +@param aPage	The physical address of the page to be mapped.
  1.1686 +@param aCached	Whether to map the page cached or not.
  1.1687 +
  1.1688 +@return The linear address of where the page has been mapped.
  1.1689 +*/
  1.1690 +TLinAddr ArmMmu::MapSecondTemp(TPhysAddr aPage, TBool aCached)
  1.1691 +	{
  1.1692 +	__ASSERT_MUTEX(RamAllocatorMutex);
  1.1693 +	return DoMapTemp(aPage, aCached, iSecondTempAddr, iSecondTempPte);
  1.1694 +	}
  1.1695 +
  1.1696 +void DoUnmapTemp(TLinAddr aTempAddr, TPte* aTempPte)
  1.1697 +	{
  1.1698 +	*aTempPte = 0;
  1.1699 +	__DRAIN_WRITE_BUFFER;
  1.1700 +	InvalidateTLBForPage(aTempAddr);
  1.1701 +	}
  1.1702 +
  1.1703 +/**
  1.1704 +Remove the temporary mapping created with MapTemp.
  1.1705 +*/
  1.1706 +void ArmMmu::UnmapTemp()
  1.1707 +	{
  1.1708 +	__ASSERT_MUTEX(RamAllocatorMutex);
  1.1709 +	DoUnmapTemp(iTempAddr, iTempPte);
  1.1710 +	}
  1.1711 +
  1.1712 +/**
  1.1713 +Remove the temporary mapping created with MapSecondTemp.
  1.1714 +*/
  1.1715 +void ArmMmu::UnmapSecondTemp()
  1.1716 +	{
  1.1717 +	__ASSERT_MUTEX(RamAllocatorMutex);
  1.1718 +	DoUnmapTemp(iSecondTempAddr, iSecondTempPte);
  1.1719 +	}
  1.1720 +
  1.1721 +/*
  1.1722 + * Performs cache maintenance on physical cache (VIPT & PIPT) for a page to be reused.
  1.1723 + */
  1.1724 +void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr aAddr)
  1.1725 +	{
  1.1726 +	CacheMaintenance::PageToReusePhysicalCache(aAddr);
  1.1727 +	}
  1.1728 +
  1.1729 +void ArmMmu::CacheMaintenanceOnDecommit(const TPhysAddr* aAddr, TInt aCount)
  1.1730 +	{
  1.1731 +	while (--aCount>=0)
  1.1732 +		ArmMmu::CacheMaintenanceOnDecommit(*aAddr++);
  1.1733 +	}
  1.1734 +
  1.1735 +void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr, TUint)
  1.1736 +	{
  1.1737 +	//Not required for moving memory model
  1.1738 +	__ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
  1.1739 +	}
  1.1740 +
  1.1741 +void ArmMmu::CacheMaintenanceOnPreserve(const TPhysAddr*, TInt, TUint)
  1.1742 +	{
  1.1743 +	//Not required for moving memory model
  1.1744 +	__ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
  1.1745 +	}
  1.1746 +
  1.1747 +void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr , TInt , TLinAddr , TUint )
  1.1748 +	{
  1.1749 +	//Not required for moving memory model
  1.1750 +	__ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
  1.1751 +	}
  1.1752 +
  1.1753 +
  1.1754 +TInt ArmMmu::UnlockRamCachePages(TUint8* volatile & aBase, TInt aStartPage, TInt aNumPages)
  1.1755 +	{
  1.1756 +	NKern::LockSystem();
  1.1757 +	for(;;)
  1.1758 +		{
  1.1759 +		TInt page = ((TLinAddr)aBase>>KPageShift)+aStartPage;
  1.1760 +		TPde* pd = PageDirectory+(page>>(KChunkShift-KPageShift));
  1.1761 +		TPte* pt = SafePageTableFromPde(*pd++);
  1.1762 +		TInt pteIndex = page&(KChunkMask>>KPageShift);
  1.1763 +		if(!pt)
  1.1764 +			{
  1.1765 +			// whole page table has gone, so skip all pages in it...
  1.1766 +			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
  1.1767 +			aNumPages -= pagesInPt;
  1.1768 +			aStartPage += pagesInPt;
  1.1769 +			if(aNumPages>0)
  1.1770 +				continue;
  1.1771 +			NKern::UnlockSystem();
  1.1772 +			return KErrNone;
  1.1773 +			}
  1.1774 +		pt += pteIndex;
  1.1775 +		do
  1.1776 +			{
  1.1777 +			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
  1.1778 +			if(pagesInPt>aNumPages)
  1.1779 +				pagesInPt = aNumPages;
  1.1780 +			if(pagesInPt>KMaxPages)
  1.1781 +				pagesInPt = KMaxPages;
  1.1782 +
  1.1783 +			aNumPages -= pagesInPt;
  1.1784 +			aStartPage += pagesInPt;
  1.1785 +
  1.1786 +			do
  1.1787 +				{
  1.1788 +				TPte pte = *pt++;
  1.1789 +				if(pte!=KPteNotPresentEntry) // pte may be null if page has already been unlocked and reclaimed by system
  1.1790 +					iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
  1.1791 +				}
  1.1792 +			while(--pagesInPt);
  1.1793 +
  1.1794 +			if(!aNumPages)
  1.1795 +				{
  1.1796 +				NKern::UnlockSystem();
  1.1797 +				return KErrNone;
  1.1798 +				}
  1.1799 +
  1.1800 +			pteIndex = aStartPage&(KChunkMask>>KPageShift);
  1.1801 +			}
  1.1802 +		while(!NKern::FlashSystem() && pteIndex);
  1.1803 +		}
  1.1804 +	}
  1.1805 +
  1.1806 +
  1.1807 +TInt ArmMmu::LockRamCachePages(TUint8* volatile & aBase, TInt aStartPage, TInt aNumPages)
  1.1808 +	{
  1.1809 +	NKern::LockSystem();
  1.1810 +	for(;;)
  1.1811 +		{
  1.1812 +		TInt page = ((TLinAddr)aBase>>KPageShift)+aStartPage;
  1.1813 +		TPde* pd = PageDirectory+(page>>(KChunkShift-KPageShift));
  1.1814 +		TPte* pt = SafePageTableFromPde(*pd++);
  1.1815 +		TInt pteIndex = page&(KChunkMask>>KPageShift);
  1.1816 +		if(!pt)
  1.1817 +			goto not_found;
  1.1818 +		pt += pteIndex;
  1.1819 +		do
  1.1820 +			{
  1.1821 +			TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
  1.1822 +			if(pagesInPt>aNumPages)
  1.1823 +				pagesInPt = aNumPages;
  1.1824 +			if(pagesInPt>KMaxPages)
  1.1825 +				pagesInPt = KMaxPages;
  1.1826 +
  1.1827 +			aNumPages -= pagesInPt;
  1.1828 +			aStartPage += pagesInPt;
  1.1829 +
  1.1830 +			do
  1.1831 +				{
  1.1832 +				TPte pte = *pt++;
  1.1833 +				if(pte==KPteNotPresentEntry)
  1.1834 +					goto not_found;
  1.1835 +				if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
  1.1836 +					goto not_found;
  1.1837 +				}
  1.1838 +			while(--pagesInPt);
  1.1839 +
  1.1840 +			if(!aNumPages)
  1.1841 +				{
  1.1842 +				NKern::UnlockSystem();
  1.1843 +				return KErrNone;
  1.1844 +				}
  1.1845 +
  1.1846 +			pteIndex = aStartPage&(KChunkMask>>KPageShift);
  1.1847 +			}
  1.1848 +		while(!NKern::FlashSystem() && pteIndex);
  1.1849 +		}
  1.1850 +not_found:
  1.1851 +	NKern::UnlockSystem();
  1.1852 +	return KErrNotFound;
  1.1853 +	}
  1.1854 +
  1.1855 +
  1.1856 +void RamCache::SetFree(SPageInfo* aPageInfo)
  1.1857 +	{
  1.1858 +	// Make a page free
  1.1859 +	SPageInfo::TType type = aPageInfo->Type();
  1.1860 +	if(type==SPageInfo::EPagedCache)
  1.1861 +		{
  1.1862 +		TInt offset = aPageInfo->Offset()<<KPageShift;
  1.1863 +		DArmPlatChunk* chunk = (DArmPlatChunk*)aPageInfo->Owner();
  1.1864 +		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
  1.1865 +		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
  1.1866 +		TPte* pt = PtePtrFromLinAddr(lin);
  1.1867 +		*pt = KPteNotPresentEntry;
  1.1868 +		__DRAIN_WRITE_BUFFER;
  1.1869 +		InvalidateTLBForPage(lin);
  1.1870 +		((ArmMmu*)iMmu)->SyncCodeMappings();
  1.1871 +		CacheMaintenance::PageToReuseVirtualCache(lin);
  1.1872 +		// actually decommit it from chunk...
  1.1873 +		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
  1.1874 +		SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
  1.1875 +		if(!--ptinfo.iCount)
  1.1876 +			{
  1.1877 +			((ArmMmu*)iMmu)->DoUnassignPageTable(lin);
  1.1878 +			chunk->RemovePde(offset);
  1.1879 +			NKern::UnlockSystem();
  1.1880 +			((ArmMmu*)iMmu)->FreePageTable(ptid);
  1.1881 +			NKern::LockSystem();
  1.1882 +			}
  1.1883 +		}
  1.1884 +	else
  1.1885 +		{
  1.1886 +		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
  1.1887 +		Panic(EUnexpectedPageType);
  1.1888 +		}
  1.1889 +	}
  1.1890 +
  1.1891 +
  1.1892 +//
  1.1893 +// MemModelDemandPaging
  1.1894 +//
  1.1895 +
  1.1896 +class MemModelDemandPaging : public DemandPaging
  1.1897 +	{
  1.1898 +public:
  1.1899 +	// From RamCacheBase
  1.1900 +	virtual void Init2();
  1.1901 +	virtual TInt Init3();
  1.1902 +	virtual TBool PageUnmapped(SPageInfo* aPageInfo);
  1.1903 +	// From DemandPaging
  1.1904 +	virtual TInt Fault(TAny* aExceptionInfo);
  1.1905 +	virtual void SetOld(SPageInfo* aPageInfo);
  1.1906 +	virtual void SetFree(SPageInfo* aPageInfo);
  1.1907 +	virtual void NotifyPageFree(TPhysAddr aPage);
  1.1908 +	virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
  1.1909 +	virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
  1.1910 +	virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
  1.1911 +	virtual TInt PageState(TLinAddr aAddr);
  1.1912 +	virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
  1.1913 +	// New
  1.1914 +	inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
  1.1915 +	void InitRomPaging();
  1.1916 +	void InitCodePaging();
  1.1917 +	TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TBool aInRom);
  1.1918 +	TInt PageIn(TLinAddr aAddress, DMemModelCodeSegMemory* aCodeSegMemory);
  1.1919 +private:
  1.1920 +	TLinAddr GetLinearAddress(SPageInfo* aPageInfo);
  1.1921 +	};
  1.1922 +
  1.1923 +
  1.1924 +//
  1.1925 +// MemModelDemandPaging
  1.1926 +//
  1.1927 +
  1.1928 +
  1.1929 +DemandPaging* DemandPaging::New()
  1.1930 +	{
  1.1931 +	return new MemModelDemandPaging();
  1.1932 +	}
  1.1933 +
  1.1934 +
  1.1935 +void MemModelDemandPaging::Init2()
  1.1936 +	{
  1.1937 +	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2"));
  1.1938 +	DemandPaging::Init2();
  1.1939 +	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init2"));
  1.1940 +	}
  1.1941 +
  1.1942 +
  1.1943 +void MemModelDemandPaging::AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)
  1.1944 +	{
  1.1945 +	aReq.iLoadAddr = iTempPages + aReqId * KPageSize;
  1.1946 +	aReq.iLoadPte = PtePtrFromLinAddr(aReq.iLoadAddr);
  1.1947 +	}
  1.1948 +
  1.1949 +
  1.1950 +TInt MemModelDemandPaging::Init3()
  1.1951 +	{
  1.1952 +	TInt r=DemandPaging::Init3();
  1.1953 +	if(r!=KErrNone)
  1.1954 +		return r;
  1.1955 +
  1.1956 +	// Create a region for mapping pages during page in
  1.1957 +	DPlatChunkHw* chunk;
  1.1958 +	TInt chunkSize = KMaxPagingDevices * KPagingRequestsPerDevice * KPageSize;
  1.1959 +	DPlatChunkHw::DoNew(chunk, KPhysAddrInvalid, chunkSize, EMapAttrSupRw|EMapAttrFullyBlocking);
  1.1960 +	if(!chunk)
  1.1961 +		Panic(EInitialiseFailed);
  1.1962 +	iTempPages = chunk->iLinAddr;
  1.1963 +
  1.1964 +	if(RomPagingRequested())
  1.1965 +		InitRomPaging();
  1.1966 +
  1.1967 +	if (CodePagingRequested())
  1.1968 +		InitCodePaging();
  1.1969 +
  1.1970 +	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init3"));
  1.1971 +	return KErrNone;
  1.1972 +	}
  1.1973 +
  1.1974 +	
  1.1975 +void MemModelDemandPaging::InitRomPaging()
  1.1976 +	{
  1.1977 +	// Make page tables for demand paged part of ROM...
  1.1978 +	__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("MemModelDemandPaging::Init3 making page tables for paged ROM"));
  1.1979 +	TLinAddr lin = iRomPagedLinearBase&~KChunkMask; // first chunk containing paged ROM
  1.1980 +	TLinAddr linEnd = iRomLinearBase+iRomSize;
  1.1981 +	while(lin<linEnd)
  1.1982 +		{
  1.1983 +		// Get a Page Table
  1.1984 +		TInt ptid = Mmu().PageTableId(lin);
  1.1985 +		if(ptid<0)
  1.1986 +			{
  1.1987 +			MmuBase::Wait();
  1.1988 +			ptid = Mmu().AllocPageTable();
  1.1989 +			MmuBase::Signal();
  1.1990 +			__NK_ASSERT_DEBUG(ptid>=0);
  1.1991 +			Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift);
  1.1992 +			}
  1.1993 +
  1.1994 +		// Get new page table addresses
  1.1995 +		TPte* pt = PageTable(ptid);
  1.1996 +		TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt);
  1.1997 +
  1.1998 +		// Pointer to page directory entry
  1.1999 +		TPde* ppde = PageDirectory + (lin>>KChunkShift);
  1.2000 +
  1.2001 +		// Fill in Page Table
  1.2002 +		TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift));
  1.2003 +		pt += (lin&KChunkMask)>>KPageShift;
  1.2004 +		do
  1.2005 +			{
  1.2006 +			if(lin<iRomPagedLinearBase)
  1.2007 +				*pt++ = Mmu().LinearToPhysical(lin) | KRomPtePermissions;
  1.2008 +			else
  1.2009 +				*pt++ = KPteNotPresentEntry;
  1.2010 +			lin += KPageSize;
  1.2011 +			}
  1.2012 +		while(pt<ptEnd && lin<=linEnd);
  1.2013 +		__DRAIN_WRITE_BUFFER;
  1.2014 +
  1.2015 +		// Add new Page Table to the Page Directory
  1.2016 +		TPde newpde = ptPhys | KShadowPdePerm;
  1.2017 +		__KTRACE_OPT2(KPAGING,KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
  1.2018 +		TInt irq=NKern::DisableAllInterrupts();
  1.2019 +		*ppde = newpde;
  1.2020 +		__DRAIN_WRITE_BUFFER;
  1.2021 +		FlushTLBs();
  1.2022 +		NKern::RestoreInterrupts(irq);
  1.2023 +		}
  1.2024 +	}
  1.2025 +
  1.2026 +
  1.2027 +void MemModelDemandPaging::InitCodePaging()
  1.2028 +	{
  1.2029 +	// Initialise code paging info
  1.2030 +	iCodeLinearBase = Mmu().iUserCodeBase;
  1.2031 +	iCodeSize = Mmu().iMaxUserCodeSize;
  1.2032 +	}
  1.2033 +
  1.2034 +/**
  1.2035 +@return ETrue when the unmapped page should be freed, EFalse otherwise
  1.2036 +*/
  1.2037 +TBool MemModelDemandPaging::PageUnmapped(SPageInfo* aPageInfo)
  1.2038 +	{
  1.2039 +	SPageInfo::TType type = aPageInfo->Type();
  1.2040 +
  1.2041 +	if(type!=SPageInfo::EPagedCache && type!=SPageInfo::EPagedCode)
  1.2042 +		{
  1.2043 +		__NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet
  1.2044 +		return ETrue;
  1.2045 +		}
  1.2046 +
  1.2047 +	RemovePage(aPageInfo);
  1.2048 +	AddAsFreePage(aPageInfo);
  1.2049 +	// Return false to stop DMemModelChunk::DoDecommit from freeing this page
  1.2050 +	return EFalse;
  1.2051 +	}
  1.2052 +
  1.2053 +
  1.2054 +TLinAddr MemModelDemandPaging::GetLinearAddress(SPageInfo* aPageInfo)
  1.2055 +	{
  1.2056 +	TInt offset = aPageInfo->Offset()<<KPageShift;
  1.2057 +	SPageInfo::TType type = aPageInfo->Type();
  1.2058 +	__NK_ASSERT_DEBUG(TUint(offset)<(type==SPageInfo::EPagedROM ? iRomSize : iCodeSize));
  1.2059 +	TLinAddr base = type==SPageInfo::EPagedROM ? iRomLinearBase : iCodeLinearBase;
  1.2060 +	return base + offset;
  1.2061 +	}
  1.2062 +
  1.2063 +
  1.2064 +void MemModelDemandPaging::SetOld(SPageInfo* aPageInfo)
  1.2065 +	{
  1.2066 +	__NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedOld);
  1.2067 +	SPageInfo::TType type = aPageInfo->Type();
  1.2068 +
  1.2069 +	if(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode)
  1.2070 +		{
  1.2071 +		START_PAGING_BENCHMARK;
  1.2072 +		
  1.2073 +		// get linear address of page...
  1.2074 +		TLinAddr lin = GetLinearAddress(aPageInfo);
  1.2075 +
  1.2076 +		// make page inaccessible...
  1.2077 +		TPte* pt = PtePtrFromLinAddr(lin);
  1.2078 +		*pt &= ~KPtePresentMask;
  1.2079 +		__DRAIN_WRITE_BUFFER;
  1.2080 +		InvalidateTLBForPage(lin);
  1.2081 +		Mmu().SyncCodeMappings();
  1.2082 +
  1.2083 +		if (type==SPageInfo::EPagedCode)
  1.2084 +			END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld);
  1.2085 +		}
  1.2086 +	else if(type==SPageInfo::EPagedCache)
  1.2087 +		{
  1.2088 +		// leave page accessible
  1.2089 +		}
  1.2090 +	else if(type!=SPageInfo::EPagedFree)
  1.2091 +		{
  1.2092 +		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type()));
  1.2093 +		Panic(EUnexpectedPageType);
  1.2094 +		}
  1.2095 +	NKern::FlashSystem();
  1.2096 +	}
  1.2097 +
  1.2098 +
  1.2099 +void MemModelDemandPaging::SetFree(SPageInfo* aPageInfo)
  1.2100 +	{
  1.2101 +	__ASSERT_SYSTEM_LOCK;
  1.2102 +	__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
  1.2103 +	__NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedDead);
  1.2104 +	if(aPageInfo->LockCount())
  1.2105 +		Panic(ERamPageLocked);
  1.2106 +
  1.2107 +	SPageInfo::TType type = aPageInfo->Type();
  1.2108 +
  1.2109 +	if(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode)
  1.2110 +		{
  1.2111 +		START_PAGING_BENCHMARK;
  1.2112 +		
  1.2113 +		// get linear address of page...
  1.2114 +		TLinAddr lin = GetLinearAddress(aPageInfo);
  1.2115 +
  1.2116 +		// unmap it...
  1.2117 +		TPte* pt = PtePtrFromLinAddr(lin);
  1.2118 +		*pt = KPteNotPresentEntry;
  1.2119 +		__DRAIN_WRITE_BUFFER;
  1.2120 +		InvalidateTLBForPage(lin);
  1.2121 +		Mmu().SyncCodeMappings();
  1.2122 +
  1.2123 +		if (type==SPageInfo::EPagedCode)
  1.2124 +			END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree);
  1.2125 +#ifdef BTRACE_PAGING
  1.2126 +		TInt subCat = type==SPageInfo::EPagedCode ? BTrace::EPagingPageOutCode : BTrace::EPagingPageOutROM;
  1.2127 +		TPhysAddr phys = aPageInfo->PhysAddr();
  1.2128 +		BTraceContext8(BTrace::EPaging,subCat,phys,lin); 
  1.2129 +#endif
  1.2130 +		}
  1.2131 +	else if(type==SPageInfo::EPagedCache)
  1.2132 +		{
  1.2133 +		// get linear address of page...
  1.2134 +		TInt offset = aPageInfo->Offset()<<KPageShift;
  1.2135 +		DArmPlatChunk* chunk = (DArmPlatChunk*)aPageInfo->Owner();
  1.2136 +		__NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
  1.2137 +		TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
  1.2138 +
  1.2139 +		// unmap it...
  1.2140 +		TPte* pt = PtePtrFromLinAddr(lin);
  1.2141 +		*pt = KPteNotPresentEntry;
  1.2142 +		__DRAIN_WRITE_BUFFER;
  1.2143 +		InvalidateTLBForPage(lin);
  1.2144 +		Mmu().SyncCodeMappings();
  1.2145 +		NKern::UnlockSystem();
  1.2146 +		CacheMaintenance::PageToReuseVirtualCache(lin);
  1.2147 +		NKern::LockSystem();
  1.2148 +
  1.2149 +		// actually decommit it from chunk...
  1.2150 +		TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
  1.2151 +		SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
  1.2152 +		if(!--ptinfo.iCount)
  1.2153 +			{
  1.2154 +			((ArmMmu*)iMmu)->DoUnassignPageTable(lin);
  1.2155 +			chunk->RemovePde(offset);
  1.2156 +			NKern::UnlockSystem();
  1.2157 +			((ArmMmu*)iMmu)->FreePageTable(ptid);
  1.2158 +			NKern::LockSystem();
  1.2159 +			}
  1.2160 +
  1.2161 +#ifdef BTRACE_PAGING
  1.2162 +		TPhysAddr phys = aPageInfo->PhysAddr();
  1.2163 +		BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin);
  1.2164 +#endif
  1.2165 +		}
  1.2166 +	else if(type==SPageInfo::EPagedFree)
  1.2167 +		{
  1.2168 +		// already free...
  1.2169 +#ifdef BTRACE_PAGING
  1.2170 +		TPhysAddr phys = aPageInfo->PhysAddr();
  1.2171 +		BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys);
  1.2172 +#endif
  1.2173 +		// external cache may not have been cleaned if PageUnmapped called
  1.2174 +		CacheMaintenance::PageToReusePhysicalCache(aPageInfo->PhysAddr());
  1.2175 +		}
  1.2176 +	else
  1.2177 +		{
  1.2178 +		__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
  1.2179 +		Panic(EUnexpectedPageType);
  1.2180 +		}
  1.2181 +	NKern::FlashSystem();
  1.2182 +	}
  1.2183 +
  1.2184 +
  1.2185 +void MemModelDemandPaging::NotifyPageFree(TPhysAddr aPage)
  1.2186 +	{
  1.2187 +	MM::Panic(MM::EOperationNotImplemented);
  1.2188 +	}
  1.2189 +
  1.2190 +
  1.2191 +/**
  1.2192 +Return True if exception was caused by a memory write access.
  1.2193 +This function can cause a paging exception!
  1.2194 +*/
  1.2195 +static TBool FaultDuringWrite(TArmExcInfo& aExc)
  1.2196 +	{
  1.2197 +	// We can't decode Jazelle instructions to determine whether they faulted during a read.
  1.2198 +	// Therefore we will treat them as writes (which will panic the thread)...
  1.2199 +	if(aExc.iCpsr&(1<<24))
  1.2200 +		return ETrue; 
  1.2201 +
  1.2202 +	if(aExc.iCpsr&(1<<5))
  1.2203 +		{
  1.2204 +		// thumb
  1.2205 +		TUint32 op = *(TUint16*)aExc.iR15;
  1.2206 +		switch((op>>13)&7)
  1.2207 +			{
  1.2208 +		case 2:
  1.2209 +			if((op&0xfa00)==0x5000)
  1.2210 +				return ETrue;			// STR (2) and STRB (2)
  1.2211 +			if((op&0xfe00)==0x5200)
  1.2212 +				return ETrue;			// STRH (2)
  1.2213 +			return EFalse;
  1.2214 +		case 3:
  1.2215 +			return !(op&(1<<11));		// STR (1) and STRB (1)
  1.2216 +		case 4:
  1.2217 +			return !(op&(1<<11));		// STR (3) and STRH (1)
  1.2218 +		case 5:
  1.2219 +			return (op&0xfe00)==0xb400;	// PUSH
  1.2220 +		case 6:
  1.2221 +			return (op&0xf800)==0xc000; // STMIA
  1.2222 +			}
  1.2223 +		}
  1.2224 +	else
  1.2225 +		{
  1.2226 +		// ARM
  1.2227 +		TUint32 op = *(TUint32*)aExc.iR15;
  1.2228 +		if(op<0xf0000000)
  1.2229 +			{
  1.2230 +			switch((op>>25)&7)
  1.2231 +				{
  1.2232 +			case 0:
  1.2233 +				if((op&0xf0)==(0xb0))
  1.2234 +					return !(op&(1<<20));		// load/store halfword
  1.2235 +				else if((op&0x0e1000f0)==(0x000000f0))
  1.2236 +					return ETrue;				// store double
  1.2237 +				else if((op&0x0fb000f0) == 0x010000f0)
  1.2238 +					return ETrue;				// swap instruction
  1.2239 +				else if((op&0x0ff000f0) == 0x01800090)
  1.2240 +					return ETrue;				// strex
  1.2241 +				return EFalse;
  1.2242 +			case 2:
  1.2243 +				return !(op&(1<<20));			 // load/store immediate
  1.2244 +			case 3:
  1.2245 +				if(!(op&0x10))
  1.2246 +					return !(op&(1<<20));		// load/store register offset
  1.2247 +				return EFalse;
  1.2248 +			case 4:
  1.2249 +				return !(op&(1<<20));			// load/store multiple
  1.2250 +			case 6:
  1.2251 +				return !(op&(1<<20));			// coproc store 
  1.2252 +				}
  1.2253 +			}
  1.2254 +		else
  1.2255 +			{
  1.2256 +			switch((op>>25)&7)
  1.2257 +				{
  1.2258 +			case 4:
  1.2259 +				if((op&0xfe5f0f00)==(0xf84d0500))
  1.2260 +					return ETrue;				// SRS instructions
  1.2261 +				return EFalse;
  1.2262 +			case 6:
  1.2263 +				return !(op&(1<<20));			// coproc store (STC2)
  1.2264 +				}
  1.2265 +			}
  1.2266 +		}
  1.2267 +	return EFalse;
  1.2268 +	}
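// Illustrative note (editorial addition, not part of the original source): for the
// Thumb instruction "str r0,[r3]" (opcode 0x6018) the decoder above takes the
// case 3 branch ((op>>13)&7 == 3) and bit 11 is clear, so the fault is reported as a
// write and is not paged in; the matching "ldr r0,[r3]" (0x6818) has bit 11 set and
// is treated as a read.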
  1.2269 +
  1.2270 +
  1.2271 +TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo)
  1.2272 +	{
  1.2273 +	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
  1.2274 +
  1.2275 +	// Get faulting address
  1.2276 +	TLinAddr faultAddress = exc.iFaultAddress;
  1.2277 +	if(exc.iExcCode==EArmExceptionDataAbort)
  1.2278 +		{
  1.2279 +		// Only handle page translation faults
  1.2280 +		if((exc.iFaultStatus&0xf)!=0x7)
  1.2281 +			return KErrUnknown;
  1.2282 +		// Let writes take an exception rather than page in any memory...
  1.2283 +		if(FaultDuringWrite(exc))
  1.2284 +			return KErrUnknown;
  1.2285 +		}
  1.2286 +	else if (exc.iExcCode != EArmExceptionPrefetchAbort)
  1.2287 +		return KErrUnknown; // Not prefetch or data abort
  1.2288 +
  1.2289 +	DThread* thread = TheCurrentThread;
  1.2290 +
  1.2291 +	// check which region the fault occurred in...
  1.2292 +	TBool inRom=ETrue;
  1.2293 +	if(TUint(faultAddress-iRomPagedLinearBase)<iRomPagedSize)
  1.2294 +		{
  1.2295 +		// in ROM
  1.2296 +		}
  1.2297 +	else if(TUint(faultAddress-iCodeLinearBase)<iCodeSize)
  1.2298 +		{
  1.2299 +		// in code
  1.2300 +		inRom=EFalse;
  1.2301 +		}
  1.2302 +	else
  1.2303 +		return KErrUnknown; // Not in pageable region
  1.2304 +
  1.2305 +	// Check if thread holds fast mutex and claim system lock
  1.2306 +	NFastMutex* fm = NKern::HeldFastMutex();
  1.2307 +	TPagingExcTrap* trap = thread->iPagingExcTrap;
  1.2308 +	if(!fm)
  1.2309 +		NKern::LockSystem();
  1.2310 +	else
  1.2311 +		{
  1.2312 +		if(!trap || fm!=&TheScheduler.iLock)
  1.2313 +			{
  1.2314 +			__KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
  1.2315 +			Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes
  1.2316 +			}
  1.2317 +
  1.2318 +		// Current thread already has the system lock...
  1.2319 +		NKern::FlashSystem(); // Let someone else have a go with the system lock.
  1.2320 +		}
  1.2321 +
  1.2322 +	// System locked here
  1.2323 +
  1.2324 +	TInt r = KErrNone;	
  1.2325 +	if(thread->IsRealtime())
  1.2326 +		r = CheckRealtimeThreadFault(thread, aExceptionInfo);
  1.2327 +	if (r == KErrNone)
  1.2328 +		r = HandleFault(exc, faultAddress, inRom);
  1.2329 +	
  1.2330 +	// Restore system lock state
  1.2331 +	if (fm != NKern::HeldFastMutex())
  1.2332 +		{
  1.2333 +		if (fm)
  1.2334 +			NKern::LockSystem();
  1.2335 +		else
  1.2336 +			NKern::UnlockSystem();
  1.2337 +		}
  1.2338 +	
  1.2339 +	// Deal with XTRAP_PAGING
  1.2340 +	if(r == KErrNone && trap)
  1.2341 +		{
  1.2342 +		trap->Exception(1); // Return from exception trap with result '1' (value>0)
  1.2343 +		// code doesn't continue beyond this point.
  1.2344 +		}
  1.2345 +
  1.2346 +	return r;
  1.2347 +	}
  1.2348 +
  1.2349 +
  1.2350 +TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TBool aInRom)
  1.2351 +	{
  1.2352 +	++iEventInfo.iPageFaultCount;
  1.2353 +
  1.2354 +	// get page table entry...
  1.2355 +	TPte* pt = SafePtePtrFromLinAddr(aFaultAddress);
  1.2356 +	if(!pt)
  1.2357 +		return KErrNotFound;
  1.2358 +	TPte pte = *pt;
  1.2359 +
  1.2360 +	// Do what is required to make page accessible...
  1.2361 +
  1.2362 +	if(pte&KPtePresentMask)
  1.2363 +		{
  1.2364 +		// PTE is present, so assume it has already been dealt with
  1.2365 +#ifdef BTRACE_PAGING
  1.2366 +		BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15);
  1.2367 +#endif
  1.2368 +		return KErrNone;
  1.2369 +		}
  1.2370 +
  1.2371 +	if(pte!=KPteNotPresentEntry)
  1.2372 +		{
  1.2373 +		// PTE already has a page
  1.2374 +		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte);
  1.2375 +		if(pageInfo->State()==SPageInfo::EStatePagedDead)
  1.2376 +			{
  1.2377 +			// page currently being unmapped, so do that here...
  1.2378 +			*pt = KPteNotPresentEntry; // Update page table
  1.2379 +			__DRAIN_WRITE_BUFFER;
  1.2380 +			}
  1.2381 +		else
  1.2382 +			{
  1.2383 +			// page just needs making young again...
  1.2384 +			*pt = TPte(pte|KArmPteSmallPage); // Update page table
  1.2385 +			__DRAIN_WRITE_BUFFER;
  1.2386 +			Rejuvenate(pageInfo);
  1.2387 +#ifdef BTRACE_PAGING
  1.2388 +			BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15);
  1.2389 +#endif
  1.2390 +			return KErrNone;
  1.2391 +			}
  1.2392 +		}
  1.2393 +
  1.2394 +	// PTE not present, so page it in...
  1.2395 +	// check if fault in a CodeSeg...
  1.2396 +	DMemModelCodeSegMemory* codeSegMemory = NULL;
  1.2397 +	if (aInRom)
  1.2398 +		NKern::ThreadEnterCS();
  1.2399 +	else
  1.2400 +		{
  1.2401 +		// find CodeSeg...
  1.2402 +		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress);
  1.2403 +		if (!codeSeg)
  1.2404 +			return KErrNotFound;
  1.2405 +		codeSegMemory = codeSeg->Memory();
  1.2406 +		if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged)
  1.2407 +			return KErrNotFound;
  1.2408 +		// open reference on CodeSegMemory
  1.2409 +		NKern::ThreadEnterCS();
  1.2410 +#ifdef _DEBUG
  1.2411 +		TInt r = 
  1.2412 +#endif
  1.2413 +				 codeSegMemory->Open();
  1.2414 +		__NK_ASSERT_DEBUG(r==KErrNone);
  1.2415 +		NKern::FlashSystem();
  1.2416 +		}		
  1.2417 +
  1.2418 +#ifdef BTRACE_PAGING
  1.2419 +	BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15);
  1.2420 +#endif
  1.2421 +	
  1.2422 +	TInt r = PageIn(aFaultAddress,codeSegMemory);
  1.2423 +
  1.2424 +	NKern::UnlockSystem();
  1.2425 +
  1.2426 +	if(codeSegMemory)
  1.2427 +		codeSegMemory->Close();
  1.2428 +
  1.2429 +	NKern::ThreadLeaveCS();
  1.2430 +	
  1.2431 +	return r;
  1.2432 +	}
  1.2433 +
  1.2434 +
  1.2435 +TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, DMemModelCodeSegMemory* aCodeSegMemory)
  1.2436 +	{
  1.2437 +	// Get a request object - this may block until one is available
  1.2438 +	DPagingRequest* req = AcquireRequestObject();
  1.2439 +	
  1.2440 +	// Get page table entry
  1.2441 +	TPte* pt = SafePtePtrFromLinAddr(aAddress);
  1.2442 +
  1.2443 +	// Check page is still required...
  1.2444 +	if(!pt || *pt!=KPteNotPresentEntry)
  1.2445 +		{
  1.2446 +#ifdef BTRACE_PAGING
  1.2447 +		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
  1.2448 +#endif
  1.2449 +		ReleaseRequestObject(req);
  1.2450 +		return pt ? KErrNone : KErrNotFound;
  1.2451 +		}
  1.2452 +
  1.2453 +	++iEventInfo.iPageInReadCount;
  1.2454 +
  1.2455 +	// Get a free page
  1.2456 +	SPageInfo* pageInfo = AllocateNewPage();
  1.2457 +	__NK_ASSERT_DEBUG(pageInfo);
  1.2458 +
  1.2459 +	// Get physical address of free page
  1.2460 +	TPhysAddr phys = pageInfo->PhysAddr();
  1.2461 +	__NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
  1.2462 +
  1.2463 +	// Temporarily map free page
  1.2464 +	TLinAddr loadAddr = req->iLoadAddr;
  1.2465 +	pt = req->iLoadPte;
  1.2466 +	*pt = phys | SP_PTE(KArmV45PermRWNO, KMemAttTempDemandPaging);
  1.2467 +	__DRAIN_WRITE_BUFFER;
  1.2468 +
  1.2469 +	// Read page from backing store
  1.2470 +	aAddress &= ~KPageMask;	
  1.2471 +	NKern::UnlockSystem();
  1.2472 +
  1.2473 +	TInt r;
  1.2474 +	if (!aCodeSegMemory)
  1.2475 +		r = ReadRomPage(req, aAddress);
  1.2476 +	else
  1.2477 +		{
  1.2478 +		r = ReadCodePage(req, aCodeSegMemory, aAddress);
  1.2479 +		if (r == KErrNone)
  1.2480 +			aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress);
  1.2481 +		}
  1.2482 +	if(r!=KErrNone)
  1.2483 +		Panic(EPageInFailed);
  1.2484 +
  1.2485 +	// make caches consistent (uncached memory is used for page loading)
  1.2486 +	__DRAIN_WRITE_BUFFER;
  1.2487 +	NKern::LockSystem();
  1.2488 +
  1.2489 +	// Invalidate temporary mapping
  1.2490 +	*pt = KPteNotPresentEntry;
  1.2491 +	__DRAIN_WRITE_BUFFER;
  1.2492 +	InvalidateTLBForPage(loadAddr);
  1.2493 +
  1.2494 +	ReleaseRequestObject(req);
  1.2495 +	
  1.2496 +	// Get page table entry
  1.2497 +	pt = SafePtePtrFromLinAddr(aAddress);
  1.2498 +
  1.2499 +	// Check page still needs updating
  1.2500 +	TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry;
  1.2501 +	if(notNeeded)
  1.2502 +		{
  1.2503 +		// We don't need the new page after all, so put it on the active list as a free page
  1.2504 +		__KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)"));
  1.2505 +#ifdef BTRACE_PAGING
  1.2506 +		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
  1.2507 +#endif
  1.2508 +		AddAsFreePage(pageInfo);
  1.2509 +		return pt ? KErrNone : KErrNotFound;
  1.2510 +		}
  1.2511 +
  1.2512 +	// Update page info
  1.2513 +	if (!aCodeSegMemory)
  1.2514 +		pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift);
  1.2515 +	else
  1.2516 +		pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift);
  1.2517 +
  1.2518 +	// Map page into final location
  1.2519 +	*pt = phys | (aCodeSegMemory ? KUserCodeLoadPte : KRomPtePermissions);
  1.2520 +	__DRAIN_WRITE_BUFFER;
  1.2521 +#ifdef BTRACE_PAGING
  1.2522 +	TInt subCat = aCodeSegMemory ? BTrace::EPagingPageInCode : BTrace::EPagingPageInROM;
  1.2523 +	BTraceContext8(BTrace::EPaging,subCat,phys,aAddress);
  1.2524 +#endif
  1.2525 +
  1.2526 +	AddAsYoungest(pageInfo);
  1.2527 +	BalanceAges();
  1.2528 +
  1.2529 +	return KErrNone;
  1.2530 +	}
  1.2531 +
  1.2532 +
  1.2533 +inline TUint8 ReadByte(TLinAddr aAddress)
  1.2534 +	{ return *(volatile TUint8*)aAddress; }
  1.2535 +
  1.2536 +
  1.2537 +TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)
  1.2538 +	{
  1.2539 +	XTRAPD(exc,XT_DEFAULT,XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage);));
  1.2540 +	return exc;
  1.2541 +	}
  1.2542 +
  1.2543 +
  1.2544 +TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess)
  1.2545 +	{
  1.2546 +	return Mmu().LinearToPhysical(aPage);
  1.2547 +	}
  1.2548 +
  1.2549 +
  1.2550 +TInt MemModelDemandPaging::PageState(TLinAddr aAddr)
  1.2551 +	{
  1.2552 +	TPte* ptePtr = 0;
  1.2553 +	TPte pte = 0;
  1.2554 +	TInt r = 0;
  1.2555 +	SPageInfo* pageInfo = NULL;
  1.2556 +
  1.2557 +	NKern::LockSystem();
  1.2558 +
  1.2559 +	DMemModelCodeSegMemory* codeSegMemory = 0;
  1.2560 +	if(TUint(aAddr-iRomPagedLinearBase)<iRomPagedSize)
  1.2561 +		r |= EPageStateInRom;
  1.2562 +	else if (TUint(aAddr-iCodeLinearBase)<iCodeSize)
  1.2563 +		{
  1.2564 +		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aAddr);
  1.2565 +		if(codeSeg)
  1.2566 +			codeSegMemory = codeSeg->Memory();
  1.2567 +		if (codeSegMemory)
  1.2568 +			{
  1.2569 +			r |= EPageStateInRamCode;
  1.2570 +			if (codeSegMemory->iIsDemandPaged)
  1.2571 +				r |= EPageStatePaged;
  1.2572 +			}
  1.2573 +		}
  1.2574 +
  1.2575 +	ptePtr = SafePtePtrFromLinAddr(aAddr);
  1.2576 +	if (!ptePtr)
  1.2577 +		goto done;
  1.2578 +	r |= EPageStatePageTablePresent;
  1.2579 +	pte = *ptePtr;
  1.2580 +	if (pte == KPteNotPresentEntry)
  1.2581 +		goto done;		
  1.2582 +	r |= EPageStatePtePresent;
  1.2583 +	if (pte & KPtePresentMask)
  1.2584 +		r |= EPageStatePteValid;
  1.2585 +	
  1.2586 +	pageInfo = SPageInfo::FromPhysAddr(pte);
  1.2587 +	r |= pageInfo->Type();
  1.2588 +	r |= pageInfo->State()<<8;
  1.2589 +
  1.2590 +done:
  1.2591 +	NKern::UnlockSystem();
  1.2592 +	return r;
  1.2593 +	}
  1.2594 +
  1.2595 +
  1.2596 +TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)
  1.2597 +	{
  1.2598 +	// Don't check mutex order for reads from unpaged rom, kernel data area and kernel stack chunk
  1.2599 +	TLinAddr endAddr = aStartAddr + aLength;
  1.2600 +	TLinAddr stackBase = (TLinAddr)MM::SvStackChunk->Base();
  1.2601 +	TLinAddr stackEnd = stackBase + MM::SvStackChunk->iMaxSize;
  1.2602 +	TLinAddr unpagedRomEnd = iRomPagedLinearBase ? iRomPagedLinearBase : iRomLinearBase + iRomSize;
  1.2603 +	TBool rangeInUnpagedRom = aStartAddr >= iRomLinearBase && endAddr <= unpagedRomEnd;
  1.2604 +	TBool rangeInKernelData = aStartAddr >= KKernelDataBase && endAddr <= KKernelDataEnd;
  1.2605 +	TBool rangeInKernelStack = aStartAddr >= stackBase && endAddr <= stackEnd;
  1.2606 +	return !rangeInUnpagedRom && !rangeInKernelData && !rangeInKernelStack;
  1.2607 +	}
  1.2608 +
  1.2609 +
  1.2610 +EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
  1.2611 +	{
  1.2612 +	MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager;
  1.2613 +	if(pager)
  1.2614 +		{
  1.2615 +		ArmMmu& m = pager->Mmu();
  1.2616 +		TLinAddr end = aStart+aSize;
  1.2617 +		
  1.2618 +		if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) ||
  1.2619 +			(aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase))
  1.2620 +			return pager->ReserveLock(aThread,aStart,aSize,*this);
  1.2621 +		}
  1.2622 +		
  1.2623 +	return EFalse;
  1.2624 +	}
  1.2625 +
  1.2626 +void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
  1.2627 +//
  1.2628 +// Mark the page at aOffset in aChunk inaccessible to prevent it being
  1.2629 +// modified while defrag is in progress. Save the required information
  1.2630 +// to allow the fault handler to deal with this.
  1.2631 +// Flush the cache for the page so that it can be aliased elsewhere for
  1.2632 +// copying.
  1.2633 +// Call this with the system unlocked.
  1.2634 +//
  1.2635 +	{
  1.2636 +	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset));
  1.2637 +
  1.2638 +	// Acquire the system lock here for atomic access to aChunk->iBase as moving 
  1.2639 +	// between the home and run addresses (a reschedule) may update aChunk->iBase.
  1.2640 +	NKern::LockSystem();
  1.2641 +
  1.2642 +	iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset;
  1.2643 +	TInt ptid=GetPageTableId(iDisabledAddr);
  1.2644 +	if(ptid<0)
  1.2645 +		Panic(EDefragDisablePageFailed);	
  1.2646 +
  1.2647 +	TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift);
  1.2648 +	TPte pte = *pPte;
  1.2649 +	if ((pte & KPteTypeMask) != KArmPteSmallPage)
  1.2650 +		Panic(EDefragDisablePageFailed);
  1.2651 +
  1.2652 +	iDisabledPte = pPte;
  1.2653 +	iDisabledOldVal = pte;
  1.2654 +
  1.2655 +	*pPte = 0;
  1.2656 +	__DRAIN_WRITE_BUFFER;
  1.2657 +	InvalidateTLBForPage(iDisabledAddr);
  1.2658 +	NKern::UnlockSystem();
  1.2659 +
  1.2660 +	CacheMaintenance::PageToPreserveAndReuseVirtualCache(iDisabledAddr);
  1.2661 +	__DRAIN_WRITE_BUFFER;
  1.2662 +	}
  1.2663 +
  1.2664 +TBool FaultStatusFromLinAddr(TLinAddr aAddr, TBool aKernel, TUint32& aFaultStatus)
  1.2665 +	// Walk the page tables looking for the given linear address. If access
  1.2666 +	// would've caused a fault, return ETrue and fill in aFaultStatus with a
  1.2667 +	// FSR value. Otherwise, return EFalse. Assumes it was a read.
  1.2668 +	{
  1.2669 +	TPde pde = PageDirectory[aAddr>>KChunkShift];
  1.2670 +	TPde pdetype = pde & KPdeTypeMask;
  1.2671 +	if (pdetype == 0)
  1.2672 +		{
  1.2673 +		// section translation fault
  1.2674 +		aFaultStatus = 0x5;
  1.2675 +		return ETrue;
  1.2676 +		}
  1.2677 +
  1.2678 +	TPte pte=0;
  1.2679 +	TInt domain = (pde >> 5) & 0xf;
  1.2680 +	TUint32 dacr = Arm::Dacr();
  1.2681 +	TInt domaccess = (dacr >> (domain<<1)) & 0x3;
  1.2682 +	TInt ispage = (pdetype == KArmV45PdeSection) ? 0 : 0x2;
  1.2683 +
  1.2684 +	if (ispage)
  1.2685 +		{
  1.2686 +		pte = *PtePtrFromLinAddr(aAddr);
  1.2687 +		if ((pte & KPteTypeMask) == 0)
  1.2688 +			{
  1.2689 +			// page translation fault
  1.2690 +			aFaultStatus = 0x7;
  1.2691 +			return ETrue;
  1.2692 +			}
  1.2693 +		}
  1.2694 +
  1.2695 +	if (domaccess == 0x3)
  1.2696 +		{
  1.2697 +		// manager access
  1.2698 +		return EFalse;
  1.2699 +		}
  1.2700 +	if (domaccess == 0)
  1.2701 +		{
  1.2702 +		// domain fault
  1.2703 +		aFaultStatus = 0x9 | ispage;
  1.2704 +		return ETrue;
  1.2705 +		}
  1.2706 +
  1.2707 +	TInt perms;
  1.2708 +	if (ispage)
  1.2709 +		perms = (pte >> 4) & 0x3;
  1.2710 +	else
  1.2711 +		perms = (pde >> 10) & 0x3;
  1.2712 +	
  1.2713 +	if (aKernel || perms != 0x1)
  1.2714 +		return EFalse;
  1.2715 +
  1.2716 +	// permission fault
  1.2717 +	aFaultStatus = 0xd | ispage;
  1.2718 +	return ETrue;
  1.2719 +	}
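// Illustrative note (editorial addition, not part of the original source): the
// aFaultStatus values produced above follow the ARMv5 FSR encoding - 0x5 section
// translation fault, 0x7 page translation fault, 0x9/0xB section/page domain fault,
// 0xD/0xF section/page permission fault - which is what RamDefragFault() below
// compares against when it tests (exc.iFaultStatus & 0xf) != 0x7.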
  1.2720 +
  1.2721 +TInt ArmMmu::RamDefragFault(TAny* aExceptionInfo)
  1.2722 +	{
  1.2723 +	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
  1.2724 +
  1.2725 +	// Get faulting address
  1.2726 +	TLinAddr faultAddress;
  1.2727 +	TBool prefetch=EFalse;
  1.2728 +	if(exc.iExcCode==EArmExceptionDataAbort)
  1.2729 +		{
  1.2730 +		// Only handle page translation faults
  1.2731 +		if((exc.iFaultStatus & 0xf) != 0x7)
  1.2732 +			return KErrUnknown;
  1.2733 +		faultAddress = exc.iFaultAddress;
  1.2734 +		}
  1.2735 +	else if(exc.iExcCode==EArmExceptionPrefetchAbort)
  1.2736 +		{
  1.2737 +		prefetch = ETrue;
  1.2738 +		faultAddress = exc.iR15;
  1.2739 +		}
  1.2740 +	else
  1.2741 +		return KErrUnknown; // Not data/prefetch abort
  1.2742 +
  1.2743 +	TBool kernelmode = (exc.iCpsr&EMaskMode) != EUserMode;
  1.2744 +
  1.2745 +	// Take system lock if not already held
  1.2746 +	NFastMutex* fm = NKern::HeldFastMutex();
  1.2747 +	if(!fm)
  1.2748 +		NKern::LockSystem();
  1.2749 +	else if(fm!=&TheScheduler.iLock)
  1.2750 +		{
  1.2751 +		__KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! %x (%O pc=%x)",faultAddress,TheCurrentThread,exc.iR15));
  1.2752 +		Panic(EDefragFaultWhilstFMHeld); // Not allowed to hold mutexes
  1.2753 +		}
  1.2754 +
  1.2755 +	TInt r = KErrUnknown;
  1.2756 +
  1.2757 +	// check if the mapping of the page has already been restored and retry if so
  1.2758 +	if (prefetch)
  1.2759 +		{
  1.2760 +		TUint32 fsr;
  1.2761 +		if (!FaultStatusFromLinAddr(faultAddress, kernelmode, fsr))
  1.2762 +			{
  1.2763 +			r = KErrNone;
  1.2764 +			goto leave;
  1.2765 +			}
  1.2766 +		}
  1.2767 +	else
  1.2768 +		{
  1.2769 +		TPte* pt = SafePtePtrFromLinAddr(faultAddress);
  1.2770 +		if(!pt)
  1.2771 +			{
  1.2772 +			r = KErrNotFound;
  1.2773 +			goto leave;
  1.2774 +			}
  1.2775 +		if ((*pt & 0x3) != 0)
  1.2776 +			{
  1.2777 +			r = KErrNone;
  1.2778 +			goto leave;
  1.2779 +			}
  1.2780 +		}
  1.2781 +
  1.2782 +	// check if the fault occurred in the page we are moving
  1.2783 +	if (iDisabledPte && TUint(faultAddress - iDisabledAddr) < TUint(KPageSize))
  1.2784 +		{
  1.2785 +		// restore access to the page
  1.2786 +		*iDisabledPte = iDisabledOldVal;
  1.2787 +		__DRAIN_WRITE_BUFFER;
  1.2788 +		InvalidateTLBForPage(iDisabledAddr);
  1.2789 +		iDisabledAddr = 0;
  1.2790 +		iDisabledPte = NULL;
  1.2791 +		iDisabledOldVal = 0;
  1.2792 +		r = KErrNone;
  1.2793 +		}
  1.2794 +
  1.2795 +leave:
  1.2796 +	// Restore system lock state
  1.2797 +	if (!fm)
  1.2798 +		NKern::UnlockSystem();
  1.2799 +	
  1.2800 +	return r;
  1.2801 +	}