1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/multiple/arm/xmmu.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,3905 @@
1.4 +// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\memmodel\epoc\multiple\arm\xmmu.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +#include "arm_mem.h"
1.22 +#include <mmubase.inl>
1.23 +#include <ramcache.h>
1.24 +#include <demand_paging.h>
1.25 +#include "execs.h"
1.26 +#include <defrag.h>
1.27 +#include "cache_maintenance.inl"
1.28 +
1.29 +#undef __MMU_MACHINE_CODED__
1.30 +
1.31 +// SECTION_PDE(perm, attr, domain, execute, global)
1.32 +// PT_PDE(domain)
1.33 +// LP_PTE(perm, attr, execute, global)
1.34 +// SP_PTE(perm, attr, execute, global)
1.35 +
1.36 +const TInt KPageColourShift=2;
1.37 +const TInt KPageColourCount=(1<<KPageColourShift);
1.38 +const TInt KPageColourMask=KPageColourCount-1;
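+// Note (explanatory sketch, assuming 4KB pages): a mapping's page colour is
+// (linAddr >> KPageShift) & KPageColourMask, i.e. address bits 13:12, so virtual aliases
+// of the same physical page must agree in those bits to avoid VIPT cache aliasing.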
1.39 +
1.40 +
1.41 +const TPde KPdPdePerm=PT_PDE(0);
1.42 +const TPde KPtPdePerm=PT_PDE(0);
1.43 +const TPde KShadowPdePerm=PT_PDE(0);
1.44 +
1.45 +#if defined(__CPU_MEMORY_TYPE_REMAPPING)
1.46 +// ARM1176, ARM11MPCore, ARMv7 and later
1.47 +// __CPU_MEMORY_TYPE_REMAPPING means that only three bits (TEX0:C:B) in the page table define
1.48 +// memory attributes. The kernel runs with a limited set of memory types: strongly ordered,
1.49 +// device, normal uncached and normal WBWA. Due to the lack of a write-through mode, page tables are
1.50 +// write-back, which means that the cache has to be cleaned on every page/directory table update.
1.51 +const TPte KPdPtePerm= SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
1.52 +const TPte KPtPtePerm= SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
1.53 +const TPte KPtInfoPtePerm= SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
1.54 +const TPte KRomPtePerm= SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
1.55 +const TPte KShadowPtePerm= SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
1.56 +const TPde KRomSectionPermissions= SECTION_PDE(KArmV6PermRORO, EMemAttNormalCached, 0, 1, 1);
1.57 +const TPte KUserCodeLoadPte= SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 0);
1.58 +const TPte KUserCodeRunPte= SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0);
1.59 +const TPte KGlobalCodeRunPte= SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
1.60 +const TPte KKernelCodeRunPte= SP_PTE(KArmV6PermRONO, EMemAttNormalCached, 1, 1);
1.61 +
1.62 +const TInt KNormalUncachedAttr = EMemAttNormalUncached;
1.63 +const TInt KNormalCachedAttr = EMemAttNormalCached;
1.64 +
1.65 +#else
1.66 +
1.67 +//ARM1136
1.68 +const TPte KPtInfoPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
1.69 +#if defined (__CPU_WriteThroughDisabled)
1.70 +const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
1.71 +const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
1.72 +const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 1, 1);
1.73 +const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
1.74 +const TPde KRomSectionPermissions = SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 0, 1, 1);
1.75 +const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 0);
1.76 +const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0);
1.77 +const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
1.78 +const TInt KKernelCodeRunPteAttr = KArmV6MemAttWBWAWBWA;
1.79 +#else
1.80 +const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
1.81 +const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
1.82 +const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 1, 1);
1.83 +const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
1.84 +const TPde KRomSectionPermissions = SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 0, 1, 1);
1.85 +const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 0);
1.86 +const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0);
1.87 +const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
1.88 +const TInt KKernelCodeRunPteAttr = KArmV6MemAttWTRAWTRA;
1.89 +#endif
1.90 +
1.91 +
1.92 +#if defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.93 +const TInt KKernelCodeRunPtePerm = KArmV6PermRONO;
1.94 +#else
1.95 +const TInt KKernelCodeRunPtePerm = KArmV6PermRORO;
1.96 +#endif
1.97 +const TPte KKernelCodeRunPte=SP_PTE(KKernelCodeRunPtePerm, KKernelCodeRunPteAttr, 1, 1);
1.98 +
1.99 +const TInt KNormalUncachedAttr = KArmV6MemAttNCNC;
1.100 +const TInt KNormalCachedAttr = KArmV6MemAttWBWAWBWA;
1.101 +
1.102 +#endif
1.103 +
1.104 +
1.105 +extern void __FlushBtb();
1.106 +
1.107 +#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.108 +extern void remove_and_invalidate_page(TPte* aPte, TLinAddr aAddr, TInt aAsid);
1.109 +extern void remove_and_invalidate_section(TPde* aPde, TLinAddr aAddr, TInt aAsid);
1.110 +#endif
1.111 +
1.112 +
1.113 +LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
1.114 + {
1.115 +#if defined(__CPU_MEMORY_TYPE_REMAPPING)
1.116 +// ARM1176, ARM11 MPCore, ARMv7 and later
1.117 + SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1), // EKernelData
1.118 + SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1), // EKernelStack
1.119 + SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1), // EKernelCode - loading
1.120 + SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1), // EDll (used for global code) - loading
1.121 + SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0), // EUserCode - run
1.122 + SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 1), // ERamDrive
1.123 + SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // EUserData
1.124 + SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // EDllData
1.125 + SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 1, 0), // EUserSelfModCode
1.126 + SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // ESharedKernelSingle
1.127 + SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // ESharedKernelMultiple
1.128 + SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // ESharedIo
1.129 + SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1), // ESharedKernelMirror
1.130 + SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1), // EKernelMessage
1.131 +#else
1.132 + SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1), // EKernelData
1.133 + SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1), // EKernelStack
1.134 +#if defined (__CPU_WriteThroughDisabled)
1.135 + SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1), // EKernelCode - loading
1.136 + SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1), // EDll (used for global code) - loading
1.137 + SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0), // EUserCode - run
1.138 +#else
1.139 + SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1), // EKernelCode - loading
1.140 + SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1), // EDll (used for global code) - loading
1.141 + SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0), // EUserCode - run
1.142 +#endif
1.143 + SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 1), // ERamDrive
1.144 + SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // EUserData
1.145 + SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // EDllData
1.146 + SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 1, 0), // EUserSelfModCode
1.147 + SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // ESharedKernelSingle
1.148 + SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // ESharedKernelMultiple
1.149 + SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // ESharedIo
1.150 + SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1), // ESharedKernelMirror
1.151 + SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1), // EKernelMessage
1.152 +#endif
1.153 + };
1.154 +
1.155 +// The domain for each chunk is selected according to its type.
1.156 +// The RamDrive lives in a separate domain, to minimise the risk
1.157 +// of accidental access and corruption. User chunks may also be
1.158 +// located in a separate domain (15) in DEBUG builds.
1.159 +LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
1.160 + {
1.161 + PT_PDE(0), // EKernelData
1.162 + PT_PDE(0), // EKernelStack
1.163 + PT_PDE(0), // EKernelCode
1.164 + PT_PDE(0), // EDll
1.165 + PT_PDE(USER_MEMORY_DOMAIN), // EUserCode
1.166 + PT_PDE(1), // ERamDrive
1.167 + PT_PDE(USER_MEMORY_DOMAIN), // EUserData
1.168 + PT_PDE(USER_MEMORY_DOMAIN), // EDllData
1.169 + PT_PDE(USER_MEMORY_DOMAIN), // EUserSelfModCode
1.170 + PT_PDE(USER_MEMORY_DOMAIN), // ESharedKernelSingle
1.171 + PT_PDE(USER_MEMORY_DOMAIN), // ESharedKernelMultiple
1.172 + PT_PDE(0), // ESharedIo
1.173 + PT_PDE(0), // ESharedKernelMirror
1.174 + PT_PDE(0), // EKernelMessage
1.175 + };
1.176 +
1.177 +// Inline functions for simple transformations
1.178 +inline TLinAddr PageTableLinAddr(TInt aId)
1.179 + {
1.180 + return (KPageTableBase+(aId<<KPageTableShift));
1.181 + }
1.182 +
1.183 +inline TPte* PageTable(TInt aId)
1.184 + {
1.185 + return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
1.186 + }
1.187 +
1.188 +inline TPte* PageTableEntry(TInt aId, TLinAddr aAddress)
1.189 + {
1.190 + return PageTable(aId) + ((aAddress >> KPageShift) & (KChunkMask >> KPageShift));
1.191 + }
1.192 +
1.193 +inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
1.194 + {
1.195 + return (KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
1.196 + }
1.197 +
1.198 +inline TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
1.199 + {
1.200 + return PageDirectory(aOsAsid) + (aAddress >> KChunkShift);
1.201 + }
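+// Worked example (assuming KPageShift==12 and KChunkShift==20, i.e. 4KB pages and 1MB PDE
+// coverage): for aAddress==0x00403123, PageDirectoryEntry() selects PDE index 0x004 and
+// PageTableEntry() selects PTE index 0x03 within that table; the remaining 0x123 is the
+// byte offset within the page.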
1.202 +
1.203 +extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/, TInt /*aAsid*/);
1.204 +extern void FlushTLBs();
1.205 +extern TUint32 TTCR();
1.206 +
1.207 +TPte* SafePageTableFromPde(TPde aPde)
1.208 + {
1.209 + if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
1.210 + {
1.211 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
1.212 + if(pi)
1.213 + {
1.214 + TInt id = (pi->Offset()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
1.215 + return PageTable(id);
1.216 + }
1.217 + }
1.218 + return 0;
1.219 + }
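+// Explanatory note (assuming 1KB page tables packed four to a 4KB RAM page, KPtClusterShift==2):
+// the id computed above combines the SPageInfo offset of the RAM page with the sub-page index
+// taken from the PDE, so each 1KB table within a shared RAM page gets a distinct id.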
1.220 +
1.221 +TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
1.222 + {
1.223 + if ((TInt)(aAddress>>KChunkShift)>=(TheMmu.iLocalPdSize>>2))
1.224 + aOsAsid = 0;
1.225 + TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
1.226 + TPte* pt = SafePageTableFromPde(pde);
1.227 + if(pt)
1.228 + pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
1.229 + return pt;
1.230 + }
1.231 +
1.232 +#ifndef _DEBUG
1.233 +// inline in UREL builds...
1.234 +#ifdef __ARMCC__
1.235 + __forceinline /* RVCT ignores normal inline qualifier :-( */
1.236 +#else
1.237 + inline
1.238 +#endif
1.239 +#endif
1.240 +TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
1.241 + {
1.242 + // this function only works for process local memory addresses, or for kernel memory (asid==0).
1.243 + __NK_ASSERT_DEBUG(aOsAsid==0 || (TInt)(aAddress>>KChunkShift)<(TheMmu.iLocalPdSize>>2));
1.244 + TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
1.245 + SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
1.246 + TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
1.247 + TPte* pt = PageTable(id);
1.248 + pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
1.249 + return pt;
1.250 + }
1.251 +
1.252 +
1.253 +TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
1.254 + {
1.255 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
1.256 + TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr,aOsAsid);
1.257 + TPhysAddr nextPhys = physStart&~KPageMask;
1.258 +
1.259 + TUint32* pageList = aPhysicalPageList;
1.260 +
1.261 + TInt pageIndex = aLinAddr>>KPageShift;
1.262 + TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
1.263 + TInt pdeIndex = aLinAddr>>KChunkShift;
1.264 + TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1))
1.265 + ? PageDirectory(aOsAsid)
1.266 + : ::InitPageDirectory;
1.267 + pdePtr += pdeIndex;
1.268 + while(pagesLeft)
1.269 + {
1.270 + pageIndex &= KChunkMask>>KPageShift;
1.271 + TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
1.272 + if(pagesLeftInChunk>pagesLeft)
1.273 + pagesLeftInChunk = pagesLeft;
1.274 + pagesLeft -= pagesLeftInChunk;
1.275 +
1.276 + TPhysAddr phys;
1.277 + TPde pde = *pdePtr++;
1.278 + TUint pdeType = pde&KPdeTypeMask;
1.279 + if(pdeType==KArmV6PdeSection)
1.280 + {
1.281 + phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
1.282 + __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
1.283 + TInt n=pagesLeftInChunk;
1.284 + phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
1.285 + if(pageList)
1.286 + {
1.287 + TUint32* pageEnd = pageList+n;
1.288 + do
1.289 + {
1.290 + *pageList++ = phys;
1.291 + phys+=KPageSize;
1.292 + }
1.293 + while(pageList<pageEnd);
1.294 + }
1.295 + }
1.296 + else
1.297 + {
1.298 + TPte* pt = SafePageTableFromPde(pde);
1.299 + if(!pt)
1.300 + {
1.301 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical missing page table: PDE=%8x",pde));
1.302 + return KErrNotFound;
1.303 + }
1.304 + pt += pageIndex;
1.305 + for(;;)
1.306 + {
1.307 + TPte pte = *pt++;
1.308 + TUint pte_type = pte & KPteTypeMask;
1.309 + if (pte_type >= KArmV6PteSmallPage)
1.310 + {
1.311 + phys = (pte & KPteSmallPageAddrMask);
1.312 + __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
1.313 + phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
1.314 + if(pageList)
1.315 + *pageList++ = phys;
1.316 + if(--pagesLeftInChunk)
1.317 + continue;
1.318 + break;
1.319 + }
1.320 + if (pte_type == KArmV6PteLargePage)
1.321 + {
1.322 + --pt; // back up ptr
1.323 + TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
1.324 + phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
1.325 + __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
1.326 + TInt n=KLargeSmallPageRatio-pageOffset;
1.327 + if(n>pagesLeftInChunk)
1.328 + n = pagesLeftInChunk;
1.329 + phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
1.330 + if(pageList)
1.331 + {
1.332 + TUint32* pageEnd = pageList+n;
1.333 + do
1.334 + {
1.335 + *pageList++ = phys;
1.336 + phys+=KPageSize;
1.337 + }
1.338 + while(pageList<pageEnd);
1.339 + }
1.340 + pt += n;
1.341 + if(pagesLeftInChunk-=n)
1.342 + continue;
1.343 + break;
1.344 + }
1.345 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical bad PTE %8x",pte));
1.346 + return KErrNotFound;
1.347 + }
1.348 + }
1.349 + if(!pageList && nextPhys==KPhysAddrInvalid)
1.350 + {
1.351 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical not contiguous"));
1.352 + return KErrNotFound;
1.353 + }
1.354 + pageIndex = 0;
1.355 + }
1.356 +
1.357 + if(nextPhys==KPhysAddrInvalid)
1.358 + {
1.359 + // Memory is discontiguous...
1.360 + aPhysicalAddress = KPhysAddrInvalid;
1.361 + return 1;
1.362 + }
1.363 + else
1.364 + {
1.365 + // Memory is contiguous...
1.366 + aPhysicalAddress = physStart;
1.367 + return KErrNone;
1.368 + }
1.369 + }
1.370 +
1.371 +TInt ArmMmu::PreparePagesForDMA(TLinAddr aLinAddr, TInt aSize, TInt aOsAsid, TPhysAddr* aPhysicalPageList)
1.372 +//Returns the list of physical pages belonging to the specified memory space.
1.373 +//Checks that these pages belong to a chunk marked as being trusted.
1.374 +//Locks these pages so they cannot be moved by e.g. RAM defragmentation.
1.375 + {
1.376 + SPageInfo* pi = NULL;
1.377 + DChunk* chunk = NULL;
1.378 + TInt err = KErrNone;
1.379 +
1.380 + __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
1.381 +
1.382 + TUint32* pageList = aPhysicalPageList;
1.383 + TInt pagesInList = 0; //The number of pages we put in the list so far
1.384 +
1.385 + TInt pageIndex = (aLinAddr & KChunkMask) >> KPageShift; // Index of the page within the section
1.386 + TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
1.387 +
1.388 + TInt pdeIndex = aLinAddr>>KChunkShift;
1.389 +
1.390 +
1.391 + MmuBase::Wait(); // RamAlloc Mutex for accessing page/directory tables.
1.392 + NKern::LockSystem();// System lock for accessing SPageInfo objects.
1.393 +
1.394 + TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid) : ::InitPageDirectory;
1.395 + pdePtr += pdeIndex;//This points to the first PDE
1.396 +
1.397 + while(pagesLeft)
1.398 + {
1.399 + TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
1.400 + if(pagesLeftInChunk>pagesLeft)
1.401 + pagesLeftInChunk = pagesLeft;
1.402 +
1.403 + pagesLeft -= pagesLeftInChunk;
1.404 +
1.405 + TPte* pt = SafePageTableFromPde(*pdePtr++);
1.406 + if(!pt) { err = KErrNotFound; goto fail; }// Cannot get page table.
1.407 +
1.408 + pt += pageIndex;
1.409 +
1.410 + for(;pagesLeftInChunk--;)
1.411 + {
1.412 + TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
1.413 + pi = SPageInfo::SafeFromPhysAddr(phys);
1.414 + if(!pi) { err = KErrNotFound; goto fail; }// Invalid address
1.415 +
1.416 + __KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
1.417 + if (chunk==NULL)
1.418 + {//This is the first page. Check 'trusted' bit.
1.419 + if (pi->Type()!= SPageInfo::EChunk)
1.420 + { err = KErrAccessDenied; goto fail; }// The first page does not belong to a chunk.
1.421 +
1.422 + chunk = (DChunk*)pi->Owner();
1.423 + if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
1.424 + { err = KErrAccessDenied; goto fail; }// Not a trusted chunk
1.425 + }
1.426 + pi->Lock();
1.427 +
1.428 + *pageList++ = phys;
1.429 + if ( (++pagesInList&127) == 0) //release system lock temporarily on every 512K
1.430 + NKern::FlashSystem();
1.431 + }
1.432 + pageIndex = 0;
1.433 + }
1.434 +
1.435 + if (pi->Type()!= SPageInfo::EChunk)
1.436 + { err = KErrAccessDenied; goto fail; }// The last page does not belong to a chunk.
1.437 +
1.438 + if (chunk && (chunk != (DChunk*)pi->Owner()))
1.439 + { err = KErrArgument; goto fail; }//The first & the last page do not belong to the same chunk.
1.440 +
1.441 + NKern::UnlockSystem();
1.442 + MmuBase::Signal();
1.443 + return KErrNone;
1.444 +
1.445 +fail:
1.446 + __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
1.447 + NKern::UnlockSystem();
1.448 + MmuBase::Signal();
1.449 + ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
1.450 + return err;
1.451 + }
1.452 +
1.453 +TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
1.454 +// Unlocks physical pages.
1.455 +// @param aPhysicalPageList - points to the list of physical pages that should be released.
1.456 +// @param aPageCount - the number of physical pages in the list.
1.457 + {
1.458 + NKern::LockSystem();
1.459 + __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));
1.460 +
1.461 + while (aPageCount--)
1.462 + {
1.463 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
1.464 + if(!pi)
1.465 + {
1.466 + NKern::UnlockSystem();
1.467 + return KErrArgument;
1.468 + }
1.469 + __KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
1.470 + pi->Unlock();
1.471 + }
1.472 + NKern::UnlockSystem();
1.473 + return KErrNone;
1.474 + }
1.475 +
1.476 +TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
1.477 +//
1.478 +// Find the physical address corresponding to a given linear address in a specified OS
1.479 +// address space. Call with system locked.
1.480 +//
1.481 + {
1.482 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
1.483 + TInt pdeIndex=aLinAddr>>KChunkShift;
1.484 + TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
1.485 + TPhysAddr pa=KPhysAddrInvalid;
1.486 + if ((pde&KPdePresentMask)==KArmV6PdePageTable)
1.487 + {
1.488 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
1.489 + if (pi)
1.490 + {
1.491 + TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
1.492 + TPte* pPte=PageTable(id);
1.493 + TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift];
1.494 + if (pte & KArmV6PteSmallPage)
1.495 + {
1.496 + pa=(pte&KPteSmallPageAddrMask)+(aLinAddr&~KPteSmallPageAddrMask);
1.497 + __KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
1.498 + }
1.499 + else if ((pte & KArmV6PteTypeMask) == KArmV6PteLargePage)
1.500 + {
1.501 + pa=(pte&KPteLargePageAddrMask)+(aLinAddr&~KPteLargePageAddrMask);
1.502 + __KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
1.503 + }
1.504 + }
1.505 + }
1.506 + else if ((pde&KPdePresentMask)==KArmV6PdeSection)
1.507 + {
1.508 + pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
1.509 + __KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
1.510 + }
1.511 + return pa;
1.512 + }
1.513 +
1.514 +// permission table indexed by XN:APX:AP1:AP0
1.515 +static const TInt PermissionLookup[16]=
1.516 + { //XN:APX:AP1:AP0
1.517 + 0, //0 0 0 0 no access
1.518 + EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup, //0 0 0 1 RW sup execute
1.519 + EMapAttrWriteSup|EMapAttrReadUser|EMapAttrExecUser, //0 0 1 0 supRW usrR execute
1.520 + EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser,//0 0 1 1 supRW usrRW execute
1.521 + 0, //0 1 0 0 reserved
1.522 + EMapAttrReadSup|EMapAttrExecSup, //0 1 0 1 supR execute
1.523 + EMapAttrReadUser|EMapAttrExecUser, //0 1 1 0 supR usrR execute
1.524 + 0, //0 1 1 1 reserved
1.525 + 0, //1 0 0 0 no access
1.526 + EMapAttrWriteSup|EMapAttrReadSup, //1 0 0 1 RW sup
1.527 + EMapAttrWriteSup|EMapAttrReadUser, //1 0 1 0 supRW usrR
1.528 + EMapAttrWriteUser|EMapAttrReadUser, //1 0 1 1 supRW usrRW
1.529 + 0, //1 1 0 0 reserved
1.530 + EMapAttrReadSup, //1 1 0 1 supR
1.531 + EMapAttrReadUser, //1 1 1 0 supR usrR
1.532 + EMapAttrReadUser, //1 1 1 1 supR usrR
1.533 + };
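+// Example reading of the table: a PTE with XN=0, APX=0, AP1:AP0=11 indexes entry 3 above,
+// i.e. supervisor RW, user RW, executable.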
1.534 +
1.535 +TInt ArmMmu::PageTableId(TLinAddr aAddr, TInt aOsAsid)
1.536 + {
1.537 + TInt id=-1;
1.538 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::PageTableId(%08x,%d)",aAddr,aOsAsid));
1.539 + TInt pdeIndex=aAddr>>KChunkShift;
1.540 + TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
1.541 + if ((pde&KArmV6PdeTypeMask)==KArmV6PdePageTable)
1.542 + {
1.543 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
1.544 + if (pi)
1.545 + id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
1.546 + }
1.547 + __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
1.548 + return id;
1.549 + }
1.550 +
1.551 +// Used only during boot for recovery of RAM drive
1.552 +TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
1.553 + {
1.554 + TInt id=KErrNotFound;
1.555 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr));
1.556 + TPde* kpd=(TPde*)KPageDirectoryBase; // kernel page directory
1.557 + TInt pdeIndex=aAddr>>KChunkShift;
1.558 + TPde pde = kpd[pdeIndex];
1.559 + if ((pde & KArmV6PdeTypeMask) == KArmV6PdePageTable)
1.560 + {
1.561 + aPtPhys = pde & KPdePageTableAddrMask;
1.562 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
1.563 + if (pi)
1.564 + {
1.565 + SPageInfo::TType type = pi->Type();
1.566 + if (type == SPageInfo::EPageTable)
1.567 + id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
1.568 + else if (type == SPageInfo::EUnused)
1.569 + id = KErrUnknown;
1.570 + }
1.571 + }
1.572 + __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
1.573 + return id;
1.574 + }
1.575 +
1.576 +TBool ArmMmu::PteIsPresent(TPte aPte)
1.577 + {
1.578 + return aPte & KArmV6PteTypeMask;
1.579 + }
1.580 +
1.581 +TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex)
1.582 + {
1.583 + TUint32 pte_type = aPte & KArmV6PteTypeMask;
1.584 + if (pte_type == KArmV6PteLargePage)
1.585 + return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
1.586 + else if (pte_type != 0)
1.587 + return aPte & KPteSmallPageAddrMask;
1.588 + return KPhysAddrInvalid;
1.589 + }
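+// Note (assuming 64KB ARM large pages spanning KLargeSmallPageRatio==16 small-page PTEs):
+// for a large-page PTE the physical address is the 64KB base plus the sub-page selected by
+// aPteIndex, which is what the (aPteIndex << KPageShift) & KLargePageMask term computes.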
1.590 +
1.591 +TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr)
1.592 + {
1.593 + TPde* kpd = (TPde*)KPageDirectoryBase; // kernel page directory
1.594 + TPde pde = kpd[aAddr>>KChunkShift];
1.595 + if ((pde & KPdePresentMask) == KArmV6PdeSection)
1.596 + return pde & KPdeSectionAddrMask;
1.597 + return KPhysAddrInvalid;
1.598 + }
1.599 +
1.600 +void ArmMmu::Init1()
1.601 + {
1.602 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1"));
1.603 +
1.604 + // MmuBase data
1.605 + iPageSize=KPageSize;
1.606 + iPageMask=KPageMask;
1.607 + iPageShift=KPageShift;
1.608 + iChunkSize=KChunkSize;
1.609 + iChunkMask=KChunkMask;
1.610 + iChunkShift=KChunkShift;
1.611 + iPageTableSize=KPageTableSize;
1.612 + iPageTableMask=KPageTableMask;
1.613 + iPageTableShift=KPageTableShift;
1.614 + iPtClusterSize=KPtClusterSize;
1.615 + iPtClusterMask=KPtClusterMask;
1.616 + iPtClusterShift=KPtClusterShift;
1.617 + iPtBlockSize=KPtBlockSize;
1.618 + iPtBlockMask=KPtBlockMask;
1.619 + iPtBlockShift=KPtBlockShift;
1.620 + iPtGroupSize=KChunkSize/KPageTableSize;
1.621 + iPtGroupMask=iPtGroupSize-1;
1.622 + iPtGroupShift=iChunkShift-iPageTableShift;
1.623 + //TInt* iPtBlockCount; // dynamically allocated - Init2
1.624 + //TInt* iPtGroupCount; // dynamically allocated - Init2
1.625 + iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
1.626 + iPageTableLinBase=KPageTableBase;
1.627 + //iRamPageAllocator; // dynamically allocated - Init2
1.628 + //iAsyncFreeList; // dynamically allocated - Init2
1.629 + //iPageTableAllocator; // dynamically allocated - Init2
1.630 + //iPageTableLinearAllocator;// dynamically allocated - Init2
1.631 + iPtInfoPtePerm=KPtInfoPtePerm;
1.632 + iPtPtePerm=KPtPtePerm;
1.633 + iPtPdePerm=KPtPdePerm;
1.634 + iUserCodeLoadPtePerm=KUserCodeLoadPte;
1.635 + iKernelCodePtePerm=KKernelCodeRunPte;
1.636 + iTempAddr=KTempAddr;
1.637 + iSecondTempAddr=KSecondTempAddr;
1.638 + iMapSizes=KPageSize|KLargePageSize|KChunkSize;
1.639 + iRomLinearBase = ::RomHeaderAddress;
1.640 + iRomLinearEnd = KRomLinearEnd;
1.641 + iShadowPtePerm = KShadowPtePerm;
1.642 + iShadowPdePerm = KShadowPdePerm;
1.643 +
1.644 + // Mmu data
1.645 + TInt total_ram=TheSuperPage().iTotalRamSize;
1.646 +
1.647 + // Large or small configuration?
1.648 + // This is determined by the bootstrap based on RAM size
1.649 + TUint32 ttcr=TTCR();
1.650 + __NK_ASSERT_ALWAYS(ttcr==1 || ttcr==2);
1.651 + TBool large = (ttcr==1);
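+ // Explanatory note: with TTCR (TTBCR.N) == 1, TTBR0 translates the bottom 2GB (an 8KB local
+ // page directory); with N == 2 it translates the bottom 1GB (a 4KB local PD) - matching the
+ // large/small layout selection below.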
1.652 +
1.653 + // calculate cache colouring...
1.654 + TInt iColourCount = 0;
1.655 + TInt dColourCount = 0;
1.656 + TUint32 ctr = InternalCache::TypeRegister();
1.657 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
1.658 +#ifdef __CPU_ARMV6
1.659 + __NK_ASSERT_ALWAYS((ctr>>29)==0); // check ARMv6 format
1.660 + if(ctr&0x800)
1.661 + iColourCount = 4;
1.662 + if(ctr&0x800000)
1.663 + dColourCount = 4;
1.664 +#else
1.665 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
1.666 + __NK_ASSERT_ALWAYS((ctr>>29)==4); // check ARMv7 format
1.667 + TUint l1ip = (ctr>>14)&3; // L1 instruction cache indexing and tagging policy
1.668 + __NK_ASSERT_ALWAYS(l1ip>=2); // check I cache is physically tagged
1.669 +
1.670 + TUint32 clidr = InternalCache::LevelIDRegister();
1.671 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheLevelIDRegister = %08x",clidr));
1.672 + TUint l1type = clidr&7;
1.673 + if(l1type)
1.674 + {
1.675 + if(l1type==2 || l1type==3 || l1type==4)
1.676 + {
1.677 + // we have an L1 data cache...
1.678 + TUint32 csir = InternalCache::SizeIdRegister(0,0);
1.679 + TUint sets = ((csir>>13)&0x7fff)+1;
1.680 + TUint ways = ((csir>>3)&0x3ff)+1;
1.681 + TUint lineSizeShift = (csir&7)+4;
1.682 + // assume the L1 data cache is VIPT with broken alias checks, so we need data cache colouring...
1.683 + dColourCount = (sets<<lineSizeShift)>>KPageShift;
1.684 + if(l1type==4) // unified cache, so set instruction cache colour as well...
1.685 + iColourCount = (sets<<lineSizeShift)>>KPageShift;
1.686 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
1.687 + }
1.688 +
1.689 + if(l1type==1 || l1type==3)
1.690 + {
1.691 + // we have a separate L1 instruction cache...
1.692 + TUint32 csir = InternalCache::SizeIdRegister(1,0);
1.693 + TUint sets = ((csir>>13)&0x7fff)+1;
1.694 + TUint ways = ((csir>>3)&0x3ff)+1;
1.695 + TUint lineSizeShift = (csir&7)+4;
1.696 + iColourCount = (sets<<lineSizeShift)>>KPageShift;
1.697 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
1.698 + }
1.699 + }
1.700 + if(l1ip==3)
1.701 + {
1.702 + // PIPT cache, so no colouring restrictions...
1.703 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is PIPT"));
1.704 + iColourCount = 0;
1.705 + }
1.706 + else
1.707 + {
1.708 + // VIPT cache...
1.709 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is VIPT"));
1.710 + }
1.711 +#endif
1.712 + TUint colourShift = 0;
1.713 + for(TUint colourCount=Max(iColourCount,dColourCount); colourCount!=0; colourCount>>=1)
1.714 + ++colourShift;
1.715 + iAliasSize=KPageSize<<colourShift;
1.716 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iAliasSize=0x%x",iAliasSize));
1.717 + iAliasMask=iAliasSize-1;
1.718 + iAliasShift=KPageShift+colourShift;
1.719 +
1.720 + iDecommitThreshold = CacheMaintenance::SyncAllPerformanceThresholdPages();
1.721 +
1.722 + iNumOsAsids=KArmV6NumAsids;
1.723 + iNumGlobalPageDirs=1;
1.724 + //iOsAsidAllocator; // dynamically allocated - Init2
1.725 + iGlobalPdSize=KPageDirectorySize;
1.726 + iGlobalPdShift=KPageDirectoryShift;
1.727 + iAsidGroupSize=KChunkSize/KPageDirectorySize;
1.728 + iAsidGroupMask=iAsidGroupSize-1;
1.729 + iAsidGroupShift=KChunkShift-KPageDirectoryShift;
1.730 + iUserLocalBase=KUserLocalDataBase;
1.731 + iAsidInfo=(TUint32*)KAsidInfoBase;
1.732 + iPdeBase=KPageDirectoryBase;
1.733 + iPdPtePerm=KPdPtePerm;
1.734 + iPdPdePerm=KPdPdePerm;
1.735 + iRamDriveMask=0x00f00000;
1.736 + iGlobalCodePtePerm=KGlobalCodeRunPte;
1.737 +#if defined(__CPU_MEMORY_TYPE_REMAPPING)
1.738 + iCacheMaintenanceTempMapAttr = CacheMaintenance::TemporaryMapping();
1.739 +#else
1.740 + switch(CacheMaintenance::TemporaryMapping())
1.741 + {
1.742 + case EMemAttNormalUncached:
1.743 + iCacheMaintenanceTempMapAttr = KArmV6MemAttNCNC;
1.744 + break;
1.745 + case EMemAttNormalCached:
1.746 + iCacheMaintenanceTempMapAttr = KArmV6MemAttWBWAWBWA;
1.747 + break;
1.748 + default:
1.749 + Panic(ETempMappingFailed);
1.750 + }
1.751 +#endif
1.752 + iMaxDllDataSize=Min(total_ram/2, 0x08000000); // phys RAM/2 up to 128Mb
1.753 + iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask; // round up to chunk size
1.754 + iMaxUserCodeSize=Min(total_ram, 0x10000000); // phys RAM up to 256Mb
1.755 + iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask; // round up to chunk size
1.756 + if (large)
1.757 + {
1.758 + iLocalPdSize=KPageDirectorySize/2;
1.759 + iLocalPdShift=KPageDirectoryShift-1;
1.760 + iUserSharedBase=KUserSharedDataBase2GB;
1.761 + iUserLocalEnd=iUserSharedBase-iMaxDllDataSize;
1.762 + iUserSharedEnd=KUserSharedDataEnd2GB-iMaxUserCodeSize;
1.763 + iDllDataBase=iUserLocalEnd;
1.764 + iUserCodeBase=iUserSharedEnd;
1.765 + }
1.766 + else
1.767 + {
1.768 + iLocalPdSize=KPageDirectorySize/4;
1.769 + iLocalPdShift=KPageDirectoryShift-2;
1.770 + iUserSharedBase=KUserSharedDataBase1GB;
1.771 + iUserLocalEnd=iUserSharedBase;
1.772 + iDllDataBase=KUserSharedDataEnd1GB-iMaxDllDataSize;
1.773 + iUserCodeBase=iDllDataBase-iMaxUserCodeSize;
1.774 + iUserSharedEnd=iUserCodeBase;
1.775 + }
1.776 + __KTRACE_OPT(KMMU,Kern::Printf("LPD size %08x GPD size %08x Alias size %08x",
1.777 + iLocalPdSize, iGlobalPdSize, iAliasSize));
1.778 + __KTRACE_OPT(KMMU,Kern::Printf("ULB %08x ULE %08x USB %08x USE %08x",iUserLocalBase,iUserLocalEnd,
1.779 + iUserSharedBase,iUserSharedEnd));
1.780 + __KTRACE_OPT(KMMU,Kern::Printf("DDB %08x UCB %08x",iDllDataBase,iUserCodeBase));
1.781 +
1.782 + // ArmMmu data
1.783 +
1.784 + // other
1.785 + PP::MaxUserThreadStack=0x14000; // 80K - STDLIB asks for 64K for PosixServer!!!!
1.786 + PP::UserThreadStackGuard=0x2000; // 8K
1.787 + PP::MaxStackSpacePerProcess=0x200000; // 2Mb
1.788 + K::SupervisorThreadStackSize=0x1000; // 4K
1.789 + PP::SupervisorThreadStackGuard=0x1000; // 4K
1.790 + K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
1.791 + PP::RamDriveStartAddress=KRamDriveStartAddress;
1.792 + PP::RamDriveRange=KRamDriveMaxSize;
1.793 + PP::RamDriveMaxSize=KRamDriveMaxSize; // may be reduced later
1.794 + K::MemModelAttributes=EMemModelTypeMultiple|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
1.795 + EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
1.796 + EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;
1.797 +
1.798 + Arm::DefaultDomainAccess=KDefaultDomainAccess;
1.799 +
1.800 + Mmu::Init1();
1.801 + }
1.802 +
1.803 +void ArmMmu::DoInit2()
1.804 + {
1.805 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::DoInit2"));
1.806 + iTempPte=PageTable(PageTableId(iTempAddr,0))+((iTempAddr&KChunkMask)>>KPageShift);
1.807 + iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift);
1.808 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x",
1.809 + iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
1.810 + CreateKernelSection(KKernelSectionEnd, iAliasShift);
1.811 + CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd);
1.812 + Mmu::DoInit2();
1.813 + }
1.814 +
1.815 +#ifndef __MMU_MACHINE_CODED__
1.816 +void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
1.817 +//
1.818 +// Map a list of physical RAM pages into a specified page table with specified PTE permissions.
1.819 +// Update the page information array.
1.820 +// Call this with the system locked.
1.821 +//
1.822 + {
1.823 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
1.824 + aId, aType, aPtr, aOffset, aNumPages, aPtePerm));
1.825 +
1.826 + SPageTableInfo& ptinfo=iPtInfo[aId];
1.827 + ptinfo.iCount+=aNumPages;
1.828 + aOffset>>=KPageShift;
1.829 + TInt ptOffset=aOffset & KPagesInPDEMask; // entry number in page table
1.830 + TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
1.831 +
1.832 + TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache.
1.833 +
1.834 + while(aNumPages--)
1.835 + {
1.836 + TPhysAddr pa = *aPageList++;
1.837 + if(pa==KPhysAddrInvalid)
1.838 + {
1.839 + ++pPte;
1.840 + __NK_ASSERT_DEBUG(aType==SPageInfo::EInvalid);
1.841 + continue;
1.842 + }
1.843 + *pPte++ = pa | aPtePerm; // insert PTE
1.844 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
1.845 + if (aType!=SPageInfo::EInvalid)
1.846 + {
1.847 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
1.848 + if(pi)
1.849 + {
1.850 + pi->Set(aType,aPtr,aOffset);
1.851 + __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
1.852 + ++aOffset; // increment offset for next page
1.853 + }
1.854 + }
1.855 + }
1.856 + CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
1.857 + }
1.858 +
1.859 +void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
1.860 +//
1.861 +// Map consecutive physical pages into a specified page table with specified PTE permissions.
1.862 +// Update the page information array if RAM pages are being mapped.
1.863 +// Call this with the system locked.
1.864 +//
1.865 + {
1.866 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
1.867 + aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
1.868 + SPageTableInfo& ptinfo=iPtInfo[aId];
1.869 + ptinfo.iCount+=aNumPages;
1.870 + aOffset>>=KPageShift;
1.871 + TInt ptOffset=aOffset & KPagesInPDEMask; // entry number in page table
1.872 + TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset; // address of first PTE
1.873 +
1.874 + TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache
1.875 +
1.876 + SPageInfo* pi;
1.877 + if(aType==SPageInfo::EInvalid)
1.878 + pi = NULL;
1.879 + else
1.880 + pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
1.881 + while(aNumPages--)
1.882 + {
1.883 + *pPte++ = aPhysAddr|aPtePerm; // insert PTE
1.884 + aPhysAddr+=KPageSize;
1.885 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
1.886 + if (pi)
1.887 + {
1.888 + pi->Set(aType,aPtr,aOffset);
1.889 + __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
1.890 + ++aOffset; // increment offset for next page
1.891 + ++pi;
1.892 + }
1.893 + }
1.894 +
1.895 + CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
1.896 + }
1.897 +
1.898 +void ArmMmu::MapVirtual(TInt aId, TInt aNumPages)
1.899 +//
1.900 +// Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing
1.901 +// virtual address space to a chunk. No pages are mapped.
1.902 +// Call this with the system locked.
1.903 +//
1.904 + {
1.905 + SPageTableInfo& ptinfo=iPtInfo[aId];
1.906 + ptinfo.iCount+=aNumPages;
1.907 + }
1.908 +
1.909 +void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* aProcess)
1.910 +//
1.911 +// Replace the mapping at address aAddr in page table aId.
1.912 +// Update the page information array for both the old and new pages.
1.913 +// Return physical address of old page if it is now ready to be freed.
1.914 +// Call this with the system locked.
1.915 +// May be called with interrupts disabled, do not enable/disable them.
1.916 +//
1.917 + {
1.918 + TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table
1.919 + TPte* pPte=PageTable(aId)+ptOffset; // address of PTE
1.920 + TPte pte=*pPte;
1.921 + TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
1.922 + (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
1.923 +
1.924 + if (pte & KArmV6PteSmallPage)
1.925 + {
1.926 + __ASSERT_ALWAYS((pte & KPteSmallPageAddrMask) == aOldAddr, Panic(ERemapPageFailed));
1.927 + SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
1.928 + __ASSERT_DEBUG(oldpi->LockCount()==0,Panic(ERemapPageFailed));
1.929 +
1.930 + // remap page
1.931 + *pPte = aNewAddr | aPtePerm; // overwrite PTE
1.932 + CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1.933 + InvalidateTLBForPage(aAddr,asid); // flush TLB entry
1.934 +
1.935 + // update new pageinfo, clear old
1.936 + SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr);
1.937 + pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
1.938 + oldpi->SetUnused();
1.939 + }
1.940 + else
1.941 + {
1.942 + Panic(ERemapPageFailed);
1.943 + }
1.944 + }
1.945 +
1.946 +void ArmMmu::RemapPageByAsid(TBitMapAllocator* aOsAsids, TLinAddr aLinAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm)
1.947 +//
1.948 +// Replace the mapping at address aLinAddr in the relevant page table for all
1.949 +// ASIDs specified in aOsAsids, but only if the currently mapped address is
1.950 +// aOldAddr.
1.951 +// Update the page information array for both the old and new pages.
1.952 +// Call this with the system unlocked.
1.953 +//
1.954 + {
1.955 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageByAsid() linaddr=%08x oldaddr=%08x newaddr=%08x perm=%08x", aLinAddr, aOldAddr, aNewAddr, aPtePerm));
1.956 +
1.957 + TInt asid = -1;
1.958 + TInt lastAsid = KArmV6NumAsids - 1;
1.959 + TUint32* ptr = aOsAsids->iMap;
1.960 + NKern::LockSystem();
1.961 + do
1.962 + {
1.963 + TUint32 bits = *ptr++;
1.964 + do
1.965 + {
1.966 + ++asid;
1.967 + if(bits & 0x80000000u)
1.968 + {
1.969 + // mapped in this address space, so update PTE...
1.970 + TPte* pPte = PtePtrFromLinAddr(aLinAddr, asid);
1.971 + TPte pte = *pPte;
1.972 + if ((pte&~KPageMask) == aOldAddr)
1.973 + {
1.974 + *pPte = aNewAddr | aPtePerm;
1.975 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x in asid %d",*pPte,pPte,asid));
1.976 + CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1.977 + InvalidateTLBForPage(aLinAddr,asid); // flush TLB entry
1.978 + }
1.979 + }
1.980 + }
1.981 + while(bits<<=1);
1.982 + NKern::FlashSystem();
1.983 + asid |= 31;
1.984 + }
1.985 + while(asid<lastAsid);
1.986 +
1.987 + // copy pageinfo attributes and mark old page unused
1.988 + SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
1.989 + SPageInfo::FromPhysAddr(aNewAddr)->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
1.990 + oldpi->SetUnused();
1.991 +
1.992 + NKern::UnlockSystem();
1.993 + }
1.994 +
1.995 +TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
1.996 +//
1.997 +// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
1.998 +// pages into aPageList, and count of unmapped pages into aNumPtes.
1.999 +// Return number of pages still mapped using this page table.
1.1000 +// Call this with the system locked.
1.1001 +// On the multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedPages instead.
1.1002 + {
1.1003 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
1.1004 + TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table
1.1005 + TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
1.1006 + TInt np=0;
1.1007 + TInt nf=0;
1.1008 + TUint32 ng=0;
1.1009 + TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
1.1010 + (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
1.1011 +
1.1012 +
1.1013 + while(aNumPages--)
1.1014 + {
1.1015 + TPte pte=*pPte; // get original PTE
1.1016 +#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.1017 + remove_and_invalidate_page(pPte, aAddr, asid);
1.1018 + ++pPte;
1.1019 +#else
1.1020 + *pPte++=0; // clear PTE
1.1021 +#endif
1.1022 +
1.1023 + // We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
1.1024 + // these to PageUnmapped, as the page doesn't become free until it's unmapped from all
1.1025 + // processes
1.1026 + if (pte != KPteNotPresentEntry)
1.1027 + ++np;
1.1028 +
1.1029 + if (pte & KArmV6PteSmallPage)
1.1030 + {
1.1031 + ng |= pte;
1.1032 +#if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.1033 + // In the ARM1136 erratum 353494 configuration, remove_and_invalidate_page sorts out the cache and TLB.
1.1034 + // Otherwise (not ARM1136, or __CPU_ARM1136_ERRATUM_353494_FIXED), we have to do it here.
1.1035 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
1.1036 + if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
1.1037 + InvalidateTLBForPage(aAddr,asid); // flush any corresponding TLB entry
1.1038 +#endif
1.1039 + TPhysAddr pa=pte & KPteSmallPageAddrMask; // physical address of unmapped page
1.1040 + if (aSetPagesFree)
1.1041 + {
1.1042 + SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
1.1043 + if(iRamCache->PageUnmapped(pi))
1.1044 + {
1.1045 + pi->SetUnused(); // mark page as unused
1.1046 + if (pi->LockCount()==0)
1.1047 + {
1.1048 + *aPageList++=pa; // store in page list
1.1049 + ++nf; // count free pages
1.1050 + }
1.1051 + }
1.1052 + }
1.1053 + else
1.1054 + *aPageList++=pa; // store in page list
1.1055 + }
1.1056 + aAddr+=KPageSize;
1.1057 + }
1.1058 +
1.1059 + aNumPtes=np;
1.1060 + aNumFree=nf;
1.1061 + SPageTableInfo& ptinfo=iPtInfo[aId];
1.1062 + TInt r=(ptinfo.iCount-=np);
1.1063 + if (asid<0)
1.1064 + r|=KUnmapPagesTLBFlushDeferred;
1.1065 +
1.1066 +
1.1067 + #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.1068 + __FlushBtb();
1.1069 + #endif
1.1070 +
1.1071 + __KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
1.1072 + return r; // return number of pages remaining in this page table
1.1073 + }
1.1074 +
1.1075 +TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
1.1076 +//
1.1077 +// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
1.1078 +// pages into aPageList, and count of unmapped pages into aNumPtes.
1.1079 +// Adjust the page table reference count as if aNumPages pages were unmapped.
1.1080 +// Return number of pages still mapped using this page table.
1.1081 +// Call this with the system locked.
1.1082 +// On the multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedVirtual instead.
1.1083 +//
1.1084 + {
1.1085 + SPageTableInfo& ptinfo=iPtInfo[aId];
1.1086 + TInt newCount = ptinfo.iCount - aNumPages;
1.1087 + UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
1.1088 + ptinfo.iCount = newCount;
1.1089 + aNumPtes = aNumPages;
1.1090 + return newCount;
1.1091 + }
1.1092 +
1.1093 +TInt ArmMmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages,
1.1094 + TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
1.1095 +/*
1.1096 + * Unmaps the specified area at address aAddr in page table aId.
1.1097 + * Places physical addresses of non-demand-paged unmapped pages into aPageList.
1.1098 + * Corresponding linear addresses are placed into aLAPageList.
1.1099 + * 'Old' demand-paged pages (whose invalid PTEs still hold the physical address) are neither unmapped nor
1.1100 + * entered in aPageList, but are still counted in aNumPtes.
1.1101 + *
1.1102 + * This method should be called to decommit physical memory not owned by the chunk. As we do not know
1.1103 + * the origin of such memory, PtInfo could be invalid (or may not exist), so cache maintenance may not be
1.1104 + * able to obtain the mapping colour. For that reason, this also returns the former linear address of each page
1.1105 + * in aPageList.
1.1106 + *
1.1107 + * @pre All pages are mapped within a single page table identified by aId.
1.1108 + * @pre On entry, the system lock is held and is not released during execution.
1.1109 + *
1.1110 + * @arg aId Id of the page table that maps the pages.
1.1111 + * @arg aAddr Linear address of the start of the area.
1.1112 + * @arg aNumPages The number of pages to unmap.
1.1113 + * @arg aProcess The owning process of the memory area to unmap.
1.1114 + * @arg aPageList On exit, holds the list of unmapped pages.
1.1115 + * @arg aLAPageList On exit, holds the list of linear addresses of unmapped pages.
1.1116 + * @arg aNumFree On exit, holds the number of pages in aPageList.
1.1117 + * @arg aNumPtes On exit, holds the number of unmapped pages. This includes demand-paged 'old'
1.1118 + * pages (with an invalid page table entry still holding the address of the physical page).
1.1119 + *
1.1120 + * @return The number of pages still mapped using this page table, ORed with
1.1121 + * KUnmapPagesTLBFlushDeferred if the TLB flush is not executed - which requires
1.1122 + * the caller to do a global TLB flush.
1.1123 + */
1.1124 + {
1.1125 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapUnownedPages() id=%d addr=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
1.1126 + TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table
1.1127 + TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
1.1128 + TInt np=0;
1.1129 + TInt nf=0;
1.1130 + TUint32 ng=0;
1.1131 + TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
1.1132 + (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
1.1133 +
1.1134 + while(aNumPages--)
1.1135 + {
1.1136 + TPte pte=*pPte; // get original PTE
1.1137 +#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.1138 + remove_and_invalidate_page(pPte, aAddr, asid);
1.1139 + ++pPte;
1.1140 +#else
1.1141 + *pPte++=0; // clear PTE
1.1142 +#endif
1.1143 +
1.1144 + // We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
1.1145 + // these to PageUnmapped, as the page doesn't become free until it's unmapped from all
1.1146 + // processes
1.1147 + if (pte != KPteNotPresentEntry)
1.1148 + ++np;
1.1149 +
1.1150 + if (pte & KArmV6PteSmallPage)
1.1151 + {
1.1152 + ng |= pte;
1.1153 +#if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.1154 + // In the ARM1136 erratum 353494 configuration, remove_and_invalidate_page sorts out the cache and TLB.
1.1155 + // Otherwise (not ARM1136, or __CPU_ARM1136_ERRATUM_353494_FIXED), we have to do it here.
1.1156 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
1.1157 + if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
1.1158 + InvalidateTLBForPage(aAddr,asid); // flush any corresponding TLB entry
1.1159 +#endif
1.1160 + TPhysAddr pa=pte & KPteSmallPageAddrMask; // physical address of unmapped page
1.1161 + ++nf;
1.1162 + *aPageList++=pa; // store physical address in page list
1.1163 + *aLAPageList++=aAddr; // store linear address in page list
1.1164 + }
1.1165 + aAddr+=KPageSize;
1.1166 + }
1.1167 +
1.1168 + aNumPtes=np;
1.1169 + aNumFree=nf;
1.1170 + SPageTableInfo& ptinfo=iPtInfo[aId];
1.1171 + TInt r=(ptinfo.iCount-=np);
1.1172 + if (asid<0)
1.1173 + r|=KUnmapPagesTLBFlushDeferred;
1.1174 +
1.1175 +
1.1176 + #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.1177 + __FlushBtb();
1.1178 + #endif
1.1179 +
1.1180 + __KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
1.1181 + return r; // return number of pages remaining in this page table
1.1182 + }
1.1183 +
1.1184 +
1.1185 +TInt ArmMmu::UnmapUnownedVirtual(TInt aId, TUint32 aAddr, TInt aNumPages,
1.1186 + TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
1.1187 +//
1.1188 +// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
1.1189 +// pages into aPageList, and count of unmapped pages into aNumPtes.
1.1190 +// Adjust the page table reference count as if aNumPages pages were unmapped.
1.1191 +// Return number of pages still mapped using this page table.
1.1192 +// Call this with the system locked.
1.1193 +//
1.1194 + {
1.1195 + SPageTableInfo& ptinfo=iPtInfo[aId];
1.1196 + TInt newCount = ptinfo.iCount - aNumPages;
1.1197 + UnmapUnownedPages(aId, aAddr, aNumPages, aPageList, aLAPageList, aNumPtes, aNumFree, aProcess);
1.1198 + ptinfo.iCount = newCount;
1.1199 + aNumPtes = aNumPages;
1.1200 + return newCount;
1.1201 + }
1.1202 +
1.1203 +void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
1.1204 +//
1.1205 +// Assign an allocated page table to map a given linear address with specified permissions.
1.1206 +// This should be called with the system unlocked and the MMU mutex held.
1.1207 +//
1.1208 + {
1.1209 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
1.1210 + TLinAddr ptLin=PageTableLinAddr(aId);
1.1211 + TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
1.1212 + TInt pdeIndex=TInt(aAddr>>KChunkShift);
1.1213 + TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
1.1214 + TInt os_asid=(TInt)aOsAsids;
1.1215 + if (TUint32(os_asid)<TUint32(iNumOsAsids))
1.1216 + {
1.1217 + // single OS ASID
1.1218 + TPde* pageDir=PageDirectory(os_asid);
1.1219 + NKern::LockSystem();
1.1220 + pageDir[pdeIndex]=ptPhys|aPdePerm; // will blow up here if the address is in the global region and aOsAsid doesn't have a global PD
1.1221 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1222 + NKern::UnlockSystem();
1.1223 +
1.1224 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
1.1225 + }
1.1226 + else if (os_asid==-1 && gpd)
1.1227 + {
1.1228 + // all OS ASIDs, address in global region
1.1229 + TInt num_os_asids=iNumGlobalPageDirs;
1.1230 + const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
1.1231 + for (os_asid=0; num_os_asids; ++os_asid)
1.1232 + {
1.1233 + if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
1.1234 + {
1.1235 + // this OS ASID exists and has a global page directory
1.1236 + TPde* pageDir=PageDirectory(os_asid);
1.1237 + NKern::LockSystem();
1.1238 + pageDir[pdeIndex]=ptPhys|aPdePerm;
1.1239 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1240 + NKern::UnlockSystem();
1.1241 +
1.1242 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
1.1243 + --num_os_asids;
1.1244 + }
1.1245 + }
1.1246 + }
1.1247 + else
1.1248 + {
1.1249 + // selection of OS ASIDs or all OS ASIDs
1.1250 + const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
1.1251 + if (os_asid==-1)
1.1252 + pB=iOsAsidAllocator; // 0's in positions which exist
1.1253 + TInt num_os_asids=pB->iSize-pB->iAvail;
1.1254 + for (os_asid=0; num_os_asids; ++os_asid)
1.1255 + {
1.1256 + if (pB->NotAllocated(os_asid,1))
1.1257 + continue; // os_asid is not needed
1.1258 + TPde* pageDir=PageDirectory(os_asid);
1.1259 + NKern::LockSystem();
1.1260 + pageDir[pdeIndex]=ptPhys|aPdePerm;
1.1261 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1262 + NKern::UnlockSystem();
1.1263 +
1.1264 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
1.1265 + --num_os_asids;
1.1266 + }
1.1267 + }
1.1268 + }
1.1269 +
1.1270 +void ArmMmu::RemapPageTableSingle(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, TInt aOsAsid)
1.1271 +//
1.1272 +// Replace a single page table mapping the specified linear address.
1.1273 +// This should be called with the system locked and the MMU mutex held.
1.1274 +//
1.1275 + {
1.1276 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableSingle %08x to %08x at %08x asid %d",aOld,aNew,aAddr,aOsAsid));
1.1277 + TPde* pageDir=PageDirectory(aOsAsid);
1.1278 + TInt pdeIndex=TInt(aAddr>>KChunkShift);
1.1279 + TPde pde=pageDir[pdeIndex];
1.1280 + __ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed));
1.1281 + TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
1.1282 + pageDir[pdeIndex]=newPde; // will blow up here if the address is in the global region and aOsAsid doesn't have a global PD
1.1283 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1284 +
1.1285 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
1.1286 + }
1.1287 +
1.1288 +void ArmMmu::RemapPageTableGlobal(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
1.1289 +//
1.1290 +// Replace a global page table mapping the specified linear address.
1.1291 +// This should be called with the system locked and the MMU mutex held.
1.1292 +//
1.1293 + {
1.1294 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableGlobal %08x to %08x at %08x",aOld,aNew,aAddr));
1.1295 + TInt pdeIndex=TInt(aAddr>>KChunkShift);
1.1296 + TInt num_os_asids=iNumGlobalPageDirs;
1.1297 + const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
1.1298 + for (TInt os_asid=0; num_os_asids; ++os_asid)
1.1299 + {
1.1300 + if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
1.1301 + {
1.1302 + // this OS ASID exists and has a global page directory
1.1303 + TPde* pageDir=PageDirectory(os_asid);
1.1304 + TPde pde=pageDir[pdeIndex];
1.1305 + if ((pde & KPdePageTableAddrMask) == aOld)
1.1306 + {
1.1307 + TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
1.1308 + pageDir[pdeIndex]=newPde;
1.1309 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1310 +
1.1311 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
1.1312 + }
1.1313 + --num_os_asids;
1.1314 + }
1.1315 + if ((os_asid&31)==31)
1.1316 + NKern::FlashSystem();
1.1317 + }
1.1318 + }
1.1319 +
1.1320 +void ArmMmu::RemapPageTableMultiple(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, const TAny* aOsAsids)
1.1321 +//
1.1322 +// Replace multiple page table mappings of the specified linear address.
1.1323 +// This should be called with the system locked and the MMU mutex held.
1.1324 +//
1.1325 + {
1.1326 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableMultiple %08x to %08x at %08x asids %08x",aOld,aNew,aAddr,aOsAsids));
1.1327 + TInt pdeIndex=TInt(aAddr>>KChunkShift);
1.1328 + const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
1.1329 + if ((TInt)aOsAsids==-1)
1.1330 + pB=iOsAsidAllocator; // 0's in positions which exist
1.1331 +
1.1332 + TInt asid = -1;
1.1333 + TInt lastAsid = KArmV6NumAsids - 1;
1.1334 + const TUint32* ptr = pB->iMap;
1.1335 + do
1.1336 + {
1.1337 + TUint32 bits = *ptr++;
1.1338 + do
1.1339 + {
1.1340 + ++asid;
1.1341 + if ((bits & 0x80000000u) == 0)
1.1342 + {
1.1343 + // mapped in this address space - bitmap is inverted
1.1344 + TPde* pageDir=PageDirectory(asid);
1.1345 + TPde pde=pageDir[pdeIndex];
1.1346 + if ((pde & KPdePageTableAddrMask) == aOld)
1.1347 + {
1.1348 + TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
1.1349 + pageDir[pdeIndex]=newPde;
1.1350 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1351 +
1.1352 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
1.1353 + }
1.1354 + }
1.1355 + }
1.1356 + while(bits<<=1);
1.1357 + NKern::FlashSystem();
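+		// Advance asid to the last position in the current 32-bit word so the outer loop resumes at the next word of the bitmap.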
1.1358 + asid |= 31;
1.1359 + }
1.1360 + while(asid<lastAsid);
1.1361 + }
1.1362 +
1.1363 +void ArmMmu::RemapPageTableAliases(TPhysAddr aOld, TPhysAddr aNew)
1.1364 +//
1.1365 +// Replace aliases of the specified page table.
1.1366 +// This should be called with the system locked and the MMU mutex held.
1.1367 +//
1.1368 + {
1.1369 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableAliases %08x to %08x",aOld,aNew));
1.1370 + SDblQue checkedList;
1.1371 + SDblQueLink* next;
1.1372 +
1.1373 + while(!iAliasList.IsEmpty())
1.1374 + {
1.1375 + next = iAliasList.First()->Deque();
1.1376 + checkedList.Add(next);
1.1377 + DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
1.1378 + TPde pde = thread->iAliasPde;
1.1379 + if ((pde & ~KPageMask) == aOld)
1.1380 + {
1.1381 + // a page table in this page is being aliased by the thread, so update it...
1.1382 + thread->iAliasPde = (pde & KPageMask) | aNew;
1.1383 + }
1.1384 + NKern::FlashSystem();
1.1385 + }
1.1386 +
1.1387 + // copy checkedList back to iAliasList
1.1388 + iAliasList.MoveFrom(&checkedList);
1.1389 + }
1.1390 +
1.1391 +void ArmMmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids)
1.1392 +//
1.1393 +// Unassign a now-empty page table currently mapping the specified linear address.
1.1394 +// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
1.1395 +// This should be called with the system unlocked and the MMU mutex held.
1.1396 +//
1.1397 + {
1.1398 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids));
1.1399 + TInt pdeIndex=TInt(aAddr>>KChunkShift);
1.1400 + TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
1.1401 + TInt os_asid=(TInt)aOsAsids;
1.1402 + TUint pde=0;
1.1403 +
1.1404 + SDblQue checkedList;
1.1405 + SDblQueLink* next;
1.1406 +
1.1407 + if (TUint32(os_asid)<TUint32(iNumOsAsids))
1.1408 + {
1.1409 + // single OS ASID
1.1410 + TPde* pageDir=PageDirectory(os_asid);
1.1411 + NKern::LockSystem();
1.1412 + pde = pageDir[pdeIndex];
1.1413 + pageDir[pdeIndex]=0;
1.1414 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1415 + __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
1.1416 +
1.1417 + // remove any aliases of the page table...
1.1418 + TUint ptId = pde>>KPageTableShift;
1.1419 + while(!iAliasList.IsEmpty())
1.1420 + {
1.1421 + next = iAliasList.First()->Deque();
1.1422 + checkedList.Add(next);
1.1423 + DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
1.1424 + if(thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId)
1.1425 + {
1.1426 + // the page table is being aliased by the thread, so remove it...
1.1427 + thread->iAliasPde = 0;
1.1428 + }
1.1429 + NKern::FlashSystem();
1.1430 + }
1.1431 + }
1.1432 + else if (os_asid==-1 && gpd)
1.1433 + {
1.1434 + // all OS ASIDs, address in global region
1.1435 + TInt num_os_asids=iNumGlobalPageDirs;
1.1436 + const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
1.1437 + for (os_asid=0; num_os_asids; ++os_asid)
1.1438 + {
1.1439 + if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
1.1440 + {
1.1441 + // this OS ASID exists and has a global page directory
1.1442 + TPde* pageDir=PageDirectory(os_asid);
1.1443 + NKern::LockSystem();
1.1444 + pageDir[pdeIndex]=0;
1.1445 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1446 + NKern::UnlockSystem();
1.1447 +
1.1448 + __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
1.1449 + --num_os_asids;
1.1450 + }
1.1451 + }
1.1452 + // we don't need to look for aliases in this case, because these aren't
1.1453 + // created for page tables in the global region.
1.1454 + NKern::LockSystem();
1.1455 + }
1.1456 + else
1.1457 + {
1.1458 + // selection of OS ASIDs or all OS ASIDs
1.1459 + const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
1.1460 + if (os_asid==-1)
1.1461 + pB=iOsAsidAllocator; // 0's in positions which exist
1.1462 + TInt num_os_asids=pB->iSize-pB->iAvail;
1.1463 + for (os_asid=0; num_os_asids; ++os_asid)
1.1464 + {
1.1465 + if (pB->NotAllocated(os_asid,1))
1.1466 + continue; // os_asid is not needed
1.1467 + TPde* pageDir=PageDirectory(os_asid);
1.1468 + NKern::LockSystem();
1.1469 + pde = pageDir[pdeIndex];
1.1470 + pageDir[pdeIndex]=0;
1.1471 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1472 + NKern::UnlockSystem();
1.1473 +
1.1474 + __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
1.1475 + --num_os_asids;
1.1476 + }
1.1477 +
1.1478 + // remove any aliases of the page table...
1.1479 + TUint ptId = pde>>KPageTableShift;
1.1480 + NKern::LockSystem();
1.1481 + while(!iAliasList.IsEmpty())
1.1482 + {
1.1483 + next = iAliasList.First()->Deque();
1.1484 + checkedList.Add(next);
1.1485 + DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
1.1486 + if((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1))
1.1487 + {
1.1488 + // the page table is being aliased by the thread, so remove it...
1.1489 + thread->iAliasPde = 0;
1.1490 + }
1.1491 + NKern::FlashSystem();
1.1492 + }
1.1493 + }
1.1494 +
1.1495 + // copy checkedList back to iAliasList
1.1496 + iAliasList.MoveFrom(&checkedList);
1.1497 +
1.1498 + NKern::UnlockSystem();
1.1499 + }
1.1500 +#endif
1.1501 +
1.1502 +// Initialise page table at physical address aXptPhys to be used as page table aXptId
1.1503 +// to expand the virtual address range used for mapping page tables. Map the page table
1.1504 +// at aPhysAddr as page table aId using the expanded range.
1.1505 +// Assign aXptPhys to kernel's Page Directory.
1.1506 +// Called with system unlocked and MMU mutex held.
1.1507 +void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
1.1508 + {
1.1509 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
1.1510 + aXptId, aXptPhys, aId, aPhysAddr));
1.1511 +
1.1512 + // put in a temporary mapping for aXptPhys
1.1513 + // make it noncacheable
1.1514 + TPhysAddr pa=aXptPhys&~KPageMask;
1.1515 + *iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
1.1516 + CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1.1517 +
1.1518 + // clear XPT
1.1519 + TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask));
1.1520 + memclr(xpt, KPageTableSize);
1.1521 +
1.1522 +	// aXptPhys and aPhysAddr must in fact be in the same physical page
1.1523 + __ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)<TUint32(KPageSize), MM::Panic(MM::EBootstrapPageTableBadAddr));
1.1524 +
1.1525 + // so only need one mapping
1.1526 + xpt[(aXptId>>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm;
1.1527 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)xpt, KPageTableSize);
1.1528 +
1.1529 + // remove temporary mapping
1.1530 + *iTempPte=0;
1.1531 + CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1.1532 +
1.1533 + InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
1.1534 +
1.1535 + // initialise PtInfo...
1.1536 + TLinAddr xptAddr = PageTableLinAddr(aXptId);
1.1537 + iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);
1.1538 +
1.1539 + // map xpt...
1.1540 + TInt pdeIndex=TInt(xptAddr>>KChunkShift);
1.1541 + TPde* pageDir=PageDirectory(0);
1.1542 + NKern::LockSystem();
1.1543 + pageDir[pdeIndex]=aXptPhys|KPtPdePerm;
1.1544 + CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1.1545 +
1.1546 + NKern::UnlockSystem();
1.1547 + }
1.1548 +
1.1549 +// Edit the self-mapping entry in page table aId, mapped at aTempMap, to
1.1550 +// change the physical address from aOld to aNew. Used when moving page
1.1551 +// tables which were created by BootstrapPageTable.
1.1552 +// Called with system locked and MMU mutex held.
1.1553 +void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
1.1554 + {
1.1555 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x",
1.1556 + aId, aTempMap, aOld, aNew));
1.1557 +
1.1558 + // find correct page table inside the page
1.1559 + TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift));
1.1560 + // find the pte in that page table
1.1561 + xpt += (aId>>KPtClusterShift)&KPagesInPDEMask;
1.1562 +
1.1563 + // switch the mapping
1.1564 + __ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed));
1.1565 + *xpt = aNew | KPtPtePerm;
1.1566 + // mapped with MapTemp, and thus not mapped as a PTE - have to do real cache clean.
1.1567 + CacheMaintenance::SinglePteUpdated((TLinAddr)xpt);
1.1568 + }
1.1569 +
1.1570 +TInt ArmMmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages)
1.1571 + {
1.1572 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
1.1573 + TInt r=0;
1.1574 + TInt nlocal=iLocalPdSize>>KPageShift;
1.1575 + aNumPages=aSeparateGlobal ? KPageDirectorySize/KPageSize : nlocal;
1.1576 + __KTRACE_OPT(KMMU,Kern::Printf("nlocal=%d, aNumPages=%d",nlocal,aNumPages));
1.1577 + if (aNumPages>1)
1.1578 + {
1.1579 + TInt align=aSeparateGlobal ? KPageDirectoryShift : KPageDirectoryShift-1;
1.1580 + r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, EPageFixed, align);
1.1581 + }
1.1582 + else
1.1583 + r=AllocRamPages(&aPhysAddr,1, EPageFixed);
1.1584 + __KTRACE_OPT(KMMU,Kern::Printf("r=%d, phys=%08x",r,aPhysAddr));
1.1585 + if (r!=KErrNone)
1.1586 + return r;
1.1587 +#ifdef BTRACE_KERNEL_MEMORY
1.1588 + BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, aNumPages<<KPageShift);
1.1589 + Epoc::KernelMiscPages += aNumPages;
1.1590 +#endif
1.1591 + SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1.1592 + NKern::LockSystem();
1.1593 + TInt i;
1.1594 + for (i=0; i<aNumPages; ++i)
1.1595 + pi[i].SetPageDir(aOsAsid,i);
1.1596 + NKern::UnlockSystem();
1.1597 + return KErrNone;
1.1598 + }
1.1599 +
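+// CopyPdes/ZeroPdes copy or clear the page directory entries covering the linear address
+// range [aBase,aEnd) and then perform the cache maintenance needed to make the update
+// visible to the hardware page-table walker.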
1.1600 +inline void CopyPdes(TPde* aDest, const TPde* aSrc, TLinAddr aBase, TLinAddr aEnd)
1.1601 + {
1.1602 + memcpy(aDest+(aBase>>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
1.1603 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
1.1604 + }
1.1605 +
1.1606 +inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd)
1.1607 + {
1.1608 + memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
1.1609 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
1.1610 + }
1.1611 +
1.1612 +void ArmMmu::InitPageDirectory(TInt aOsAsid, TBool aSeparateGlobal)
1.1613 + {
1.1614 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::InitPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
1.1615 + TPde* newpd=PageDirectory(aOsAsid); // new page directory
1.1616 + memclr(newpd, iLocalPdSize); // clear local page directory
1.1617 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)newpd, iLocalPdSize);
1.1618 + if (aSeparateGlobal)
1.1619 + {
1.1620 + const TPde* kpd=(const TPde*)KPageDirectoryBase; // kernel page directory
1.1621 + if (iLocalPdSize==KPageSize)
1.1622 + ZeroPdes(newpd, KUserSharedDataEnd1GB, KUserSharedDataEnd2GB);
1.1623 + ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress); // don't copy RAM drive
1.1624 + CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd); // copy ROM + user global
1.1625 + CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000); // copy kernel mappings
1.1626 + }
1.1627 + }
1.1628 +
1.1629 +void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex)
1.1630 + {
1.1631 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex));
1.1632 + TPte* pte=PageTable(aId);
1.1633 + memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte));
1.1634 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)(pte+aFirstIndex), KPageTableSize-aFirstIndex*sizeof(TPte));
1.1635 + }
1.1636 +
1.1637 +void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm)
1.1638 + {
1.1639 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d",
1.1640 + aOsAsid, aAddr, aPdePerm, aNumPdes));
1.1641 + TInt ix=aAddr>>KChunkShift;
1.1642 + TPde* pPde=PageDirectory(aOsAsid)+ix;
1.1643 + TLinAddr firstPde = (TLinAddr)pPde; //Will need this to clean page table memory region in cache
1.1644 +
1.1645 + TPde* pPdeEnd=pPde+aNumPdes;
1.1646 + NKern::LockSystem();
1.1647 + for (; pPde<pPdeEnd; ++pPde)
1.1648 + {
1.1649 + TPde pde=*pPde;
1.1650 + if (pde)
1.1651 + *pPde = (pde&KPdePageTableAddrMask)|aPdePerm;
1.1652 + }
1.1653 + CacheMaintenance::MultiplePtesUpdated(firstPde, aNumPdes*sizeof(TPde));
1.1654 + FlushTLBs();
1.1655 + NKern::UnlockSystem();
1.1656 + }
1.1657 +
1.1658 +void ArmMmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
1.1659 + {
1.1660 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
1.1661 + aId, aPageOffset, aNumPages, aPtePerm));
1.1662 + TPte* pPte=PageTable(aId)+aPageOffset;
1.1663 + TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table memory region in cache
1.1664 +
1.1665 + TPde* pPteEnd=pPte+aNumPages;
1.1666 + NKern::LockSystem();
1.1667 + for (; pPte<pPteEnd; ++pPte)
1.1668 + {
1.1669 + TPte pte=*pPte;
1.1670 + if (pte)
1.1671 + *pPte = (pte&KPteSmallPageAddrMask)|aPtePerm;
1.1672 + }
1.1673 + CacheMaintenance::MultiplePtesUpdated(firstPte, aNumPages*sizeof(TPte));
1.1674 + FlushTLBs();
1.1675 + NKern::UnlockSystem();
1.1676 + }
1.1677 +
1.1678 +void ArmMmu::ClearRamDrive(TLinAddr aStart)
1.1679 + {
1.1680 + // clear the page directory entries corresponding to the RAM drive
1.1681 + TPde* kpd=(TPde*)KPageDirectoryBase; // kernel page directory
1.1682 + ZeroPdes(kpd, aStart, KRamDriveEndAddress);
1.1683 + }
1.1684 +
1.1685 +TPde ArmMmu::PdePermissions(TChunkType aChunkType, TBool aRO)
1.1686 + {
1.1687 +// if (aChunkType==EUserData && aRO)
1.1688 +// return KPdePtePresent|KPdePteUser;
1.1689 + return ChunkPdePermissions[aChunkType];
1.1690 + }
1.1691 +
1.1692 +TPte ArmMmu::PtePermissions(TChunkType aChunkType)
1.1693 + {
1.1694 + return ChunkPtePermissions[aChunkType];
1.1695 + }
1.1696 +
1.1697 +// Set up a page table (specified by aId) to map a 1Mb section of ROM containing aRomAddr
1.1698 +// using ROM at aOrigPhys.
1.1699 +void ArmMmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
1.1700 + {
1.1701 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
1.1702 + aId, aRomAddr, aOrigPhys));
1.1703 + TPte* ppte = PageTable(aId);
1.1704 + TLinAddr firstPte = (TLinAddr)ppte; //Will need this to clean page table memory region in cache
1.1705 +
1.1706 + TPte* ppte_End = ppte + KChunkSize/KPageSize;
1.1707 + TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
1.1708 + for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
1.1709 + *ppte = phys | KRomPtePerm;
1.1710 + CacheMaintenance::MultiplePtesUpdated(firstPte, sizeof(TPte)*KChunkSize/KPageSize);
1.1711 + }
1.1712 +
1.1713 +// Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
1.1714 +// It is assumed the page at aShadowPhys is not mapped, therefore any mapping colour is OK.
1.1715 +void ArmMmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
1.1716 + {
1.1717 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
1.1718 + aShadowPhys, aRomAddr));
1.1719 +
1.1720 + // put in a temporary mapping for aShadowPhys
1.1721 + // make it noncacheable
1.1722 + *iTempPte = aShadowPhys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
1.1723 + CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1.1724 +
1.1725 + // copy contents of ROM
1.1726 + wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
1.1727 + //Temp address is uncached. No need to clean cache, just flush write buffer
1.1728 + CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, KPageSize, EMapAttrBufferedC);
1.1729 +
1.1730 + // remove temporary mapping
1.1731 + *iTempPte=0;
1.1732 + CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1.1733 + InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
1.1734 + }
1.1735 +
1.1736 +// Assign a shadow page table to replace a ROM section mapping
1.1737 +// Enter and return with system locked
1.1738 +void ArmMmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
1.1739 + {
1.1740 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
1.1741 + aId, aRomAddr));
1.1742 + TLinAddr ptLin=PageTableLinAddr(aId);
1.1743 + TPhysAddr ptPhys=LinearToPhysical(ptLin, 0);
1.1744 + TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
1.1745 + TPde newpde = ptPhys | KShadowPdePerm;
1.1746 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
1.1747 + TInt irq=NKern::DisableAllInterrupts();
1.1748 + *ppde = newpde; // map in the page table
1.1749 + CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
1.1750 +
1.1751 + FlushTLBs(); // flush both TLBs (no need to flush cache yet)
1.1752 + NKern::RestoreInterrupts(irq);
1.1753 + }
1.1754 +
1.1755 +void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
1.1756 + {
1.1757 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
1.1758 + TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
1.1759 + TPte newpte = aOrigPhys | KRomPtePerm;
1.1760 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
1.1761 + TInt irq=NKern::DisableAllInterrupts();
1.1762 + *ppte = newpte;
1.1763 + CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
1.1764 +
1.1765 + InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);
1.1766 + #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.1767 + __FlushBtb();
1.1768 + #endif
1.1769 +
1.1770 + CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
1.1771 + CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
1.1772 + NKern::RestoreInterrupts(irq);
1.1773 + }
1.1774 +
1.1775 +TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys)
1.1776 + {
1.1777 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys));
1.1778 + TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
1.1779 + TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions;
1.1780 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
1.1781 + TInt irq=NKern::DisableAllInterrupts();
1.1782 + *ppde = newpde; // revert to section mapping
1.1783 + CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
1.1784 +
1.1785 + FlushTLBs(); // flush both TLBs
1.1786 + NKern::RestoreInterrupts(irq);
1.1787 + return KErrNone;
1.1788 + }
1.1789 +
1.1790 +
1.1791 +#if defined(__CPU_MEMORY_TYPE_REMAPPING) // arm1176, arm11mcore, armv7, ...
1.1792 +/**
1.1793 +Shadow pages on platforms with remapping (mpcore, 1176, cortex...) are not writable.
1.1794 +This will map the region into writable memory first.
1.1795 +@pre No Fast Mutex held
1.1796 +*/
1.1797 +TInt ArmMmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
1.1798 + {
1.1799 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength));
1.1800 +
1.1801 + // Check that destination is ROM
1.1802 + if (aDest<iRomLinearBase || (aDest+aLength) > iRomLinearEnd)
1.1803 + {
1.1804 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: Destination not entirely in ROM"));
1.1805 + return KErrArgument;
1.1806 + }
1.1807 + // do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us)
1.1808 + MmuBase::Wait();
1.1809 +
1.1810 +
1.1811 + TInt r = KErrNone;
1.1812 + while (aLength)
1.1813 + {
1.1814 +		// Calculate how much memory to copy in this iteration; at most a single page region is copied per loop
1.1815 + TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask));
1.1816 +
1.1817 + // Get physical address
1.1818 + TPhysAddr physAddr = LinearToPhysical(aDest&~iPageMask, 0);
1.1819 + if (KPhysAddrInvalid==physAddr)
1.1820 + {
1.1821 + r = KErrArgument;
1.1822 + break;
1.1823 + }
1.1824 +
1.1825 + //check whether it is shadowed rom
1.1826 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
1.1827 + if (pi==0 || pi->Type()!=SPageInfo::EShadow)
1.1828 + {
1.1829 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: No shadow page at this address"));
1.1830 + r = KErrArgument;
1.1831 + break;
1.1832 + }
1.1833 +
1.1834 + //Temporarily map into writable memory and copy data. RamAllocator DMutex is required
1.1835 + TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask);
1.1836 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize));
1.1837 + memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize); //Kernel-to-Kernel copy is presumed
1.1838 + UnmapTemp();
1.1839 +
1.1840 + //Update variables for the next loop/page
1.1841 + aDest+=copySize;
1.1842 + aSrc+=copySize;
1.1843 + aLength-=copySize;
1.1844 + }
1.1845 + MmuBase::Signal();
1.1846 + return r;
1.1847 + }
1.1848 +#endif
1.1849 +
1.1850 +void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
1.1851 + {
1.1852 +#if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7 and later
1.1853 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage not required with MEMORY_TYPE_REMAPPING"));
1.1854 +#else
1.1855 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
1.1856 + aId, aRomAddr));
1.1857 + TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
1.1858 + TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePerm;
1.1859 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
1.1860 + *ppte = newpte;
1.1861 + CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
1.1862 + InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);
1.1863 +#endif
1.1864 + }
1.1865 +
1.1866 +/** Replaces large page(64K) entry in page table with small page(4K) entries.*/
1.1867 +void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr)
1.1868 + {
1.1869 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr));
1.1870 +
1.1871 + TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
1.1872 + TPte* pte = PageTable(aId);
1.1873 + if ((pte[pteIndex] & KArmV6PteTypeMask) == KArmV6PteLargePage)
1.1874 + {
1.1875 + __KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages"));
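+		// A 64K large page occupies 16 consecutive entries in the page table, so round the index down to the first of those 16 entries.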
1.1876 + pteIndex &= ~0xf;
1.1877 + TPte source = pte[pteIndex];
1.1878 + source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source);
1.1879 + pte += pteIndex;
1.1880 + for (TInt entry=0; entry<16; entry++)
1.1881 + {
1.1882 + pte[entry] = source | (entry<<12);
1.1883 + }
1.1884 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)pte, 16*sizeof(TPte));
1.1885 + FlushTLBs();
1.1886 + }
1.1887 + }
1.1888 +
1.1889 +void ArmMmu::FlushShadow(TLinAddr aRomAddr)
1.1890 + {
1.1891 + CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
1.1892 + CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
1.1893 + InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING); // remove all TLB references to original ROM page
1.1894 + }
1.1895 +
1.1896 +
1.1897 +#if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7
1.1898 +/**
1.1899 +Calculates page directory/table entries for memory type described in aMapAttr.
1.1900 +Global, small page (4KB) mapping is assumed.
1.1901 +(All magic numbers come from ARM page table descriptions.)
1.1902 +@param aMapAttr On entry, holds a description (memory type, access permissions, ...) of the memory.
1.1903 +	It is made up of TMappingAttributes constants or a TMappingAttributes2 object. If TMappingAttributes,
1.1904 +	it may be altered on exit to hold the actual cache attributes & access permissions.
1.1905 +@param aPde On exit, holds the 1st level descriptor (page directory entry)
1.1906 +	for the given type of memory, with the base address set to 0.
1.1907 +@param aPte On exit, holds the 2nd level small-page (4K) descriptor
1.1908 +	for the given type of memory, with the base address set to 0.
1.1909 +@return KErrNotSupported If memory described in aMapAttr is not supported
1.1910 + KErrNone Otherwise
1.1911 +*/
1.1912 +TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
1.1913 + {
1.1914 + __KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
1.1915 +
1.1916 + TMappingAttributes2& memory = (TMappingAttributes2&)aMapAttr;
1.1917 +
1.1918 + if(memory.ObjectType2())
1.1919 + {
1.1920 +//---------Memory described by TMappingAttributes2 object-----------------
1.1921 + aPde = KArmV6PdePageTable |
1.1922 + (memory.Parity() ? KArmV6PdeECCEnable : 0);
1.1923 +#if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
1.1924 + if(!memory.Shared() && (memory.Type() == EMemAttDevice ))
1.1925 + {
1.1926 + aMapAttr ^= EMapAttrBufferedNC;
1.1927 + aMapAttr |= EMapAttrFullyBlocking;
1.1928 + // Clear EMemAttDevice
1.1929 + aMapAttr ^= (EMemAttDevice << 26);
1.1930 + aMapAttr |= (EMemAttStronglyOrdered << 26);
1.1931 + }
1.1932 +#endif
1.1933 + aPte = KArmV6PteSmallPage |
1.1934 + KArmV6PteAP0 | // AP0 bit always 1
1.1935 + ((memory.Type()&3)<<2) | ((memory.Type()&4)<<4) | // memory type
1.1936 + (memory.Executable() ? 0 : KArmV6PteSmallXN) | // eXecuteNever bit
1.1937 +#if defined (__CPU_USE_SHARED_MEMORY)
1.1938 + KArmV6PteS | // Memory is always shared.
1.1939 +#else
1.1940 + (memory.Shared() ? KArmV6PteS : 0) | // Shared bit
1.1941 +#endif
1.1942 + (memory.Writable() ? 0 : KArmV6PteAPX) | // APX = !Writable
1.1943 + (memory.UserAccess() ? KArmV6PteAP1: 0); // AP1 = UserAccess
1.1944 + // aMapAttr remains the same
1.1945 + }
1.1946 + else
1.1947 + {
1.1948 +//---------Memory described by TMappingAttributes bitmask-----------------
1.1949 +#if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
1.1950 + if(((aMapAttr & EMapAttrL1CacheMask) == EMapAttrBufferedNC) && !(aMapAttr & EMapAttrShared))
1.1951 + {
1.1952 + // Clear EMapAttrBufferedNC attribute
1.1953 + aMapAttr ^= EMapAttrBufferedNC;
1.1954 + aMapAttr |= EMapAttrFullyBlocking;
1.1955 + }
1.1956 +#endif
1.1957 + // 1. Calculate TEX0:C:B bits in page table and actual cache attributes.
1.1958 + // Only L1 cache attribute from aMapAttr matters. Outer (L2) cache policy will be the same as inner one.
1.1959 + TUint l1cache=aMapAttr & EMapAttrL1CacheMask; // Inner cache attributes. May change to actual value.
1.1960 + TUint l2cache; // Will hold actual L2 cache attributes (in terms of TMappingAttributes constants)
1.1961 + TUint tex0_c_b; // Will hold TEX[0]:C:B value in page table
1.1962 +
1.1963 + switch (l1cache)
1.1964 + {
1.1965 + case EMapAttrFullyBlocking:
1.1966 + tex0_c_b = EMemAttStronglyOrdered;
1.1967 + l2cache = EMapAttrL2Uncached;
1.1968 + break;
1.1969 + case EMapAttrBufferedNC:
1.1970 + tex0_c_b = EMemAttDevice;
1.1971 + l2cache = EMapAttrL2Uncached;
1.1972 + break;
1.1973 + case EMapAttrBufferedC:
1.1974 + case EMapAttrL1Uncached:
1.1975 + case EMapAttrCachedWTRA:
1.1976 + case EMapAttrCachedWTWA:
1.1977 + tex0_c_b = EMemAttNormalUncached;
1.1978 + l1cache = EMapAttrBufferedC;
1.1979 + l2cache = EMapAttrL2Uncached;
1.1980 + break;
1.1981 + case EMapAttrCachedWBRA:
1.1982 + case EMapAttrCachedWBWA:
1.1983 + case EMapAttrL1CachedMax:
1.1984 + tex0_c_b = EMemAttNormalCached;
1.1985 + l1cache = EMapAttrCachedWBWA;
1.1986 + l2cache = EMapAttrL2CachedWBWA;
1.1987 + break;
1.1988 + default:
1.1989 + return KErrNotSupported;
1.1990 + }
1.1991 +
1.1992 + // 2. Step 2 has been removed :)
1.1993 +
1.1994 + // 3. Calculate access permissions (apx:ap bits in page table + eXecute it)
1.1995 + TUint read=aMapAttr & EMapAttrReadMask;
1.1996 + TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
1.1997 + TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
1.1998 +
1.1999 + read|=exec; // User/Sup execute access requires User/Sup read access.
1.2000 + if (exec) exec = 1; // There is a single eXecute bit in page table. Set to one if User or Sup exec is required.
1.2001 +
1.2002 + TUint apxap=0;
1.2003 + if (write==0) // no write required
1.2004 + {
1.2005 + if (read>=4) apxap=KArmV6PermRORO; // user read required
1.2006 + else if (read==1) apxap=KArmV6PermRONO; // supervisor read required
1.2007 + else return KErrNotSupported; // no read required
1.2008 + }
1.2009 + else if (write<4) // supervisor write required
1.2010 + {
1.2011 + if (read<4) apxap=KArmV6PermRWNO; // user read not required
1.2012 + else return KErrNotSupported; // user read required
1.2013 + }
1.2014 + else // user & supervisor writes required
1.2015 + {
1.2016 + apxap=KArmV6PermRWRW;
1.2017 + }
1.2018 +
1.2019 + // 4. Calculate page-table-entry for the 1st level (aka page directory) descriptor
1.2020 + aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable;
1.2021 +
1.2022 + // 5. Calculate small-page-entry for the 2nd level (aka page table) descriptor
1.2023 + aPte=SP_PTE(apxap, tex0_c_b, exec, 1); // always global
1.2024 + if (aMapAttr&EMapAttrShared)
1.2025 + aPte |= KArmV6PteS;
1.2026 +
1.2027 + // 6. Fix aMapAttr to hold the actual values for access permission & cache attributes
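+	// Repack the XN (bit 0), APX (bit 9) and AP[1:0] (bits 5:4) fields of the PTE into a 4-bit index for PermissionLookup.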
1.2028 + TUint xnapxap=((aPte<<3)&8)|((aPte>>7)&4)|((aPte>>4)&3);
1.2029 + aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask);
1.2030 + aMapAttr |= PermissionLookup[xnapxap]; // Set actual access permissions
1.2031 + aMapAttr |= l1cache; // Set actual inner cache attributes
1.2032 + aMapAttr |= l2cache; // Set actual outer cache attributes
1.2033 + }
1.2034 +
1.2035 + __KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x", aMapAttr, aPde, aPte));
1.2036 + return KErrNone;
1.2037 + }
1.2038 +
1.2039 +#else //ARMv6 (arm1136)
1.2040 +
1.2041 +const TUint FBLK=(EMapAttrFullyBlocking>>12);
1.2042 +const TUint BFNC=(EMapAttrBufferedNC>>12);
1.2043 +//const TUint BUFC=(EMapAttrBufferedC>>12);
1.2044 +const TUint L1UN=(EMapAttrL1Uncached>>12);
1.2045 +const TUint WTRA=(EMapAttrCachedWTRA>>12);
1.2046 +//const TUint WTWA=(EMapAttrCachedWTWA>>12);
1.2047 +const TUint WBRA=(EMapAttrCachedWBRA>>12);
1.2048 +const TUint WBWA=(EMapAttrCachedWBWA>>12);
1.2049 +const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
1.2050 +//const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
1.2051 +//const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
1.2052 +const TUint AWBW=(EMapAttrAltCacheWBWA>>12);
1.2053 +const TUint MAXC=(EMapAttrL1CachedMax>>12);
1.2054 +
1.2055 +const TUint L2UN=(EMapAttrL2Uncached>>16);
1.2056 +
1.2057 +const TUint8 UNS=0xffu; // Unsupported attribute
1.2058 +
1.2059 +//Maps L1 & L2 cache attributes into TEX[4:2]:CB[1:0]
1.2060 +//ARMv6 doesn't do WTWA so we use WTRA instead
1.2061 +
1.2062 +#if !defined(__CPU_ARM1136_ERRATUM_399234_FIXED)
1.2063 +// L1 Write-Through mode is outlawed, L1WT acts as L1UN.
1.2064 +static const TUint8 CBTEX[40]=
1.2065 + { // L1CACHE:
1.2066 +// FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA L2CACHE:
1.2067 + 0x00, 0x01, 0x01, 0x04, 0x04, 0x04, 0x13, 0x11, //NC
1.2068 + 0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19, //WTRA
1.2069 + 0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19, //WTWA
1.2070 + 0x00, 0x01, 0x01, 0x1c, 0x1c, 0x1c, 0x1f, 0x1d, //WBRA
1.2071 + 0x00, 0x01, 0x01, 0x14, 0x14, 0x14, 0x17, 0x15 //WBWA
1.2072 + };
1.2073 +#else
1.2074 +static const TUint8 CBTEX[40]=
1.2075 + { // L1CACHE:
1.2076 +// FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA L2CACHE:
1.2077 + 0x00, 0x01, 0x01, 0x04, 0x12, 0x12, 0x13, 0x11, //NC
1.2078 + 0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19, //WTRA
1.2079 + 0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19, //WTWA
1.2080 + 0x00, 0x01, 0x01, 0x1c, 0x1e, 0x1e, 0x1f, 0x1d, //WBRA
1.2081 + 0x00, 0x01, 0x01, 0x14, 0x16, 0x16, 0x17, 0x15 //WBWA
1.2082 + };
1.2083 +#endif
1.2084 +
1.2085 +//Maps TEX[4:2]:CB[1:0] value into L1 cache attributes
1.2086 +static const TUint8 L1Actual[32]=
1.2087 + {
1.2088 +//CB 00 01 10 11 //TEX
1.2089 + FBLK, BFNC, WTRA, WBRA, //000
1.2090 + L1UN, UNS, UNS, WBWA, //001
1.2091 + BFNC, UNS, UNS, UNS, //010
1.2092 + UNS, UNS, UNS, UNS, //011
1.2093 + L1UN, WBWA, WTRA, WBRA, //100
1.2094 + L1UN, WBWA, WTRA, WBRA, //101
1.2095 + L1UN, WBWA, WTRA, WBRA, //110
1.2096 + L1UN, WBWA, WTRA, WBRA //111
1.2097 + };
1.2098 +
1.2099 +//Maps TEX[4:2]:CB[1:0] value into L2 cache attributes
1.2100 +static const TUint8 L2Actual[32]=
1.2101 + {
1.2102 +//CB 00 01 10 11 //TEX
1.2103 + L2UN, L2UN, WTRA, WBRA, //000
1.2104 + L2UN, UNS, UNS, WBWA, //001
1.2105 + L2UN, UNS, UNS, UNS, //010
1.2106 + UNS, UNS, UNS, UNS, //011
1.2107 + L2UN, L2UN, L2UN, L2UN, //100
1.2108 + WBWA, WBWA, WBWA, WBWA, //101
1.2109 + WTRA, WTRA, WTRA, WTRA, //110
1.2110 + WBRA, WBRA, WBRA, WBRA //111
1.2111 + };
1.2112 +
1.2113 +TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
1.2114 + {
1.2115 + __KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
1.2116 +
1.2117 + TUint read=aMapAttr & EMapAttrReadMask;
1.2118 + TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
1.2119 + TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
1.2120 + TUint l1cache=(aMapAttr & EMapAttrL1CacheMask)>>12;
1.2121 + TUint l2cache=(aMapAttr & EMapAttrL2CacheMask)>>16;
1.2122 + if (l1cache==MAXC) l1cache=WBRA; // map max cache to WBRA
1.2123 + if (l1cache>AWBW)
1.2124 + return KErrNotSupported; // undefined attribute
1.2125 + if (l1cache>=AWTR) l1cache-=4; // no alternate cache, so use normal cache
1.2126 + if (l1cache<L1UN) l2cache=0; // for blocking/device, don't cache L2
1.2127 + if (l2cache==MAXC) l2cache=WBRA; // map max cache to WBRA
1.2128 + if (l2cache>WBWA)
1.2129 + return KErrNotSupported; // undefined attribute
1.2130 + if (l2cache) l2cache-=(WTRA-1); // l2cache now in range 0-4
1.2131 + aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable;
1.2132 +
1.2133 +#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.2134 + // if broken 1136, can't have supervisor only code
1.2135 + if (exec)
1.2136 + exec = TUint(EMapAttrExecUser>>8);
1.2137 +#endif
1.2138 +
1.2139 + // if any execute access, must have read=execute
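+	// If the read level is below the execute level, raise it to match; then collapse exec to a single bit.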
1.2140 + if (exec)
1.2141 + (void)(read>=exec || (read=exec)!=0), exec=1;
1.2142 +
1.2143 + // l1cache between 0 and 7, l2cache between 0 and 4; look up CBTEX
1.2144 + TUint cbtex=CBTEX[(l2cache<<3)|l1cache];
1.2145 +
1.2146 + // work out apx:ap
1.2147 + TUint apxap;
1.2148 + if (write==0)
1.2149 + apxap=(read>=4)?KArmV6PermRORO:(read?KArmV6PermRONO:KArmV6PermNONO);
1.2150 + else if (write<4)
1.2151 + apxap=(read>=4)?KArmV6PermRWRO:KArmV6PermRWNO;
1.2152 + else
1.2153 + apxap=KArmV6PermRWRW;
1.2154 + TPte pte=SP_PTE(apxap, cbtex, exec, 1); // always global
1.2155 + if (aMapAttr&EMapAttrShared)
1.2156 + pte |= KArmV6PteS;
1.2157 +
1.2158 + // Translate back to get actual map attributes
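+	// Repack the XN/APX/AP[1:0] and TEX[4:2]:CB[1:0] fields of the PTE to look up the permissions and cache attributes actually granted.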
1.2159 + TUint xnapxap=((pte<<3)&8)|((pte>>7)&4)|((pte>>4)&3);
1.2160 + cbtex=((pte>>4)&0x1c)|((pte>>2)&3); // = TEX[4:2]::CB[1:0]
1.2161 + aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask);
1.2162 + aMapAttr |= PermissionLookup[xnapxap];
1.2163 + aMapAttr |= (L1Actual[cbtex]<<12);
1.2164 + aMapAttr |= (L2Actual[cbtex]<<16);
1.2165 + aPte=pte;
1.2166 + __KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x",
1.2167 + aMapAttr, aPde, aPte));
1.2168 + return KErrNone;
1.2169 + }
1.2170 +#endif
1.2171 +
1.2172 +void ArmMmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
1.2173 +//
1.2174 +// Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
1.2175 +// Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
1.2176 +// Assume any page tables required are already assigned.
1.2177 +// aLinAddr, aPhysAddr, aSize must be page-aligned.
1.2178 +//
1.2179 + {
1.2180 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
1.2181 + __KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
1.2182 + TPde pt_pde=aPdePerm;
1.2183 + TPte sp_pte=aPtePerm;
1.2184 + TPde section_pde=SECTION_PDE_FROM_PDEPTE(pt_pde, sp_pte);
1.2185 + TPte lp_pte=LP_PTE_FROM_SP_PTE(sp_pte);
1.2186 + TLinAddr la=aLinAddr;
1.2187 + TPhysAddr pa=aPhysAddr;
1.2188 + TInt remain=aSize;
1.2189 + while (remain)
1.2190 + {
1.2191 + if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
1.2192 + {
1.2193 + // use sections - ASSUMES ADDRESS IS IN GLOBAL REGION
1.2194 + TInt npdes=remain>>KChunkShift;
1.2195 + const TBitMapAllocator& b=*iOsAsidAllocator;
1.2196 + TInt num_os_asids=iNumGlobalPageDirs;
1.2197 + TInt os_asid=0;
1.2198 + for (; num_os_asids; ++os_asid)
1.2199 + {
1.2200 + if (b.NotAllocated(os_asid,1) || (iAsidInfo[os_asid]&1)==0)
1.2201 + continue; // os_asid is not needed
1.2202 + TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift);
1.2203 + TPde* p_pde_E=p_pde+npdes;
1.2204 + TPde pde=pa|section_pde;
1.2205 + TLinAddr firstPde = (TLinAddr)p_pde; //Will need this to clean page table memory region from cache
1.2206 +
1.2207 + NKern::LockSystem();
1.2208 + for (; p_pde < p_pde_E; pde+=KChunkSize)
1.2209 + {
1.2210 + __ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
1.2211 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
1.2212 + *p_pde++=pde;
1.2213 + }
1.2214 + CacheMaintenance::MultiplePtesUpdated(firstPde, (TUint)p_pde-firstPde);
1.2215 + NKern::UnlockSystem();
1.2216 + --num_os_asids;
1.2217 + }
1.2218 + npdes<<=KChunkShift;
1.2219 + la+=npdes, pa+=npdes, remain-=npdes;
1.2220 + continue;
1.2221 + }
1.2222 + TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
1.2223 + TPte pa_mask=~KPageMask;
1.2224 + TPte pte_perm=sp_pte;
1.2225 + if (aMapShift>=KLargePageShift && block_size>=KLargePageSize)
1.2226 + {
1.2227 + if ((la & KLargePageMask)==0)
1.2228 + {
1.2229 + // use 64K large pages
1.2230 + pa_mask=~KLargePageMask;
1.2231 + pte_perm=lp_pte;
1.2232 + }
1.2233 + else
1.2234 + block_size = Min(remain, KLargePageSize-(la&KLargePageMask));
1.2235 + }
1.2236 + block_size &= pa_mask;
1.2237 +
1.2238 + // use pages (large or small)
1.2239 + TInt id=PageTableId(la, 0);
1.2240 + __ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
1.2241 + TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift);
1.2242 + TPte* p_pte_E=p_pte + (block_size>>KPageShift);
1.2243 + SPageTableInfo& ptinfo=iPtInfo[id];
1.2244 + TLinAddr firstPte = (TLinAddr)p_pte; //Will need this to clean page table memory region from cache
1.2245 +
1.2246 + NKern::LockSystem();
1.2247 + for (; p_pte < p_pte_E; pa+=KPageSize)
1.2248 + {
1.2249 + __ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
1.2250 + TPte pte = (pa & pa_mask) | pte_perm;
1.2251 + __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
1.2252 + *p_pte++=pte;
1.2253 + ++ptinfo.iCount;
1.2254 + NKern::FlashSystem();
1.2255 + }
1.2256 + CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)p_pte-firstPte);
1.2257 + NKern::UnlockSystem();
1.2258 + la+=block_size, remain-=block_size;
1.2259 + }
1.2260 + }
1.2261 +
1.2262 +void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize)
1.2263 +//
1.2264 +// Remove all mappings in the specified range of addresses.
1.2265 +// Assumes there are only global mappings involved.
1.2266 +// Don't free page tables.
1.2267 +// aLinAddr, aSize must be page-aligned.
1.2268 +//
1.2269 + {
1.2270 + __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
1.2271 + TLinAddr a=aLinAddr;
1.2272 + TLinAddr end=a+aSize;
1.2273 + __KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
1.2274 + NKern::LockSystem();
1.2275 + while(a!=end)
1.2276 + {
1.2277 + TInt pdeIndex=a>>KChunkShift;
1.2278 + TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
1.2279 + TInt to_do = Min(TInt(end-a), TInt(next-a))>>KPageShift;
1.2280 + __KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
1.2281 + TPde pde=::InitPageDirectory[pdeIndex];
1.2282 + if ( (pde&KArmV6PdeTypeMask)==KArmV6PdeSection )
1.2283 + {
1.2284 + __ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
1.2285 +#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.2286 + remove_and_invalidate_section(::InitPageDirectory + pdeIndex, a, KERNEL_MAPPING);
1.2287 +#else
1.2288 + ::InitPageDirectory[pdeIndex]=0;
1.2289 + CacheMaintenance::SinglePteUpdated(TLinAddr(::InitPageDirectory + pdeIndex));
1.2290 + InvalidateTLBForPage(a, KERNEL_MAPPING); // ASID irrelevant since global
1.2291 +#endif
1.2292 + a=next;
1.2293 + NKern::FlashSystem();
1.2294 + continue;
1.2295 + }
1.2296 + TInt ptid=PageTableId(a,0);
1.2297 + SPageTableInfo& ptinfo=iPtInfo[ptid];
1.2298 + if (ptid>=0)
1.2299 + {
1.2300 + TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift);
1.2301 + TPte* ppte_End=ppte+to_do;
1.2302 + for (; ppte<ppte_End; ++ppte, a+=KPageSize)
1.2303 + {
1.2304 + if (*ppte & KArmV6PteSmallPage)
1.2305 + {
1.2306 + --ptinfo.iCount;
1.2307 +#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.2308 + remove_and_invalidate_page(ppte, a, KERNEL_MAPPING);
1.2309 +#else
1.2310 + *ppte=0;
1.2311 + CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
1.2312 + InvalidateTLBForPage(a, KERNEL_MAPPING);
1.2313 +#endif
1.2314 + }
1.2315 + else if ((*ppte & KArmV6PteTypeMask) == KArmV6PteLargePage)
1.2316 + {
1.2317 + __ASSERT_DEBUG(!(a&KLargePageMask), MM::Panic(MM::EUnmapBadAlignment));
1.2318 + ptinfo.iCount-=KLargeSmallPageRatio;
1.2319 +#if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.2320 + remove_and_invalidate_page(ppte, a, KERNEL_MAPPING);
1.2321 +#else
1.2322 + memclr(ppte, KLargeSmallPageRatio*sizeof(TPte));
1.2323 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)ppte, KLargeSmallPageRatio*sizeof(TPte));
1.2324 + InvalidateTLBForPage(a, KERNEL_MAPPING);
1.2325 +#endif
1.2326 + a+=(KLargePageSize-KPageSize);
1.2327 + ppte+=(KLargeSmallPageRatio-1);
1.2328 + }
1.2329 + NKern::FlashSystem();
1.2330 + }
1.2331 + }
1.2332 + else
1.2333 + a += (to_do<<KPageShift);
1.2334 + }
1.2335 + NKern::UnlockSystem();
1.2336 + #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1.2337 + __FlushBtb();
1.2338 + #endif
1.2339 + }
1.2340 +
1.2341 +
1.2342 +void ArmMmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
1.2343 + {
1.2344 + //map the pages at a temporary address, clear them and unmap
1.2345 + __ASSERT_MUTEX(RamAllocatorMutex);
1.2346 + while (--aNumPages >= 0)
1.2347 + {
1.2348 + TPhysAddr pa;
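+		// Bit 0 of aPageList set means the argument is not a list: it encodes the base address of
+		// physically contiguous pages, which is advanced by iPageSize on each iteration.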
1.2349 + if((TInt)aPageList&1)
1.2350 + {
1.2351 + pa = (TPhysAddr)aPageList&~1;
1.2352 + *(TPhysAddr*)&aPageList += iPageSize;
1.2353 + }
1.2354 + else
1.2355 + pa = *aPageList++;
1.2356 +
1.2357 + *iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
1.2358 + CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1.2359 + InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
1.2360 + memset((TAny*)iTempAddr, aClearByte, iPageSize);
1.2361 + // This temporary mapping is noncached => No need to flush cache here.
1.2362 + // Still, we have to make sure that write buffer(s) are drained.
1.2363 + CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, iPageSize, EMapAttrBufferedC);
1.2364 + }
1.2365 + *iTempPte=0;
1.2366 + CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1.2367 + InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
1.2368 + }
1.2369 +
1.2370 +
1.2371 +/**
1.2372 +Create a temporary mapping of one or more contiguous physical pages.
1.2373 +Fully cached memory attributes apply.
1.2374 +The RamAllocatorMutex must be held before this function is called and not released
1.2375 +until after UnmapTemp has been called.
1.2376 +
1.2377 +@param aPage The physical address of the pages to be mapped.
1.2378 +@param aLinAddr The linear address of any existing location where the page is mapped.
1.2379 +	If the page isn't already mapped elsewhere as a cacheable page then
1.2380 +	this value is irrelevant. (It is used for page colouring.)
1.2381 +@param aPages Number of pages to map.
1.2382 +
1.2383 +@return The linear address of where the pages have been mapped.
1.2384 +*/
1.2385 +TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages)
1.2386 + {
1.2387 + __ASSERT_MUTEX(RamAllocatorMutex);
1.2388 + __ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
1.2389 + iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
1.2390 + iTempMapCount = aPages;
1.2391 + if (aPages==1)
1.2392 + {
1.2393 + iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
1.2394 + CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor));
1.2395 + }
1.2396 + else
1.2397 + {
1.2398 + __ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
1.2399 + for (TInt i=0; i<aPages; i++)
1.2400 + iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
1.2401 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte));
1.2402 + }
1.2403 + return iTempAddr+(iTempMapColor<<KPageShift);
1.2404 + }
1.2405 +
1.2406 +/**
1.2407 +Create a temporary mapping of one or more contiguous physical pages.
1.2408 +Memory attributes as specified by aMemType apply.
1.2409 +@see ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages) for other details.
1.2410 +*/
1.2411 +TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages, TMemoryType aMemType)
1.2412 + {
1.2413 + __ASSERT_MUTEX(RamAllocatorMutex);
1.2414 + __ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
1.2415 + iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
1.2416 + iTempMapCount = aPages;
1.2417 + TUint pte = SP_PTE(KArmV6PermRWNO, aMemType, 0, 1);
1.2418 + if (aPages==1)
1.2419 + {
1.2420 + iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, pte, 0, 1);
1.2421 + CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor));
1.2422 + }
1.2423 + else
1.2424 + {
1.2425 + __ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
1.2426 + for (TInt i=0; i<aPages; i++)
1.2427 + iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, pte, 0, 1);
1.2428 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte));
1.2429 + }
1.2430 + return iTempAddr+(iTempMapColor<<KPageShift);
1.2431 + }
1.2432 +
1.2433 +/**
1.2434 +Create a temporary mapping of one or more contiguous physical pages, distinct from
1.2435 +that created by MapTemp.
1.2436 +The RamAllocatorMutex must be held before this function is called and not released
1.2437 +until after UnmapSecondTemp has been called.
1.2438 +
1.2439 +@param aPage The physical address of the pages to be mapped.
1.2440 +@param aLinAddr The linear address of any existing location where the page is mapped.
1.2441 +	If the page isn't already mapped elsewhere as a cacheable page then
1.2442 +	this value is irrelevant. (It is used for page colouring.)
1.2443 +@param aPages Number of pages to map.
1.2444 +
1.2445 +@return The linear address of where the pages have been mapped.
1.2446 +*/
1.2447 +TLinAddr ArmMmu::MapSecondTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages)
1.2448 + {
1.2449 + __ASSERT_MUTEX(RamAllocatorMutex);
1.2450 + __ASSERT_DEBUG(!*iSecondTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
1.2451 + iSecondTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
1.2452 + iSecondTempMapCount = aPages;
1.2453 + if (aPages==1)
1.2454 + {
1.2455 + iSecondTempPte[iSecondTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
1.2456 + CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor));
1.2457 + }
1.2458 + else
1.2459 + {
1.2460 + __ASSERT_DEBUG(iSecondTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
1.2461 + for (TInt i=0; i<aPages; i++)
1.2462 + iSecondTempPte[iSecondTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
1.2463 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor), aPages*sizeof(TPte));
1.2464 + }
1.2465 + return iSecondTempAddr+(iSecondTempMapColor<<KPageShift);
1.2466 + }
1.2467 +
1.2468 +/**
1.2469 +Remove the temporary mapping created with MapTemp.
1.2470 +*/
1.2471 +void ArmMmu::UnmapTemp()
1.2472 + {
1.2473 + __ASSERT_MUTEX(RamAllocatorMutex);
1.2474 + for (TInt i=0; i<iTempMapCount; i++)
1.2475 + {
1.2476 + iTempPte[iTempMapColor+i] = 0;
1.2477 + CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor+i));
1.2478 + InvalidateTLBForPage(iTempAddr+((iTempMapColor+i)<<KPageShift), KERNEL_MAPPING);
1.2479 + }
1.2480 + }
1.2481 +
1.2482 +/**
1.2483 +Remove the temporary mapping created with MapSecondTemp.
1.2484 +*/
1.2485 +void ArmMmu::UnmapSecondTemp()
1.2486 + {
1.2487 + __ASSERT_MUTEX(RamAllocatorMutex);
1.2488 + for (TInt i=0; i<iSecondTempMapCount; i++)
1.2489 + {
1.2490 + iSecondTempPte[iSecondTempMapColor+i] = 0;
1.2491 + CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor+i));
1.2492 + InvalidateTLBForPage(iSecondTempAddr+((iSecondTempMapColor+i)<<KPageShift), KERNEL_MAPPING);
1.2493 + }
1.2494 + }
1.2495 +
1.2496 +
1.2497 +TBool ArmMmu::ValidateLocalIpcAddress(TLinAddr aAddr,TInt aSize,TBool aWrite)
1.2498 + {
1.2499 + __NK_ASSERT_DEBUG(aSize<=KChunkSize);
1.2500 + TLinAddr end = aAddr+aSize-1;
1.2501 + if(end<aAddr)
1.2502 + end = ~0u;
1.2503 +
1.2504 + if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize) || TUint(end^KIPCAlias)<TUint(KChunkSize))
1.2505 + {
1.2506 + // local address is in alias region.
1.2507 + // remove alias...
1.2508 + NKern::LockSystem();
1.2509 + ((DMemModelThread*)TheCurrentThread)->RemoveAlias();
1.2510 + NKern::UnlockSystem();
1.2511 + // access memory, which will cause an exception...
1.2512 + if(!(TUint(aAddr^KIPCAlias)<TUint(KChunkSize)))
1.2513 + aAddr = end;
1.2514 + InvalidateTLBForPage(aAddr,((DMemModelProcess*)TheCurrentThread->iOwningProcess)->iOsAsid);
1.2515 + if(aWrite)
1.2516 + *(volatile TUint8*)aAddr = 0;
1.2517 + else
1.2518 + aWrite = *(volatile TUint8*)aAddr;
1.2519 + // can't get here
1.2520 + __NK_ASSERT_DEBUG(0);
1.2521 + }
1.2522 +
1.2523 + TUint32 local_mask;
1.2524 + DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
1.2525 + if(aWrite)
1.2526 + local_mask = process->iAddressCheckMaskW;
1.2527 + else
1.2528 + local_mask = process->iAddressCheckMaskR;
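+	// Build a mask with one bit per 128MB region spanned by [aAddr,end] and check that every
+	// such region is permitted by the process's address check mask.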
1.2529 + TUint32 mask = 2<<(end>>27);
1.2530 + mask -= 1<<(aAddr>>27);
1.2531 + if((local_mask&mask)!=mask)
1.2532 + return EFalse;
1.2533 +
1.2534 + if(!aWrite)
1.2535 + return ETrue; // reads are ok
1.2536 +
1.2537 + // writes need further checking...
1.2538 + TLinAddr userCodeStart = iUserCodeBase;
1.2539 + TLinAddr userCodeEnd = userCodeStart+iMaxUserCodeSize;
1.2540 + if(end>=userCodeStart && aAddr<userCodeEnd)
1.2541 + return EFalse; // trying to write to user code area
1.2542 +
1.2543 + return ETrue;
1.2544 + }
1.2545 +
1.2546 +TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TInt aPerm, TLinAddr& aAliasAddr, TInt& aAliasSize)
1.2547 +//
1.2548 +// Set up an alias mapping starting at address aAddr in specified process.
1.2549 +// Check permissions aPerm.
1.2550 +// Enter and return with system locked.
1.2551 +// Note: The alias is removed if an exception is trapped by DThread::IpcExcHandler.
1.2552 +//
1.2553 + {
1.2554 + __KTRACE_OPT(KMMU2,Kern::Printf("Thread %O Alias %08x+%x Process %O perm %x",this,aAddr,aSize,aProcess,aPerm));
1.2555 + __ASSERT_SYSTEM_LOCK
1.2556 +
1.2557 + if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize))
1.2558 + return KErrBadDescriptor; // prevent access to alias region
1.2559 +
1.2560 + ArmMmu& m=::TheMmu;
1.2561 +
1.2562 + // check if memory is in region which is safe to access with supervisor permissions...
1.2563 + TBool okForSupervisorAccess = aPerm&(EMapAttrReadSup|EMapAttrWriteSup) ? 1 : 0;
1.2564 + if(!okForSupervisorAccess)
1.2565 + {
1.2566 + TInt shift = aAddr>>27;
1.2567 + if(!(aPerm&EMapAttrWriteUser))
1.2568 + {
1.2569 + // reading with user permissions...
1.2570 + okForSupervisorAccess = (aProcess->iAddressCheckMaskR>>shift)&1;
1.2571 + }
1.2572 + else
1.2573 + {
1.2574 + // writing with user permissions...
1.2575 + okForSupervisorAccess = (aProcess->iAddressCheckMaskW>>shift)&1;
1.2576 + if(okForSupervisorAccess)
1.2577 + {
1.2578 + // check for user code, because this is supervisor r/w and so
1.2579 +				// is not safe to write to with supervisor permissions.
1.2580 + if(TUint(aAddr-m.iUserCodeBase)<TUint(m.iMaxUserCodeSize))
1.2581 + return KErrBadDescriptor; // prevent write to this...
1.2582 + }
1.2583 + }
1.2584 + }
1.2585 +
1.2586 + TInt pdeIndex = aAddr>>KChunkShift;
1.2587 + if(pdeIndex>=(m.iLocalPdSize>>2))
1.2588 + {
1.2589 + // address is in global section, don't bother aliasing it...
1.2590 + if(iAliasLinAddr)
1.2591 + RemoveAlias();
1.2592 + aAliasAddr = aAddr;
1.2593 + TInt maxSize = KChunkSize-(aAddr&KChunkMask);
1.2594 + aAliasSize = aSize<maxSize ? aSize : maxSize;
1.2595 +		__KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() abandoned as memory is globally mapped"));
1.2596 + return okForSupervisorAccess;
1.2597 + }
1.2598 +
1.2599 + TInt asid = aProcess->iOsAsid;
1.2600 + TPde* pd = PageDirectory(asid);
1.2601 + TPde pde = pd[pdeIndex];
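+	// If the PDE still points at a page table that is being relocated (its old physical address
+	// matches AliasRemapOld), redirect it to the new location.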
1.2602 + if ((TPhysAddr)(pde&~KPageMask) == AliasRemapOld)
1.2603 + pde = AliasRemapNew|(pde&KPageMask);
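+	// The alias is mapped through a dedicated MMU domain (KIPCAliasDomain) so that access through
+	// the alias can be controlled via ArmMmu::LockAlias/UnlockAlias independently of the rest of the address space.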
1.2604 + pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain);
1.2605 + TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
1.2606 + if(pde==iAliasPde && iAliasLinAddr)
1.2607 + {
1.2608 + // pde already aliased, so just update linear address...
1.2609 + iAliasLinAddr = aliasAddr;
1.2610 + }
1.2611 + else
1.2612 + {
1.2613 + // alias PDE changed...
1.2614 + iAliasPde = pde;
1.2615 + iAliasOsAsid = asid;
1.2616 + if(!iAliasLinAddr)
1.2617 + {
1.2618 + ArmMmu::UnlockAlias();
1.2619 + ::TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
1.2620 + }
1.2621 + iAliasLinAddr = aliasAddr;
1.2622 + *iAliasPdePtr = pde;
1.2623 + CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
1.2624 + }
1.2625 +
1.2626 + __KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
1.2627 + InvalidateTLBForPage(aliasAddr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
1.2628 + TInt offset = aAddr&KPageMask;
1.2629 + aAliasAddr = aliasAddr | offset;
1.2630 + TInt maxSize = KPageSize - offset;
1.2631 + aAliasSize = aSize<maxSize ? aSize : maxSize;
1.2632 + iAliasTarget = aAddr & ~KPageMask;
1.2633 + return okForSupervisorAccess;
1.2634 + }
1.2635 +
1.2636 +void DMemModelThread::RemoveAlias()
1.2637 +//
1.2638 +// Remove alias mapping (if present)
1.2639 +// Enter and return with system locked.
1.2640 +//
1.2641 + {
1.2642 + __KTRACE_OPT(KMMU2,Kern::Printf("Thread %O RemoveAlias", this));
1.2643 + __ASSERT_SYSTEM_LOCK
1.2644 + TLinAddr addr = iAliasLinAddr;
1.2645 + if(addr)
1.2646 + {
1.2647 + ArmMmu::LockAlias();
1.2648 + iAliasLinAddr = 0;
1.2649 + iAliasPde = 0;
1.2650 + *iAliasPdePtr = 0;
1.2651 + CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
1.2652 + InvalidateTLBForPage(addr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
1.2653 + iAliasLink.Deque();
1.2654 + }
1.2655 + }
1.2656 +
1.2657 +/*
1.2658 + * Performs cache maintenance for a physical page that is going to be reused.
1.2659 + * Fully cached attributes are assumed.
1.2660 + */
1.2661 +void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr a)
1.2662 + {
1.2663 + // purge a single page from the cache following decommit
1.2664 + ArmMmu& m=::TheMmu;
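+	// the page is mapped temporarily at a kernel address with the same colour as its original
+	// mapping, so that maintenance of the virtually indexed caches hits the correct cache lines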
1.2665 + TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
1.2666 + TPte& pte=m.iTempPte[colour];
1.2667 + TLinAddr va=m.iTempAddr+(colour<<KPageShift);
1.2668 + pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
1.2669 + CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
1.2670 +
1.2671 + CacheMaintenance::PageToReuse(va,EMemAttNormalCached, a);
1.2672 +
1.2673 + pte=0;
1.2674 + CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
1.2675 + InvalidateTLBForPage(va,KERNEL_MAPPING);
1.2676 + }
1.2677 +
1.2678 +void ArmMmu::CacheMaintenanceOnDecommit(const TPhysAddr* al, TInt n)
1.2679 + {
1.2680 + // purge a list of pages from the cache following decommit
1.2681 + while (--n>=0)
1.2682 + ArmMmu::CacheMaintenanceOnDecommit(*al++);
1.2683 + }
1.2684 +
1.2685 +/*
1.2686 + * Performs cache maintenance to preserve the contents of a physical page that is going to be reused.
1.2687 + */
1.2688 +void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr a, TUint aMapAttr)
1.2689 + {
1.2690 +	// clean a single page from the cache following decommit, preserving its contents
1.2691 + ArmMmu& m=::TheMmu;
1.2692 + TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
1.2693 + TPte& pte=m.iTempPte[colour];
1.2694 + TLinAddr va=m.iTempAddr+(colour<<KPageShift);
1.2695 + pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
1.2696 + CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
1.2697 +
1.2698 + CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize,aMapAttr);
1.2699 +
1.2700 + pte=0;
1.2701 + CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
1.2702 + InvalidateTLBForPage(va,KERNEL_MAPPING);
1.2703 + }
1.2704 +
1.2705 +void ArmMmu::CacheMaintenanceOnPreserve(const TPhysAddr* al, TInt n, TUint aMapAttr)
1.2706 + {
1.2707 +	// clean a list of pages from the cache following decommit, preserving their contents
1.2708 + while (--n>=0)
1.2709 + ArmMmu::CacheMaintenanceOnPreserve(*al++, aMapAttr);
1.2710 + }
1.2711 +
1.2712 +/*
1.2713 + * Performs cache maintenance of physical memory that has been decommitted and has to be preserved.
1.2714 + * Call this method for physical pages with no page info updated (or no page info at all).
1.2715 + * @arg aPhysAddr The address of the contiguous physical memory to be preserved.
1.2716 + * @arg aSize The size of the region.
1.2717 + * @arg aLinAddr Former linear address of the region; the physical memory has already been
1.2718 + * unmapped from this linear address.
1.2719 + * @arg aMapAttr Mapping attributes of the region when it was mapped in aLinAddr.
1.2720 + * @pre MMU mutex is held.
1.2721 + */
1.2722 +void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr aPhysAddr, TInt aSize, TLinAddr aLinAddr, TUint aMapAttr )
1.2723 + {
1.2724 + __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
1.2725 + __NK_ASSERT_DEBUG((aSize&KPageMask)==0);
1.2726 + __NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
1.2727 +
1.2728 + TPhysAddr pa = aPhysAddr;
1.2729 + TInt size = aSize;
1.2730 + TInt colour = (aLinAddr>>KPageShift)&KPageColourMask;
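+	// the colour starts at that of the former linear mapping and advances with each page, so
+	// every temporary mapping matches the colour of the mapping it replaces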
1.2731 + TPte* pte = &(iTempPte[colour]);
1.2732 + while (size)
1.2733 + {
1.2734 + pte=&(iTempPte[colour]);
1.2735 + TLinAddr va=iTempAddr+(colour<<KPageShift);
1.2736 + *pte=pa|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
1.2737 + CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
1.2738 + CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize,aMapAttr);
1.2739 +
1.2740 + *pte=0;
1.2741 + CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
1.2742 + InvalidateTLBForPage(va,KERNEL_MAPPING);
1.2743 +
1.2744 + colour = (colour+1)&KPageColourMask;
1.2745 + pa += KPageSize;
1.2746 + size -=KPageSize;
1.2747 + }
1.2748 + }
1.2749 +
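+/*
+ * Walks the page tables covering aNumPages pages starting at aLinAddr in aProcess's address
+ * space and donates each page which is still mapped to the RAM cache, making it available
+ * for demand paging. The system lock is flashed periodically to bound lock hold times.
+ */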
1.2750 +TInt ArmMmu::UnlockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
1.2751 + {
1.2752 + TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
1.2753 + TInt page = aLinAddr>>KPageShift;
1.2754 + NKern::LockSystem();
1.2755 + for(;;)
1.2756 + {
1.2757 + TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
1.2758 + TPte* pt = SafePageTableFromPde(*pd++);
1.2759 + TInt pteIndex = page&(KChunkMask>>KPageShift);
1.2760 + if(!pt)
1.2761 + {
1.2762 + // whole page table has gone, so skip all pages in it...
1.2763 + TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
1.2764 + aNumPages -= pagesInPt;
1.2765 + page += pagesInPt;
1.2766 + if(aNumPages>0)
1.2767 + continue;
1.2768 + NKern::UnlockSystem();
1.2769 + return KErrNone;
1.2770 + }
1.2771 + pt += pteIndex;
1.2772 + do
1.2773 + {
1.2774 + TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
1.2775 + if(pagesInPt>aNumPages)
1.2776 + pagesInPt = aNumPages;
1.2777 + if(pagesInPt>KMaxPages)
1.2778 + pagesInPt = KMaxPages;
1.2779 +
1.2780 + aNumPages -= pagesInPt;
1.2781 + page += pagesInPt;
1.2782 +
1.2783 + do
1.2784 + {
1.2785 + TPte pte = *pt++;
1.2786 + if(pte) // pte may be null if page has already been unlocked and reclaimed by system
1.2787 + iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
1.2788 + }
1.2789 + while(--pagesInPt);
1.2790 +
1.2791 + if(!aNumPages)
1.2792 + {
1.2793 + NKern::UnlockSystem();
1.2794 + return KErrNone;
1.2795 + }
1.2796 +
1.2797 + pteIndex = page&(KChunkMask>>KPageShift);
1.2798 + }
1.2799 + while(!NKern::FlashSystem() && pteIndex);
1.2800 + }
1.2801 + }
1.2802 +
1.2803 +
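+/*
+ * Reclaims previously donated RAM cache pages for aNumPages pages starting at aLinAddr in
+ * aProcess's address space. Returns KErrNotFound if any page is no longer mapped or has
+ * already been reclaimed and reused by the system.
+ */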
1.2804 +TInt ArmMmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
1.2805 + {
1.2806 + TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
1.2807 + TInt page = aLinAddr>>KPageShift;
1.2808 + NKern::LockSystem();
1.2809 + for(;;)
1.2810 + {
1.2811 + TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
1.2812 + TPte* pt = SafePageTableFromPde(*pd++);
1.2813 + TInt pteIndex = page&(KChunkMask>>KPageShift);
1.2814 + if(!pt)
1.2815 + goto not_found;
1.2816 + pt += pteIndex;
1.2817 + do
1.2818 + {
1.2819 + TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
1.2820 + if(pagesInPt>aNumPages)
1.2821 + pagesInPt = aNumPages;
1.2822 + if(pagesInPt>KMaxPages)
1.2823 + pagesInPt = KMaxPages;
1.2824 +
1.2825 + aNumPages -= pagesInPt;
1.2826 + page += pagesInPt;
1.2827 +
1.2828 + do
1.2829 + {
1.2830 + TPte pte = *pt++;
1.2831 + if(pte==0)
1.2832 + goto not_found;
1.2833 + if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
1.2834 + goto not_found;
1.2835 + }
1.2836 + while(--pagesInPt);
1.2837 +
1.2838 + if(!aNumPages)
1.2839 + {
1.2840 + NKern::UnlockSystem();
1.2841 + return KErrNone;
1.2842 + }
1.2843 +
1.2844 + pteIndex = page&(KChunkMask>>KPageShift);
1.2845 + }
1.2846 + while(!NKern::FlashSystem() && pteIndex);
1.2847 + }
1.2848 +not_found:
1.2849 + NKern::UnlockSystem();
1.2850 + return KErrNotFound;
1.2851 + }
1.2852 +
1.2853 +
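+/*
+ * Frees a RAM cache page: unmaps it from the owning chunk, performs the cache maintenance
+ * needed before the physical page can be reused, and frees the page table if this was its
+ * last remaining entry.
+ */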
1.2854 +void RamCache::SetFree(SPageInfo* aPageInfo)
1.2855 + {
1.2856 + ArmMmu& m=::TheMmu;
1.2857 + // Make a page free
1.2858 + SPageInfo::TType type = aPageInfo->Type();
1.2859 + if(type==SPageInfo::EPagedCache)
1.2860 + {
1.2861 + TInt offset = aPageInfo->Offset()<<KPageShift;
1.2862 + DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
1.2863 + __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
1.2864 + TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
1.2865 + TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
1.2866 + TPte* pt = PtePtrFromLinAddr(lin,asid);
1.2867 + TPhysAddr phys = (*pt)&~KPageMask;
1.2868 + *pt = KPteNotPresentEntry;
1.2869 + CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
1.2870 + InvalidateTLBForPage(lin,asid);
1.2871 + m.CacheMaintenanceOnDecommit(phys);
1.2872 +
1.2873 + // actually decommit it from chunk...
1.2874 + TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
1.2875 + SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
1.2876 + if(!--ptinfo.iCount)
1.2877 + {
1.2878 + chunk->iPageTables[offset>>KChunkShift] = 0xffff;
1.2879 + NKern::UnlockSystem();
1.2880 + ((ArmMmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
1.2881 + ((ArmMmu*)iMmu)->FreePageTable(ptid);
1.2882 + NKern::LockSystem();
1.2883 + }
1.2884 + }
1.2885 + else
1.2886 + {
1.2887 + __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
1.2888 + Panic(EUnexpectedPageType);
1.2889 + }
1.2890 + }
1.2891 +
1.2892 +
1.2893 +//
1.2894 +// MemModelDemandPaging
1.2895 +//
1.2896 +
1.2897 +class MemModelDemandPaging : public DemandPaging
1.2898 + {
1.2899 +public:
1.2900 + // From RamCacheBase
1.2901 + virtual void Init2();
1.2902 + virtual TInt Init3();
1.2903 + virtual TBool PageUnmapped(SPageInfo* aPageInfo);
1.2904 + // From DemandPaging
1.2905 + virtual TInt Fault(TAny* aExceptionInfo);
1.2906 + virtual void SetOld(SPageInfo* aPageInfo);
1.2907 + virtual void SetFree(SPageInfo* aPageInfo);
1.2908 + virtual void NotifyPageFree(TPhysAddr aPage);
1.2909 + virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
1.2910 + virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
1.2911 + virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
1.2912 + virtual TInt PageState(TLinAddr aAddr);
1.2913 + virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
1.2914 + // New
1.2915 + inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
1.2916 + void InitRomPaging();
1.2917 + void InitCodePaging();
1.2918 + TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid);
1.2919 + TInt PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory);
1.2920 +public:
1.2921 +	// use of the following members is protected by the system lock...
1.2922 + TPte* iPurgePte; // PTE used for temporary mappings during cache purge operations
1.2923 + TLinAddr iPurgeAddr; // address corresponding to iPurgePte
1.2924 + };
1.2925 +
1.2926 +extern void MakeGlobalPTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr);
1.2927 +extern void MakePTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr, TInt aAsid);
1.2928 +
1.2929 +//
1.2930 +// MemModelDemandPaging
1.2931 +//
1.2932 +
1.2933 +
1.2934 +DemandPaging* DemandPaging::New()
1.2935 + {
1.2936 + return new MemModelDemandPaging();
1.2937 + }
1.2938 +
1.2939 +
1.2940 +void MemModelDemandPaging::Init2()
1.2941 + {
1.2942 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2"));
1.2943 + DemandPaging::Init2();
1.2944 +
1.2945 + iPurgeAddr = KDemandPagingTempAddr;
1.2946 + iPurgePte = PtePtrFromLinAddr(iPurgeAddr);
1.2947 +
1.2948 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init2"));
1.2949 + }
1.2950 +
1.2951 +
1.2952 +void MemModelDemandPaging::AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)
1.2953 + {
1.2954 + aReq.iLoadAddr = iTempPages + aReqId * KPageSize * KPageColourCount;
1.2955 + aReq.iLoadPte = PtePtrFromLinAddr(aReq.iLoadAddr);
1.2956 + }
1.2957 +
1.2958 +
1.2959 +TInt MemModelDemandPaging::Init3()
1.2960 + {
1.2961 + TInt r=DemandPaging::Init3();
1.2962 + if(r!=KErrNone)
1.2963 + return r;
1.2964 +
1.2965 + // Create a region for mapping pages during page in
1.2966 + DPlatChunkHw* chunk;
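+	// one window of KPageColourCount pages per paging request, plus one spare window so the
+	// base address can be rounded up to a page-colour boundary below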
1.2967 + TInt chunkSize = (KMaxPagingDevices * KPagingRequestsPerDevice + 1) * KPageColourCount * KPageSize;
1.2968 + DPlatChunkHw::DoNew(chunk, KPhysAddrInvalid, chunkSize, EMapAttrSupRw|EMapAttrFullyBlocking);
1.2969 + if(!chunk)
1.2970 + Panic(EInitialiseFailed);
1.2971 + TInt colourMask = KPageColourMask << KPageShift;
1.2972 + iTempPages = (chunk->iLinAddr + colourMask) & ~colourMask;
1.2973 +
1.2974 + if(RomPagingRequested())
1.2975 + InitRomPaging();
1.2976 +
1.2977 + if (CodePagingRequested())
1.2978 + InitCodePaging();
1.2979 +
1.2980 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init3"));
1.2981 + return KErrNone;
1.2982 + }
1.2983 +
1.2984 +void MemModelDemandPaging::InitRomPaging()
1.2985 + {
1.2986 + // Make page tables for demand paged part of ROM...
1.2987 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("MemModelDemandPaging::Init3 making page tables for paged ROM"));
1.2988 + TLinAddr lin = iRomPagedLinearBase&~KChunkMask; // first chunk with paged ROM in
1.2989 + TLinAddr linEnd = iRomLinearBase+iRomSize;
1.2990 + while(lin<linEnd)
1.2991 + {
1.2992 + // Get a Page Table
1.2993 + TInt ptid = Mmu().PageTableId(lin,0);
1.2994 + if(ptid<0)
1.2995 + {
1.2996 + MmuBase::Wait();
1.2997 + ptid = Mmu().AllocPageTable();
1.2998 + MmuBase::Signal();
1.2999 + __NK_ASSERT_DEBUG(ptid>=0);
1.3000 + Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift);
1.3001 + }
1.3002 +
1.3003 + // Get new page table addresses
1.3004 + TPte* pt = PageTable(ptid);
1.3005 + TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt,0);
1.3006 +
1.3007 + // Pointer to page directory entry
1.3008 + TPde* ppde = ::InitPageDirectory + (lin>>KChunkShift);
1.3009 +
1.3010 + // Fill in Page Table
1.3011 + TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift));
1.3012 + pt += (lin&KChunkMask)>>KPageShift;
1.3013 + TLinAddr firstPte = (TLinAddr)pt; // Will need this to clean page table memory region from cache
1.3014 +
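+		// ROM below iRomPagedLinearBase is unpaged and is mapped directly; pages in the demand-paged
+		// part are left inaccessible so that the first access faults and pages them in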
1.3015 + do
1.3016 + {
1.3017 + if(lin<iRomPagedLinearBase)
1.3018 + *pt++ = Mmu().LinearToPhysical(lin,0) | KRomPtePerm;
1.3019 + else
1.3020 + {
1.3021 + MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
1.3022 + ++pt;
1.3023 + }
1.3024 + lin += KPageSize;
1.3025 + }
1.3026 + while(pt<ptEnd && lin<=linEnd);
1.3027 +
1.3028 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)firstPte, (TUint)pt-firstPte);
1.3029 +
1.3030 + // Add new Page Table to the Page Directory
1.3031 + TPde newpde = ptPhys | KShadowPdePerm;
1.3032 + __KTRACE_OPT2(KPAGING,KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
1.3033 + TInt irq=NKern::DisableAllInterrupts();
1.3034 + *ppde = newpde;
1.3035 + CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
1.3036 + FlushTLBs();
1.3037 + NKern::RestoreInterrupts(irq);
1.3038 + }
1.3039 + }
1.3040 +
1.3041 +
1.3042 +void MemModelDemandPaging::InitCodePaging()
1.3043 + {
1.3044 + // Initialise code paging info
1.3045 + iCodeLinearBase = Mmu().iUserCodeBase;
1.3046 + iCodeSize = Mmu().iMaxUserCodeSize;
1.3047 + }
1.3048 +
1.3049 +
1.3050 +/**
1.3051 +@return ETrue when the unmapped page should be freed, EFalse otherwise
1.3052 +*/
1.3053 +TBool MemModelDemandPaging::PageUnmapped(SPageInfo* aPageInfo)
1.3054 + {
1.3055 + SPageInfo::TType type = aPageInfo->Type();
1.3056 +
1.3057 +	// Only have to deal with cache pages - pages containing code don't get returned to the system
1.3058 +	// when they are decommitted from an individual process, only when the code segment is destroyed
1.3059 + if(type!=SPageInfo::EPagedCache)
1.3060 + {
1.3061 + __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedCode); // shouldn't happen
1.3062 + __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet
1.3063 + return ETrue;
1.3064 + }
1.3065 +
1.3066 + RemovePage(aPageInfo);
1.3067 + AddAsFreePage(aPageInfo);
1.3068 + // Return false to stop DMemModelChunk::DoDecommit from freeing this page
1.3069 + return EFalse;
1.3070 + }
1.3071 +
1.3072 +
1.3073 +void DoSetCodeOld(SPageInfo* aPageInfo, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
1.3074 + {
1.3075 + NThread* currentThread = NKern::CurrentThread();
1.3076 + aPageInfo->SetModifier(currentThread);
1.3077 + // scan all address spaces...
1.3078 + TInt asid = -1;
1.3079 + TInt lastAsid = KArmV6NumAsids-1;
1.3080 + TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
1.3081 + do
1.3082 + {
1.3083 + TUint32 bits = *ptr++;
1.3084 + do
1.3085 + {
1.3086 + ++asid;
1.3087 + if(bits&0x80000000u)
1.3088 + {
1.3089 + // codeseg is mapped in this address space, so update PTE...
1.3090 + TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
1.3091 + TPte pte = *pt;
1.3092 + if(pte&KPtePresentMask)
1.3093 + {
1.3094 + __NK_ASSERT_DEBUG((pte&~KPageMask) == aPageInfo->PhysAddr());
1.3095 + MakePTEInaccessible(pt, pte&~KPtePresentMask, aLinAddr, asid);
1.3096 + }
1.3097 + }
1.3098 + }
1.3099 + while(bits<<=1);
1.3100 + if(NKern::FlashSystem() && aPageInfo->CheckModified(currentThread))
1.3101 + return; // page was modified by another thread
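+		// skip to the last asid covered by this 32-bit word of the bitmap; the ++asid at the top
+		// of the inner loop then moves on to the first asid of the next word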
1.3102 + asid |= 31;
1.3103 + }
1.3104 + while(asid<lastAsid);
1.3105 + }
1.3106 +
1.3107 +
1.3108 +void MemModelDemandPaging::SetOld(SPageInfo* aPageInfo)
1.3109 + {
1.3110 + __ASSERT_SYSTEM_LOCK;
1.3111 + __NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedOld);
1.3112 +
1.3113 + SPageInfo::TType type = aPageInfo->Type();
1.3114 +
1.3115 + if(type==SPageInfo::EPagedROM)
1.3116 + {
1.3117 + // get linear address of page...
1.3118 + TInt offset = aPageInfo->Offset()<<KPageShift;
1.3119 + __NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
1.3120 +
1.3121 + // make page inaccessible...
1.3122 + TLinAddr lin = iRomLinearBase+offset;
1.3123 + TPte* pt = PtePtrFromLinAddr(lin);
1.3124 + MakeGlobalPTEInaccessible(pt, *pt&~KPtePresentMask, lin);
1.3125 + }
1.3126 + else if(type==SPageInfo::EPagedCode)
1.3127 + {
1.3128 + START_PAGING_BENCHMARK;
1.3129 +
1.3130 + // get linear address of page...
1.3131 + TInt offset = aPageInfo->Offset()<<KPageShift;
1.3132 + __NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
1.3133 + TLinAddr lin = iCodeLinearBase+offset;
1.3134 +
1.3135 + // get CodeSegMemory...
1.3136 + DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
1.3137 + __NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);
1.3138 +
1.3139 +#ifdef _DEBUG
1.3140 + TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
1.3141 + __NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
1.3142 +#endif
1.3143 +
1.3144 + // make page inaccessible...
1.3145 + DoSetCodeOld(aPageInfo,codeSegMemory,lin);
1.3146 +
1.3147 + END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld);
1.3148 + }
1.3149 + else if(type==SPageInfo::EPagedCache)
1.3150 + {
1.3151 + // leave page accessible
1.3152 + }
1.3153 + else if(type!=SPageInfo::EPagedFree)
1.3154 + {
1.3155 + __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type()));
1.3156 + Panic(EUnexpectedPageType);
1.3157 + }
1.3158 + NKern::FlashSystem();
1.3159 + }
1.3160 +
1.3161 +
1.3162 +void DoSetCodeFree(SPageInfo* aPageInfo, TPhysAddr aPhysAddr, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
1.3163 + {
1.3164 + NThread* currentThread = NKern::CurrentThread();
1.3165 + aPageInfo->SetModifier(currentThread);
1.3166 + // scan all address spaces...
1.3167 + TInt asid = -1;
1.3168 + TInt lastAsid = KArmV6NumAsids-1;
1.3169 + TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
1.3170 + do
1.3171 + {
1.3172 + TUint32 bits = *ptr++;
1.3173 + do
1.3174 + {
1.3175 + ++asid;
1.3176 + if(bits&0x80000000u)
1.3177 + {
1.3178 + // codeseg is mapped in this address space, so update PTE...
1.3179 + TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
1.3180 + TPte pte = *pt;
1.3181 + if (pte!=KPteNotPresentEntry && (pte&~KPageMask) == aPhysAddr)
1.3182 + MakePTEInaccessible(pt, KPteNotPresentEntry, aLinAddr, asid);
1.3183 + }
1.3184 + }
1.3185 + while(bits<<=1);
1.3186 + if(NKern::FlashSystem())
1.3187 + {
1.3188 + // nobody else should modify page!
1.3189 + __NK_ASSERT_DEBUG(!aPageInfo->CheckModified(currentThread));
1.3190 + }
1.3191 + asid |= 31;
1.3192 + }
1.3193 + while(asid<lastAsid);
1.3194 + }
1.3195 +
1.3196 +
1.3197 +void MemModelDemandPaging::SetFree(SPageInfo* aPageInfo)
1.3198 + {
1.3199 + __ASSERT_SYSTEM_LOCK;
1.3200 + __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
1.3201 + __NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedDead);
1.3202 + if(aPageInfo->LockCount())
1.3203 + Panic(ERamPageLocked);
1.3204 +
1.3205 + SPageInfo::TType type = aPageInfo->Type();
1.3206 + TPhysAddr phys = aPageInfo->PhysAddr();
1.3207 +
1.3208 + if(type==SPageInfo::EPagedROM)
1.3209 + {
1.3210 + // get linear address of page...
1.3211 + TInt offset = aPageInfo->Offset()<<KPageShift;
1.3212 + __NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
1.3213 + TLinAddr lin = iRomLinearBase+offset;
1.3214 +
1.3215 + // unmap it...
1.3216 + TPte* pt = PtePtrFromLinAddr(lin);
1.3217 + MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
1.3218 +
1.3219 +#ifdef BTRACE_PAGING
1.3220 + BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutROM,phys,lin);
1.3221 +#endif
1.3222 + }
1.3223 + else if(type==SPageInfo::EPagedCode)
1.3224 + {
1.3225 + START_PAGING_BENCHMARK;
1.3226 +
1.3227 + // get linear address of page...
1.3228 + TInt offset = aPageInfo->Offset()<<KPageShift;
1.3229 + __NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
1.3230 + TLinAddr lin = iCodeLinearBase+offset;
1.3231 +
1.3232 + // get CodeSegMemory...
1.3233 +		// NOTE, this cannot die because we hold the RamAlloc mutex, and the CodeSegMemory
1.3234 +		// destructor also needs this mutex to do its cleanup...
1.3235 + DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
1.3236 + __NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);
1.3237 +
1.3238 + // remove page from CodeSegMemory (must come before System Lock is released)...
1.3239 + TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
1.3240 + __NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
1.3241 + codeSegMemory->iPages[pageNumber] = KPhysAddrInvalid;
1.3242 +
1.3243 + // unmap page from all processes it's mapped into...
1.3244 + DoSetCodeFree(aPageInfo,phys,codeSegMemory,lin);
1.3245 +
1.3246 + END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree);
1.3247 +#ifdef BTRACE_PAGING
1.3248 + BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCode,phys,lin);
1.3249 +#endif
1.3250 + }
1.3251 + else if(type==SPageInfo::EPagedCache)
1.3252 + {
1.3253 + // get linear address of page...
1.3254 + TInt offset = aPageInfo->Offset()<<KPageShift;
1.3255 + DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
1.3256 + __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
1.3257 + TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
1.3258 +
1.3259 + // unmap it...
1.3260 + TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
1.3261 + TPte* pt = PtePtrFromLinAddr(lin,asid);
1.3262 + *pt = KPteNotPresentEntry;
1.3263 + CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
1.3264 +
1.3265 + InvalidateTLBForPage(lin,asid);
1.3266 +
1.3267 + // actually decommit it from chunk...
1.3268 + TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
1.3269 + SPageTableInfo& ptinfo=Mmu().iPtInfo[ptid];
1.3270 + if(!--ptinfo.iCount)
1.3271 + {
1.3272 + chunk->iPageTables[offset>>KChunkShift] = 0xffff;
1.3273 + NKern::UnlockSystem();
1.3274 + Mmu().DoUnassignPageTable(lin, (TAny*)asid);
1.3275 + Mmu().FreePageTable(ptid);
1.3276 + NKern::LockSystem();
1.3277 + }
1.3278 +
1.3279 +#ifdef BTRACE_PAGING
1.3280 + BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin);
1.3281 +#endif
1.3282 + }
1.3283 + else if(type==SPageInfo::EPagedFree)
1.3284 + {
1.3285 + // already free...
1.3286 +#ifdef BTRACE_PAGING
1.3287 + BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys);
1.3288 +#endif
1.3289 +		// fall through to cache purge code because cache may not have been
1.3290 +		// cleaned for this page if PageUnmapped was called
1.3291 + }
1.3292 + else
1.3293 + {
1.3294 + __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
1.3295 + Panic(EUnexpectedPageType);
1.3296 + return;
1.3297 + }
1.3298 +
1.3299 + NKern::UnlockSystem();
1.3300 +
1.3301 + // purge cache for page...
1.3302 + TInt colour = aPageInfo->Offset()&KPageColourMask;
1.3303 + TPte& pte=iPurgePte[colour];
1.3304 + TLinAddr va=iPurgeAddr+(colour<<KPageShift);
1.3305 + pte=phys|SP_PTE(KArmV6PermRWNO, TheMmu.iCacheMaintenanceTempMapAttr, 1, 1);
1.3306 + CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
1.3307 +
1.3308 + CacheMaintenance::PageToReuse(va,EMemAttNormalCached, KPhysAddrInvalid);
1.3309 +
1.3310 + pte=0;
1.3311 + CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
1.3312 + InvalidateTLBForPage(va,KERNEL_MAPPING);
1.3313 +
1.3314 + NKern::LockSystem();
1.3315 + }
1.3316 +
1.3317 +
1.3318 +void MemModelDemandPaging::NotifyPageFree(TPhysAddr aPage)
1.3319 + {
1.3320 + __KTRACE_OPT(KPAGING, Kern::Printf("MemModelDemandPaging::NotifyPageFree %08x", aPage));
1.3321 + __ASSERT_SYSTEM_LOCK;
1.3322 +
1.3323 + SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aPage);
1.3324 + __ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EPagedCode, MM::Panic(MM::EUnexpectedPageType));
1.3325 + RemovePage(pageInfo);
1.3326 + SetFree(pageInfo);
1.3327 + AddAsFreePage(pageInfo);
1.3328 + }
1.3329 +
1.3330 +
1.3331 +TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo)
1.3332 + {
1.3333 + TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
1.3334 +
1.3335 + // Get faulting address
1.3336 + TLinAddr faultAddress = exc.iFaultAddress;
1.3337 + if(exc.iExcCode==EArmExceptionDataAbort)
1.3338 + {
1.3339 + // Let writes take an exception rather than page in any memory...
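+		// (on ARMv6 and later the DFSR WnR flag, bit 11, is set for write accesses)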
1.3340 + if(exc.iFaultStatus&(1<<11))
1.3341 + return KErrUnknown;
1.3342 + }
1.3343 + else if (exc.iExcCode != EArmExceptionPrefetchAbort)
1.3344 + return KErrUnknown; // Not prefetch or data abort
1.3345 +
1.3346 + // Only handle page translation faults
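+	// (FSR status 0x7 is the ARM "translation fault, page" encoding; the 0x40f mask selects FS[4:0],
+	// i.e. bits 3:0 plus FS[4] at bit 10)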
1.3347 + if((exc.iFaultStatus & 0x40f) != 0x7)
1.3348 + return KErrUnknown;
1.3349 +
1.3350 + DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
1.3351 +
1.3352 +	// check which region the fault occurred in...
1.3353 + TInt asid = 0; // asid != 0 => code paging fault
1.3354 + if(TUint(faultAddress-iRomPagedLinearBase)<iRomPagedSize)
1.3355 + {
1.3356 + // in ROM
1.3357 + }
1.3358 + else if(TUint(faultAddress-iCodeLinearBase)<iCodeSize)
1.3359 + {
1.3360 + // in code
1.3361 + asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;
1.3362 + }
1.3363 + else if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
1.3364 + {
1.3365 + // in aliased memory
1.3366 + faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
1.3367 + if(TUint(faultAddress-iCodeLinearBase)>=iCodeSize)
1.3368 + return KErrUnknown; // not in alias of code
1.3369 + asid = thread->iAliasOsAsid;
1.3370 + __NK_ASSERT_DEBUG(asid != 0);
1.3371 + }
1.3372 + else
1.3373 + return KErrUnknown; // Not in pageable region
1.3374 +
1.3375 + // Check if thread holds fast mutex and claim system lock
1.3376 + NFastMutex* fm = NKern::HeldFastMutex();
1.3377 + TPagingExcTrap* trap = thread->iPagingExcTrap;
1.3378 + if(!fm)
1.3379 + NKern::LockSystem();
1.3380 + else
1.3381 + {
1.3382 + if(!trap || fm!=&TheScheduler.iLock)
1.3383 + {
1.3384 + __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
1.3385 + Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes
1.3386 + }
1.3387 +		// restore address space on multiple memory model (because the trap will
1.3388 +		// bypass any code which would have done this)...
1.3389 + DMemModelThread::RestoreAddressSpace();
1.3390 +
1.3391 + // Current thread already has the system lock...
1.3392 + NKern::FlashSystem(); // Let someone else have a go with the system lock.
1.3393 + }
1.3394 +
1.3395 + // System locked here
1.3396 +
1.3397 + TInt r = KErrNone;
1.3398 + if(thread->IsRealtime())
1.3399 + r = CheckRealtimeThreadFault(thread, aExceptionInfo);
1.3400 + if (r == KErrNone)
1.3401 + r = HandleFault(exc, faultAddress, asid);
1.3402 +
1.3403 + // Restore system lock state
1.3404 + if (fm != NKern::HeldFastMutex())
1.3405 + {
1.3406 + if (fm)
1.3407 + NKern::LockSystem();
1.3408 + else
1.3409 + NKern::UnlockSystem();
1.3410 + }
1.3411 +
1.3412 + // Deal with XTRAP_PAGING
1.3413 + if(r == KErrNone && trap)
1.3414 + {
1.3415 + trap->Exception(1); // Return from exception trap with result '1' (value>0)
1.3416 + // code doesn't continue beyond this point.
1.3417 + }
1.3418 +
1.3419 + return r;
1.3420 + }
1.3421 +
1.3422 +
1.3423 +
1.3424 +TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid)
1.3425 + {
1.3426 + ++iEventInfo.iPageFaultCount;
1.3427 +
1.3428 + // get page table entry...
1.3429 + TPte* pt = SafePtePtrFromLinAddr(aFaultAddress, aAsid);
1.3430 + if(!pt)
1.3431 + return KErrNotFound;
1.3432 + TPte pte = *pt;
1.3433 +
1.3434 + // Do what is required to make page accessible...
1.3435 +
1.3436 + if(pte&KPtePresentMask)
1.3437 + {
1.3438 + // PTE is present, so assume it has already been dealt with
1.3439 +#ifdef BTRACE_PAGING
1.3440 + BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15);
1.3441 +#endif
1.3442 + return KErrNone;
1.3443 + }
1.3444 +
1.3445 + if(pte!=KPteNotPresentEntry)
1.3446 + {
1.3447 +		// PTE already has a page
1.3448 + SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte);
1.3449 + if(pageInfo->State()==SPageInfo::EStatePagedDead)
1.3450 + {
1.3451 + // page currently being unmapped, so do that here...
1.3452 + MakePTEInaccessible(pt, KPteNotPresentEntry, aFaultAddress, aAsid);
1.3453 + }
1.3454 + else
1.3455 + {
1.3456 + // page just needs making young again...
1.3457 + *pt = TPte(pte|KArmV6PteSmallPage); // Update page table
1.3458 + CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
1.3459 + Rejuvenate(pageInfo);
1.3460 +#ifdef BTRACE_PAGING
1.3461 + BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15);
1.3462 +#endif
1.3463 + return KErrNone;
1.3464 + }
1.3465 + }
1.3466 +
1.3467 + // PTE not present, so page it in...
1.3468 + // check if fault in a CodeSeg...
1.3469 + DMemModelCodeSegMemory* codeSegMemory = NULL;
1.3470 + if (!aAsid)
1.3471 + NKern::ThreadEnterCS();
1.3472 + else
1.3473 + {
1.3474 + // find CodeSeg...
1.3475 + DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress);
1.3476 + if (!codeSeg)
1.3477 + return KErrNotFound;
1.3478 + codeSegMemory = codeSeg->Memory();
1.3479 + if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged || codeSegMemory->iOsAsids->NotFree(aAsid, 1))
1.3480 + return KErrNotFound;
1.3481 +
1.3482 + // check if it's paged in but not yet mapped into this process...
1.3483 + TInt pageNumber = (aFaultAddress - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
1.3484 + TPhysAddr page = codeSegMemory->iPages[pageNumber];
1.3485 + if (page != KPhysAddrInvalid)
1.3486 + {
1.3487 + // map it into this process...
1.3488 + SPageInfo* pageInfo = SPageInfo::FromPhysAddr(page);
1.3489 + __NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
1.3490 + *pt = page | (codeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
1.3491 + CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
1.3492 + Rejuvenate(pageInfo);
1.3493 +#ifdef BTRACE_PAGING
1.3494 + BTraceContext8(BTrace::EPaging,BTrace::EPagingMapCode,page,aFaultAddress);
1.3495 +#endif
1.3496 + return KErrNone;
1.3497 + }
1.3498 +
1.3499 + // open reference on CodeSegMemory
1.3500 + NKern::ThreadEnterCS();
1.3501 +#ifdef _DEBUG
1.3502 + TInt r =
1.3503 +#endif
1.3504 + codeSegMemory->Open();
1.3505 + __NK_ASSERT_DEBUG(r==KErrNone);
1.3506 + NKern::FlashSystem();
1.3507 + }
1.3508 +
1.3509 +#ifdef BTRACE_PAGING
1.3510 + BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15);
1.3511 +#endif
1.3512 + TInt r = PageIn(aFaultAddress, aAsid, codeSegMemory);
1.3513 +
1.3514 + NKern::UnlockSystem();
1.3515 +
1.3516 + if(codeSegMemory)
1.3517 + codeSegMemory->Close();
1.3518 +
1.3519 + NKern::ThreadLeaveCS();
1.3520 +
1.3521 + return r;
1.3522 + }
1.3523 +
1.3524 +
1.3525 +TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory)
1.3526 + {
1.3527 + // Get a request object - this may block until one is available
1.3528 + DPagingRequest* req = AcquireRequestObject();
1.3529 +
1.3530 + // Get page table entry
1.3531 + TPte* pt = SafePtePtrFromLinAddr(aAddress, aAsid);
1.3532 +
1.3533 + // Check page is still required...
1.3534 + if(!pt || *pt!=KPteNotPresentEntry)
1.3535 + {
1.3536 +#ifdef BTRACE_PAGING
1.3537 + BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
1.3538 +#endif
1.3539 + ReleaseRequestObject(req);
1.3540 + return pt ? KErrNone : KErrNotFound;
1.3541 + }
1.3542 +
1.3543 + ++iEventInfo.iPageInReadCount;
1.3544 +
1.3545 + // Get a free page
1.3546 + SPageInfo* pageInfo = AllocateNewPage();
1.3547 + __NK_ASSERT_DEBUG(pageInfo);
1.3548 +
1.3549 + // Get physical address of free page
1.3550 + TPhysAddr phys = pageInfo->PhysAddr();
1.3551 + __NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
1.3552 +
1.3553 + // Temporarily map free page
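+	// the temporary load mapping is given the same page colour as the target address, so that on
+	// virtually indexed caches the maintenance done below operates on the same cache alias as the
+	// final mapping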
1.3554 + TInt colour = (aAddress>>KPageShift)&KPageColourMask;
1.3555 + __NK_ASSERT_DEBUG((req->iLoadAddr & (KPageColourMask << KPageShift)) == 0);
1.3556 + req->iLoadAddr |= colour << KPageShift;
1.3557 + TLinAddr loadAddr = req->iLoadAddr;
1.3558 + pt = req->iLoadPte+colour;
1.3559 +// *pt = phys | SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTWAWTWA, 0, 1);
1.3560 + *pt = phys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
1.3561 + CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
1.3562 +
1.3563 + // Read page from backing store
1.3564 + aAddress &= ~KPageMask;
1.3565 + NKern::UnlockSystem();
1.3566 +
1.3567 + TInt r;
1.3568 + if (!aCodeSegMemory)
1.3569 + r = ReadRomPage(req, aAddress);
1.3570 + else
1.3571 + {
1.3572 + r = ReadCodePage(req, aCodeSegMemory, aAddress);
1.3573 + if (r == KErrNone)
1.3574 + aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress);
1.3575 + }
1.3576 + if(r!=KErrNone)
1.3577 + Panic(EPageInFailed);
1.3578 +
1.3579 +	// make caches consistent...
1.3580 +// Cache::IMB_Range(loadAddr, KPageSize);
1.3581 + *pt = phys | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
1.3582 + CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
1.3583 + InvalidateTLBForPage(loadAddr,KERNEL_MAPPING);
1.3584 + CacheMaintenance::CodeChanged(loadAddr, KPageSize, CacheMaintenance::ECPUUncached);
1.3585 +
1.3586 + NKern::LockSystem();
1.3587 +
1.3588 + // Invalidate temporary mapping
1.3589 + MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, loadAddr);
1.3590 +
1.3591 + // Release request object now we're finished with it
1.3592 + req->iLoadAddr &= ~(KPageColourMask << KPageShift);
1.3593 + ReleaseRequestObject(req);
1.3594 +
1.3595 + // Get page table entry
1.3596 + pt = SafePtePtrFromLinAddr(aAddress, aAsid);
1.3597 +
1.3598 + // Check page still needs updating
1.3599 + TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry;
1.3600 + if(aCodeSegMemory)
1.3601 + notNeeded |= aCodeSegMemory->iOsAsids->NotFree(aAsid, 1);
1.3602 + if(notNeeded)
1.3603 + {
1.3604 + // We don't need the new page after all, so put it on the active list as a free page
1.3605 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)"));
1.3606 +#ifdef BTRACE_PAGING
1.3607 + BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
1.3608 +#endif
1.3609 + AddAsFreePage(pageInfo);
1.3610 + return pt ? KErrNone : KErrNotFound;
1.3611 + }
1.3612 +
1.3613 + // Update page info
1.3614 + if (!aCodeSegMemory)
1.3615 + pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift);
1.3616 + else
1.3617 + {
1.3618 + // Check if page has been paged in and mapped into another process while we were waiting
1.3619 + TInt pageNumber = (aAddress - aCodeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
1.3620 + TPhysAddr page = aCodeSegMemory->iPages[pageNumber];
1.3621 + if (page != KPhysAddrInvalid)
1.3622 + {
1.3623 + // don't need page we've just paged in...
1.3624 + AddAsFreePage(pageInfo);
1.3625 +
1.3626 + // map existing page into this process...
1.3627 + pageInfo = SPageInfo::FromPhysAddr(page);
1.3628 + __NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
1.3629 + *pt = page | (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
1.3630 + CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
1.3631 +#ifdef BTRACE_PAGING
1.3632 + BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
1.3633 +#endif
1.3634 + Rejuvenate(pageInfo);
1.3635 + return KErrNone;
1.3636 + }
1.3637 + aCodeSegMemory->iPages[pageNumber] = phys;
1.3638 +
1.3639 + pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift);
1.3640 + }
1.3641 +
1.3642 + // Map page into final location
1.3643 + *pt = phys | (aCodeSegMemory ? (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte) : KRomPtePerm);
1.3644 + CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
1.3645 +#ifdef BTRACE_PAGING
1.3646 + TInt subCat = aCodeSegMemory ? BTrace::EPagingPageInCode : BTrace::EPagingPageInROM;
1.3647 + BTraceContext8(BTrace::EPaging,subCat,phys,aAddress);
1.3648 +#endif
1.3649 +
1.3650 + AddAsYoungest(pageInfo);
1.3651 + BalanceAges();
1.3652 +
1.3653 + return KErrNone;
1.3654 + }
1.3655 +
1.3656 +
1.3657 +inline TUint8 ReadByte(TLinAddr aAddress)
1.3658 + { return *(volatile TUint8*)aAddress; }
1.3659 +
1.3660 +
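+/*
+ * Ensures the page at aPage is paged in. For a null aProcess the address is read directly;
+ * otherwise an alias of the page is created in the current address space and read through,
+ * retrying if a paging fault occurs during the operation.
+ */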
1.3661 +TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)
1.3662 + {
1.3663 + TInt r = KErrBadDescriptor;
1.3664 + XTRAPD(exc,XT_DEFAULT,
1.3665 + if (!aProcess)
1.3666 + {
1.3667 + XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage););
1.3668 + r = KErrNone;
1.3669 + }
1.3670 + else
1.3671 + {
1.3672 + DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
1.3673 + retry:
1.3674 + TInt pagingFault;
1.3675 + XTRAP_PAGING_START(pagingFault);
1.3676 + CHECK_PAGING_SAFE;
1.3677 + // make alias of page in this process
1.3678 + TLinAddr alias_src;
1.3679 + TInt alias_size;
1.3680 + TInt aliasResult = t.Alias(aPage, (DMemModelProcess*)aProcess, 1, EMapAttrReadUser, alias_src, alias_size);
1.3681 + if (aliasResult>=0)
1.3682 + {
1.3683 + // ensure page to be locked is mapped in, by reading from it...
1.3684 + ReadByte(alias_src);
1.3685 + r = KErrNone;
1.3686 + }
1.3687 + XTRAP_PAGING_END;
1.3688 + t.RemoveAlias();
1.3689 + if(pagingFault>0)
1.3690 + goto retry;
1.3691 + }
1.3692 + ); // end of XTRAPD
1.3693 + if(exc)
1.3694 + return KErrBadDescriptor;
1.3695 + return r;
1.3696 + }
1.3697 +
1.3698 +
1.3699 +TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess)
1.3700 + {
1.3701 + TInt asid = 0;
1.3702 + if (aProcess)
1.3703 + asid = ((DMemModelProcess*)aProcess)->iOsAsid;
1.3704 + return Mmu().LinearToPhysical(aPage, asid);
1.3705 + }
1.3706 +
1.3707 +
1.3708 +TInt MemModelDemandPaging::PageState(TLinAddr aAddr)
1.3709 + {
1.3710 + DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
1.3711 + TInt asid = 0;
1.3712 + TPte* ptePtr = 0;
1.3713 + TPte pte = 0;
1.3714 + TInt r = 0;
1.3715 + SPageInfo* pageInfo = NULL;
1.3716 +
1.3717 + NKern::LockSystem();
1.3718 +
1.3719 + DMemModelCodeSegMemory* codeSegMemory = 0;
1.3720 + if(TUint(aAddr-iRomPagedLinearBase)<iRomPagedSize)
1.3721 + r |= EPageStateInRom;
1.3722 + else if (TUint(aAddr-iCodeLinearBase)<iCodeSize)
1.3723 + {
1.3724 + DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aAddr);
1.3725 + if(codeSeg)
1.3726 + codeSegMemory = codeSeg->Memory();
1.3727 + asid = process->iOsAsid;
1.3728 + if (codeSegMemory && codeSegMemory->iOsAsids->NotAllocated(asid, 1))
1.3729 + {
1.3730 + r |= EPageStateInRamCode;
1.3731 + if (codeSegMemory->iIsDemandPaged)
1.3732 + r |= EPageStatePaged;
1.3733 + }
1.3734 + if(process->iCodeChunk)
1.3735 + r |= EPageStateCodeChunkPresent;
1.3736 + }
1.3737 +
1.3738 + ptePtr = SafePtePtrFromLinAddr(aAddr,asid);
1.3739 + if (!ptePtr)
1.3740 + goto done;
1.3741 + r |= EPageStatePageTablePresent;
1.3742 + pte = *ptePtr;
1.3743 + if (pte == KPteNotPresentEntry)
1.3744 + goto done;
1.3745 + r |= EPageStatePtePresent;
1.3746 + if (pte & KPtePresentMask)
1.3747 + r |= EPageStatePteValid;
1.3748 +
1.3749 + pageInfo = SPageInfo::FromPhysAddr(pte);
1.3750 + r |= pageInfo->Type();
1.3751 + r |= pageInfo->State()<<8;
1.3752 +
1.3753 + if (codeSegMemory && codeSegMemory->iPages)
1.3754 + {
1.3755 + TPhysAddr phys = pte & ~KPageMask;
1.3756 + TInt pageNumber = (aAddr - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
1.3757 + if (codeSegMemory->iPages[pageNumber] == phys)
1.3758 + r |= EPageStatePhysAddrPresent;
1.3759 + }
1.3760 +
1.3761 +done:
1.3762 + NKern::UnlockSystem();
1.3763 + return r;
1.3764 + }
1.3765 +
1.3766 +
1.3767 +TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)
1.3768 + {
1.3769 +	// Don't check mutex order for reads from global area, except for the paged part of ROM
1.3770 + TBool rangeInGlobalArea = aStartAddr >= KRomLinearBase;
1.3771 + TBool rangeInPagedRom = iRomPagedLinearBase != 0 && aStartAddr < (iRomLinearBase + iRomSize) && (aStartAddr + aLength) > iRomPagedLinearBase;
1.3772 + return !rangeInGlobalArea || rangeInPagedRom;
1.3773 + }
1.3774 +
1.3775 +
1.3776 +EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
1.3777 + {
1.3778 + MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager;
1.3779 + if(pager)
1.3780 + {
1.3781 + ArmMmu& m = pager->Mmu();
1.3782 + TLinAddr end = aStart+aSize;
1.3783 +
1.3784 + if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) ||
1.3785 + (aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase))
1.3786 + return pager->ReserveLock(aThread,aStart,aSize,*this);
1.3787 + }
1.3788 + return EFalse;
1.3789 + }
1.3790 +
1.3791 +void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
1.3792 +//
1.3793 +// Mark the page at aOffset in aChunk read-only to prevent it being
1.3794 +// modified while defrag is in progress. Save the required information
1.3795 +// to allow the fault handler to deal with this.
1.3796 +// Call this with the system unlocked.
1.3797 +//
1.3798 + {
1.3799 + __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset));
1.3800 +
1.3801 + TInt ptid = aChunk->iPageTables[aOffset>>KChunkShift];
1.3802 + if(ptid == 0xffff)
1.3803 + Panic(EDefragDisablePageFailed);
1.3804 +
1.3805 + NKern::LockSystem();
1.3806 + TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift);
1.3807 + TPte pte = *pPte;
1.3808 + if ((pte & KArmV6PteSmallPage) != KArmV6PteSmallPage
1.3809 + || SP_PTE_PERM_GET(pte) != (TUint)KArmV6PermRWRW)
1.3810 + Panic(EDefragDisablePageFailed);
1.3811 +
1.3812 + iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset;
1.3813 + if (aChunk->iOwningProcess)
1.3814 + iDisabledAddrAsid = ((DMemModelProcess*)(aChunk->iOwningProcess))->iOsAsid;
1.3815 + else
1.3816 + iDisabledAddrAsid = iDisabledAddr<KRomLinearBase ? UNKNOWN_MAPPING : KERNEL_MAPPING;
1.3817 + iDisabledPte = pPte;
1.3818 + iDisabledOldVal = pte;
1.3819 +
1.3820 + *pPte = SP_PTE_PERM_SET(pte, KArmV6PermRORO);
1.3821 + CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1.3822 + InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
1.3823 + NKern::UnlockSystem();
1.3824 + }
1.3825 +
1.3826 +TInt ArmMmu::RamDefragFault(TAny* aExceptionInfo)
1.3827 + {
1.3828 + TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
1.3829 +
1.3830 + // Get faulting address
1.3831 + TLinAddr faultAddress;
1.3832 + if(exc.iExcCode==EArmExceptionDataAbort)
1.3833 + {
1.3834 + faultAddress = exc.iFaultAddress;
1.3835 +		// Defrag can only cause writes to fault on the multiple memory model
1.3836 + if(!(exc.iFaultStatus&(1<<11)))
1.3837 + return KErrUnknown;
1.3838 + }
1.3839 + else
1.3840 + return KErrUnknown; // Not data abort
1.3841 +
1.3842 + // Only handle page permission faults
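+	// (FSR status 0xf is the ARM "permission fault, page" encoding; the 0x40f mask selects FS[4:0],
+	// i.e. bits 3:0 plus FS[4] at bit 10)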
1.3843 + if((exc.iFaultStatus & 0x40f) != 0xf)
1.3844 + return KErrUnknown;
1.3845 +
1.3846 + DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
1.3847 + TInt asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;
1.3848 +
1.3849 + TBool aliased = EFalse;
1.3850 + if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
1.3851 + {
1.3852 + // in aliased memory
1.3853 + aliased = ETrue;
1.3854 + faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
1.3855 + asid = thread->iAliasOsAsid;
1.3856 + __NK_ASSERT_DEBUG(asid != 0);
1.3857 + }
1.3858 +
1.3859 + // Take system lock if not already held
1.3860 + NFastMutex* fm = NKern::HeldFastMutex();
1.3861 + if(!fm)
1.3862 + NKern::LockSystem();
1.3863 + else if(fm!=&TheScheduler.iLock)
1.3864 + {
1.3865 + __KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
1.3866 + Panic(EDefragFaultWhilstFMHeld); // Not allowed to hold mutexes
1.3867 + }
1.3868 +
1.3869 + TInt r = KErrUnknown;
1.3870 +
1.3871 + // check if write access to the page has already been restored and retry if so
1.3872 + TPte* pt = SafePtePtrFromLinAddr(faultAddress, asid);
1.3873 + if(!pt)
1.3874 + {
1.3875 + r = KErrNotFound;
1.3876 + goto leave;
1.3877 + }
1.3878 + if (SP_PTE_PERM_GET(*pt) == (TUint)KArmV6PermRWRW)
1.3879 + {
1.3880 + r = KErrNone;
1.3881 + goto leave;
1.3882 + }
1.3883 +
1.3884 + // check if the fault occurred in the page we are moving
1.3885 + if ( iDisabledPte
1.3886 + && TUint(faultAddress - iDisabledAddr) < TUint(KPageSize)
1.3887 + && (iDisabledAddrAsid < 0 || asid == iDisabledAddrAsid) )
1.3888 + {
1.3889 + // restore access to the page
1.3890 + *iDisabledPte = iDisabledOldVal;
1.3891 + CacheMaintenance::SinglePteUpdated((TLinAddr)iDisabledPte);
1.3892 + InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
1.3893 + if (aliased)
1.3894 + InvalidateTLBForPage(exc.iFaultAddress, ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid);
1.3895 + iDisabledAddr = 0;
1.3896 + iDisabledAddrAsid = -1;
1.3897 + iDisabledPte = NULL;
1.3898 + iDisabledOldVal = 0;
1.3899 + r = KErrNone;
1.3900 + }
1.3901 +
1.3902 +leave:
1.3903 + // Restore system lock state
1.3904 + if (!fm)
1.3905 + NKern::UnlockSystem();
1.3906 +
1.3907 + return r;
1.3908 + }