First public contribution.
1 // Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\memmodel\epoc\multiple\arm\xmmu.cpp
19 #include <mmubase.inl>
21 #include <demand_paging.h>
24 #include "cache_maintenance.inl"
26 #undef __MMU_MACHINE_CODED__
28 // SECTION_PDE(perm, attr, domain, execute, global)
30 // LP_PTE(perm, attr, execute, global)
31 // SP_PTE(perm, attr, execute, global)
33 const TInt KPageColourShift=2;
34 const TInt KPageColourCount=(1<<KPageColourShift);
35 const TInt KPageColourMask=KPageColourCount-1;
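// Illustrative helper (not part of the original source): a mapping's cache
// colour is the low bits of its virtual page number, so two virtual mappings of
// one physical page avoid VIPT aliasing only if these bits agree.
inline TUint PageColour(TLinAddr aAddr)
	{
	return (aAddr >> KPageShift) & KPageColourMask;
	}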
38 const TPde KPdPdePerm=PT_PDE(0);
39 const TPde KPtPdePerm=PT_PDE(0);
40 const TPde KShadowPdePerm=PT_PDE(0);
42 #if defined(__CPU_MEMORY_TYPE_REMAPPING)
43 // ARM1176, ARM11MPCore, ARMv7 and later
44 // __CPU_MEMORY_TYPE_REMAPPING means that only three bits (TEX0:C:B) in the page table define
45 // memory attributes. The kernel runs with a limited set of memory types: strongly ordered,
46 // device, normal uncached and normal WBWA. Due to the lack of a write-through mode, page tables are
47 // write-back, which means that the cache has to be cleaned on every page/directory table update.
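// The idiom used throughout this file is therefore to follow each table write
// with an explicit clean of the modified entry, e.g.:
//     *pPte = physAddr | perm;
//     CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);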
48 const TPte KPdPtePerm= SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
49 const TPte KPtPtePerm= SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
50 const TPte KPtInfoPtePerm= SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1);
51 const TPte KRomPtePerm= SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
52 const TPte KShadowPtePerm= SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
53 const TPde KRomSectionPermissions= SECTION_PDE(KArmV6PermRORO, EMemAttNormalCached, 0, 1, 1);
54 const TPte KUserCodeLoadPte= SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 0);
55 const TPte KUserCodeRunPte= SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0);
56 const TPte KGlobalCodeRunPte= SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 1);
57 const TPte KKernelCodeRunPte= SP_PTE(KArmV6PermRONO, EMemAttNormalCached, 1, 1);
59 const TInt KNormalUncachedAttr = EMemAttNormalUncached;
60 const TInt KNormalCachedAttr = EMemAttNormalCached;
65 const TPte KPtInfoPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
66 #if defined (__CPU_WriteThroughDisabled)
67 const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
68 const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1);
69 const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 1, 1);
70 const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
71 const TPde KRomSectionPermissions = SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWBWAWBWA, 0, 1, 1);
72 const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 0);
73 const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0);
74 const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 1);
75 const TInt KKernelCodeRunPteAttr = KArmV6MemAttWBWAWBWA;
77 const TPte KPdPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
78 const TPte KPtPtePerm=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBRAWTRA, 0, 1);
79 const TPte KRomPtePerm=SP_PTE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 1, 1);
80 const TPte KShadowPtePerm=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
81 const TPde KRomSectionPermissions = SECTION_PDE(KArmV6PermRORO, KArmV6MemAttWTRAWTRA, 0, 1, 1);
82 const TPte KUserCodeLoadPte=SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 0);
83 const TPte KUserCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0);
84 const TPte KGlobalCodeRunPte=SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 1);
85 const TInt KKernelCodeRunPteAttr = KArmV6MemAttWTRAWTRA;
89 #if defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
90 const TInt KKernelCodeRunPtePerm = KArmV6PermRONO;
92 const TInt KKernelCodeRunPtePerm = KArmV6PermRORO;
94 const TPte KKernelCodeRunPte=SP_PTE(KKernelCodeRunPtePerm, KKernelCodeRunPteAttr, 1, 1);
96 const TInt KNormalUncachedAttr = KArmV6MemAttNCNC;
97 const TInt KNormalCachedAttr = KArmV6MemAttWBWAWBWA;
102 extern void __FlushBtb();
104 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
105 extern void remove_and_invalidate_page(TPte* aPte, TLinAddr aAddr, TInt aAsid);
106 extern void remove_and_invalidate_section(TPde* aPde, TLinAddr aAddr, TInt aAsid);
110 LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
112 #if defined(__CPU_MEMORY_TYPE_REMAPPING)
113 // ARM1176, ARM11 MPCore, ARMv7 and later
114 SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1), // EKernelData
115 SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1), // EKernelStack
116 SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1), // EKernelCode - loading
117 SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 1, 1), // EDll (used for global code) - loading
118 SP_PTE(KArmV6PermRORO, EMemAttNormalCached, 1, 0), // EUserCode - run
119 SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 1), // ERamDrive
120 SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // EUserData
121 SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // EDllData
122 SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 1, 0), // EUserSelfModCode
123 SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // ESharedKernelSingle
124 SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // ESharedKernelMultiple
125 SP_PTE(KArmV6PermRWRW, EMemAttNormalCached, 0, 0), // ESharedIo
126 SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1), // ESharedKernelMirror
127 SP_PTE(KArmV6PermRWNO, EMemAttNormalCached, 0, 1), // EKernelMessage
129 SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1), // EKernelData
130 SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1), // EKernelStack
131 #if defined (__CPU_WriteThroughDisabled)
132 SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1), // EKernelCode - loading
133 SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 1, 1), // EDll (used for global code) - loading
134 SP_PTE(KArmV6PermRWRO, KArmV6MemAttWBWAWBWA, 1, 0), // EUserCode - run
136 SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1), // EKernelCode - loading
137 SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTRAWTRA, 1, 1), // EDll (used for global code) - loading
138 SP_PTE(KArmV6PermRWRO, KArmV6MemAttWTRAWTRA, 1, 0), // EUserCode - run
140 SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 1), // ERamDrive
141 SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // EUserData
142 SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // EDllData
143 SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 1, 0), // EUserSelfModCode
144 SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // ESharedKernelSingle
145 SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // ESharedKernelMultiple
146 SP_PTE(KArmV6PermRWRW, KArmV6MemAttWBWAWBWA, 0, 0), // ESharedIo
147 SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1), // ESharedKernelMirror
148 SP_PTE(KArmV6PermRWNO, KArmV6MemAttWBWAWBWA, 0, 1), // EKernelMessage
152 // The domain for each chunk is selected according to its type.
153 // The RamDrive lives in a separate domain, to minimise the risk
154 // of accidental access and corruption. User chunks may also be
155 // located in a separate domain (15) in DEBUG builds.
156 LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
158 PT_PDE(0), // EKernelData
159 PT_PDE(0), // EKernelStack
160 PT_PDE(0), // EKernelCode
162 PT_PDE(USER_MEMORY_DOMAIN), // EUserCode
163 PT_PDE(1), // ERamDrive
164 PT_PDE(USER_MEMORY_DOMAIN), // EUserData
165 PT_PDE(USER_MEMORY_DOMAIN), // EDllData
166 PT_PDE(USER_MEMORY_DOMAIN), // EUserSelfModCode
167 PT_PDE(USER_MEMORY_DOMAIN), // ESharedKernelSingle
168 PT_PDE(USER_MEMORY_DOMAIN), // ESharedKernelMultiple
169 PT_PDE(0), // ESharedIo
170 PT_PDE(0), // ESharedKernelMirror
171 PT_PDE(0), // EKernelMessage
174 // Inline functions for simple transformations
175 inline TLinAddr PageTableLinAddr(TInt aId)
177 return (KPageTableBase+(aId<<KPageTableShift));
180 inline TPte* PageTable(TInt aId)
182 return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
185 inline TPte* PageTableEntry(TInt aId, TLinAddr aAddress)
187 return PageTable(aId) + ((aAddress >> KPageShift) & (KChunkMask >> KPageShift));
190 inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
192 return (KPageDirectoryBase+(aOsAsid<<KPageDirectoryShift));
195 inline TPde* PageDirectoryEntry(TInt aOsAsid, TLinAddr aAddress)
197 return PageDirectory(aOsAsid) + (aAddress >> KChunkShift);
200 extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/, TInt /*aAsid*/);
201 extern void FlushTLBs();
202 extern TUint32 TTCR();
204 TPte* SafePageTableFromPde(TPde aPde)
206 if((aPde&KPdeTypeMask)==KArmV6PdePageTable)
208 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
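// Page tables are KPageTableSize (sub-page) objects, so several share one RAM
// page (a cluster): pi->Offset() identifies the cluster, and the low bits of the
// page table address held in the PDE select the table within it.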
211 TInt id = (pi->Offset()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
212 return PageTable(id);
218 TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
220 if ((TInt)(aAddress>>KChunkShift)>=(TheMmu.iLocalPdSize>>2))
222 TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
223 TPte* pt = SafePageTableFromPde(pde);
225 pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
230 // inline in UREL builds...
232 __forceinline /* RVCT ignores normal inline qualifier :-( */
237 TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
239 // this function only works for process local memory addresses, or for kernel memory (asid==0).
240 __NK_ASSERT_DEBUG(aOsAsid==0 || (TInt)(aAddress>>KChunkShift)<(TheMmu.iLocalPdSize>>2));
241 TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
242 SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
243 TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
244 TPte* pt = PageTable(id);
245 pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
250 TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
252 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
253 TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr,aOsAsid);
254 TPhysAddr nextPhys = physStart&~KPageMask;
256 TUint32* pageList = aPhysicalPageList;
258 TInt pageIndex = aLinAddr>>KPageShift;
259 TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
260 TInt pdeIndex = aLinAddr>>KChunkShift;
261 TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1))
262 ? PageDirectory(aOsAsid)
263 : ::InitPageDirectory;
267 pageIndex &= KChunkMask>>KPageShift;
268 TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
269 if(pagesLeftInChunk>pagesLeft)
270 pagesLeftInChunk = pagesLeft;
271 pagesLeft -= pagesLeftInChunk;
274 TPde pde = *pdePtr++;
275 TUint pdeType = pde&KPdeTypeMask;
276 if(pdeType==KArmV6PdeSection)
278 phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
279 __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
280 TInt n=pagesLeftInChunk;
281 phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
284 TUint32* pageEnd = pageList+n;
290 while(pageList<pageEnd);
295 TPte* pt = SafePageTableFromPde(pde);
298 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical missing page table: PDE=%8x",pde));
305 TUint pte_type = pte & KPteTypeMask;
306 if (pte_type >= KArmV6PteSmallPage)
308 phys = (pte & KPteSmallPageAddrMask);
309 __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
310 phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
313 if(--pagesLeftInChunk)
317 if (pte_type == KArmV6PteLargePage)
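// A large (64K) page is represented by KLargeSmallPageRatio identical PTEs; the
// low bits of the PTE pointer give this 4K page's index within the large page.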
320 TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
321 phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
322 __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
323 TInt n=KLargeSmallPageRatio-pageOffset;
324 if(n>pagesLeftInChunk)
325 n = pagesLeftInChunk;
326 phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
329 TUint32* pageEnd = pageList+n;
335 while(pageList<pageEnd);
338 if(pagesLeftInChunk-=n)
342 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical bad PTE %8x",pte));
346 if(!pageList && nextPhys==KPhysAddrInvalid)
348 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical not contiguous"));
354 if(nextPhys==KPhysAddrInvalid)
356 // Memory is discontiguous...
357 aPhysicalAddress = KPhysAddrInvalid;
362 // Memory is contiguous...
363 aPhysicalAddress = physStart;
368 TInt ArmMmu::PreparePagesForDMA(TLinAddr aLinAddr, TInt aSize, TInt aOsAsid, TPhysAddr* aPhysicalPageList)
369 //Returns the list of physical pages belonging to the specified memory space.
370 //Checks these pages belong to a chunk marked as being trusted.
371 //Locks these pages so they cannot be moved by e.g. RAM defragmentation.
373 SPageInfo* pi = NULL;
374 DChunk* chunk = NULL;
377 __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA %08x+%08x, asid=%d",aLinAddr,aSize,aOsAsid));
379 TUint32* pageList = aPhysicalPageList;
380 TInt pagesInList = 0; //The number of pages we put in the list so far
382 TInt pageIndex = (aLinAddr & KChunkMask) >> KPageShift; // Index of the page within the section
383 TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
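// e.g. aLinAddr=0x1FF0, aSize=0x20 touches two pages: (0xFF0+0x20+0xFFF)>>12 == 2.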
385 TInt pdeIndex = aLinAddr>>KChunkShift;
388 MmuBase::Wait(); // RamAlloc Mutex for accessing page/directory tables.
389 NKern::LockSystem();// System lock for accessing SPageInfo objects.
391 TPde* pdePtr = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid) : ::InitPageDirectory;
392 pdePtr += pdeIndex;//This points to the first pde
396 TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
397 if(pagesLeftInChunk>pagesLeft)
398 pagesLeftInChunk = pagesLeft;
400 pagesLeft -= pagesLeftInChunk;
402 TPte* pt = SafePageTableFromPde(*pdePtr++);
403 if(!pt) { err = KErrNotFound; goto fail; }// Cannot get page table.
407 for(;pagesLeftInChunk--;)
409 TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
410 pi = SPageInfo::SafeFromPhysAddr(phys);
411 if(!pi) { err = KErrNotFound; goto fail; }// Invalid address
413 __KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
415 {//This is the first page. Check 'trusted' bit.
416 if (pi->Type()!= SPageInfo::EChunk)
417 { err = KErrAccessDenied; goto fail; }// The first page does not belong to a chunk.
419 chunk = (DChunk*)pi->Owner();
420 if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
421 { err = KErrAccessDenied; goto fail; }// Not a trusted chunk
426 if ( (++pagesInList&127) == 0) //release system lock temporarily on every 512K
427 NKern::FlashSystem();
432 if (pi->Type()!= SPageInfo::EChunk)
433 { err = KErrAccessDenied; goto fail; }// The last page does not belong to a chunk.
435 if (chunk && (chunk != (DChunk*)pi->Owner()))
436 { err = KErrArgument; goto fail; }//The first & the last page do not belong to the same chunk.
438 NKern::UnlockSystem();
443 __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
444 NKern::UnlockSystem();
446 ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
450 TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
451 // Unlocks physical pages.
452 // @param aPhysicalPageList - points to the list of physical pages that should be released.
453 // @param aPageCount - the number of physical pages in the list.
456 __KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));
460 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
463 NKern::UnlockSystem();
466 __KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
469 NKern::UnlockSystem();
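// Illustrative caller-side pairing (hypothetical driver code, not part of this
// file): PreparePagesForDMA and ReleasePagesFromDMA bracket the DMA transfer so
// the pages cannot be moved while the hardware owns them.
//     TInt nPages = ((addr & KPageMask) + size + KPageMask) >> KPageShift;
//     TInt r = TheMmu.PreparePagesForDMA(addr, size, asid, pageList);
//     if (r == KErrNone)
//         {
//         // ... program and run the DMA transfer using pageList ...
//         TheMmu.ReleasePagesFromDMA(pageList, nPages);
//         }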
473 TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
475 // Find the physical address corresponding to a given linear address in a specified OS
476 // address space. Call with system locked.
479 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
480 TInt pdeIndex=aLinAddr>>KChunkShift;
481 TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
482 TPhysAddr pa=KPhysAddrInvalid;
483 if ((pde&KPdePresentMask)==KArmV6PdePageTable)
485 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
488 TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
489 TPte* pPte=PageTable(id);
490 TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift];
491 if (pte & KArmV6PteSmallPage)
493 pa=(pte&KPteSmallPageAddrMask)+(aLinAddr&~KPteSmallPageAddrMask);
494 __KTRACE_OPT(KMMU,Kern::Printf("Mapped with small page - returning %08x",pa));
496 else if ((pte & KArmV6PteTypeMask) == KArmV6PteLargePage)
498 pa=(pte&KPteLargePageAddrMask)+(aLinAddr&~KPteLargePageAddrMask);
499 __KTRACE_OPT(KMMU,Kern::Printf("Mapped with large page - returning %08x",pa));
503 else if ((pde&KPdePresentMask)==KArmV6PdeSection)
505 pa=(pde&KPdeSectionAddrMask)|(aLinAddr&~KPdeSectionAddrMask);
506 __KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x",pa));
511 // permission table indexed by XN:APX:AP1:AP0
512 static const TInt PermissionLookup[16]=
514 0, //0 0 0 0 no access
515 EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup, //0 0 0 1 RW sup execute
516 EMapAttrWriteSup|EMapAttrReadUser|EMapAttrExecUser, //0 0 1 0 supRW usrR execute
517 EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser,//0 0 1 1 supRW usrRW execute
518 0, //0 1 0 0 reserved
519 EMapAttrReadSup|EMapAttrExecSup, //0 1 0 1 supR execute
520 EMapAttrReadUser|EMapAttrExecUser, //0 1 1 0 supR usrR execute
521 0, //0 1 1 1 reserved
522 0, //1 0 0 0 no access
523 EMapAttrWriteSup|EMapAttrReadSup, //1 0 0 1 RW sup
524 EMapAttrWriteSup|EMapAttrReadUser, //1 0 1 0 supRW usrR
525 EMapAttrWriteUser|EMapAttrReadUser, //1 0 1 1 supRW usrRW
526 0, //1 1 0 0 reserved
527 EMapAttrReadSup, //1 1 0 1 supR
528 EMapAttrReadUser, //1 1 1 0 supR usrR
529 EMapAttrReadUser, //1 1 1 1 supR usrR
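// Illustrative sketch (not part of the original source), assuming the ARMv6
// small-page PTE layout with XN in bit 0, AP[1:0] in bits 5:4 and APX in bit 9:
// this is how the XN:APX:AP1:AP0 index into the table above could be formed.
inline TInt PermissionIndex(TPte aPte)
	{
	return ((aPte & 1) << 3) | (((aPte >> 9) & 1) << 2) | ((aPte >> 4) & 3);
	}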
532 TInt ArmMmu::PageTableId(TLinAddr aAddr, TInt aOsAsid)
535 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::PageTableId(%08x,%d)",aAddr,aOsAsid));
536 TInt pdeIndex=aAddr>>KChunkShift;
537 TPde pde = (pdeIndex<(iLocalPdSize>>2) || (iAsidInfo[aOsAsid]&1)) ? PageDirectory(aOsAsid)[pdeIndex] : ::InitPageDirectory[pdeIndex];
538 if ((pde&KArmV6PdeTypeMask)==KArmV6PdePageTable)
540 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
542 id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
544 __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
548 // Used only during boot for recovery of RAM drive
549 TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
551 TInt id=KErrNotFound;
552 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr));
553 TPde* kpd=(TPde*)KPageDirectoryBase; // kernel page directory
554 TInt pdeIndex=aAddr>>KChunkShift;
555 TPde pde = kpd[pdeIndex];
556 if ((pde & KArmV6PdeTypeMask) == KArmV6PdePageTable)
558 aPtPhys = pde & KPdePageTableAddrMask;
559 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
562 SPageInfo::TType type = pi->Type();
563 if (type == SPageInfo::EPageTable)
564 id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
565 else if (type == SPageInfo::EUnused)
569 __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
573 TBool ArmMmu::PteIsPresent(TPte aPte)
575 return aPte & KArmV6PteTypeMask;
578 TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex)
580 TUint32 pte_type = aPte & KArmV6PteTypeMask;
581 if (pte_type == KArmV6PteLargePage)
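// Large-page PTEs are replicated for each 4K entry, so add the index's offset
// within the 64K page to the large-page base address.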
582 return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
583 else if (pte_type != 0)
584 return aPte & KPteSmallPageAddrMask;
585 return KPhysAddrInvalid;
588 TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr)
590 TPde* kpd = (TPde*)KPageDirectoryBase; // kernel page directory
591 TPde pde = kpd[aAddr>>KChunkShift];
592 if ((pde & KPdePresentMask) == KArmV6PdeSection)
593 return pde & KPdeSectionAddrMask;
594 return KPhysAddrInvalid;
599 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1"));
604 iPageShift=KPageShift;
605 iChunkSize=KChunkSize;
606 iChunkMask=KChunkMask;
607 iChunkShift=KChunkShift;
608 iPageTableSize=KPageTableSize;
609 iPageTableMask=KPageTableMask;
610 iPageTableShift=KPageTableShift;
611 iPtClusterSize=KPtClusterSize;
612 iPtClusterMask=KPtClusterMask;
613 iPtClusterShift=KPtClusterShift;
614 iPtBlockSize=KPtBlockSize;
615 iPtBlockMask=KPtBlockMask;
616 iPtBlockShift=KPtBlockShift;
617 iPtGroupSize=KChunkSize/KPageTableSize;
618 iPtGroupMask=iPtGroupSize-1;
619 iPtGroupShift=iChunkShift-iPageTableShift;
620 //TInt* iPtBlockCount; // dynamically allocated - Init2
621 //TInt* iPtGroupCount; // dynamically allocated - Init2
622 iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
623 iPageTableLinBase=KPageTableBase;
624 //iRamPageAllocator; // dynamically allocated - Init2
625 //iAsyncFreeList; // dynamically allocated - Init2
626 //iPageTableAllocator; // dynamically allocated - Init2
627 //iPageTableLinearAllocator;// dynamically allocated - Init2
628 iPtInfoPtePerm=KPtInfoPtePerm;
629 iPtPtePerm=KPtPtePerm;
630 iPtPdePerm=KPtPdePerm;
631 iUserCodeLoadPtePerm=KUserCodeLoadPte;
632 iKernelCodePtePerm=KKernelCodeRunPte;
634 iSecondTempAddr=KSecondTempAddr;
635 iMapSizes=KPageSize|KLargePageSize|KChunkSize;
636 iRomLinearBase = ::RomHeaderAddress;
637 iRomLinearEnd = KRomLinearEnd;
638 iShadowPtePerm = KShadowPtePerm;
639 iShadowPdePerm = KShadowPdePerm;
642 TInt total_ram=TheSuperPage().iTotalRamSize;
644 // Large or small configuration?
645 // This is determined by the bootstrap based on RAM size
647 __NK_ASSERT_ALWAYS(ttcr==1 || ttcr==2);
648 TBool large = (ttcr==1);
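// With TTCR.N==n, TTBR0 (the per-process directory) translates the bottom 4GB>>n
// of the address space: N==1 gives a 2GB local region (8K local page directory),
// N==2 gives 1GB (4K local page directory); see iLocalPdSize below.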
650 // calculate cache colouring...
651 TInt iColourCount = 0;
652 TInt dColourCount = 0;
653 TUint32 ctr = InternalCache::TypeRegister();
654 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
656 __NK_ASSERT_ALWAYS((ctr>>29)==0); // check ARMv6 format
662 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheTypeRegister = %08x",ctr));
663 __NK_ASSERT_ALWAYS((ctr>>29)==4); // check ARMv7 format
664 TUint l1ip = (ctr>>14)&3; // L1 instruction cache indexing and tagging policy
665 __NK_ASSERT_ALWAYS(l1ip>=2); // check I cache is physically tagged
667 TUint32 clidr = InternalCache::LevelIDRegister();
668 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("CacheLevelIDRegister = %08x",clidr));
669 TUint l1type = clidr&7;
672 if(l1type==2 || l1type==3 || l1type==4)
674 // we have an L1 data cache...
675 TUint32 csir = InternalCache::SizeIdRegister(0,0);
676 TUint sets = ((csir>>13)&0x7fff)+1;
677 TUint ways = ((csir>>3)&0x3ff)+1;
678 TUint lineSizeShift = (csir&7)+4;
679 // assume L1 data cache is VIPT and alias checks broken and so we need data cache colouring...
680 dColourCount = (sets<<lineSizeShift)>>KPageShift;
681 if(l1type==4) // unified cache, so set instruction cache colour as well...
682 iColourCount = (sets<<lineSizeShift)>>KPageShift;
683 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1DCache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
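// Worked example for the computation above: a 32K 4-way VIPT cache with 32-byte
// lines has sets=256 and lineSizeShift=5, so one way spans 256<<5 = 8K = 2 pages,
// giving a colour count of 2.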
686 if(l1type==1 || l1type==3)
688 // we have a separate L1 instruction cache...
689 TUint32 csir = InternalCache::SizeIdRegister(1,0);
690 TUint sets = ((csir>>13)&0x7fff)+1;
691 TUint ways = ((csir>>3)&0x3ff)+1;
692 TUint lineSizeShift = (csir&7)+4;
693 iColourCount = (sets<<lineSizeShift)>>KPageShift;
694 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache = 0x%x,0x%x,%d colourCount=%d",sets,ways,lineSizeShift,(sets<<lineSizeShift)>>KPageShift));
699 // PIPT cache, so no colouring restrictions...
700 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is PIPT"));
706 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("L1ICache is VIPT"));
709 TUint colourShift = 0;
710 for(TUint colourCount=Max(iColourCount,dColourCount); colourCount!=0; colourCount>>=1)
712 iAliasSize=KPageSize<<colourShift;
713 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iAliasSize=0x%x",iAliasSize));
714 iAliasMask=iAliasSize-1;
715 iAliasShift=KPageShift+colourShift;
717 iDecommitThreshold = CacheMaintenance::SyncAllPerformanceThresholdPages();
719 iNumOsAsids=KArmV6NumAsids;
720 iNumGlobalPageDirs=1;
721 //iOsAsidAllocator; // dynamically allocated - Init2
722 iGlobalPdSize=KPageDirectorySize;
723 iGlobalPdShift=KPageDirectoryShift;
724 iAsidGroupSize=KChunkSize/KPageDirectorySize;
725 iAsidGroupMask=iAsidGroupSize-1;
726 iAsidGroupShift=KChunkShift-KPageDirectoryShift;
727 iUserLocalBase=KUserLocalDataBase;
728 iAsidInfo=(TUint32*)KAsidInfoBase;
729 iPdeBase=KPageDirectoryBase;
730 iPdPtePerm=KPdPtePerm;
731 iPdPdePerm=KPdPdePerm;
732 iRamDriveMask=0x00f00000;
733 iGlobalCodePtePerm=KGlobalCodeRunPte;
734 #if defined(__CPU_MEMORY_TYPE_REMAPPING)
735 iCacheMaintenanceTempMapAttr = CacheMaintenance::TemporaryMapping();
737 switch(CacheMaintenance::TemporaryMapping())
739 case EMemAttNormalUncached:
740 iCacheMaintenanceTempMapAttr = KArmV6MemAttNCNC;
742 case EMemAttNormalCached:
743 iCacheMaintenanceTempMapAttr = KArmV6MemAttWBWAWBWA;
746 Panic(ETempMappingFailed);
749 iMaxDllDataSize=Min(total_ram/2, 0x08000000); // phys RAM/2 up to 128Mb
750 iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask; // round up to chunk size
751 iMaxUserCodeSize=Min(total_ram, 0x10000000); // phys RAM up to 256Mb
752 iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask; // round up to chunk size
755 iLocalPdSize=KPageDirectorySize/2;
756 iLocalPdShift=KPageDirectoryShift-1;
757 iUserSharedBase=KUserSharedDataBase2GB;
758 iUserLocalEnd=iUserSharedBase-iMaxDllDataSize;
759 iUserSharedEnd=KUserSharedDataEnd2GB-iMaxUserCodeSize;
760 iDllDataBase=iUserLocalEnd;
761 iUserCodeBase=iUserSharedEnd;
765 iLocalPdSize=KPageDirectorySize/4;
766 iLocalPdShift=KPageDirectoryShift-2;
767 iUserSharedBase=KUserSharedDataBase1GB;
768 iUserLocalEnd=iUserSharedBase;
769 iDllDataBase=KUserSharedDataEnd1GB-iMaxDllDataSize;
770 iUserCodeBase=iDllDataBase-iMaxUserCodeSize;
771 iUserSharedEnd=iUserCodeBase;
773 __KTRACE_OPT(KMMU,Kern::Printf("LPD size %08x GPD size %08x Alias size %08x",
774 iLocalPdSize, iGlobalPdSize, iAliasSize));
775 __KTRACE_OPT(KMMU,Kern::Printf("ULB %08x ULE %08x USB %08x USE %08x",iUserLocalBase,iUserLocalEnd,
776 iUserSharedBase,iUserSharedEnd));
777 __KTRACE_OPT(KMMU,Kern::Printf("DDB %08x UCB %08x",iDllDataBase,iUserCodeBase));
782 PP::MaxUserThreadStack=0x14000; // 80K - STDLIB asks for 64K for PosixServer!!!!
783 PP::UserThreadStackGuard=0x2000; // 8K
784 PP::MaxStackSpacePerProcess=0x200000; // 2Mb
785 K::SupervisorThreadStackSize=0x1000; // 4K
786 PP::SupervisorThreadStackGuard=0x1000; // 4K
787 K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
788 PP::RamDriveStartAddress=KRamDriveStartAddress;
789 PP::RamDriveRange=KRamDriveMaxSize;
790 PP::RamDriveMaxSize=KRamDriveMaxSize; // may be reduced later
791 K::MemModelAttributes=EMemModelTypeMultiple|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
792 EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
793 EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;
795 Arm::DefaultDomainAccess=KDefaultDomainAccess;
800 void ArmMmu::DoInit2()
802 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::DoInit2"));
803 iTempPte=PageTable(PageTableId(iTempAddr,0))+((iTempAddr&KChunkMask)>>KPageShift);
804 iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift);
805 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x",
806 iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
807 CreateKernelSection(KKernelSectionEnd, iAliasShift);
808 CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd);
812 #ifndef __MMU_MACHINE_CODED__
813 void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
815 // Map a list of physical RAM pages into a specified page table with specified PTE permissions.
816 // Update the page information array.
817 // Call this with the system locked.
820 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
821 aId, aType, aPtr, aOffset, aNumPages, aPtePerm));
823 SPageTableInfo& ptinfo=iPtInfo[aId];
824 ptinfo.iCount+=aNumPages;
825 aOffset>>=KPageShift;
826 TInt ptOffset=aOffset & KPagesInPDEMask; // entry number in page table
827 TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
829 TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache.
833 TPhysAddr pa = *aPageList++;
834 if(pa==KPhysAddrInvalid)
837 __NK_ASSERT_DEBUG(aType==SPageInfo::EInvalid);
840 *pPte++ = pa | aPtePerm; // insert PTE
841 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
842 if (aType!=SPageInfo::EInvalid)
844 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
847 pi->Set(aType,aPtr,aOffset);
848 __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
849 ++aOffset; // increment offset for next page
853 CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
856 void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
858 // Map consecutive physical pages into a specified page table with specified PTE permissions.
859 // Update the page information array if RAM pages are being mapped.
860 // Call this with the system locked.
863 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
864 aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
865 SPageTableInfo& ptinfo=iPtInfo[aId];
866 ptinfo.iCount+=aNumPages;
867 aOffset>>=KPageShift;
868 TInt ptOffset=aOffset & KPagesInPDEMask; // entry number in page table
869 TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset; // address of first PTE
871 TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table changes in cache
874 if(aType==SPageInfo::EInvalid)
877 pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
880 *pPte++ = aPhysAddr|aPtePerm; // insert PTE
881 aPhysAddr+=KPageSize;
882 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
885 pi->Set(aType,aPtr,aOffset);
886 __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
887 ++aOffset; // increment offset for next page
892 CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)pPte-firstPte);
895 void ArmMmu::MapVirtual(TInt aId, TInt aNumPages)
897 // Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing
898 // virtual address space to a chunk. No pages are mapped.
899 // Call this with the system locked.
902 SPageTableInfo& ptinfo=iPtInfo[aId];
903 ptinfo.iCount+=aNumPages;
906 void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* aProcess)
908 // Replace the mapping at address aAddr in page table aId.
909 // Update the page information array for both the old and new pages.
910 // Return physical address of old page if it is now ready to be freed.
911 // Call this with the system locked.
912 // May be called with interrupts disabled, do not enable/disable them.
915 TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table
916 TPte* pPte=PageTable(aId)+ptOffset; // address of PTE
918 TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
919 (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
921 if (pte & KArmV6PteSmallPage)
923 __ASSERT_ALWAYS((pte & KPteSmallPageAddrMask) == aOldAddr, Panic(ERemapPageFailed));
924 SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
925 __ASSERT_DEBUG(oldpi->LockCount()==0,Panic(ERemapPageFailed));
928 *pPte = aNewAddr | aPtePerm; // overwrite PTE
929 CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
930 InvalidateTLBForPage(aAddr,asid); // flush TLB entry
932 // update new pageinfo, clear old
933 SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr);
934 pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
939 Panic(ERemapPageFailed);
943 void ArmMmu::RemapPageByAsid(TBitMapAllocator* aOsAsids, TLinAddr aLinAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm)
945 // Replace the mapping at address aLinAddr in the relevant page table for all
946 // ASIDs specified in aOsAsids, but only if the currently mapped address is
948 // Update the page information array for both the old and new pages.
949 // Call this with the system unlocked.
952 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageByAsid() linaddr=%08x oldaddr=%08x newaddr=%08x perm=%08x", aLinAddr, aOldAddr, aNewAddr, aPtePerm));
955 TInt lastAsid = KArmV6NumAsids - 1;
956 TUint32* ptr = aOsAsids->iMap;
960 TUint32 bits = *ptr++;
964 if(bits & 0x80000000u)
966 // mapped in this address space, so update PTE...
967 TPte* pPte = PtePtrFromLinAddr(aLinAddr, asid);
969 if ((pte&~KPageMask) == aOldAddr)
971 *pPte = aNewAddr | aPtePerm;
972 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x in asid %d",*pPte,pPte,asid));
973 CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
974 InvalidateTLBForPage(aLinAddr,asid); // flush TLB entry
979 NKern::FlashSystem();
982 while(asid<lastAsid);
984 // copy pageinfo attributes and mark old page unused
985 SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
986 SPageInfo::FromPhysAddr(aNewAddr)->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
989 NKern::UnlockSystem();
992 TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
994 // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
995 // pages into aPageList, and count of unmapped pages into aNumPtes.
996 // Return number of pages still mapped using this page table.
997 // Call this with the system locked.
998 // On the multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedPages instead.
1000 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
1001 TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table
1002 TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
1006 TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
1007 (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
1012 TPte pte=*pPte; // get original PTE
1013 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1014 remove_and_invalidate_page(pPte, aAddr, asid);
1017 *pPte++=0; // clear PTE
1020 // We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
1021 // these to PageUnmapped, as the page doesn't become free until it's unmapped from all
1023 if (pte != KPteNotPresentEntry)
1026 if (pte & KArmV6PteSmallPage)
1029 #if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1030 // Remove_and_invalidate_page will sort out cache and TLB.
1031 // When __CPU_ARM1136_ERRATUM_353494_FIXED, we have to do it here.
1032 CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
1033 if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
1034 InvalidateTLBForPage(aAddr,asid); // flush any corresponding TLB entry
1036 TPhysAddr pa=pte & KPteSmallPageAddrMask; // physical address of unmapped page
1039 SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
1040 if(iRamCache->PageUnmapped(pi))
1042 pi->SetUnused(); // mark page as unused
1043 if (pi->LockCount()==0)
1045 *aPageList++=pa; // store in page list
1046 ++nf; // count free pages
1051 *aPageList++=pa; // store in page list
1058 SPageTableInfo& ptinfo=iPtInfo[aId];
1059 TInt r=(ptinfo.iCount-=np);
1061 r|=KUnmapPagesTLBFlushDeferred;
1064 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1068 __KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
1069 return r; // return number of pages remaining in this page table
1072 TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
1074 // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
1075 // pages into aPageList, and count of unmapped pages into aNumPtes.
1076 // Adjust the page table reference count as if aNumPages pages were unmapped.
1077 // Return number of pages still mapped using this page table.
1078 // Call this with the system locked.
1079 // On the multiple memory model, do not call this method with aSetPagesFree false. Call UnmapUnownedVirtual instead.
1082 SPageTableInfo& ptinfo=iPtInfo[aId];
1083 TInt newCount = ptinfo.iCount - aNumPages;
1084 UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
1085 ptinfo.iCount = newCount;
1086 aNumPtes = aNumPages;
1090 TInt ArmMmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages,
1091 TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
1093 * Unmaps the specified area at address aAddr in page table aId.
1094 * Places physical addresses of non-demand-paged unmapped pages into aPageList.
1095 * Corresponding linear addresses are placed into aLAPageList.
1096 * 'Old' demand-paged pages (which hold an invalid PTE with a physical address) are neither unmapped nor
1097 * included in aPageList, but are still counted in aNumPtes.
1099 * This method should be called to decommit physical memory not owned by the chunk. As we do not know
1100 * the origin of such memory, PtInfo could be invalid (or doesn't exist), so cache maintenance may not be
1101 * able to obtain the mapping colour. For that reason, this also returns the former linear address of each page
1104 * @pre All pages are mapped within a single page table identified by aId.
1105 * @pre On entry, system locked is held and is not released during the execution.
1107 * @arg aId Id of the page table that maps the pages.
1108 * @arg aAddr Linear address of the start of the area.
1109 * @arg aNumPages The number of pages to unmap.
1110 * @arg aProcess The owning process of the memory area to unmap.
1111 * @arg aPageList On exit, holds the list of unmapped pages.
1112 * @arg aLAPageList On exit, holds the list of linear addresses of unmapped pages.
1113 * @arg aNumFree On exit, holds the number of pages in aPageList.
1114 * @arg aNumPtes On exit, holds the number of unmapped pages. This includes demand-paged 'old'
1115 * pages (with an invalid page table entry still holding the address of the physical page).
1117 * @return The number of pages still mapped using this page table. It is ORed with
1118 * KUnmapPagesTLBFlushDeferred if the TLB flush is not executed - which requires
1119 * the caller to do a global TLB flush.
1122 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapUnownedPages() id=%d addr=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
1123 TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table
1124 TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
1128 TInt asid=aProcess ? ((DMemModelProcess*)aProcess)->iOsAsid :
1129 (aAddr<KRomLinearBase ? (TInt)UNKNOWN_MAPPING : (TInt)KERNEL_MAPPING );
1133 TPte pte=*pPte; // get original PTE
1134 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1135 remove_and_invalidate_page(pPte, aAddr, asid);
1138 *pPte++=0; // clear PTE
1141 // We count all unmapped pages in np, including demand paged 'old' pages - but we don't pass
1142 // these to PageUnmapped, as the page doesn't become free until it's unmapped from all
1144 if (pte != KPteNotPresentEntry)
1147 if (pte & KArmV6PteSmallPage)
1150 #if !defined(__CPU_ARM1136__) || defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1151 // Remove_and_invalidate_page will sort out cache and TLB.
1152 // When __CPU_ARM1136_ERRATUM_353494_FIXED, we have to do it here.
1153 CacheMaintenance::SinglePteUpdated((TLinAddr)(pPte-1));
1154 if (asid >= 0) //otherwise, KUnmapPagesTLBFlushDeferred will be returned.
1155 InvalidateTLBForPage(aAddr,asid); // flush any corresponding TLB entry
1157 TPhysAddr pa=pte & KPteSmallPageAddrMask; // physical address of unmapped page
1159 *aPageList++=pa; // store physical address in page list
1160 *aLAPageList++=aAddr; // store linear address in page list
1167 SPageTableInfo& ptinfo=iPtInfo[aId];
1168 TInt r=(ptinfo.iCount-=np);
1170 r|=KUnmapPagesTLBFlushDeferred;
1173 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1177 __KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
1178 return r; // return number of pages remaining in this page table
1182 TInt ArmMmu::UnmapUnownedVirtual(TInt aId, TUint32 aAddr, TInt aNumPages,
1183 TPhysAddr* aPageList, TLinAddr* aLAPageList,TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
1185 // Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
1186 // pages into aPageList, and count of unmapped pages into aNumPtes.
1187 // Adjust the page table reference count as if aNumPages pages were unmapped.
1188 // Return number of pages still mapped using this page table.
1189 // Call this with the system locked.
1192 SPageTableInfo& ptinfo=iPtInfo[aId];
1193 TInt newCount = ptinfo.iCount - aNumPages;
1194 UnmapUnownedPages(aId, aAddr, aNumPages, aPageList, aLAPageList, aNumPtes, aNumFree, aProcess);
1195 ptinfo.iCount = newCount;
1196 aNumPtes = aNumPages;
1200 void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
1202 // Assign an allocated page table to map a given linear address with specified permissions.
1203 // This should be called with the system unlocked and the MMU mutex held.
1206 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
1207 TLinAddr ptLin=PageTableLinAddr(aId);
1208 TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
1209 TInt pdeIndex=TInt(aAddr>>KChunkShift);
1210 TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
1211 TInt os_asid=(TInt)aOsAsids;
1212 if (TUint32(os_asid)<TUint32(iNumOsAsids))
1215 TPde* pageDir=PageDirectory(os_asid);
1216 NKern::LockSystem();
1217 pageDir[pdeIndex]=ptPhys|aPdePerm; // will blow up here if the address is in the global region and aOsAsid doesn't have a global PD
1218 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1219 NKern::UnlockSystem();
1221 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
1223 else if (os_asid==-1 && gpd)
1225 // all OS ASIDs, address in global region
1226 TInt num_os_asids=iNumGlobalPageDirs;
1227 const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
1228 for (os_asid=0; num_os_asids; ++os_asid)
1230 if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
1232 // this OS ASID exists and has a global page directory
1233 TPde* pageDir=PageDirectory(os_asid);
1234 NKern::LockSystem();
1235 pageDir[pdeIndex]=ptPhys|aPdePerm;
1236 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1237 NKern::UnlockSystem();
1239 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
1246 // selection of OS ASIDs or all OS ASIDs
1247 const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
1249 pB=iOsAsidAllocator; // 0's in positions which exist
1250 TInt num_os_asids=pB->iSize-pB->iAvail;
1251 for (os_asid=0; num_os_asids; ++os_asid)
1253 if (pB->NotAllocated(os_asid,1))
1254 continue; // os_asid is not needed
1255 TPde* pageDir=PageDirectory(os_asid);
1256 NKern::LockSystem();
1257 pageDir[pdeIndex]=ptPhys|aPdePerm;
1258 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1259 NKern::UnlockSystem();
1261 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
1267 void ArmMmu::RemapPageTableSingle(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, TInt aOsAsid)
1269 // Replace a single page table mapping the specified linear address.
1270 // This should be called with the system locked and the MMU mutex held.
1273 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableSingle %08x to %08x at %08x asid %d",aOld,aNew,aAddr,aOsAsid));
1274 TPde* pageDir=PageDirectory(aOsAsid);
1275 TInt pdeIndex=TInt(aAddr>>KChunkShift);
1276 TPde pde=pageDir[pdeIndex];
1277 __ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed));
1278 TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
1279 pageDir[pdeIndex]=newPde; // will blow up here if the address is in the global region and aOsAsid doesn't have a global PD
1280 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1282 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
1285 void ArmMmu::RemapPageTableGlobal(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
1287 // Replace a global page table mapping the specified linear address.
1288 // This should be called with the system locked and the MMU mutex held.
1291 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableGlobal %08x to %08x at %08x",aOld,aNew,aAddr));
1292 TInt pdeIndex=TInt(aAddr>>KChunkShift);
1293 TInt num_os_asids=iNumGlobalPageDirs;
1294 const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
1295 for (TInt os_asid=0; num_os_asids; ++os_asid)
1297 if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
1299 // this OS ASID exists and has a global page directory
1300 TPde* pageDir=PageDirectory(os_asid);
1301 TPde pde=pageDir[pdeIndex];
1302 if ((pde & KPdePageTableAddrMask) == aOld)
1304 TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
1305 pageDir[pdeIndex]=newPde;
1306 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1308 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
1312 if ((os_asid&31)==31)
1313 NKern::FlashSystem();
1317 void ArmMmu::RemapPageTableMultiple(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr, const TAny* aOsAsids)
1319 // Replace multiple page table mappings of the specified linear address.
1320 // This should be called with the system locked and the MMU mutex held.
1323 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableMultiple %08x to %08x at %08x asids %08x",aOld,aNew,aAddr,aOsAsids));
1324 TInt pdeIndex=TInt(aAddr>>KChunkShift);
1325 const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
1326 if ((TInt)aOsAsids==-1)
1327 pB=iOsAsidAllocator; // 0's in positions which exist
1330 TInt lastAsid = KArmV6NumAsids - 1;
1331 const TUint32* ptr = pB->iMap;
1334 TUint32 bits = *ptr++;
1338 if ((bits & 0x80000000u) == 0)
1340 // mapped in this address space - bitmap is inverted
1341 TPde* pageDir=PageDirectory(asid);
1342 TPde pde=pageDir[pdeIndex];
1343 if ((pde & KPdePageTableAddrMask) == aOld)
1345 TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
1346 pageDir[pdeIndex]=newPde;
1347 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1349 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",newPde,pageDir+pdeIndex));
1354 NKern::FlashSystem();
1357 while(asid<lastAsid);
1360 void ArmMmu::RemapPageTableAliases(TPhysAddr aOld, TPhysAddr aNew)
1362 // Replace aliases of the specified page table.
1363 // This should be called with the system locked and the MMU mutex held.
1366 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTableAliases %08x to %08x",aOld,aNew));
1367 SDblQue checkedList;
1370 while(!iAliasList.IsEmpty())
1372 next = iAliasList.First()->Deque();
1373 checkedList.Add(next);
1374 DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
1375 TPde pde = thread->iAliasPde;
1376 if ((pde & ~KPageMask) == aOld)
1378 // a page table in this page is being aliased by the thread, so update it...
1379 thread->iAliasPde = (pde & KPageMask) | aNew;
1381 NKern::FlashSystem();
1384 // copy checkedList back to iAliasList
1385 iAliasList.MoveFrom(&checkedList);
1388 void ArmMmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids)
1390 // Unassign a now-empty page table currently mapping the specified linear address.
1391 // We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
1392 // This should be called with the system unlocked and the MMU mutex held.
1395 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids));
1396 TInt pdeIndex=TInt(aAddr>>KChunkShift);
1397 TBool gpd=(pdeIndex>=(iLocalPdSize>>2));
1398 TInt os_asid=(TInt)aOsAsids;
1401 SDblQue checkedList;
1404 if (TUint32(os_asid)<TUint32(iNumOsAsids))
1407 TPde* pageDir=PageDirectory(os_asid);
1408 NKern::LockSystem();
1409 pde = pageDir[pdeIndex];
1410 pageDir[pdeIndex]=0;
1411 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1412 __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
1414 // remove any aliases of the page table...
1415 TUint ptId = pde>>KPageTableShift;
1416 while(!iAliasList.IsEmpty())
1418 next = iAliasList.First()->Deque();
1419 checkedList.Add(next);
1420 DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
1421 if(thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId)
1423 // the page table is being aliased by the thread, so remove it...
1424 thread->iAliasPde = 0;
1426 NKern::FlashSystem();
1429 else if (os_asid==-1 && gpd)
1431 // all OS ASIDs, address in global region
1432 TInt num_os_asids=iNumGlobalPageDirs;
1433 const TBitMapAllocator& b=*(const TBitMapAllocator*)iOsAsidAllocator;
1434 for (os_asid=0; num_os_asids; ++os_asid)
1436 if (!b.NotAllocated(os_asid,1) && (iAsidInfo[os_asid]&1))
1438 // this OS ASID exists and has a global page directory
1439 TPde* pageDir=PageDirectory(os_asid);
1440 NKern::LockSystem();
1441 pageDir[pdeIndex]=0;
1442 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1443 NKern::UnlockSystem();
1445 __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
1449 // we don't need to look for aliases in this case, because these aren't
1450 // created for page tables in the global region.
1451 NKern::LockSystem();
1455 // selection of OS ASIDs or all OS ASIDs
1456 const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
1458 pB=iOsAsidAllocator; // 0's in positions which exist
1459 TInt num_os_asids=pB->iSize-pB->iAvail;
1460 for (os_asid=0; num_os_asids; ++os_asid)
1462 if (pB->NotAllocated(os_asid,1))
1463 continue; // os_asid is not needed
1464 TPde* pageDir=PageDirectory(os_asid);
1465 NKern::LockSystem();
1466 pde = pageDir[pdeIndex];
1467 pageDir[pdeIndex]=0;
1468 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1469 NKern::UnlockSystem();
1471 __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
1475 // remove any aliases of the page table...
1476 TUint ptId = pde>>KPageTableShift;
1477 NKern::LockSystem();
1478 while(!iAliasList.IsEmpty())
1480 next = iAliasList.First()->Deque();
1481 checkedList.Add(next);
1482 DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
1483 if((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1))
1485 // the page table is being aliased by the thread, so remove it...
1486 thread->iAliasPde = 0;
1488 NKern::FlashSystem();
1492 // copy checkedList back to iAliasList
1493 iAliasList.MoveFrom(&checkedList);
1495 NKern::UnlockSystem();
1499 // Initialise page table at physical address aXptPhys to be used as page table aXptId
1500 // to expand the virtual address range used for mapping page tables. Map the page table
1501 // at aPhysAddr as page table aId using the expanded range.
1502 // Assign aXptPhys to kernel's Page Directory.
1503 // Called with system unlocked and MMU mutex held.
1504 void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
1506 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
1507 aXptId, aXptPhys, aId, aPhysAddr));
1509 // put in a temporary mapping for aXptPhys
1510 // make it noncacheable
1511 TPhysAddr pa=aXptPhys&~KPageMask;
1512 *iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
1513 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1516 TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask));
1517 memclr(xpt, KPageTableSize);
1519 // must in fact have aXptPhys and aPhysAddr in same physical page
1520 __ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)<TUint32(KPageSize), MM::Panic(MM::EBootstrapPageTableBadAddr));
1522 // so only need one mapping
1523 xpt[(aXptId>>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm;
1524 CacheMaintenance::MultiplePtesUpdated((TLinAddr)xpt, KPageTableSize);
1526 // remove temporary mapping
1528 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1530 InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
1532 // initialise PtInfo...
1533 TLinAddr xptAddr = PageTableLinAddr(aXptId);
1534 iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);
1537 TInt pdeIndex=TInt(xptAddr>>KChunkShift);
1538 TPde* pageDir=PageDirectory(0);
1539 NKern::LockSystem();
1540 pageDir[pdeIndex]=aXptPhys|KPtPdePerm;
1541 CacheMaintenance::SinglePteUpdated((TLinAddr)(pageDir+pdeIndex));
1543 NKern::UnlockSystem();
1546 // Edit the self-mapping entry in page table aId, mapped at aTempMap, to
1547 // change the physical address from aOld to aNew. Used when moving page
1548 // tables which were created by BootstrapPageTable.
1549 // Called with system locked and MMU mutex held.
1550 void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
1552 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x",
1553 aId, aTempMap, aOld, aNew));
1555 // find correct page table inside the page
1556 TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift));
1557 // find the pte in that page table
1558 xpt += (aId>>KPtClusterShift)&KPagesInPDEMask;
1560 // switch the mapping
1561 __ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed));
1562 *xpt = aNew | KPtPtePerm;
1563 // mapped with MapTemp, and thus not mapped as a PTE - have to do real cache clean.
1564 CacheMaintenance::SinglePteUpdated((TLinAddr)xpt);
1567 TInt ArmMmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages)
1569 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
1571 TInt nlocal=iLocalPdSize>>KPageShift;
1572 aNumPages=aSeparateGlobal ? KPageDirectorySize/KPageSize : nlocal;
1573 __KTRACE_OPT(KMMU,Kern::Printf("nlocal=%d, aNumPages=%d",nlocal,aNumPages));
1576 TInt align=aSeparateGlobal ? KPageDirectoryShift : KPageDirectoryShift-1;
1577 r=AllocContiguousRam(aNumPages<<KPageShift, aPhysAddr, EPageFixed, align);
1580 r=AllocRamPages(&aPhysAddr,1, EPageFixed);
1581 __KTRACE_OPT(KMMU,Kern::Printf("r=%d, phys=%08x",r,aPhysAddr));
1584 #ifdef BTRACE_KERNEL_MEMORY
1585 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, aNumPages<<KPageShift);
1586 Epoc::KernelMiscPages += aNumPages;
1588 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1589 NKern::LockSystem();
1591 for (i=0; i<aNumPages; ++i)
1592 pi[i].SetPageDir(aOsAsid,i);
1593 NKern::UnlockSystem();
1597 inline void CopyPdes(TPde* aDest, const TPde* aSrc, TLinAddr aBase, TLinAddr aEnd)
1599 memcpy(aDest+(aBase>>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
1600 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
1603 inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd)
1605 memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
1606 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(aDest+(aBase>>KChunkShift)), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
1609 void ArmMmu::InitPageDirectory(TInt aOsAsid, TBool aSeparateGlobal)
1611 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::InitPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
1612 TPde* newpd=PageDirectory(aOsAsid); // new page directory
1613 memclr(newpd, iLocalPdSize); // clear local page directory
1614 CacheMaintenance::MultiplePtesUpdated((TLinAddr)newpd, iLocalPdSize);
1615 if (aSeparateGlobal)
1617 const TPde* kpd=(const TPde*)KPageDirectoryBase; // kernel page directory
1618 if (iLocalPdSize==KPageSize)
1619 ZeroPdes(newpd, KUserSharedDataEnd1GB, KUserSharedDataEnd2GB);
1620 ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress); // don't copy RAM drive
1621 CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd); // copy ROM + user global
1622 CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000); // copy kernel mappings
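// For a separate global directory: the local part is cleared, then the global
// mappings are built from the kernel's page directory - the user shared and RAM
// drive ranges are left empty while the ROM, user global and kernel ranges are
// copied, so the new address space sees the same global memory as the rest of
// the system.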
1626 void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex)
1628 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex));
1629 TPte* pte=PageTable(aId);
1630 memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte));
1631 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(pte+aFirstIndex), KPageTableSize-aFirstIndex*sizeof(TPte));
1634 void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm)
1636 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d",
1637 aOsAsid, aAddr, aPdePerm, aNumPdes));
1638 TInt ix=aAddr>>KChunkShift;
1639 TPde* pPde=PageDirectory(aOsAsid)+ix;
1640 TLinAddr firstPde = (TLinAddr)pPde; //Will need this to clean page table memory region in cache
1642 TPde* pPdeEnd=pPde+aNumPdes;
1643 NKern::LockSystem();
1644 for (; pPde<pPdeEnd; ++pPde)
1648 *pPde = (pde&KPdePageTableAddrMask)|aPdePerm;
1650 CacheMaintenance::MultiplePtesUpdated(firstPde, aNumPdes*sizeof(TPde));
1652 NKern::UnlockSystem();
1655 void ArmMmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
1657 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
1658 aId, aPageOffset, aNumPages, aPtePerm));
1659 TPte* pPte=PageTable(aId)+aPageOffset;
1660 TLinAddr firstPte = (TLinAddr)pPte; //Will need this to clean page table memory region in cache
1662 TPte* pPteEnd=pPte+aNumPages;
1663 NKern::LockSystem();
1664 for (; pPte<pPteEnd; ++pPte)
1668 *pPte = (pte&KPteSmallPageAddrMask)|aPtePerm;
1670 CacheMaintenance::MultiplePtesUpdated(firstPte, aNumPages*sizeof(TPte));
1672 NKern::UnlockSystem();
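// Each PTE in the run keeps its physical address bits (KPteSmallPageAddrMask)
// and has its permission/attribute bits replaced by aPtePerm; a single
// MultiplePtesUpdated() call then cleans the whole run of updated entries.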
1675 void ArmMmu::ClearRamDrive(TLinAddr aStart)
1677 // clear the page directory entries corresponding to the RAM drive
1678 TPde* kpd=(TPde*)KPageDirectoryBase; // kernel page directory
1679 ZeroPdes(kpd, aStart, KRamDriveEndAddress);
1682 TPde ArmMmu::PdePermissions(TChunkType aChunkType, TBool aRO)
1684 // if (aChunkType==EUserData && aRO)
1685 // return KPdePtePresent|KPdePteUser;
1686 return ChunkPdePermissions[aChunkType];
1689 TPte ArmMmu::PtePermissions(TChunkType aChunkType)
1691 return ChunkPtePermissions[aChunkType];
1694 // Set up a page table (specified by aId) to map a 1Mb section of ROM containing aRomAddr
1695 // using ROM at aOrigPhys.
1696 void ArmMmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
1698 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
1699 aId, aRomAddr, aOrigPhys));
1700 TPte* ppte = PageTable(aId);
1701 TLinAddr firstPte = (TLinAddr)ppte; //Will need this to clean page table memory region in cache
1703 TPte* ppte_End = ppte + KChunkSize/KPageSize;
1704 TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
1705 for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
1706 *ppte = phys | KRomPtePerm;
1707 CacheMaintenance::MultiplePtesUpdated(firstPte, sizeof(TPte)*KChunkSize/KPageSize);
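// The shadow page table maps the complete 1MB section containing aRomAddr with
// read-only ROM permissions: phys starts at the section's physical base
// (aOrigPhys minus the offset of aRomAddr within the section) and advances one
// page per PTE for all KChunkSize/KPageSize entries.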
1710 // Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
1711 // It is assumed aShadowPhys is not mapped, therefore any mapping colour is OK.
1712 void ArmMmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
1714 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
1715 aShadowPhys, aRomAddr));
1717 // put in a temporary mapping for aShadowPhys
1718 // make it noncacheable
1719 *iTempPte = aShadowPhys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
1720 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1722 // copy contents of ROM
1723 wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
1724 //Temp address is uncached. No need to clean cache, just flush write buffer
1725 CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, KPageSize, EMapAttrBufferedC);
1727 // remove temporary mapping
1729 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
1730 InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
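// Sequence: map the shadow page uncached at the kernel's temporary address,
// copy the original ROM page into it, drain the write buffer, then remove the
// temporary mapping and invalidate its TLB entry. Because the temporary
// mapping is uncached no data cache cleaning is needed.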
1733 // Assign a shadow page table to replace a ROM section mapping
1734 // Enter and return with system locked
1735 void ArmMmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
1737 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
1739 TLinAddr ptLin=PageTableLinAddr(aId);
1740 TPhysAddr ptPhys=LinearToPhysical(ptLin, 0);
1741 TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
1742 TPde newpde = ptPhys | KShadowPdePerm;
1743 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
1744 TInt irq=NKern::DisableAllInterrupts();
1745 *ppde = newpde; // map in the page table
1746 CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
1748 FlushTLBs(); // flush both TLBs (no need to flush cache yet)
1749 NKern::RestoreInterrupts(irq);
1752 void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
1754 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
1755 TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
1756 TPte newpte = aOrigPhys | KRomPtePerm;
1757 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
1758 TInt irq=NKern::DisableAllInterrupts();
1760 CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
1762 InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);
1763 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
1767 CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
1768 CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
1769 NKern::RestoreInterrupts(irq);
1772 TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys)
1774 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys));
1775 TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
1776 TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions;
1777 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
1778 TInt irq=NKern::DisableAllInterrupts();
1779 *ppde = newpde; // revert to section mapping
1780 CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
1782 FlushTLBs(); // flush both TLBs
1783 NKern::RestoreInterrupts(irq);
1788 #if defined(__CPU_MEMORY_TYPE_REMAPPING) // arm1176, arm11mcore, armv7, ...
1790 Shadow pages on platforms with remapping (mpcore, 1176, cortex...) are not writable.
1791 This function therefore maps the region into writable memory first.
1792 @pre No Fast Mutex held
1794 TInt ArmMmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
1796 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength));
1798 // Check that destination is ROM
1799 if (aDest<iRomLinearBase || (aDest+aLength) > iRomLinearEnd)
1801 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: Destination not entirely in ROM"));
1802 return KErrArgument;
1804 // do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us)
1811 // Calculate the amount of memory to copy in this iteration; at most one page is copied per pass.
1812 TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask));
1814 // Get physical address
1815 TPhysAddr physAddr = LinearToPhysical(aDest&~iPageMask, 0);
1816 if (KPhysAddrInvalid==physAddr)
1822 //check whether it is shadowed rom
1823 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
1824 if (pi==0 || pi->Type()!=SPageInfo::EShadow)
1826 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:CopyToShadowMemory: No shadow page at this address"));
1831 //Temporarily map into writable memory and copy data. RamAllocator DMutex is required
1832 TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask);
1833 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize));
1834 memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize); //Kernel-to-Kernel copy is presumed
1837 //Update variables for the next loop/page
1847 void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
1849 #if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7 and later
1850 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage not required with MEMORY_TYPE_REMAPPING"));
1852 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
1854 TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
1855 TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePerm;
1856 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
1858 CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
1859 InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING);
1863 /** Replaces a large page (64K) entry in the page table with small page (4K) entries. */
1864 void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr)
1866 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr));
1868 TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
1869 TPte* pte = PageTable(aId);
1870 if ((pte[pteIndex] & KArmV6PteTypeMask) == KArmV6PteLargePage)
1872 __KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages"));
1874 TPte source = pte[pteIndex];
1875 source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source);
1877 for (TInt entry=0; entry<16; entry++)
1879 pte[entry] = source | (entry<<12);
1881 CacheMaintenance::MultiplePtesUpdated((TLinAddr)pte, 16*sizeof(TPte));
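// On ARM a 64K large page is represented by 16 identical PTEs occupying a
// 16-entry aligned run of the page table. The loop above rewrites that run as
// 16 separate 4K small-page PTEs whose physical addresses step by 4K
// (entry<<12), leaving the mapping unchanged but expressed as individual 4K
// pages.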
1886 void ArmMmu::FlushShadow(TLinAddr aRomAddr)
1888 CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
1889 CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
1890 InvalidateTLBForPage(aRomAddr, KERNEL_MAPPING); // remove all TLB references to original ROM page
1894 #if defined(__CPU_MEMORY_TYPE_REMAPPING) //arm1176, arm11mcore, armv7
1896 Calculates page directory/table entries for memory type described in aMapAttr.
1897 Global, small page (4KB) mapping is assumed.
1898 (All magic numbers come from ARM page table descriptions.)
1899 @param aMapAttr On entry, holds a description (memory type, access permissions, ...) of the memory.
1900 It is made up of TMappingAttributes constants or TMappingAttributes2 object. If TMappingAttributes,
1901 may be altered on exit to hold the actual cache attributes & access permissions.
1902 @param aPde On exit, holds page-table-entry for the 1st level descriptor
1903 for given type of memory, with base address set to 0.
1904 @param aPte On exit, holds small-page-entry (4K) for the 2nd level descriptor
1905 for given type of memory, with base address set to 0.
1906 @return KErrNotSupported If memory described in aMapAttr is not supported
1909 TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
1911 __KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
1913 TMappingAttributes2& memory = (TMappingAttributes2&)aMapAttr;
1915 if(memory.ObjectType2())
1917 //---------Memory described by TMappingAttributes2 object-----------------
1918 aPde = KArmV6PdePageTable |
1919 (memory.Parity() ? KArmV6PdeECCEnable : 0);
1920 #if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
1921 if(!memory.Shared() && (memory.Type() == EMemAttDevice ))
1923 aMapAttr ^= EMapAttrBufferedNC;
1924 aMapAttr |= EMapAttrFullyBlocking;
1925 // Clear EMemAttDevice
1926 aMapAttr ^= (EMemAttDevice << 26);
1927 aMapAttr |= (EMemAttStronglyOrdered << 26);
1930 aPte = KArmV6PteSmallPage |
1931 KArmV6PteAP0 | // AP0 bit always 1
1932 ((memory.Type()&3)<<2) | ((memory.Type()&4)<<4) | // memory type
1933 (memory.Executable() ? 0 : KArmV6PteSmallXN) | // eXecuteNever bit
1934 #if defined (__CPU_USE_SHARED_MEMORY)
1935 KArmV6PteS | // Memory is always shared.
1937 (memory.Shared() ? KArmV6PteS : 0) | // Shared bit
1939 (memory.Writable() ? 0 : KArmV6PteAPX) | // APX = !Writable
1940 (memory.UserAccess() ? KArmV6PteAP1: 0); // AP1 = UserAccess
1941 // aMapAttr remains the same
1945 //---------Memory described by TMappingAttributes bitmask-----------------
1946 #if defined(FAULTY_NONSHARED_DEVICE_MEMORY)
1947 if(((aMapAttr & EMapAttrL1CacheMask) == EMapAttrBufferedNC) && !(aMapAttr & EMapAttrShared))
1949 // Clear EMapAttrBufferedNC attribute
1950 aMapAttr ^= EMapAttrBufferedNC;
1951 aMapAttr |= EMapAttrFullyBlocking;
1954 // 1. Calculate TEX0:C:B bits in page table and actual cache attributes.
1955 // Only L1 cache attribute from aMapAttr matters. Outer (L2) cache policy will be the same as inner one.
1956 TUint l1cache=aMapAttr & EMapAttrL1CacheMask; // Inner cache attributes. May change to actual value.
1957 TUint l2cache; // Will hold actual L2 cache attributes (in terms of TMappingAttributes constants)
1958 TUint tex0_c_b; // Will hold TEX[0]:C:B value in page table
1962 case EMapAttrFullyBlocking:
1963 tex0_c_b = EMemAttStronglyOrdered;
1964 l2cache = EMapAttrL2Uncached;
1966 case EMapAttrBufferedNC:
1967 tex0_c_b = EMemAttDevice;
1968 l2cache = EMapAttrL2Uncached;
1970 case EMapAttrBufferedC:
1971 case EMapAttrL1Uncached:
1972 case EMapAttrCachedWTRA:
1973 case EMapAttrCachedWTWA:
1974 tex0_c_b = EMemAttNormalUncached;
1975 l1cache = EMapAttrBufferedC;
1976 l2cache = EMapAttrL2Uncached;
1978 case EMapAttrCachedWBRA:
1979 case EMapAttrCachedWBWA:
1980 case EMapAttrL1CachedMax:
1981 tex0_c_b = EMemAttNormalCached;
1982 l1cache = EMapAttrCachedWBWA;
1983 l2cache = EMapAttrL2CachedWBWA;
1986 return KErrNotSupported;
1989 // 2. Step 2 has been removed :)
1991 // 3. Calculate access permissions (apx:ap bits in page table + eXecute bit)
1992 TUint read=aMapAttr & EMapAttrReadMask;
1993 TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
1994 TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
1996 read|=exec; // User/Sup execute access requires User/Sup read access.
1997 if (exec) exec = 1; // There is a single eXecute bit in page table. Set to one if User or Sup exec is required.
2000 if (write==0) // no write required
2002 if (read>=4) apxap=KArmV6PermRORO; // user read required
2003 else if (read==1) apxap=KArmV6PermRONO; // supervisor read required
2004 else return KErrNotSupported; // no read required
2006 else if (write<4) // supervisor write required
2008 if (read<4) apxap=KArmV6PermRWNO; // user read not required
2009 else return KErrNotSupported; // user read required
2011 else // user & supervisor writes required
2013 apxap=KArmV6PermRWRW;
2016 // 4. Calculate page-table-entry for the 1st level (aka page directory) descriptor
2017 aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable;
2019 // 5. Calculate small-page-entry for the 2nd level (aka page table) descriptor
2020 aPte=SP_PTE(apxap, tex0_c_b, exec, 1); // always global
2021 if (aMapAttr&EMapAttrShared)
2024 // 6. Fix aMapAttr to hold the actual values for access permission & cache attributes
2025 TUint xnapxap=((aPte<<3)&8)|((aPte>>7)&4)|((aPte>>4)&3);
2026 aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask);
2027 aMapAttr |= PermissionLookup[xnapxap]; // Set actual access permissions
2028 aMapAttr |= l1cache; // Set actual inner cache attributes
2029 aMapAttr |= l2cache; // Set actual outer cache attributes
2032 __KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x", aMapAttr, aPde, aPte));
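// Worked example (illustration only): a request for supervisor-only read/write
// with no execute gives read=1, write=1, exec=0 above, so apxap=KArmV6PermRWNO
// and SP_PTE() builds the descriptor with the eXecute-never bit set. Step 6
// then decodes XN:APX:AP back out of the finished PTE (bits 0, 9 and 5:4 of an
// extended small page descriptor) so that aMapAttr reports the permissions
// actually granted rather than those requested.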
2036 #else //ARMv6 (arm1136)
2038 const TUint FBLK=(EMapAttrFullyBlocking>>12);
2039 const TUint BFNC=(EMapAttrBufferedNC>>12);
2040 //const TUint BUFC=(EMapAttrBufferedC>>12);
2041 const TUint L1UN=(EMapAttrL1Uncached>>12);
2042 const TUint WTRA=(EMapAttrCachedWTRA>>12);
2043 //const TUint WTWA=(EMapAttrCachedWTWA>>12);
2044 const TUint WBRA=(EMapAttrCachedWBRA>>12);
2045 const TUint WBWA=(EMapAttrCachedWBWA>>12);
2046 const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
2047 //const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
2048 //const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
2049 const TUint AWBW=(EMapAttrAltCacheWBWA>>12);
2050 const TUint MAXC=(EMapAttrL1CachedMax>>12);
2052 const TUint L2UN=(EMapAttrL2Uncached>>16);
2054 const TUint8 UNS=0xffu; // Unsupported attribute
2056 //Maps L1 & L2 cache attributes into TEX[4:2]:CB[1:0]
2057 //ARMv6 doesn't do WTWA so we use WTRA instead
2059 #if !defined(__CPU_ARM1136_ERRATUM_399234_FIXED)
2060 // L1 Write-Through mode is outlawed, L1WT acts as L1UN.
2061 static const TUint8 CBTEX[40]=
2063 // FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA L2CACHE:
2064 0x00, 0x01, 0x01, 0x04, 0x04, 0x04, 0x13, 0x11, //NC
2065 0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19, //WTRA
2066 0x00, 0x01, 0x01, 0x18, 0x18, 0x18, 0x1b, 0x19, //WTWA
2067 0x00, 0x01, 0x01, 0x1c, 0x1c, 0x1c, 0x1f, 0x1d, //WBRA
2068 0x00, 0x01, 0x01, 0x14, 0x14, 0x14, 0x17, 0x15 //WBWA
2071 static const TUint8 CBTEX[40]=
2073 // FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA L2CACHE:
2074 0x00, 0x01, 0x01, 0x04, 0x12, 0x12, 0x13, 0x11, //NC
2075 0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19, //WTRA
2076 0x00, 0x01, 0x01, 0x18, 0x02, 0x02, 0x1b, 0x19, //WTWA
2077 0x00, 0x01, 0x01, 0x1c, 0x1e, 0x1e, 0x1f, 0x1d, //WBRA
2078 0x00, 0x01, 0x01, 0x14, 0x16, 0x16, 0x17, 0x15 //WBWA
2082 //Maps TEX[4:2]:CB[1:0] value into L1 cache attributes
2083 static const TUint8 L1Actual[32]=
2085 //CB 00 01 10 11 //TEX
2086 FBLK, BFNC, WTRA, WBRA, //000
2087 L1UN, UNS, UNS, WBWA, //001
2088 BFNC, UNS, UNS, UNS, //010
2089 UNS, UNS, UNS, UNS, //011
2090 L1UN, WBWA, WTRA, WBRA, //100
2091 L1UN, WBWA, WTRA, WBRA, //101
2092 L1UN, WBWA, WTRA, WBRA, //110
2093 L1UN, WBWA, WTRA, WBRA //111
2096 //Maps TEX[4:2]:CB[1:0] value into L2 cache attributes
2097 static const TUint8 L2Actual[32]=
2099 //CB 00 01 10 11 //TEX
2100 L2UN, L2UN, WTRA, WBRA, //000
2101 L2UN, UNS, UNS, WBWA, //001
2102 L2UN, UNS, UNS, UNS, //010
2103 UNS, UNS, UNS, UNS, //011
2104 L2UN, L2UN, L2UN, L2UN, //100
2105 WBWA, WBWA, WBWA, WBWA, //101
2106 WTRA, WTRA, WTRA, WTRA, //110
2107 WBRA, WBRA, WBRA, WBRA //111
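// CBTEX maps the requested (L2,L1) cache policy pair onto a 5-bit TEX:C:B value
// (TEX in bits 4:2, C:B in bits 1:0) for an ARMv6 small page descriptor,
// indexed as CBTEX[(l2cache<<3)|l1cache]. L1Actual/L2Actual are the reverse
// tables: given the TEX:C:B value finally written they return the inner/outer
// policies actually in effect, which PdePtePermissions() writes back into
// aMapAttr.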
2110 TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
2112 __KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
2114 TUint read=aMapAttr & EMapAttrReadMask;
2115 TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
2116 TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
2117 TUint l1cache=(aMapAttr & EMapAttrL1CacheMask)>>12;
2118 TUint l2cache=(aMapAttr & EMapAttrL2CacheMask)>>16;
2119 if (l1cache==MAXC) l1cache=WBRA; // map max cache to WBRA
2121 return KErrNotSupported; // undefined attribute
2122 if (l1cache>=AWTR) l1cache-=4; // no alternate cache, so use normal cache
2123 if (l1cache<L1UN) l2cache=0; // for blocking/device, don't cache L2
2124 if (l2cache==MAXC) l2cache=WBRA; // map max cache to WBRA
2126 return KErrNotSupported; // undefined attribute
2127 if (l2cache) l2cache-=(WTRA-1); // l2cache now in range 0-4
2128 aPde=((aMapAttr&EMapAttrUseECC)>>8)|KArmV6PdePageTable;
2130 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
2131 // if broken 1136, can't have supervisor only code
2133 exec = TUint(EMapAttrExecUser>>8);
2136 // if any execute access, must have read=execute
2138 (void)(read>=exec || (read=exec)!=0), exec=1;
2140 // l1cache between 0 and 7, l2cache between 0 and 4; look up CBTEX
2141 TUint cbtex=CBTEX[(l2cache<<3)|l1cache];
2146 apxap=(read>=4)?KArmV6PermRORO:(read?KArmV6PermRONO:KArmV6PermNONO);
2148 apxap=(read>=4)?KArmV6PermRWRO:KArmV6PermRWNO;
2150 apxap=KArmV6PermRWRW;
2151 TPte pte=SP_PTE(apxap, cbtex, exec, 1); // always global
2152 if (aMapAttr&EMapAttrShared)
2155 // Translate back to get actual map attributes
2156 TUint xnapxap=((pte<<3)&8)|((pte>>7)&4)|((pte>>4)&3);
2157 cbtex=((pte>>4)&0x1c)|((pte>>2)&3); // = TEX[4:2]::CB[1:0]
2158 aMapAttr &= ~(EMapAttrAccessMask|EMapAttrL1CacheMask|EMapAttrL2CacheMask);
2159 aMapAttr |= PermissionLookup[xnapxap];
2160 aMapAttr |= (L1Actual[cbtex]<<12);
2161 aMapAttr |= (L2Actual[cbtex]<<16);
2163 __KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, mapattr=%08x, pde=%08x, pte=%08x",
2164 aMapAttr, aPde, aPte));
2169 void ArmMmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
2171 // Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
2172 // Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
2173 // Assume any page tables required are already assigned.
2174 // aLinAddr, aPhysAddr, aSize must be page-aligned.
2177 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
2178 __KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
2179 TPde pt_pde=aPdePerm;
2180 TPte sp_pte=aPtePerm;
2181 TPde section_pde=SECTION_PDE_FROM_PDEPTE(pt_pde, sp_pte);
2182 TPte lp_pte=LP_PTE_FROM_SP_PTE(sp_pte);
2183 TLinAddr la=aLinAddr;
2184 TPhysAddr pa=aPhysAddr;
2188 if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
2190 // use sections - ASSUMES ADDRESS IS IN GLOBAL REGION
2191 TInt npdes=remain>>KChunkShift;
2192 const TBitMapAllocator& b=*iOsAsidAllocator;
2193 TInt num_os_asids=iNumGlobalPageDirs;
2195 for (; num_os_asids; ++os_asid)
2197 if (b.NotAllocated(os_asid,1) || (iAsidInfo[os_asid]&1)==0)
2198 continue; // os_asid is not needed
2199 TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift);
2200 TPde* p_pde_E=p_pde+npdes;
2201 TPde pde=pa|section_pde;
2202 TLinAddr firstPde = (TLinAddr)p_pde; //Will need this to clean page table memory region from cache
2204 NKern::LockSystem();
2205 for (; p_pde < p_pde_E; pde+=KChunkSize)
2207 __ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
2208 __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
2211 CacheMaintenance::MultiplePtesUpdated(firstPde, (TUint)p_pde-firstPde);
2212 NKern::UnlockSystem();
2215 npdes<<=KChunkShift;
2216 la+=npdes, pa+=npdes, remain-=npdes;
2219 TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
2220 TPte pa_mask=~KPageMask;
2221 TPte pte_perm=sp_pte;
2222 if (aMapShift>=KLargePageShift && block_size>=KLargePageSize)
2224 if ((la & KLargePageMask)==0)
2226 // use 64K large pages
2227 pa_mask=~KLargePageMask;
2231 block_size = Min(remain, KLargePageSize-(la&KLargePageMask));
2233 block_size &= pa_mask;
2235 // use pages (large or small)
2236 TInt id=PageTableId(la, 0);
2237 __ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
2238 TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift);
2239 TPte* p_pte_E=p_pte + (block_size>>KPageShift);
2240 SPageTableInfo& ptinfo=iPtInfo[id];
2241 TLinAddr firstPte = (TLinAddr)p_pte; //Will need this to clean page table memory region from cache
2243 NKern::LockSystem();
2244 for (; p_pte < p_pte_E; pa+=KPageSize)
2246 __ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
2247 TPte pte = (pa & pa_mask) | pte_perm;
2248 __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
2251 NKern::FlashSystem();
2253 CacheMaintenance::MultiplePtesUpdated(firstPte, (TUint)p_pte-firstPte);
2254 NKern::UnlockSystem();
2255 la+=block_size, remain-=block_size;
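// Mapping size selection: 1MB section mappings are used when aMapShift allows
// them and the address/size are section aligned, and are written into every
// global page directory; otherwise 64K large pages are used where possible,
// falling back to 4K small pages, both written into the page table already
// assigned for the address.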
2259 void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize)
2261 // Remove all mappings in the specified range of addresses.
2262 // Assumes there are only global mappings involved.
2263 // Don't free page tables.
2264 // aLinAddr, aSize must be page-aligned.
2267 __KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
2268 TLinAddr a=aLinAddr;
2269 TLinAddr end=a+aSize;
2270 __KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
2271 NKern::LockSystem();
2274 TInt pdeIndex=a>>KChunkShift;
2275 TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
2276 TInt to_do = Min(TInt(end-a), TInt(next-a))>>KPageShift;
2277 __KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
2278 TPde pde=::InitPageDirectory[pdeIndex];
2279 if ( (pde&KArmV6PdeTypeMask)==KArmV6PdeSection )
2281 __ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
2282 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
2283 remove_and_invalidate_section(::InitPageDirectory + pdeIndex, a, KERNEL_MAPPING);
2285 ::InitPageDirectory[pdeIndex]=0;
2286 CacheMaintenance::SinglePteUpdated(TLinAddr(::InitPageDirectory + pdeIndex));
2287 InvalidateTLBForPage(a, KERNEL_MAPPING); // ASID irrelevant since global
2290 NKern::FlashSystem();
2293 TInt ptid=PageTableId(a,0);
2294 SPageTableInfo& ptinfo=iPtInfo[ptid];
2297 TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift);
2298 TPte* ppte_End=ppte+to_do;
2299 for (; ppte<ppte_End; ++ppte, a+=KPageSize)
2301 if (*ppte & KArmV6PteSmallPage)
2304 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
2305 remove_and_invalidate_page(ppte, a, KERNEL_MAPPING);
2308 CacheMaintenance::SinglePteUpdated((TLinAddr)ppte);
2309 InvalidateTLBForPage(a, KERNEL_MAPPING);
2312 else if ((*ppte & KArmV6PteTypeMask) == KArmV6PteLargePage)
2314 __ASSERT_DEBUG(!(a&KLargePageMask), MM::Panic(MM::EUnmapBadAlignment));
2315 ptinfo.iCount-=KLargeSmallPageRatio;
2316 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
2317 remove_and_invalidate_page(ppte, a, KERNEL_MAPPING);
2319 memclr(ppte, KLargeSmallPageRatio*sizeof(TPte));
2320 CacheMaintenance::MultiplePtesUpdated((TLinAddr)ppte, KLargeSmallPageRatio*sizeof(TPte));
2321 InvalidateTLBForPage(a, KERNEL_MAPPING);
2323 a+=(KLargePageSize-KPageSize);
2324 ppte+=(KLargeSmallPageRatio-1);
2326 NKern::FlashSystem();
2330 a += (to_do<<KPageShift);
2332 NKern::UnlockSystem();
2333 #if defined(__CPU_ARM1136__) && !defined(__CPU_ARM1136_ERRATUM_353494_FIXED)
2339 void ArmMmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
2341 //map the pages at a temporary address, clear them and unmap
2342 __ASSERT_MUTEX(RamAllocatorMutex);
2343 while (--aNumPages >= 0)
2346 if((TInt)aPageList&1)
2348 pa = (TPhysAddr)aPageList&~1;
2349 *(TPhysAddr*)&aPageList += iPageSize;
2354 *iTempPte = pa | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
2355 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
2356 InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
2357 memset((TAny*)iTempAddr, aClearByte, iPageSize);
2358 // This temporary mapping is noncached => No need to flush cache here.
2359 // Still, we have to make sure that write buffer(s) are drained.
2360 CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)iTempAddr, iPageSize, EMapAttrBufferedC);
2363 CacheMaintenance::SinglePteUpdated((TLinAddr)iTempPte);
2364 InvalidateTLBForPage(iTempAddr, KERNEL_MAPPING);
2369 Create a temporary mapping of one or more contiguous physical pages.
2370 Fully cached memory attributes apply.
2371 The RamAllocatorMutex must be held before this function is called and not released
2372 until after UnmapTemp has been called.
2374 @param aPage The physical address of the pages to be mapped.
2375 @param aLinAddr The linear address of any existing location where the page is mapped.
2376 If the page isn't already mapped elsewhere as a cacheable page then
2377 this value is irrelevant. (It is used for page colouring.)
2378 @param aPages Number of pages to map.
2380 @return The linear address of where the pages have been mapped.
2382 TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages)
2384 __ASSERT_MUTEX(RamAllocatorMutex);
2385 __ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
2386 iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
2387 iTempMapCount = aPages;
2390 iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
2391 CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor));
2395 __ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
2396 for (TInt i=0; i<aPages; i++)
2397 iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
2398 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte));
2400 return iTempAddr+(iTempMapColor<<KPageShift);
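// The temporary area at iTempAddr has one page slot per page colour. Choosing
// the slot from aLinAddr's colour ((aLinAddr>>KPageShift)&KPageColourMask)
// keeps this alias at the same cache index as any existing cached mapping of
// the page, avoiding virtually-indexed cache aliasing problems.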
2404 Create a temporary mapping of one or more contiguous physical pages.
2405 Memory attributes as specified by aMemType apply.
2406 @see ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages) for other details.
2408 TLinAddr ArmMmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages, TMemoryType aMemType)
2410 __ASSERT_MUTEX(RamAllocatorMutex);
2411 __ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
2412 iTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
2413 iTempMapCount = aPages;
2414 TUint pte = SP_PTE(KArmV6PermRWNO, aMemType, 0, 1);
2417 iTempPte[iTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, pte, 0, 1);
2418 CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor));
2422 __ASSERT_DEBUG(iTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
2423 for (TInt i=0; i<aPages; i++)
2424 iTempPte[iTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, pte, 0, 1);
2425 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iTempPte+iTempMapColor), aPages*sizeof(TPte));
2427 return iTempAddr+(iTempMapColor<<KPageShift);
2431 Create a temporary mapping of one or more contiguous physical pages, distinct from
2432 that created by MapTemp.
2433 The RamAllocatorMutex must be held before this function is called and not released
2434 until after UnmapSecondTemp has been called.
2436 @param aPage The physical address of the pages to be mapped.
2437 @param aLinAddr The linear address of any existing location where the page is mapped.
2438 If the page isn't already mapped elsewhere as a cacheable page then
2439 this value is irrelevant. (It is used for page colouring.)
2440 @param aPages Number of pages to map.
2442 @return The linear address of where the pages have been mapped.
2444 TLinAddr ArmMmu::MapSecondTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages)
2446 __ASSERT_MUTEX(RamAllocatorMutex);
2447 __ASSERT_DEBUG(!*iSecondTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
2448 iSecondTempMapColor = (aLinAddr>>KPageShift)&KPageColourMask;
2449 iSecondTempMapCount = aPages;
2452 iSecondTempPte[iSecondTempMapColor] = (aPage&~KPageMask) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
2453 CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor));
2457 __ASSERT_DEBUG(iSecondTempMapColor+aPages<=KPageColourCount,MM::Panic(MM::ETempMappingNoRoom));
2458 for (TInt i=0; i<aPages; i++)
2459 iSecondTempPte[iSecondTempMapColor+i] = ((aPage&~KPageMask)+(i<<KPageShift)) | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
2460 CacheMaintenance::MultiplePtesUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor), aPages*sizeof(TPte));
2462 return iSecondTempAddr+(iSecondTempMapColor<<KPageShift);
2466 Remove the temporary mapping created with MapTemp.
2468 void ArmMmu::UnmapTemp()
2470 __ASSERT_MUTEX(RamAllocatorMutex);
2471 for (TInt i=0; i<iTempMapCount; i++)
2473 iTempPte[iTempMapColor+i] = 0;
2474 CacheMaintenance::SinglePteUpdated((TLinAddr)(iTempPte+iTempMapColor+i));
2475 InvalidateTLBForPage(iTempAddr+((iTempMapColor+i)<<KPageShift), KERNEL_MAPPING);
2480 Remove the temporary mapping created with MapSecondTemp.
2482 void ArmMmu::UnmapSecondTemp()
2484 __ASSERT_MUTEX(RamAllocatorMutex);
2485 for (TInt i=0; i<iSecondTempMapCount; i++)
2487 iSecondTempPte[iSecondTempMapColor+i] = 0;
2488 CacheMaintenance::SinglePteUpdated((TLinAddr)(iSecondTempPte+iSecondTempMapColor+i));
2489 InvalidateTLBForPage(iSecondTempAddr+((iSecondTempMapColor+i)<<KPageShift), KERNEL_MAPPING);
2494 TBool ArmMmu::ValidateLocalIpcAddress(TLinAddr aAddr,TInt aSize,TBool aWrite)
2496 __NK_ASSERT_DEBUG(aSize<=KChunkSize);
2497 TLinAddr end = aAddr+aSize-1;
2501 if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize) || TUint(end^KIPCAlias)<TUint(KChunkSize))
2503 // local address is in alias region.
2505 NKern::LockSystem();
2506 ((DMemModelThread*)TheCurrentThread)->RemoveAlias();
2507 NKern::UnlockSystem();
2508 // access memory, which will cause an exception...
2509 if(!(TUint(aAddr^KIPCAlias)<TUint(KChunkSize)))
2511 InvalidateTLBForPage(aAddr,((DMemModelProcess*)TheCurrentThread->iOwningProcess)->iOsAsid);
2513 *(volatile TUint8*)aAddr = 0;
2515 aWrite = *(volatile TUint8*)aAddr;
2517 __NK_ASSERT_DEBUG(0);
2521 DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
2523 local_mask = process->iAddressCheckMaskW;
2525 local_mask = process->iAddressCheckMaskR;
2526 TUint32 mask = 2<<(end>>27);
2527 mask -= 1<<(aAddr>>27);
2528 if((local_mask&mask)!=mask)
2532 return ETrue; // reads are ok
2534 // writes need further checking...
2535 TLinAddr userCodeStart = iUserCodeBase;
2536 TLinAddr userCodeEnd = userCodeStart+iMaxUserCodeSize;
2537 if(end>=userCodeStart && aAddr<userCodeEnd)
2538 return EFalse; // trying to write to user code area
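// Illustration of the region check above (assuming 32-bit addresses): each bit
// of iAddressCheckMaskR/W covers one 128MB (1<<27 byte) slice of the address
// space, and
//   TUint32 mask = (2u<<(end>>27)) - (1u<<(aAddr>>27));
// sets exactly the bits for the slices touched by [aAddr,end]. E.g. for
// aAddr=0x00400000, end=0x085FFFFF the touched slices are 0 and 1, so
// mask=0x00000003; the access is allowed only if all those bits are set in the
// process's mask.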
2543 TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TInt aPerm, TLinAddr& aAliasAddr, TInt& aAliasSize)
2545 // Set up an alias mapping starting at address aAddr in specified process.
2546 // Check permissions aPerm.
2547 // Enter and return with system locked.
2548 // Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
2551 __KTRACE_OPT(KMMU2,Kern::Printf("Thread %O Alias %08x+%x Process %O perm %x",this,aAddr,aSize,aProcess,aPerm));
2552 __ASSERT_SYSTEM_LOCK
2554 if(TUint(aAddr^KIPCAlias)<TUint(KChunkSize))
2555 return KErrBadDescriptor; // prevent access to alias region
2559 // check if memory is in region which is safe to access with supervisor permissions...
2560 TBool okForSupervisorAccess = aPerm&(EMapAttrReadSup|EMapAttrWriteSup) ? 1 : 0;
2561 if(!okForSupervisorAccess)
2563 TInt shift = aAddr>>27;
2564 if(!(aPerm&EMapAttrWriteUser))
2566 // reading with user permissions...
2567 okForSupervisorAccess = (aProcess->iAddressCheckMaskR>>shift)&1;
2571 // writing with user permissions...
2572 okForSupervisorAccess = (aProcess->iAddressCheckMaskW>>shift)&1;
2573 if(okForSupervisorAccess)
2575 // check for user code, because this is supervisor r/w and so
2576 // is not safe to write to with supervisor permissions.
2577 if(TUint(aAddr-m.iUserCodeBase)<TUint(m.iMaxUserCodeSize))
2578 return KErrBadDescriptor; // prevent write to this...
2583 TInt pdeIndex = aAddr>>KChunkShift;
2584 if(pdeIndex>=(m.iLocalPdSize>>2))
2586 // address is in global section, don't bother aliasing it...
2590 TInt maxSize = KChunkSize-(aAddr&KChunkMask);
2591 aAliasSize = aSize<maxSize ? aSize : maxSize;
2592 __KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() abandoned as memory is globally mapped"));
2593 return okForSupervisorAccess;
2596 TInt asid = aProcess->iOsAsid;
2597 TPde* pd = PageDirectory(asid);
2598 TPde pde = pd[pdeIndex];
2599 if ((TPhysAddr)(pde&~KPageMask) == AliasRemapOld)
2600 pde = AliasRemapNew|(pde&KPageMask);
2601 pde = PDE_IN_DOMAIN(pde, KIPCAliasDomain);
2602 TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
2603 if(pde==iAliasPde && iAliasLinAddr)
2605 // pde already aliased, so just update linear address...
2606 iAliasLinAddr = aliasAddr;
2610 // alias PDE changed...
2612 iAliasOsAsid = asid;
2615 ArmMmu::UnlockAlias();
2616 ::TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
2618 iAliasLinAddr = aliasAddr;
2619 *iAliasPdePtr = pde;
2620 CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
2623 __KTRACE_OPT(KMMU2,Kern::Printf("DMemModelThread::Alias() PDEntry=%x, iAliasLinAddr=%x",pde, aliasAddr));
2624 InvalidateTLBForPage(aliasAddr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
2625 TInt offset = aAddr&KPageMask;
2626 aAliasAddr = aliasAddr | offset;
2627 TInt maxSize = KPageSize - offset;
2628 aAliasSize = aSize<maxSize ? aSize : maxSize;
2629 iAliasTarget = aAddr & ~KPageMask;
2630 return okForSupervisorAccess;
2633 void DMemModelThread::RemoveAlias()
2635 // Remove alias mapping (if present)
2636 // Enter and return with system locked.
2639 __KTRACE_OPT(KMMU2,Kern::Printf("Thread %O RemoveAlias", this));
2640 __ASSERT_SYSTEM_LOCK
2641 TLinAddr addr = iAliasLinAddr;
2644 ArmMmu::LockAlias();
2648 CacheMaintenance::SinglePteUpdated((TLinAddr)iAliasPdePtr);
2649 InvalidateTLBForPage(addr, ((DMemModelProcess*)iOwningProcess)->iOsAsid);
2655 * Performs cache maintenance for a physical page that is going to be reused.
2656 * Fully cached attributes are assumed.
2658 void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr a)
2660 // purge a single page from the cache following decommit
2662 TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
2663 TPte& pte=m.iTempPte[colour];
2664 TLinAddr va=m.iTempAddr+(colour<<KPageShift);
2665 pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
2666 CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
2668 CacheMaintenance::PageToReuse(va,EMemAttNormalCached, a);
2671 CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
2672 InvalidateTLBForPage(va,KERNEL_MAPPING);
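// The decommitted page is mapped at the temporary slot matching its page colour
// (taken from the page's offset), purged from the cache via PageToReuse(), and
// the temporary mapping plus its TLB entry are then removed. Using the same
// colour means the maintenance runs at the cache index the page was last
// mapped at.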
2675 void ArmMmu::CacheMaintenanceOnDecommit(const TPhysAddr* al, TInt n)
2677 // purge a list of pages from the cache following decommit
2679 ArmMmu::CacheMaintenanceOnDecommit(*al++);
2683 * Performs cache maintenance to preserve a physical page that is going to be reused.
2685 void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr a, TUint aMapAttr)
2687 // clean and purge a single page from the cache following decommit, preserving its contents
2689 TInt colour = SPageInfo::FromPhysAddr(a)->Offset()&KPageColourMask;
2690 TPte& pte=m.iTempPte[colour];
2691 TLinAddr va=m.iTempAddr+(colour<<KPageShift);
2692 pte=a|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
2693 CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
2695 CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize,aMapAttr);
2698 CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
2699 InvalidateTLBForPage(va,KERNEL_MAPPING);
2702 void ArmMmu::CacheMaintenanceOnPreserve(const TPhysAddr* al, TInt n, TUint aMapAttr)
2704 // clean and purge a list of pages from the cache following decommit, preserving their contents
2706 ArmMmu::CacheMaintenanceOnPreserve(*al++, aMapAttr);
2710 * Performs cache maintenance of physical memory that has been decommitted and has to be preserved.
2711 * Call this method for physical pages with no page info updated (or no page info at all).
2712 * @arg aPhysAddr The address of contiguous physical memory to be preserved.
2713 * @arg aSize The size of the region
2714 * @arg aLinAddr Former linear address of the region. As said above, the physical memory is
2715 * already remapped from this linear address.
2716 * @arg aMapAttr Mapping attributes of the region when it was mapped in aLinAddr.
2717 * @pre MMU mutex is held.
2719 void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr aPhysAddr, TInt aSize, TLinAddr aLinAddr, TUint aMapAttr )
2721 __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
2722 __NK_ASSERT_DEBUG((aSize&KPageMask)==0);
2723 __NK_ASSERT_DEBUG((aLinAddr&KPageMask)==0);
2725 TPhysAddr pa = aPhysAddr;
2727 TInt colour = (aLinAddr>>KPageShift)&KPageColourMask;
2728 TPte* pte = &(iTempPte[colour]);
2731 pte=&(iTempPte[colour]);
2732 TLinAddr va=iTempAddr+(colour<<KPageShift);
2733 *pte=pa|SP_PTE(KArmV6PermRWNO, iCacheMaintenanceTempMapAttr, 1, 1);
2734 CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
2735 CacheMaintenance::MemoryToPreserveAndReuse(va, KPageSize,aMapAttr);
2738 CacheMaintenance::SinglePteUpdated((TLinAddr)pte);
2739 InvalidateTLBForPage(va,KERNEL_MAPPING);
2741 colour = (colour+1)&KPageColourMask;
2747 TInt ArmMmu::UnlockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
2749 TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
2750 TInt page = aLinAddr>>KPageShift;
2751 NKern::LockSystem();
2754 TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
2755 TPte* pt = SafePageTableFromPde(*pd++);
2756 TInt pteIndex = page&(KChunkMask>>KPageShift);
2759 // whole page table has gone, so skip all pages in it...
2760 TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
2761 aNumPages -= pagesInPt;
2765 NKern::UnlockSystem();
2771 TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
2772 if(pagesInPt>aNumPages)
2773 pagesInPt = aNumPages;
2774 if(pagesInPt>KMaxPages)
2775 pagesInPt = KMaxPages;
2777 aNumPages -= pagesInPt;
2783 if(pte) // pte may be null if page has already been unlocked and reclaimed by system
2784 iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
2790 NKern::UnlockSystem();
2794 pteIndex = page&(KChunkMask>>KPageShift);
2796 while(!NKern::FlashSystem() && pteIndex);
2801 TInt ArmMmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
2803 TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
2804 TInt page = aLinAddr>>KPageShift;
2805 NKern::LockSystem();
2808 TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
2809 TPte* pt = SafePageTableFromPde(*pd++);
2810 TInt pteIndex = page&(KChunkMask>>KPageShift);
2816 TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
2817 if(pagesInPt>aNumPages)
2818 pagesInPt = aNumPages;
2819 if(pagesInPt>KMaxPages)
2820 pagesInPt = KMaxPages;
2822 aNumPages -= pagesInPt;
2830 if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
2837 NKern::UnlockSystem();
2841 pteIndex = page&(KChunkMask>>KPageShift);
2843 while(!NKern::FlashSystem() && pteIndex);
2846 NKern::UnlockSystem();
2847 return KErrNotFound;
2851 void RamCache::SetFree(SPageInfo* aPageInfo)
2855 SPageInfo::TType type = aPageInfo->Type();
2856 if(type==SPageInfo::EPagedCache)
2858 TInt offset = aPageInfo->Offset()<<KPageShift;
2859 DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
2860 __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
2861 TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
2862 TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
2863 TPte* pt = PtePtrFromLinAddr(lin,asid);
2864 TPhysAddr phys = (*pt)&~KPageMask;
2865 *pt = KPteNotPresentEntry;
2866 CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
2867 InvalidateTLBForPage(lin,asid);
2868 m.CacheMaintenanceOnDecommit(phys);
2870 // actually decommit it from chunk...
2871 TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
2872 SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
2873 if(!--ptinfo.iCount)
2875 chunk->iPageTables[offset>>KChunkShift] = 0xffff;
2876 NKern::UnlockSystem();
2877 ((ArmMmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
2878 ((ArmMmu*)iMmu)->FreePageTable(ptid);
2879 NKern::LockSystem();
2884 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
2885 Panic(EUnexpectedPageType);
2891 // MemModelDemandPaging
2894 class MemModelDemandPaging : public DemandPaging
2897 // From RamCacheBase
2898 virtual void Init2();
2899 virtual TInt Init3();
2900 virtual TBool PageUnmapped(SPageInfo* aPageInfo);
2901 // From DemandPaging
2902 virtual TInt Fault(TAny* aExceptionInfo);
2903 virtual void SetOld(SPageInfo* aPageInfo);
2904 virtual void SetFree(SPageInfo* aPageInfo);
2905 virtual void NotifyPageFree(TPhysAddr aPage);
2906 virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
2907 virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
2908 virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
2909 virtual TInt PageState(TLinAddr aAddr);
2910 virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
2912 inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
2913 void InitRomPaging();
2914 void InitCodePaging();
2915 TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid);
2916 TInt PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory);
2918 // use of the following members is protected by the system lock...
2919 TPte* iPurgePte; // PTE used for temporary mappings during cache purge operations
2920 TLinAddr iPurgeAddr; // address corresponding to iPurgePte
2923 extern void MakeGlobalPTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr);
2924 extern void MakePTEInaccessible(TPte* aPtePtr, TPte aNewPte, TLinAddr aLinAddr, TInt aAsid);
2927 // MemModelDemandPaging
2931 DemandPaging* DemandPaging::New()
2933 return new MemModelDemandPaging();
2937 void MemModelDemandPaging::Init2()
2939 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2"));
2940 DemandPaging::Init2();
2942 iPurgeAddr = KDemandPagingTempAddr;
2943 iPurgePte = PtePtrFromLinAddr(iPurgeAddr);
2945 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init2"));
2949 void MemModelDemandPaging::AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)
2951 aReq.iLoadAddr = iTempPages + aReqId * KPageSize * KPageColourCount;
2952 aReq.iLoadPte = PtePtrFromLinAddr(aReq.iLoadAddr);
2956 TInt MemModelDemandPaging::Init3()
2958 TInt r=DemandPaging::Init3();
2962 // Create a region for mapping pages during page in
2963 DPlatChunkHw* chunk;
2964 TInt chunkSize = (KMaxPagingDevices * KPagingRequestsPerDevice + 1) * KPageColourCount * KPageSize;
2965 DPlatChunkHw::DoNew(chunk, KPhysAddrInvalid, chunkSize, EMapAttrSupRw|EMapAttrFullyBlocking);
2967 Panic(EInitialiseFailed);
2968 TInt colourMask = KPageColourMask << KPageShift;
2969 iTempPages = (chunk->iLinAddr + colourMask) & ~colourMask;
2971 if(RomPagingRequested())
2974 if (CodePagingRequested())
2977 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init3"));
2981 void MemModelDemandPaging::InitRomPaging()
2983 // Make page tables for demand paged part of ROM...
2984 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("MemModelDemandPaging::Init3 making page tables for paged ROM"));
2985 TLinAddr lin = iRomPagedLinearBase&~KChunkMask; // first chunk with paged ROM in
2986 TLinAddr linEnd = iRomLinearBase+iRomSize;
2990 TInt ptid = Mmu().PageTableId(lin,0);
2994 ptid = Mmu().AllocPageTable();
2996 __NK_ASSERT_DEBUG(ptid>=0);
2997 Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift);
3000 // Get new page table addresses
3001 TPte* pt = PageTable(ptid);
3002 TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt,0);
3004 // Pointer to page directory entry
3005 TPde* ppde = ::InitPageDirectory + (lin>>KChunkShift);
3007 // Fill in Page Table
3008 TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift));
3009 pt += (lin&KChunkMask)>>KPageShift;
3010 TLinAddr firstPte = (TLinAddr)pt; // Will need this to clean page table memory region from cache
3014 if(lin<iRomPagedLinearBase)
3015 *pt++ = Mmu().LinearToPhysical(lin,0) | KRomPtePerm;
3018 MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
3023 while(pt<ptEnd && lin<=linEnd);
3025 CacheMaintenance::MultiplePtesUpdated((TLinAddr)firstPte, (TUint)pt-firstPte);
3027 // Add new Page Table to the Page Directory
3028 TPde newpde = ptPhys | KShadowPdePerm;
3029 __KTRACE_OPT2(KPAGING,KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
3030 TInt irq=NKern::DisableAllInterrupts();
3032 CacheMaintenance::SinglePteUpdated((TLinAddr)ppde);
3034 NKern::RestoreInterrupts(irq);
3039 void MemModelDemandPaging::InitCodePaging()
3041 // Initialise code paging info
3042 iCodeLinearBase = Mmu().iUserCodeBase;
3043 iCodeSize = Mmu().iMaxUserCodeSize;
3048 @return ETrue when the unmapped page should be freed, EFalse otherwise
3050 TBool MemModelDemandPaging::PageUnmapped(SPageInfo* aPageInfo)
3052 SPageInfo::TType type = aPageInfo->Type();
3054 // Only have to deal with cache pages - pages containing code don't get returned to the system
3055 // when they are decommitted from an individual process, only when the code segment is destroyed
3056 if(type!=SPageInfo::EPagedCache)
3058 __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedCode); // shouldn't happen
3059 __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet
3063 RemovePage(aPageInfo);
3064 AddAsFreePage(aPageInfo);
3065 // Return false to stop DMemModelChunk::DoDecommit from freeing this page
3070 void DoSetCodeOld(SPageInfo* aPageInfo, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
3072 NThread* currentThread = NKern::CurrentThread();
3073 aPageInfo->SetModifier(currentThread);
3074 // scan all address spaces...
3076 TInt lastAsid = KArmV6NumAsids-1;
3077 TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
3080 TUint32 bits = *ptr++;
3084 if(bits&0x80000000u)
3086 // codeseg is mapped in this address space, so update PTE...
3087 TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
3089 if(pte&KPtePresentMask)
3091 __NK_ASSERT_DEBUG((pte&~KPageMask) == aPageInfo->PhysAddr());
3092 MakePTEInaccessible(pt, pte&~KPtePresentMask, aLinAddr, asid);
3097 if(NKern::FlashSystem() && aPageInfo->CheckModified(currentThread))
3098 return; // page was modified by another thread
3101 while(asid<lastAsid);
3105 void MemModelDemandPaging::SetOld(SPageInfo* aPageInfo)
3107 __ASSERT_SYSTEM_LOCK;
3108 __NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedOld);
3110 SPageInfo::TType type = aPageInfo->Type();
3112 if(type==SPageInfo::EPagedROM)
3114 // get linear address of page...
3115 TInt offset = aPageInfo->Offset()<<KPageShift;
3116 __NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
3118 // make page inaccessible...
3119 TLinAddr lin = iRomLinearBase+offset;
3120 TPte* pt = PtePtrFromLinAddr(lin);
3121 MakeGlobalPTEInaccessible(pt, *pt&~KPtePresentMask, lin);
3123 else if(type==SPageInfo::EPagedCode)
3125 START_PAGING_BENCHMARK;
3127 // get linear address of page...
3128 TInt offset = aPageInfo->Offset()<<KPageShift;
3129 __NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
3130 TLinAddr lin = iCodeLinearBase+offset;
3132 // get CodeSegMemory...
3133 DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
3134 __NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);
3137 TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
3138 __NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
3141 // make page inaccessible...
3142 DoSetCodeOld(aPageInfo,codeSegMemory,lin);
3144 END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld);
3146 else if(type==SPageInfo::EPagedCache)
3148 // leave page accessible
3150 else if(type!=SPageInfo::EPagedFree)
3152 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type()));
3153 Panic(EUnexpectedPageType);
3155 NKern::FlashSystem();
3159 void DoSetCodeFree(SPageInfo* aPageInfo, TPhysAddr aPhysAddr, DMemModelCodeSegMemory* aCodeSegMemory, TLinAddr aLinAddr)
3161 NThread* currentThread = NKern::CurrentThread();
3162 aPageInfo->SetModifier(currentThread);
3163 // scan all address spaces...
3165 TInt lastAsid = KArmV6NumAsids-1;
3166 TUint32* ptr = aCodeSegMemory->iOsAsids->iMap;
3169 TUint32 bits = *ptr++;
3173 if(bits&0x80000000u)
3175 // codeseg is mapped in this address space, so update PTE...
3176 TPte* pt = PtePtrFromLinAddr(aLinAddr,asid);
3178 if (pte!=KPteNotPresentEntry && (pte&~KPageMask) == aPhysAddr)
3179 MakePTEInaccessible(pt, KPteNotPresentEntry, aLinAddr, asid);
3183 if(NKern::FlashSystem())
3185 // nobody else should modify page!
3186 __NK_ASSERT_DEBUG(!aPageInfo->CheckModified(currentThread));
3190 while(asid<lastAsid);
3194 void MemModelDemandPaging::SetFree(SPageInfo* aPageInfo)
3196 __ASSERT_SYSTEM_LOCK;
3197 __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
3198 __NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedDead);
3199 if(aPageInfo->LockCount())
3200 Panic(ERamPageLocked);
3202 SPageInfo::TType type = aPageInfo->Type();
3203 TPhysAddr phys = aPageInfo->PhysAddr();
3205 if(type==SPageInfo::EPagedROM)
3207 // get linear address of page...
3208 TInt offset = aPageInfo->Offset()<<KPageShift;
3209 __NK_ASSERT_DEBUG(TUint(offset)<iRomSize);
3210 TLinAddr lin = iRomLinearBase+offset;
3213 TPte* pt = PtePtrFromLinAddr(lin);
3214 MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, lin);
3216 #ifdef BTRACE_PAGING
3217 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutROM,phys,lin);
3220 else if(type==SPageInfo::EPagedCode)
3222 START_PAGING_BENCHMARK;
3224 // get linear address of page...
3225 TInt offset = aPageInfo->Offset()<<KPageShift;
3226 __NK_ASSERT_DEBUG(TUint(offset)<iCodeSize);
3227 TLinAddr lin = iCodeLinearBase+offset;
3229 // get CodeSegMemory...
3230 // NOTE, this cannot die because we hold the RamAlloc mutex, and the CodeSegMemory
3231 // destructor also needs this mutex to do its cleanup...
3232 DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)aPageInfo->Owner();
3233 __NK_ASSERT_DEBUG(codeSegMemory && codeSegMemory->iPages && codeSegMemory->iIsDemandPaged);
3235 // remove page from CodeSegMemory (must come before System Lock is released)...
3236 TInt pageNumber = (lin - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
3237 __NK_ASSERT_DEBUG(codeSegMemory->iPages[pageNumber] == aPageInfo->PhysAddr());
3238 codeSegMemory->iPages[pageNumber] = KPhysAddrInvalid;
3240 // unmap page from all processes it's mapped into...
3241 DoSetCodeFree(aPageInfo,phys,codeSegMemory,lin);
3243 END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree);
3244 #ifdef BTRACE_PAGING
3245 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCode,phys,lin);
3248 else if(type==SPageInfo::EPagedCache)
3250 // get linear address of page...
3251 TInt offset = aPageInfo->Offset()<<KPageShift;
3252 DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
3253 __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
3254 TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
3257 TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
3258 TPte* pt = PtePtrFromLinAddr(lin,asid);
3259 *pt = KPteNotPresentEntry;
3260 CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
3262 InvalidateTLBForPage(lin,asid);
3264 // actually decommit it from chunk...
3265 TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
3266 SPageTableInfo& ptinfo=Mmu().iPtInfo[ptid];
3267 if(!--ptinfo.iCount)
3269 chunk->iPageTables[offset>>KChunkShift] = 0xffff;
3270 NKern::UnlockSystem();
3271 Mmu().DoUnassignPageTable(lin, (TAny*)asid);
3272 Mmu().FreePageTable(ptid);
3273 NKern::LockSystem();
3276 #ifdef BTRACE_PAGING
3277 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin);
3280 else if(type==SPageInfo::EPagedFree)
3283 #ifdef BTRACE_PAGING
3284 BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys);
3286 // fall through to cache purge code because cache may not have been
3287 // cleaned for this page if PageUnmapped was called
3291 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
3292 Panic(EUnexpectedPageType);
3296 NKern::UnlockSystem();
3298 // purge cache for page...
3299 TInt colour = aPageInfo->Offset()&KPageColourMask;
3300 TPte& pte=iPurgePte[colour];
3301 TLinAddr va=iPurgeAddr+(colour<<KPageShift);
3302 pte=phys|SP_PTE(KArmV6PermRWNO, TheMmu.iCacheMaintenanceTempMapAttr, 1, 1);
3303 CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
3305 CacheMaintenance::PageToReuse(va,EMemAttNormalCached, KPhysAddrInvalid);
3308 CacheMaintenance::SinglePteUpdated((TLinAddr)&pte);
3309 InvalidateTLBForPage(va,KERNEL_MAPPING);
3311 NKern::LockSystem();
3315 void MemModelDemandPaging::NotifyPageFree(TPhysAddr aPage)
3317 __KTRACE_OPT(KPAGING, Kern::Printf("MemModelDemandPaging::NotifyPageFree %08x", aPage));
3318 __ASSERT_SYSTEM_LOCK;
3320 SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aPage);
3321 __ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EPagedCode, MM::Panic(MM::EUnexpectedPageType));
3322 RemovePage(pageInfo);
3324 AddAsFreePage(pageInfo);
3328 TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo)
3330 TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
3332 // Get faulting address
3333 TLinAddr faultAddress = exc.iFaultAddress;
3334 if(exc.iExcCode==EArmExceptionDataAbort)
3336 // Let writes take an exception rather than page in any memory...
3337 if(exc.iFaultStatus&(1<<11))
3340 else if (exc.iExcCode != EArmExceptionPrefetchAbort)
3341 return KErrUnknown; // Not prefetch or data abort
3343 // Only handle page translation faults
3344 if((exc.iFaultStatus & 0x40f) != 0x7)
3347 DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
3349 // check which region the fault occurred in...
3350 TInt asid = 0; // asid != 0 => code paging fault
3351 if(TUint(faultAddress-iRomPagedLinearBase)<iRomPagedSize)
3355 else if(TUint(faultAddress-iCodeLinearBase)<iCodeSize)
3358 asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;
3360 else if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
3362 // in aliased memory
3363 faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
3364 if(TUint(faultAddress-iCodeLinearBase)>=iCodeSize)
3365 return KErrUnknown; // not in alias of code
3366 asid = thread->iAliasOsAsid;
3367 __NK_ASSERT_DEBUG(asid != 0);
3370 return KErrUnknown; // Not in pageable region
3372 // Check if thread holds fast mutex and claim system lock
3373 NFastMutex* fm = NKern::HeldFastMutex();
3374 TPagingExcTrap* trap = thread->iPagingExcTrap;
3376 NKern::LockSystem();
3379 if(!trap || fm!=&TheScheduler.iLock)
3381 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
3382 Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes
3384 // restore address space on multiple memory model (because the trap will
3385 // bypass any code which would have done this.)...
3386 DMemModelThread::RestoreAddressSpace();
3388 // Current thread already has the system lock...
3389 NKern::FlashSystem(); // Let someone else have a go with the system lock.
3392 // System locked here
3395 if(thread->IsRealtime())
3396 r = CheckRealtimeThreadFault(thread, aExceptionInfo);
3398 r = HandleFault(exc, faultAddress, asid);
3400 // Restore system lock state
3401 if (fm != NKern::HeldFastMutex())
3404 NKern::LockSystem();
3406 NKern::UnlockSystem();
3409 // Deal with XTRAP_PAGING
3410 if(r == KErrNone && trap)
3412 trap->Exception(1); // Return from exception trap with result '1' (value>0)
3413 // code doesn't continue beyond this point.
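
// Do whatever is needed to make the faulting page accessible: rejuvenate an existing
// (old or dying) mapping if the PTE already refers to a page, otherwise locate the
// owning code segment where relevant and page the contents in from backing store.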
TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TInt aAsid)
	{
	++iEventInfo.iPageFaultCount;

	// get page table entry...
	TPte* pt = SafePtePtrFromLinAddr(aFaultAddress, aAsid);
	if(!pt)
		return KErrNotFound;
	TPte pte = *pt;

	// Do what is required to make page accessible...

	if(pte&KPtePresentMask)
		{
		// PTE is present, so assume it has already been dealt with
#ifdef BTRACE_PAGING
		BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15);
#endif
		return KErrNone;
		}

	if(pte!=KPteNotPresentEntry)
		{
		// PTE already has a page
		SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte);
		if(pageInfo->State()==SPageInfo::EStatePagedDead)
			{
			// page currently being unmapped, so do that here...
			MakePTEInaccessible(pt, KPteNotPresentEntry, aFaultAddress, aAsid);
			}
		else
			{
			// page just needs making young again...
			*pt = TPte(pte|KArmV6PteSmallPage); // Update page table
			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
			Rejuvenate(pageInfo);
#ifdef BTRACE_PAGING
			BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15);
#endif
			return KErrNone;
			}
		}

	// PTE not present, so page it in...
	// check if fault in a CodeSeg...
	DMemModelCodeSegMemory* codeSegMemory = NULL;
	if (!aAsid) // ROM paging fault
		NKern::ThreadEnterCS();
	else
		{
		// find CodeSeg...
		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress);
		if (!codeSeg)
			return KErrNotFound;
		codeSegMemory = codeSeg->Memory();
		if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged || codeSegMemory->iOsAsids->NotFree(aAsid, 1))
			return KErrNotFound;

		// check if it's paged in but not yet mapped into this process...
		TInt pageNumber = (aFaultAddress - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
		TPhysAddr page = codeSegMemory->iPages[pageNumber];
		if (page != KPhysAddrInvalid)
			{
			// map it into this process...
			SPageInfo* pageInfo = SPageInfo::FromPhysAddr(page);
			__NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
			*pt = page | (codeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
			Rejuvenate(pageInfo);
#ifdef BTRACE_PAGING
			BTraceContext8(BTrace::EPaging,BTrace::EPagingMapCode,page,aFaultAddress);
#endif
			return KErrNone;
			}

		// open reference on CodeSegMemory
		NKern::ThreadEnterCS();
#ifdef _DEBUG
		TInt r =
#endif
			codeSegMemory->Open();
		__NK_ASSERT_DEBUG(r==KErrNone);
		NKern::FlashSystem();
		}

#ifdef BTRACE_PAGING
	BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15);
#endif
	TInt r = PageIn(aFaultAddress, aAsid, codeSegMemory);

	NKern::UnlockSystem();

	if(codeSegMemory)
		codeSegMemory->Close();

	NKern::ThreadLeaveCS();

	return r;
	}
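
// Read the contents of one page from backing store (the paged part of ROM, or a
// demand-paged code segment), using a temporary kernel mapping for the transfer, then
// map it at its final address and add it to the live page list.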
TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, TInt aAsid, DMemModelCodeSegMemory* aCodeSegMemory)
	{
	// Get a request object - this may block until one is available
	DPagingRequest* req = AcquireRequestObject();

	// Get page table entry
	TPte* pt = SafePtePtrFromLinAddr(aAddress, aAsid);

	// Check page is still required...
	if(!pt || *pt!=KPteNotPresentEntry)
		{
#ifdef BTRACE_PAGING
		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
#endif
		ReleaseRequestObject(req);
		return pt ? KErrNone : KErrNotFound;
		}

	++iEventInfo.iPageInReadCount;

	// Get a free page
	SPageInfo* pageInfo = AllocateNewPage();
	__NK_ASSERT_DEBUG(pageInfo);

	// Get physical address of free page
	TPhysAddr phys = pageInfo->PhysAddr();
	__NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
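
	// The temporary load mapping set up below shares the page colour of the final
	// virtual address, so the cache maintenance performed through it also covers the
	// final mapping on VIPT caches.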
	// Temporarily map free page
	TInt colour = (aAddress>>KPageShift)&KPageColourMask;
	__NK_ASSERT_DEBUG((req->iLoadAddr & (KPageColourMask << KPageShift)) == 0);
	req->iLoadAddr |= colour << KPageShift;
	TLinAddr loadAddr = req->iLoadAddr;
	pt = req->iLoadPte+colour;
//	*pt = phys | SP_PTE(KArmV6PermRWNO, KArmV6MemAttWTWAWTWA, 0, 1);
	*pt = phys | SP_PTE(KArmV6PermRWNO, KNormalUncachedAttr, 0, 1);
	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);

	// Read page from backing store
	aAddress &= ~KPageMask;
	NKern::UnlockSystem();

	TInt r;
	if (!aCodeSegMemory)
		r = ReadRomPage(req, aAddress);
	else
		{
		r = ReadCodePage(req, aCodeSegMemory, aAddress);
		if (r == KErrNone)
			aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress);
		}
	if (r != KErrNone)
		Panic(EPageInFailed);

	// make caches consistent...
//	Cache::IMB_Range(loadAddr, KPageSize);
	*pt = phys | SP_PTE(KArmV6PermRWNO, KNormalCachedAttr, 0, 1);
	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
	InvalidateTLBForPage(loadAddr,KERNEL_MAPPING);
	CacheMaintenance::CodeChanged(loadAddr, KPageSize, CacheMaintenance::ECPUUncached);

	NKern::LockSystem();

	// Invalidate temporary mapping
	MakeGlobalPTEInaccessible(pt, KPteNotPresentEntry, loadAddr);

	// Release request object now we're finished with it
	req->iLoadAddr &= ~(KPageColourMask << KPageShift);
	ReleaseRequestObject(req);

	// Get page table entry
	pt = SafePtePtrFromLinAddr(aAddress, aAsid);

	// Check page still needs updating
	TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry;
	if (aCodeSegMemory)
		notNeeded |= aCodeSegMemory->iOsAsids->NotFree(aAsid, 1);
	if (notNeeded)
		{
		// We don't need the new page after all, so put it on the active list as a free page
		__KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)"));
#ifdef BTRACE_PAGING
		BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
#endif
		AddAsFreePage(pageInfo);
		return pt ? KErrNone : KErrNotFound;
		}

	// Update page info
	if (!aCodeSegMemory)
		pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift);
	else
		{
		// Check if page has been paged in and mapped into another process while we were waiting
		TInt pageNumber = (aAddress - aCodeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
		TPhysAddr page = aCodeSegMemory->iPages[pageNumber];
		if (page != KPhysAddrInvalid)
			{
			// don't need page we've just paged in...
			AddAsFreePage(pageInfo);

			// map existing page into this process...
			pageInfo = SPageInfo::FromPhysAddr(page);
			__NK_ASSERT_DEBUG(pageInfo->State()!=SPageInfo::EStatePagedDead);
			*pt = page | (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte);
			CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
#ifdef BTRACE_PAGING
			BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
#endif
			Rejuvenate(pageInfo);
			return KErrNone;
			}
		aCodeSegMemory->iPages[pageNumber] = phys;

		pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift);
		}

	// Map page into final location
	*pt = phys | (aCodeSegMemory ? (aCodeSegMemory->iCreator ? KUserCodeLoadPte : KUserCodeRunPte) : KRomPtePerm);
	CacheMaintenance::SinglePteUpdated((TLinAddr)pt);
#ifdef BTRACE_PAGING
	TInt subCat = aCodeSegMemory ? BTrace::EPagingPageInCode : BTrace::EPagingPageInROM;
	BTraceContext8(BTrace::EPaging,subCat,phys,aAddress);
#endif

	AddAsYoungest(pageInfo);
	return KErrNone;
	}
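
// Helper used when pinning pageable memory: reading a byte from the target page
// (directly for global addresses, or through a temporary alias of another process's
// address space) forces the page to be paged in before it is locked.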
inline TUint8 ReadByte(TLinAddr aAddress)
	{ return *(volatile TUint8*)aAddress; }


TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)
	{
	TInt r = KErrBadDescriptor;
	XTRAPD(exc,XT_DEFAULT,
		if (!aProcess)
			{
			XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage););
			r = KErrNone;
			}
		else
			{
			DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
		retry:
			TInt pagingFault;
			XTRAP_PAGING_START(pagingFault);
			CHECK_PAGING_SAFE;

			// make alias of page in this process
			TLinAddr alias_src;
			TInt alias_size;
			TInt aliasResult = t.Alias(aPage, (DMemModelProcess*)aProcess, 1, EMapAttrReadUser, alias_src, alias_size);
			if (aliasResult>=0)
				{
				// ensure page to be locked is mapped in, by reading from it...
				ReadByte(alias_src);
				r = KErrNone;
				}
			XTRAP_PAGING_END;
			t.RemoveAlias();
			if (pagingFault>0)
				goto retry;
			}
		);
	if (exc)
		return KErrBadDescriptor;
	return r;
	}

TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess)
	{
	TInt asid = 0;
	if (aProcess)
		asid = ((DMemModelProcess*)aProcess)->iOsAsid;
	return Mmu().LinearToPhysical(aPage, asid);
	}
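
// Return a bitmask of EPageState* flags describing how aAddr is currently mapped for
// the current process (region, page table presence, PTE state and paging state).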
TInt MemModelDemandPaging::PageState(TLinAddr aAddr)
	{
	DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
	TInt asid = 0;
	TPte* ptePtr = 0;
	TPte pte = 0;
	TInt r = 0;
	SPageInfo* pageInfo = NULL;

	NKern::LockSystem();

	DMemModelCodeSegMemory* codeSegMemory = 0;
	if(TUint(aAddr-iRomPagedLinearBase)<iRomPagedSize)
		r |= EPageStateInRom;
	else if (TUint(aAddr-iCodeLinearBase)<iCodeSize)
		{
		DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aAddr);
		if (codeSeg)
			codeSegMemory = codeSeg->Memory();
		asid = process->iOsAsid;
		if (codeSegMemory && codeSegMemory->iOsAsids->NotAllocated(asid, 1))
			{
			r |= EPageStateInRamCode;
			if (codeSegMemory->iIsDemandPaged)
				r |= EPageStatePaged;
			}
		if(process->iCodeChunk)
			r |= EPageStateCodeChunkPresent;
		}

	ptePtr = SafePtePtrFromLinAddr(aAddr,asid);
	if (!ptePtr)
		goto done;
	r |= EPageStatePageTablePresent;
	pte = *ptePtr;
	if (pte == KPteNotPresentEntry)
		goto done;
	r |= EPageStatePtePresent;
	if (pte & KPtePresentMask)
		r |= EPageStatePteValid;

	pageInfo = SPageInfo::FromPhysAddr(pte);
	r |= pageInfo->Type();
	r |= pageInfo->State()<<8;

	if (codeSegMemory && codeSegMemory->iPages)
		{
		TPhysAddr phys = pte & ~KPageMask;
		TInt pageNumber = (aAddr - codeSegMemory->iRamInfo.iCodeRunAddr) >> KPageShift;
		if (codeSegMemory->iPages[pageNumber] == phys)
			r |= EPageStatePhysAddrPresent;
		}

done:
	NKern::UnlockSystem();
	return r;
	}

TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)
	{
	// Don't check mutex order for reads from global area, except for the paged part of rom
	TBool rangeInGlobalArea = aStartAddr >= KRomLinearBase;
	TBool rangeInPagedRom = iRomPagedLinearBase != 0 && aStartAddr < (iRomLinearBase + iRomSize) && (aStartAddr + aLength) > iRomPagedLinearBase;
	return !rangeInGlobalArea || rangeInPagedRom;
	}
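
// Pin a range of memory so it cannot be paged out while in use; a reservation is only
// attempted if the range can actually contain demand-paged memory (paged ROM or
// RAM-loaded code).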
EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
	{
	MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager;
	if (pager)
		{
		ArmMmu& m = pager->Mmu();
		TLinAddr end = aStart+aSize;

		if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) ||
			(aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase))
			return pager->ReserveLock(aThread,aStart,aSize,*this);
		}

	return EFalse;
	}

void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
//
// Mark the page at aOffset in aChunk read-only to prevent it being
// modified while defrag is in progress. Save the required information
// to allow the fault handler to deal with this.
// Call this with the system unlocked.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset));

	TInt ptid = aChunk->iPageTables[aOffset>>KChunkShift];
	if (ptid == 0xffff)
		Panic(EDefragDisablePageFailed);

	NKern::LockSystem();
	TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift);
	TPte pte = *pPte;
	if ((pte & KArmV6PteSmallPage) != KArmV6PteSmallPage
			|| SP_PTE_PERM_GET(pte) != (TUint)KArmV6PermRWRW)
		Panic(EDefragDisablePageFailed);

	iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset;
	if (aChunk->iOwningProcess)
		iDisabledAddrAsid = ((DMemModelProcess*)(aChunk->iOwningProcess))->iOsAsid;
	else
		iDisabledAddrAsid = iDisabledAddr<KRomLinearBase ? UNKNOWN_MAPPING : KERNEL_MAPPING;
	iDisabledPte = pPte;
	iDisabledOldVal = pte;

	*pPte = SP_PTE_PERM_SET(pte, KArmV6PermRORO);
	CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
	InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
	NKern::UnlockSystem();
	}
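
// Fault handler for the defrag case above: if a thread writes to the page that
// DisablePageModification() made read-only, restore the original PTE so the write can
// be retried; any other fault is reported as unhandled (KErrUnknown).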
TInt ArmMmu::RamDefragFault(TAny* aExceptionInfo)
	{
	TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;

	// Get faulting address
	TLinAddr faultAddress;
	if(exc.iExcCode==EArmExceptionDataAbort)
		{
		faultAddress = exc.iFaultAddress;
		// Defrag can only cause writes to fault on the multiple memory model
		if(!(exc.iFaultStatus&(1<<11)))
			return KErrUnknown;
		}
	else
		return KErrUnknown; // Not data abort

	// Only handle page permission faults
	if((exc.iFaultStatus & 0x40f) != 0xf)
		return KErrUnknown;

	DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
	TInt asid = ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid;

	TBool aliased = EFalse;
	if (thread->iAliasLinAddr && TUint(faultAddress - thread->iAliasLinAddr) < TUint(KPageSize))
		{
		// in aliased memory
		aliased = ETrue;
		faultAddress = (faultAddress - thread->iAliasLinAddr) + thread->iAliasTarget;
		asid = thread->iAliasOsAsid;
		__NK_ASSERT_DEBUG(asid != 0);
		}

	// Take system lock if not already held
	NFastMutex* fm = NKern::HeldFastMutex();
	if(!fm)
		NKern::LockSystem();
	else if(fm!=&TheScheduler.iLock)
		{
		__KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
		Panic(EDefragFaultWhilstFMHeld); // Not allowed to hold mutexes
		}

	TInt r = KErrUnknown;

	// check if write access to the page has already been restored and retry if so
	TPte* pt = SafePtePtrFromLinAddr(faultAddress, asid);
	if(!pt)
		{
		r = KErrNotFound;
		goto leave;
		}
	if (SP_PTE_PERM_GET(*pt) == (TUint)KArmV6PermRWRW)
		{
		r = KErrNone;
		goto leave;
		}

	// check if the fault occurred in the page we are moving
	if (	   iDisabledPte
			&& TUint(faultAddress - iDisabledAddr) < TUint(KPageSize)
			&& (iDisabledAddrAsid < 0 || asid == iDisabledAddrAsid) )
		{
		// restore access to the page
		*iDisabledPte = iDisabledOldVal;
		CacheMaintenance::SinglePteUpdated((TLinAddr)iDisabledPte);
		InvalidateTLBForPage(iDisabledAddr, iDisabledAddrAsid);
		if (aliased)
			InvalidateTLBForPage(exc.iFaultAddress, ((DMemModelProcess*)TheScheduler.iAddressSpace)->iOsAsid);
		iDisabledAddr = 0;
		iDisabledAddrAsid = -1;
		iDisabledPte = NULL;
		iDisabledOldVal = 0;
		r = KErrNone;
		}

leave:
	// Restore system lock state
	if (!fm)
		NKern::UnlockSystem();