Update contrib.
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\memmodel\epoc\moving\arm\xmmu.cpp
#include <mmubase.inl>
#include <demand_paging.h>
#include "cache_maintenance.h"
extern void FlushTLBs();
#if defined(__CPU_SA1__)
const TPde KRomSectionPermissions = SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWB, EDomainClient);
const TPde KShadowPdePerm = PT_PDE(EDomainClient);
const TPte KPtPtePerm = SP_PTE(KArmV45PermRWNO, KArmV45MemAttBuf); // page tables not cached
const TPte KRomPtePermissions = SP_PTE(KArmV45PermRORO, KArmV45MemAttWB); // ROM is cached, read-only for everyone
const TPte KShadowPtePerm = SP_PTE(KArmV45PermRWRO, KArmV45MemAttWB); // shadowed ROM is cached, supervisor writeable
#elif defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
const TPde KRomSectionPermissions = SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWB, EDomainClient);
const TPde KShadowPdePerm = PT_PDE(EDomainClient);
const TPte KPtPtePerm = SP_PTE(KArmV45PermRWNO, KArmV45MemAttWB); // page tables cached (write-through)
const TPte KRomPtePermissions = SP_PTE(KArmV45PermRORO, KArmV45MemAttWB); // ROM is cached, read-only for everyone
const TPte KShadowPtePerm = SP_PTE(KArmV45PermRWRO, KArmV45MemAttWB); // shadowed ROM is cached, supervisor writeable
#elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
const TPde KRomSectionPermissions = SECTION_PDE(KArmV45PermRORO, KArmV45MemAttWT, EDomainClient);
const TPde KShadowPdePerm = PT_PDE(EDomainClient);
const TPte KPtPtePerm = SP_PTE(KArmV45PermRWNO, KArmV45MemAttWT); // page tables cached write through
const TPte KRomPtePermissions = SP_PTE(KArmV45PermRORO, KArmV45MemAttWT); // ROM is cached, read-only for everyone
const TPte KShadowPtePerm = SP_PTE(KArmV45PermRWRO, KArmV45MemAttWT); // shadowed ROM is cached, supervisor writeable
#elif defined(__CPU_XSCALE__)
#ifdef __CPU_XSCALE_MANZANO__
const TPde KRomSectionPermissions = SECTION_PDE(KArmV45PermRORO, KXScaleMemAttWTRA_WBWA, EDomainClient);
const TPde KShadowPdePerm = PT_PDE(EDomainClient);
const TPte KPtPtePerm = SP_PTE(KArmV45PermRWNO, KXScaleMemAttWTRA_WBWA); // page tables write-through cached
const TPte KRomPtePermissions = SP_PTE(KArmV45PermRORO, KXScaleMemAttWTRA_WBWA); // ROM is cached, read-only for everyone
const TPte KShadowPtePerm = SP_PTE(KArmV45PermRWRO, KXScaleMemAttWTRA_WBWA); // shadowed ROM is cached, supervisor writeable
const TPde KRomSectionPermissions = SECTION_PDE(KArmV45PermRORO, KXScaleMemAttWTRA, EDomainClient);
const TPde KShadowPdePerm = PT_PDE(EDomainClient);
const TPte KPtPtePerm = SP_PTE(KArmV45PermRWNO, KXScaleMemAttWTRA); // page tables write-through cached
const TPte KRomPtePermissions = SP_PTE(KArmV45PermRORO, KXScaleMemAttWTRA); // ROM is cached, read-only for everyone
const TPte KShadowPtePerm = SP_PTE(KArmV45PermRWRO, KXScaleMemAttWTRA); // shadowed ROM is cached, supervisor writeable
const TPte KPtInfoPtePerm = KPtPtePerm;
const TPde KPtPdePerm = PT_PDE(EDomainClient);
// Permissions for each chunk type
ESupRo = SP_PTE(KArmV45PermRORO, KDefaultCaching),
ESupRw = SP_PTE(KArmV45PermRWNO, KDefaultCaching),
EUserRo = SP_PTE(KArmV45PermRWRO, KDefaultCaching),
EUserRw = SP_PTE(KArmV45PermRWRW, KDefaultCaching)
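// In the ARMv4/v5 access-permission names used above, the first half is the supervisor
// right and the second half the user right: RORO is read-only for both, RWNO is
// supervisor r/w with no user access, RWRO adds user read-only and RWRW is r/w for
// both (the same convention the permission handling in PdePtePermissions relies on below).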
LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
PT_PDE(EDomainClient), // EKernelData
PT_PDE(EDomainClient), // EKernelStack
PT_PDE(EDomainClient), // EKernelCode
PT_PDE(EDomainClient), // EDll
PT_PDE(EDomainClient), // EUserCode - user/ro & sup/rw everywhere
PT_PDE(EDomainClient), // ERamDrive - sup/rw accessed by domain change
// user data or self modifying code is sup/rw, user no access at home. It's user/rw & sup/rw when running
// note ARM MMU architecture prevents implementation of user read-only data
PT_PDE(EDomainClient), // EUserData
PT_PDE(EDomainClient), // EDllData
PT_PDE(EDomainClient), // EUserSelfModCode
PT_PDE(EDomainClient), // ESharedKernelSingle
PT_PDE(EDomainClient), // ESharedKernelMultiple
PT_PDE(EDomainClient), // ESharedIo
PT_PDE(EDomainClient), // ESharedKernelMirror (unused in this memory model)
PT_PDE(EDomainClient), // EKernelMessage
const TPde KUserDataRunningPermissions = PT_PDE(EDomainVarUserRun);
LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
ESupRw, // EKernelData
ESupRw, // EKernelStack
ESupRw, // EKernelCode
EUserRo, // EUserCode
ESupRw, // EUserSelfModCode
ESupRw, // ESharedKernelSingle
ESupRw, // ESharedKernelMultiple
ESupRw, // ESharedKernelMirror (unused in this memory model)
ESupRw, // EKernelMessage
const TPte KUserCodeLoadPte = (TPte)EUserRo;
const TPte KKernelCodeRunPte = (TPte)ESupRw;
// Inline functions for simple transformations
inline TLinAddr PageTableLinAddr(TInt aId)
return KPageTableBase + (aId<<KPageTableShift);
inline TPte* PageTable(TInt aId)
return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
inline TPde* PageDirectoryEntry(TLinAddr aLinAddr)
return PageDirectory + (aLinAddr>>KChunkShift);
inline TBool IsPageTable(TPde aPde)
return ((aPde&KPdeTypeMask)==KArmV45PdePageTable);
inline TBool IsSectionDescriptor(TPde aPde)
return ((aPde&KPdeTypeMask)==KArmV45PdeSection);
inline TBool IsPresent(TPte aPte)
return (aPte&KPtePresentMask);
inline TPhysAddr PageTablePhysAddr(TPde aPde)
return aPde & KPdePageTableAddrMask;
inline TPhysAddr PhysAddrFromSectionDescriptor(TPde aPde)
return aPde & KPdeSectionAddrMask;
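// The helpers above implement the two-level ARMv4/v5 walk: with the usual 4K pages
// and 1MB chunks (KPageShift==12, KChunkShift==20), bits 31..20 of a linear address
// select the PDE and, for page-table mappings, bits 19..12 select the PTE within the
// page table. For example, address 0x80321ABC uses PDE index 0x803 and PTE index 0x21.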
extern void InvalidateTLBForPage(TLinAddr /*aLinAddr*/);
void Mmu::SetupInitialPageInfo(SPageInfo* aPageInfo, TLinAddr aChunkAddr, TInt aPdeIndex)
__ASSERT_ALWAYS(aChunkAddr==0 || aChunkAddr>=KRamDriveEndAddress, Panic(EBadInitialPageAddr));
TLinAddr addr = aChunkAddr + (aPdeIndex<<KPageShift);
if (aPageInfo->Type()!=SPageInfo::EUnused)
return; // already set (page table)
if (addr == KPageTableInfoBase)
aPageInfo->SetPtInfo(0);
else if (addr>=KPageDirectoryBase && addr<(KPageDirectoryBase+KPageDirectorySize))
aPageInfo->SetPageDir(0,aPdeIndex);
aPageInfo->SetFixed();
void Mmu::SetupInitialPageTableInfo(TInt aId, TLinAddr aChunkAddr, TInt aNumPtes)
__ASSERT_ALWAYS(aChunkAddr==0 || aChunkAddr>=KRamDriveEndAddress, Panic(EBadInitialPageAddr));
SPageTableInfo& pti=PtInfo(aId);
pti.SetGlobal(aChunkAddr>>KChunkShift);
TInt Mmu::GetPageTableId(TLinAddr aAddr)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::PageTableId(%08x)",aAddr));
TInt pdeIndex=aAddr>>KChunkShift;
TPde pde = PageDirectory[pdeIndex];
if (IsPageTable(pde))
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
// Used only during boot for recovery of RAM drive
TInt ArmMmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
TInt id=KErrNotFound;
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:BootPageTableId(%08x,&)",aAddr));
TInt pdeIndex=aAddr>>KChunkShift;
TPde pde = PageDirectory[pdeIndex];
if (IsPageTable(pde))
aPtPhys = pde & KPdePageTableAddrMask;
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
SPageInfo::TType type = pi->Type();
if (type == SPageInfo::EPageTable)
id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
else if (type == SPageInfo::EUnused)
__KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
TBool ArmMmu::PteIsPresent(TPte aPte)
return aPte & KPtePresentMask;
TPhysAddr ArmMmu::PtePhysAddr(TPte aPte, TInt aPteIndex)
TUint pte_type = aPte & KPteTypeMask;
if (pte_type == KArmV45PteLargePage)
return (aPte & KPteLargePageAddrMask) + (TPhysAddr(aPteIndex << KPageShift) & KLargePageMask);
else if (pte_type != 0)
return aPte & KPteSmallPageAddrMask;
return KPhysAddrInvalid;
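// For a 64K large page the PTE holds a 64K-aligned base, so the physical address of an
// individual 4K page is recovered above by adding the low four bits of the page index
// shifted up by KPageShift, i.e. (aPteIndex<<12) & KLargePageMask.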
TPhysAddr ArmMmu::PdePhysAddr(TLinAddr aAddr)
TPde pde = PageDirectory[aAddr>>KChunkShift];
if (IsSectionDescriptor(pde))
return PhysAddrFromSectionDescriptor(pde);
return KPhysAddrInvalid;
TPte* SafePageTableFromPde(TPde aPde)
if((aPde&KPdeTypeMask)==KArmV45PdePageTable)
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
TInt id = (pi->Offset()<<KPtClusterShift) | ((aPde>>KPageTableShift)&KPtClusterMask);
return PageTable(id);
TPte* SafePtePtrFromLinAddr(TLinAddr aAddress)
TPde pde = PageDirectory[aAddress>>KChunkShift];
TPte* pt = SafePageTableFromPde(pde);
pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
__forceinline /* RVCT ignores normal inline qualifier :-( */
TPte* PtePtrFromLinAddr(TLinAddr aAddress)
TPde pde = PageDirectory[aAddress>>KChunkShift];
SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
TPte* pt = PageTable(id);
pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
TInt ArmMmu::LinearToPhysical(TLinAddr aLinAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
TPhysAddr physStart = ArmMmu::LinearToPhysical(aLinAddr);
TPhysAddr nextPhys = physStart&~KPageMask;
TUint32* pageList = aPhysicalPageList;
TInt pageIndex = aLinAddr>>KPageShift;
TInt pagesLeft = ((aLinAddr+aSize-1)>>KPageShift)+1 - pageIndex;
TPde* pdePtr = &PageDirectory[aLinAddr>>KChunkShift];
pageIndex &= KChunkMask>>KPageShift;
TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
if(pagesLeftInChunk>pagesLeft)
pagesLeftInChunk = pagesLeft;
pagesLeft -= pagesLeftInChunk;
TPde pde = *pdePtr++;
TUint pdeType = pde&KPdeTypeMask;
if(pdeType==KArmV45PdeSection)
phys = (pde & KPdeSectionAddrMask) + (pageIndex*KPageSize);
__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Section phys=%8x",phys));
TInt n=pagesLeftInChunk;
phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
TUint32* pageEnd = pageList+n;
while(pageList<pageEnd);
TPte* pt = SafePageTableFromPde(pde);
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical missing page table: PDE=%8x",pde));
TUint pte_type = pte & KPteTypeMask;
if (pte_type >= KArmV45PteSmallPage)
phys = (pte & KPteSmallPageAddrMask);
__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Small Page phys=%8x",phys));
phys==nextPhys ? nextPhys+=KPageSize : nextPhys=KPhysAddrInvalid;
if(--pagesLeftInChunk)
if (pte_type == KArmV45PteLargePage)
TUint pageOffset = ((TUint)pt>>2)&(KLargeSmallPageRatio-1);
phys = (pte & KPteLargePageAddrMask) + pageOffset*KPageSize;
__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::LinearToPhysical Large Page phys=%8x",phys));
TInt n=KLargeSmallPageRatio-pageOffset;
if(n>pagesLeftInChunk)
n = pagesLeftInChunk;
phys==nextPhys ? nextPhys+=n*KPageSize : nextPhys=KPhysAddrInvalid;
TUint32* pageEnd = pageList+n;
while(pageList<pageEnd);
if(pagesLeftInChunk-=n)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical bad PTE %8x",pte));
if(!pageList && nextPhys==KPhysAddrInvalid)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical not contiguous"));
if(nextPhys==KPhysAddrInvalid)
// Memory is discontiguous...
aPhysicalAddress = KPhysAddrInvalid;
// Memory is contiguous...
aPhysicalAddress = physStart;
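// Translate a single linear address by walking the page directory and, where a page
// table is present, the page table; returns KPhysAddrInvalid if the address is not
// covered by a section, 64K large page or 4K small page mapping.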
TPhysAddr ArmMmu::LinearToPhysical(TLinAddr aLinAddr)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::LinearToPhysical(%08x)",aLinAddr));
TPhysAddr phys = KPhysAddrInvalid;
TPde pde = PageDirectory[aLinAddr>>KChunkShift];
if (IsPageTable(pde))
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
TPte pte = PageTable(id)[pteIndex];
TUint pte_type = pte & KPteTypeMask;
if (pte_type == KArmV45PteLargePage)
phys = (pte & KPteLargePageAddrMask) + (aLinAddr & KLargePageMask);
__KTRACE_OPT(KMMU,Kern::Printf("Mapped with 64K page - returning %08x", phys));
else if (pte_type != 0)
phys = (pte & KPteSmallPageAddrMask) + (aLinAddr & KPageMask);
__KTRACE_OPT(KMMU,Kern::Printf("Mapped with 4K page - returning %08x", phys));
else if (IsSectionDescriptor(pde))
phys = (pde & KPdeSectionAddrMask) + (aLinAddr & KChunkMask);
__KTRACE_OPT(KMMU,Kern::Printf("Mapped with section - returning %08x", phys));
__KTRACE_OPT(KMMU,Kern::Printf("Address invalid"));
TInt ArmMmu::PreparePagesForDMA(TLinAddr aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
// Returns the list of physical pages belonging to the specified memory space.
// Checks these pages belong to a chunk marked as being trusted.
// Locks these pages so they cannot be moved by e.g. RAM defragmentation.
SPageInfo* pi = NULL;
DChunk* chunk = NULL;
__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA %08x+%08x",aLinAddr,aSize));
TUint32* pageList = aPhysicalPageList;
TInt pagesInList = 0; // The number of pages we put in the list so far
TInt pageIndex = (aLinAddr & KChunkMask) >> KPageShift; // Index of the page within the section
TInt pagesLeft = ((aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
MmuBase::Wait(); // RamAlloc mutex for accessing page/directory tables.
NKern::LockSystem(); // System lock for accessing SPageInfo objects.
TPde* pdePtr = PageDirectory + (aLinAddr>>KChunkShift);
TInt pagesLeftInChunk = (1<<(KChunkShift-KPageShift))-pageIndex;
if(pagesLeftInChunk>pagesLeft)
pagesLeftInChunk = pagesLeft;
pagesLeft -= pagesLeftInChunk;
TPte* pt = SafePageTableFromPde(*pdePtr++);
if(!pt) { err = KErrNotFound; goto fail; } // Cannot get the page table.
for(;pagesLeftInChunk--;)
TPhysAddr phys = (*pt++ & KPteSmallPageAddrMask);
pi = SPageInfo::SafeFromPhysAddr(phys);
if(!pi) { err = KErrNotFound; goto fail; } // Invalid address.
__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: PA:%x T:%x S:%x O:%x C:%x",phys, pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
{ // This is the first page. Check the 'trusted' bit.
if (pi->Type()!= SPageInfo::EChunk)
{ err = KErrAccessDenied; goto fail; } // The first page does not belong to a chunk.
chunk = (DChunk*)pi->Owner();
if ( (chunk == NULL) || ((chunk->iAttributes & DChunk::ETrustedChunk)== 0) )
{ err = KErrAccessDenied; goto fail; } // Not a trusted chunk
if ( (++pagesInList&127) == 0) // release the system lock temporarily every 512K
NKern::FlashSystem();
if (pi->Type()!= SPageInfo::EChunk)
{ err = KErrAccessDenied; goto fail; } // The last page does not belong to a chunk.
if (chunk && (chunk != (DChunk*)pi->Owner()))
{ err = KErrArgument; goto fail; } // The first and the last page do not belong to the same chunk.
NKern::UnlockSystem();
__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::PreparePagesForDMA failed"));
NKern::UnlockSystem();
ReleasePagesFromDMA(aPhysicalPageList, pagesInList);
TInt ArmMmu::ReleasePagesFromDMA(TPhysAddr* aPhysicalPageList, TInt aPageCount)
// Unlocks physical pages.
// @param aPhysicalPageList - points to the list of physical pages that should be released.
// @param aPageCount - the number of physical pages in the list.
__KTRACE_OPT(KMMU2,Kern::Printf("ArmMmu::ReleasePagesFromDMA count:%d",aPageCount));
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(*aPhysicalPageList++);
NKern::UnlockSystem();
__KTRACE_OPT(KMMU2,Kern::Printf("PageInfo: T:%x S:%x O:%x C:%x",pi->Type(), pi->State(), pi->Owner(), pi->LockCount()));
NKern::UnlockSystem();
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::Init1"));
iPageShift=KPageShift;
iChunkSize=KChunkSize;
iChunkMask=KChunkMask;
iChunkShift=KChunkShift;
iPageTableSize=KPageTableSize;
iPageTableMask=KPageTableMask;
iPageTableShift=KPageTableShift;
iPtClusterSize=KPtClusterSize;
iPtClusterMask=KPtClusterMask;
iPtClusterShift=KPtClusterShift;
iPtBlockSize=KPtBlockSize;
iPtBlockMask=KPtBlockMask;
iPtBlockShift=KPtBlockShift;
iPtGroupSize=KChunkSize/KPageTableSize;
iPtGroupMask=iPtGroupSize-1;
iPtGroupShift=iChunkShift-iPageTableShift;
//TInt* iPtBlockCount; // dynamically allocated - Init2
//TInt* iPtGroupCount; // dynamically allocated - Init2
iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
iPageTableLinBase=KPageTableBase;
//iRamPageAllocator; // dynamically allocated - Init2
//iAsyncFreeList; // dynamically allocated - Init2
//iPageTableAllocator; // dynamically allocated - Init2
//iPageTableLinearAllocator; // dynamically allocated - Init2
iPtInfoPtePerm=KPtInfoPtePerm;
iPtPtePerm=KPtPtePerm;
iPtPdePerm=KPtPdePerm;
iSecondTempAddr=KSecondTempAddr;
iMapSizes=KPageSize|KLargePageSize|KChunkSize;
iRomLinearBase = ::RomHeaderAddress;
iRomLinearEnd = KRomLinearEnd;
iShadowPtePerm = KShadowPtePerm;
iShadowPdePerm = KShadowPdePerm;
TInt total_ram=TheSuperPage().iTotalRamSize;
#if defined(__HAS_EXTERNAL_CACHE__)
// L2 cache on ARMv5 is always in write-back mode => must always be purged
iDecommitThreshold = CacheMaintenance::SyncAllPerformanceThresholdPages();
iDecommitThreshold = 0; // no cache consistency issues on decommit
iDataSectionBase = KDataSectionBase;
iDataSectionEnd = KDataSectionEnd;
iMaxDllDataSize=Min(total_ram/2, 0x08000000); // phys RAM/2 up to 128Mb
iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask; // round up to chunk size
iMaxUserCodeSize=Min(total_ram, 0x10000000); // phys RAM up to 256Mb
iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask; // round up to chunk size
iMaxKernelCodeSize=Min(total_ram/2, 0x04000000); // phys RAM/2 up to 64Mb
iMaxKernelCodeSize=(iMaxKernelCodeSize+iChunkMask)&~iChunkMask; // round up to chunk size
iPdeBase=KPageDirectoryBase;
iUserCodeLoadPtePerm=KUserCodeLoadPte;
iKernelCodePtePerm=KKernelCodeRunPte;
iDllDataBase = KDataSectionEnd - iMaxDllDataSize;
iUserCodeBase = KPageInfoLinearBase - iMaxUserCodeSize;
iKernelCodeBase = iUserCodeBase - iMaxKernelCodeSize;
__KTRACE_OPT(KMMU,Kern::Printf("DDS %08x UCS %08x KCS %08x", iMaxDllDataSize, iMaxUserCodeSize, iMaxKernelCodeSize));
__KTRACE_OPT(KMMU,Kern::Printf("DDB %08x KCB %08x UCB %08x RLB %08x", iDllDataBase, iKernelCodeBase, iUserCodeBase, iRomLinearBase));
PP::MaxUserThreadStack=0x14000; // 80K - STDLIB asks for 64K for PosixServer!!!!
PP::UserThreadStackGuard=0x2000; // 8K
PP::MaxStackSpacePerProcess=0x200000; // 2Mb
K::SupervisorThreadStackSize=0x1000; // 4K
PP::SupervisorThreadStackGuard=0x1000; // 4K
K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
PP::RamDriveStartAddress=KRamDriveStartAddress;
PP::RamDriveRange=KRamDriveMaxSize;
PP::RamDriveMaxSize=KRamDriveMaxSize; // may be reduced later
__KTRACE_OPT(KBOOT,Kern::Printf("K::MaxMemCopyInOneGo=0x%x",K::MaxMemCopyInOneGo));
K::MemModelAttributes=EMemModelTypeMoving|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSupportFixed|
EMemModelAttrSvKernProt|EMemModelAttrIPCKernProt;
Arm::DefaultDomainAccess=KDefaultDomainAccess;
// Domains 0-3 are preallocated
// 0=Variable user running, 1=Client, 2=Page tables, 3=RAM drive
Domains=(~(0xffffffffu<<ENumDomains))&0xfffffff0u;
iMaxPageTables = 1<<(32-KChunkShift); // possibly reduced when RAM size known
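// Assuming ENumDomains is the architectural 16, the expression above evaluates to
// ~(0xffffffffu<<16) & 0xfffffff0u = 0x0000fff0, i.e. domains 4..15 are free for
// allocation while 0..3 stay reserved as listed. With KChunkShift==20 the page table
// ceiling is 1<<(32-20) = 4096, one potential page table per 1MB of address space.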
void ArmMmu::DoInit2()
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ArmMmu::DoInit2"));
iTempPte=PageTable(GetPageTableId(iTempAddr))+((iTempAddr&KChunkMask)>>KPageShift);
iSecondTempPte=PageTable(GetPageTableId(iSecondTempAddr))+((iSecondTempAddr&KChunkMask)>>KPageShift);
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x", iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
CreateKernelSection(iKernelCodeBase, KPageShift);
iHomePdeMap=(TUint32*)Kern::AllocZ(-KSuperPageLinAddr>>KChunkShift<<2);
iHomePdeMap=(TUint32*)((TUint32)iHomePdeMap-(KSuperPageLinAddr>>KChunkShift<<2)); // adjust the pointer so it's indexed by address>>20
#if defined(__CPU_WRITE_BACK_CACHE)
#if defined(__CPU_HAS_SINGLE_ENTRY_DCACHE_FLUSH)
if (InternalCache::Info[KCacheInfoD].iLineLength == 32)
iCopyPageFn = &::CopyPageForRemap32;
else if (InternalCache::Info[KCacheInfoD].iLineLength == 16)
iCopyPageFn = &::CopyPageForRemap16;
Panic(ENoCopyPageFunction);
#error Write-back cache without single entry dcache flush is not supported
#else // !__CPU_WRITE_BACK_CACHE
iCopyPageFn = &::CopyPageForRemapWT;
#ifndef __MMU_MACHINE_CODED__
void ArmMmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
// Map a list of physical RAM pages into a specified page table with specified PTE permissions.
// Update the page information array.
// Call this with the system locked.
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
aId, aType, aPtr, aOffset, aNumPages, aPtePerm));
SPageTableInfo& ptinfo=iPtInfo[aId];
ptinfo.iCount+=aNumPages;
aOffset>>=KPageShift;
TInt ptOffset=aOffset & KPagesInPDEMask; // entry number in page table
TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
TPhysAddr pa = *aPageList++;
*pPte++ = pa | aPtePerm; // insert PTE
__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
if (aType!=SPageInfo::EInvalid)
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
pi->Set(aType,aPtr,aOffset);
__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
++aOffset; // increment offset for next page
__DRAIN_WRITE_BUFFER;
void ArmMmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
// Map consecutive physical pages into a specified page table with specified PTE permissions.
// Update the page information array if RAM pages are being mapped.
// Call this with the system locked.
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
SPageTableInfo& ptinfo=iPtInfo[aId];
ptinfo.iCount+=aNumPages;
aOffset>>=KPageShift;
TInt ptOffset=aOffset & KPagesInPDEMask; // entry number in page table
TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
if(aType==SPageInfo::EInvalid)
pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
*pPte++ = aPhysAddr|aPtePerm; // insert PTE
aPhysAddr+=KPageSize;
__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
pi->Set(aType,aPtr,aOffset);
__KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
++aOffset; // increment offset for next page
__DRAIN_WRITE_BUFFER;
void ArmMmu::MapVirtual(TInt aId, TInt aNumPages)
// Called in place of MapRamPages or MapPhysicalPages to update mmu data structures when committing
// virtual address space to a chunk. No pages are mapped.
// Call this with the system locked.
SPageTableInfo& ptinfo=iPtInfo[aId];
ptinfo.iCount+=aNumPages;
void ArmMmu::RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* /*aProcess*/)
// Replace the mapping at address aAddr in page table aId.
// Update the page information array for both the old and new pages.
// Call this with the system locked.
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPage() id=%d addr=%08x old=%08x new=%08x perm=%08x", aId, aAddr, aOldAddr, aNewAddr, aPtePerm));
TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table
TPte* pPte=PageTable(aId)+ptOffset; // address of PTE
TUint pageType = (pte & KPteTypeMask);
if (pageType == KArmPteSmallPage || pageType == 0)
__ASSERT_ALWAYS((pte & KPteSmallPageAddrMask) == aOldAddr || pte==KPteNotPresentEntry, Panic(ERemapPageFailed));
SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOldAddr);
__ASSERT_DEBUG(oldpi->LockCount()==0,Panic(ERemapPageFailed));
*pPte = aNewAddr | aPtePerm; // overwrite PTE
__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",*pPte,pPte));
__DRAIN_WRITE_BUFFER;
InvalidateTLBForPage(aAddr); // flush any corresponding TLB entry
// update new pageinfo, clear old
SPageInfo* pi = SPageInfo::FromPhysAddr(aNewAddr);
pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPage() called on a non-4K page!"));
Panic(ERemapPageFailed);
void ArmMmu::RemapKernelPage(TInt aId, TLinAddr aSrc, TLinAddr aDest, TPhysAddr aNewPhys, TPte aPtePerm)
// Replace the mapping at address aSrc in page table aId.
// Called with the system locked.
// MUST NOT INVOKE ANY TRACING - or do anything else that might touch the kernel heap
// We are depending on this not reintroducing any of the cache lines we previously
TInt ptOffset=(aSrc&KChunkMask)>>KPageShift; // entry number in page table
TPte* pPte=PageTable(aId)+ptOffset; // address of PTE
TInt irq = NKern::DisableAllInterrupts();
CopyPageForRemap(aDest, aSrc);
*pPte = aNewPhys | aPtePerm; // overwrite PTE
__DRAIN_WRITE_BUFFER;
InvalidateTLBForPage(aSrc); // flush any corresponding TLB entry
NKern::RestoreInterrupts(irq);
TInt ArmMmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess*)
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
// @param aId Identifies the Page Table to unmap PTEs (Page Table Entries) from.
// @param aAddr Base virtual address of the region to unmap. It (indirectly) specifies the first PTE in this Page Table to unmap.
// @param aNumPages The number of consecutive PTEs to unmap.
// @param aPageList Points to pre-allocated array. On return, it is filled in with the list of physical addresses of the unmapped 4K
// @param aSetPagesFree If true, pages are placed in the free state and only mapped pages are added
// @param aNumPtes On return, indicates how many PTEs are unmapped.
// @param aNumFree On return, holds the number of freed 4K memory blocks. Not updated if aSetPagesFree is false.
// @return The number of PTEs still mapped in this Page Table (aId).
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::UnmapPages() id=%d addr=%08x n=%d pl=%08x set-free=%d",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
TInt ptOffset=(aAddr&KChunkMask)>>KPageShift; // entry number in page table
TPte* pPte=PageTable(aId)+ptOffset; // address of first PTE
TPte pte=*pPte; // get original PTE
*pPte++=0; // clear PTE
TUint pageType = (pte & KPteTypeMask);
if (pageType == KArmPteSmallPage)
InvalidateTLBForPage(aAddr); // flush any corresponding TLB entry
if (pageType == KArmPteSmallPage || (pageType == 0 && pte != KPteNotPresentEntry))
++np; // count unmapped pages
TPhysAddr pa=pte & KPteSmallPageAddrMask; // physical address of unmapped page
SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
__NK_ASSERT_DEBUG(pageType == KArmPteSmallPage ||
(pi->Type()==SPageInfo::EPagedCode && pi->State()==SPageInfo::EStatePagedOld));
if(iRamCache->PageUnmapped(pi))
pi->SetUnused(); // mark page as unused
if (pi->LockCount()==0)
*aPageList++=pa; // store in page list
++nf; // count free pages
*aPageList++=pa; // store in page list
SPageTableInfo& ptinfo=iPtInfo[aId];
TInt r=(ptinfo.iCount-=np);
__DRAIN_WRITE_BUFFER;
__KTRACE_OPT(KMMU,Kern::Printf("Unmapped %d; Freed: %d; Return %08x",np,nf,r));
return r; // return number of pages remaining in this page table
TInt ArmMmu::UnmapVirtual(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Adjust the page table reference count as if aNumPages pages were unmapped.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
SPageTableInfo& ptinfo=iPtInfo[aId];
TInt newCount = ptinfo.iCount - aNumPages;
UnmapPages(aId, aAddr, aNumPages, aPageList, aSetPagesFree, aNumPtes, aNumFree, aProcess);
ptinfo.iCount = newCount;
aNumPtes = aNumPages;
#ifndef __MMU_MACHINE_CODED__
void ArmMmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm)
// Assign an allocated page table to map a given linear address with specified permissions.
// This should be called with the system locked and the MMU mutex held.
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoAssignPageTable %d to %08x perm %08x",aId,aAddr,aPdePerm));
TLinAddr ptLin=PageTableLinAddr(aId);
TPhysAddr ptPhys=LinearToPhysical(ptLin);
TInt pdeIndex=TInt(aAddr>>KChunkShift);
PageDirectory[pdeIndex]=ptPhys|aPdePerm;
__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", ptPhys|aPdePerm, PageDirectory+pdeIndex));
__DRAIN_WRITE_BUFFER;
void ArmMmu::RemapPageTable(TPhysAddr aOld, TPhysAddr aNew, TLinAddr aAddr)
// Replace a page table mapping the specified linear address.
// This should be called with the system locked and the MMU mutex held.
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::RemapPageTable %08x to %08x at %08x",aOld,aNew,aAddr));
TInt pdeIndex=TInt(aAddr>>KChunkShift);
TPde pde=PageDirectory[pdeIndex];
__ASSERT_ALWAYS((pde & KPdePageTableAddrMask) == aOld, Panic(ERemapPageTableFailed));
TPde newPde=aNew|(pde&~KPdePageTableAddrMask);
PageDirectory[pdeIndex]=newPde;
__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newPde, PageDirectory+pdeIndex));
__DRAIN_WRITE_BUFFER;
void ArmMmu::DoUnassignPageTable(TLinAddr aAddr)
// Unassign a now-empty page table currently mapping the specified linear address.
// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
// This should be called with the system locked and the MMU mutex held.
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DoUnassignPageTable at %08x",aAddr));
TInt pdeIndex=TInt(aAddr>>KChunkShift);
PageDirectory[pdeIndex]=0;
__KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x", PageDirectory+pdeIndex));
__DRAIN_WRITE_BUFFER;
// Initialise page table at physical address aXptPhys to be used as page table aXptId
// to expand the virtual address range used for mapping page tables. Map the page table
// at aPhysAddr as page table aId using the expanded range.
// Assign aXptPhys to kernel's Page Directory.
// Called with system unlocked and MMU mutex held.
void ArmMmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
aXptId, aXptPhys, aId, aPhysAddr));
// put in a temporary mapping for aXptPhys
// make it noncacheable
TPhysAddr pa=aXptPhys&~KPageMask;
*iTempPte = pa | SP_PTE(KArmV45PermRWNO, KMemAttNC);
__DRAIN_WRITE_BUFFER;
TPte* xpt=(TPte*)(iTempAddr+(aXptPhys&KPageMask));
memclr(xpt, KPageTableSize);
// must in fact have aXptPhys and aPhysAddr in same physical page
__ASSERT_ALWAYS( TUint32(aXptPhys^aPhysAddr)<TUint32(KPageSize), MM::Panic(MM::EBootstrapPageTableBadAddr));
// so only need one mapping
xpt[(aXptId>>KPtClusterShift)&KPagesInPDEMask] = pa | KPtPtePerm;
// remove temporary mapping
__DRAIN_WRITE_BUFFER;
InvalidateTLBForPage(iTempAddr);
// initialise PtInfo...
TLinAddr xptAddr = PageTableLinAddr(aXptId);
iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);
TInt pdeIndex=TInt(xptAddr>>KChunkShift);
PageDirectory[pdeIndex]=aXptPhys|KPtPdePerm;
__DRAIN_WRITE_BUFFER;
NKern::UnlockSystem();
// Edit the self-mapping entry in page table aId, mapped at aTempMap, to
// change the physical address from aOld to aNew. Used when moving page
// tables which were created by BootstrapPageTable.
// Called with system locked and MMU mutex held.
void ArmMmu::FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::FixupXPageTable id=%04x, tempmap=%08x, old=%08x, new=%08x",
aId, aTempMap, aOld, aNew));
// find correct page table inside the page
TPte* xpt=(TPte*)(aTempMap + ((aId & KPtClusterMask) << KPageTableShift));
// find the pte in that page table
xpt += (aId>>KPtClusterShift)&KPagesInPDEMask;
// switch the mapping
__ASSERT_ALWAYS((*xpt&~KPageMask)==aOld, Panic(EFixupXPTFailed));
*xpt = aNew | KPtPtePerm;
// invalidate the TLB entry for the self-mapping page table
// the PDE has not yet been changed, but since we hold the
// system lock, nothing should bring this back into the TLB.
InvalidateTLBForPage(PageTableLinAddr(aId));
// Set up a page table (specified by aId) to map a 1Mb section of ROM containing aRomAddr
// using ROM at aOrigPhys.
void ArmMmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
aId, aRomAddr, aOrigPhys));
TPte* ppte = PageTable(aId);
TPte* ppte_End = ppte + KChunkSize/KPageSize;
TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
*ppte = phys | KRomPtePermissions;
__DRAIN_WRITE_BUFFER;
// Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
void ArmMmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
aShadowPhys, aRomAddr));
// put in a temporary mapping for aShadowPhys
// make it noncacheable
*iTempPte = aShadowPhys | SP_PTE(KArmV45PermRWNO, KMemAttNC);
__DRAIN_WRITE_BUFFER;
// copy contents of ROM
wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
__DRAIN_WRITE_BUFFER; // make sure contents are written to memory
// remove temporary mapping
__DRAIN_WRITE_BUFFER;
InvalidateTLBForPage(iTempAddr);
// Assign a shadow page table to replace a ROM section mapping
// Enter and return with system locked
void ArmMmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
TLinAddr ptLin=PageTableLinAddr(aId);
TPhysAddr ptPhys=LinearToPhysical(ptLin);
TPde* ppde = PageDirectory + (aRomAddr>>KChunkShift);
TPde newpde = ptPhys | KShadowPdePerm;
__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
TInt irq=NKern::DisableAllInterrupts();
*ppde = newpde; // map in the page table
__DRAIN_WRITE_BUFFER; // make sure new PDE written to main memory
FlushTLBs(); // flush both TLBs (no need to flush cache yet)
NKern::RestoreInterrupts(irq);
void ArmMmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
TPte newpte = aOrigPhys | KRomPtePermissions;
__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
TInt irq=NKern::DisableAllInterrupts();
__DRAIN_WRITE_BUFFER;
InvalidateTLBForPage(aRomAddr);
CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
NKern::RestoreInterrupts(irq);
TInt ArmMmu::UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu:UnassignShadowPageTable, lin=%08x origphys=%08x", aRomAddr, aOrigPhys));
TPde* ppde = PageDirectory + (aRomAddr>>KChunkShift);
TPde newpde = (aOrigPhys &~ KChunkMask) | KRomSectionPermissions;
__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
TInt irq=NKern::DisableAllInterrupts();
*ppde = newpde; // revert to section mapping
__DRAIN_WRITE_BUFFER; // make sure new PDE written to main memory
FlushTLBs(); // flush both TLBs
NKern::RestoreInterrupts(irq);
void ArmMmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
TPte newpte = (*ppte & KPteSmallPageAddrMask) | KRomPtePermissions;
__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
__DRAIN_WRITE_BUFFER;
InvalidateTLBForPage(aRomAddr);
void ArmMmu::Pagify(TInt aId, TLinAddr aLinAddr)
__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu:Pagify aId=%04x aLinAddr=%08x", aId, aLinAddr));
TInt pteIndex = (aLinAddr & KChunkMask)>>KPageShift;
TPte* pte = PageTable(aId);
if ((pte[pteIndex] & KPteTypeMask) == KArmV45PteLargePage)
__KTRACE_OPT(KMMU,Kern::Printf("Converting 64K page to 4K pages"));
TPte source = pte[pteIndex];
source = (source & KPteLargePageAddrMask) | SP_PTE_FROM_LP_PTE(source);
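// Fan the single large-page mapping out into 16 small-page PTEs below: each entry
// covers one 4K slice of the original 64K page, with entry<<12 supplying that slice's
// offset, so the same physical range stays mapped at small-page granularity.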
for (TInt entry=0; entry<16; entry++)
pte[entry] = source | (entry<<12);
void ArmMmu::FlushShadow(TLinAddr aRomAddr)
CacheMaintenance::CodeChanged(aRomAddr, KPageSize, CacheMaintenance::EMemoryRemap);
CacheMaintenance::PageToReuse(aRomAddr, EMemAttNormalCached, KPhysAddrInvalid);
InvalidateTLBForPage(aRomAddr); // remove all TLB references to original ROM page
inline void ZeroPdes(TLinAddr aBase, TLinAddr aEnd)
memclr(PageDirectory+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
void ArmMmu::ClearPageTable(TInt aId, TInt aFirstIndex)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ClearPageTable(%d,%d)",aId,aFirstIndex));
TPte* pte=PageTable(aId);
memclr(pte+aFirstIndex, KPageTableSize-aFirstIndex*sizeof(TPte));
__DRAIN_WRITE_BUFFER;
void ArmMmu::ClearRamDrive(TLinAddr aStart)
// clear the page directory entries corresponding to the RAM drive
ZeroPdes(aStart, KRamDriveEndAddress);
__DRAIN_WRITE_BUFFER;
void ArmMmu::ApplyTopLevelPermissions(TLinAddr aAddr, TUint aChunkSize, TPde aPdePerm)
__KTRACE_OPT(KMMU,Kern::Printf("ApplyTopLevelPermissions at %x",aAddr));
TInt pdeIndex=aAddr>>KChunkShift;
TInt numPdes=(aChunkSize+KChunkMask)>>KChunkShift;
TPde* pPde=PageDirectory+pdeIndex;
*pPde=(*pPde)?((*pPde & KPdePageTableAddrMask)|aPdePerm):0;
__DRAIN_WRITE_BUFFER;
void ArmMmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
__KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
aId, aPageOffset, aNumPages, aPtePerm));
TPte* pPte=PageTable(aId)+aPageOffset;
TPte* pPteEnd=pPte+aNumPages;
NKern::LockSystem();
for (; pPte<pPteEnd; ++pPte)
*pPte = (pte&KPteSmallPageAddrMask)|aPtePerm;
NKern::UnlockSystem();
__DRAIN_WRITE_BUFFER;
void ArmMmu::MoveChunk(TLinAddr aInitAddr, TUint aSize, TLinAddr aFinalAddr, TPde aPdePerm)
__KTRACE_OPT(KMMU,Kern::Printf("MoveChunk at %08x to %08x size %08x PdePerm %08x",
aInitAddr, aFinalAddr, aSize, aPdePerm));
TInt numPdes=(aSize+KChunkMask)>>KChunkShift;
TInt iS=aInitAddr>>KChunkShift;
TInt iD=aFinalAddr>>KChunkShift;
TPde* pS=PageDirectory+iS;
TPde* pD=PageDirectory+iD;
*pD++=(*pS)?((*pS & KPdePageTableAddrMask)|aPdePerm):0;
*pS++=KPdeNotPresentEntry;
__DRAIN_WRITE_BUFFER;
void ArmMmu::MoveChunk(TLinAddr aInitAddr, TLinAddr aFinalAddr, TInt aNumPdes)
// Move a block of PDEs without changing permissions. Must work with overlapping initial and final
// regions. Call this with kernel locked.
__KTRACE_OPT(KMMU,Kern::Printf("MoveChunk at %08x to %08x numPdes %d", aInitAddr, aFinalAddr, aNumPdes));
if (aInitAddr==aFinalAddr || aNumPdes==0)
TInt iS=aInitAddr>>KChunkShift;
TInt iD=aFinalAddr>>KChunkShift;
TBool forwardOverlap=(iS<iD && iD-iS<aNumPdes);
TBool backwardOverlap=(iS>iD && iS-iD<aNumPdes);
TInt iC=backwardOverlap?(iD+aNumPdes):iS; // first index to clear
TInt iZ=forwardOverlap?iD:(iS+aNumPdes); // last index to clear + 1
TPde* pS=PageDirectory+iS;
TPde* pD=PageDirectory+iD;
__KTRACE_OPT(KMMU,Kern::Printf("backwardOverlap=%d, forwardOverlap=%d",backwardOverlap,forwardOverlap));
__KTRACE_OPT(KMMU,Kern::Printf("first clear %03x, last clear %03x",iC,iZ));
wordmove(pD,pS,aNumPdes<<2); // move PDEs
pD=PageDirectory+iC; // pointer to first PDE to clear
iZ-=iC; // number of PDEs to clear
memclr(pD, iZ<<2); // clear PDEs
__DRAIN_WRITE_BUFFER;
TPde ArmMmu::PdePermissions(TChunkType aChunkType, TInt aChunkState)
if ((aChunkType==EUserData || aChunkType==EDllData || aChunkType==EUserSelfModCode
|| aChunkType==ESharedKernelSingle || aChunkType==ESharedKernelMultiple || aChunkType==ESharedIo)
return KUserDataRunningPermissions;
return ChunkPdePermissions[aChunkType];
TPte ArmMmu::PtePermissions(TChunkType aChunkType)
return ChunkPtePermissions[aChunkType];
const TUint FBLK=(EMapAttrFullyBlocking>>12);
const TUint BFNC=(EMapAttrBufferedNC>>12);
const TUint BUFC=(EMapAttrBufferedC>>12);
const TUint L1UN=(EMapAttrL1Uncached>>12);
const TUint WTRA=(EMapAttrCachedWTRA>>12);
const TUint WTWA=(EMapAttrCachedWTWA>>12);
const TUint WBRA=(EMapAttrCachedWBRA>>12);
const TUint WBWA=(EMapAttrCachedWBWA>>12);
const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
const TUint AWBW=(EMapAttrAltCacheWBWA>>12);
const TUint MAXC=(EMapAttrL1CachedMax>>12);
const TUint L2UN=(EMapAttrL2Uncached>>12);
const TUint16 UNS=0xffffu; // Unsupported attribute
const TUint16 SPE=0xfffeu; // Special processing required
#if defined(__CPU_ARM710T__) || defined(__CPU_ARM720T__)
// Original definition of C B
static const TUint16 CacheBuffAttributes[16]=
{0x00,0x00,0x04,0x04,0x0C,0x0C,0x0C,0x0C, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x0C};
static const TUint8 CacheBuffActual[16]=
{FBLK,FBLK,BUFC,BUFC,WTRA,WTRA,WTRA,WTRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WTRA};
#elif defined(__CPU_ARM920T__) || defined(__CPU_ARM925T__) || defined(__CPU_ARM926J__)
// Newer definition of C B
static const TUint16 CacheBuffAttributes[16]=
{0x00,0x00,0x04,0x04,0x08,0x08,0x0C,0x0C, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x0C};
static const TUint8 CacheBuffActual[16]=
{FBLK,FBLK,BUFC,BUFC,WTRA,WTRA,WBRA,WBRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBRA};
#elif defined(__CPU_SA1__)
// Special definition of C B
static const TUint16 CacheBuffAttributes[16]=
{0x00,0x00,0x04,0x04,0x04,0x04,0x0C,0x0C,0x04,0x04,0x08,0x08, UNS, UNS, UNS,0x0C};
static const TUint8 CacheBuffActual[16]=
{FBLK,FBLK,BUFC,BUFC,BUFC,BUFC,WBRA,WBRA,FBLK,FBLK,AWBR,AWBR,FBLK,FBLK,FBLK,WBRA};
#elif defined(__CPU_XSCALE__)
#ifdef __CPU_XSCALE_MANZANO__
#ifdef __HAS_EXTERNAL_CACHE__
// ***MANZANO with L2 cache****** //
// Specifies TEX:CB bits for different L1/L2 cache attributes
static const TUint16 CacheBuffAttributes[80]=
// FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA AWTR AWTW AWBR AWBW UNS UNS UNS MAXC L2CACHE:
0x00, 0x44, 0x40, 0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c, //NC
0x00, 0x44, 0x40, 0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c, //WTRA
0x00, 0x44, 0x40, 0x40, 0x108, 0x108, 0x10c, 0x10c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x10c, //WTWA
0x00, 0x44, 0x40, 0x140, 0x148, 0x148, 0x14c, 0x14c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x14c, //WBRA
0x00, 0x44, 0x40, 0x140, 0x148, 0x148, 0x14c, 0x14c, SPE, SPE, SPE, SPE, UNS,UNS,UNS,0x14c, //WBWA
extern TUint MiniCacheConfig();
// Converts page table attributes (TEX:CB) into appropriate cache attributes.
TInt CacheAttributesActual(TUint& cacheL1, TUint& cacheL2, TUint cbatt)
case 0: cacheL1 = FBLK; cacheL2 = L2UN; return KErrNone;
case 0x40: cacheL1 = L1UN; cacheL2 = L2UN; return KErrNone;
case 0x44: cacheL1 = BFNC; cacheL2 = L2UN; return KErrNone;
case 0x48: cacheL1 = MiniCacheConfig(); cacheL2 = L2UN; return KErrNone;
case 0x108: cacheL1 = WTRA; cacheL2 = L2UN; return KErrNone;
case 0x10c: cacheL1 = WBRA; cacheL2 = L2UN; return KErrNone;
case 0x140: cacheL1 = L1UN; cacheL2 = WBWA; return KErrNone;
case 0x148: cacheL1 = WTRA; cacheL2 = WBWA; return KErrNone;
case 0x14c: cacheL1 = WBRA; cacheL2 = WBWA; return KErrNone;
return KErrNotSupported;
#else //__HAS_EXTERNAL_CACHE__
// ***MANZANO without L2 cache****** //
static const TUint16 CacheBuffAttributes[16]=
// FBLK BFNC BUFC L1UN WTRA WTWA WBRA WBWA -----------AltCache-------- MAXC
{0x00,0x44,0x40,0x40,0x148,0x148,0x14C,0x14C,SPE,SPE,SPE,SPE,UNS,UNS,UNS,0x14C};
static const TUint8 CacheBuffActual[16]=
{FBLK,BFNC,BUFC,BUFC,WTRA,WTRA,WBRA,WBRA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBRA};
#endif //__HAS_EXTERNAL_CACHE__
// ***XSCALE that is not MANZANO (no L2 cache)****** //
static const TUint16 CacheBuffAttributes[16]=
{0x00,0x44,0x04,0x04,0x08,0x08,0x0C,0x4C,SPE,SPE,SPE,SPE,UNS,UNS,UNS,0x4C};
static const TUint8 CacheBuffActual[16]=
{FBLK,BFNC,BUFC,BUFC,WTRA,WTRA,WBRA,WBWA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBWA};
// ***Common code for all XSCALE cores*** //
extern TUint MiniCacheConfig();
void ProcessSpecialCacheAttr(TUint& cache, TUint& cbatt)
// If writeback requested, give writeback or writethrough
// If writethrough requested, give writethrough or uncached
// Give other allocation policy if necessary.
TUint mccfg=MiniCacheConfig();
__KTRACE_OPT(KMMU,Kern::Printf("MiniCacheConfig: %x",mccfg));
if (cache<AWBR && mccfg>=AWBR) // asked for WT but cache is set for WB
cache=BUFC; // so give uncached, buffered, coalescing
#if defined (__CPU_XSCALE_MANZANO__)
cache=mccfg; // give whatever minicache is configured for
cbatt=0x48; // minicache attributes
static const TUint8 ActualReadPrivilegeLevel[4]={4,1,4,4}; // RORO,RWNO,RWRO,RWRW
static const TUint8 ActualWritePrivilegeLevel[4]={0,1,1,4}; // RORO,RWNO,RWRO,RWRW
/** Calculates cb attributes for page table and sets actual cache attributes */
TInt GetCacheAttr(TUint& cacheL1, TUint& cacheL2, TUint& cbatt)
// Scale down L2 to 0-4 : NC, WTRA, WTWA, WBRA, WBWA
#if defined (__CPU_XSCALE_MANZANO__) && defined(__HAS_EXTERNAL_CACHE__)
if (cacheL2 == MAXC) cacheL2 = WBWA-3; // Scale down L2 cache attributes...
else if (cacheL2 > WBWA) return KErrNotSupported; // ... to 0-4 for...
else if (cacheL2 < WTRA) cacheL2 = L2UN; // ... L2UN to WBWA
cacheL2 = 0; // Either no L2 cache or L2 cache attributes will be just a copy of L1 cache attributes.
// Get cb page attributes. (On some platforms, tex bits are included as well.)
cbatt = CacheBuffAttributes[cacheL1 + (cacheL2<<4)];
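// The L1 attribute indexes within a 16-entry row; where an L2 cache is present
// (Manzano with __HAS_EXTERNAL_CACHE__) cacheL2 selects one of the five rows of the
// 80-entry table, and elsewhere cacheL2 has been forced to 0 so only the first row is used.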
__KTRACE_OPT(KMMU,Kern::Printf("GetCacheAttr, table returned:%x",cbatt));
#if defined(__CPU_XSCALE__)
// Check if altDCache/LLR cache attributes are defined
cacheL2 = 0; // Not L2 cached in such case
ProcessSpecialCacheAttr(cacheL1,cbatt);
__KTRACE_OPT(KMMU,Kern::Printf("GetCacheAttr, spec case returned:%x",cbatt));
return KErrNotSupported;
// Got CB page attributes. Now find out the actual cache attributes.
#if defined(__CPU_XSCALE_MANZANO__) && defined(__HAS_EXTERNAL_CACHE__)
r = CacheAttributesActual(cacheL1, cacheL2, cbatt);
cacheL1 = CacheBuffActual[cacheL1];
#if defined(__HAS_EXTERNAL_CACHE__)
TInt ArmMmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
__KTRACE_OPT(KMMU,Kern::Printf(">ArmMmu::PdePtePermissions, mapattr=%08x",aMapAttr));
TUint read=aMapAttr & EMapAttrReadMask;
TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
// if execute access is greater than read, adjust read (since there are no separate execute permissions on ARM)
ap=KArmV45PermRORO; // user and supervisor read-only
ap=KArmV45PermRWNO; // supervisor r/w user no access (since no RO/NO access is available)
// only supervisor can write
ap=KArmV45PermRWRO; // supervisor r/w user r/o
ap=KArmV45PermRWNO; // supervisor r/w user no access
ap=KArmV45PermRWRW; // supervisor r/w user r/w
read=ActualReadPrivilegeLevel[ap];
write=ActualWritePrivilegeLevel[ap];
#ifndef __CPU_USE_MMU_TEX_FIELD
ap|=(ap<<4); // replicate permissions in all four subpages
ap<<=4; // shift access permissions into correct position for PTE
ap|=KArmPteSmallPage; // add in mandatory small page bits
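// An ARMv4/v5 small-page PTE carries four 2-bit AP fields, one per 1K subpage; when the
// TEX field is not used the permission is replicated into every field so the whole 4K
// page gets uniform access rights, then shifted into the AP bit positions (bits 4..11).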
// Get cb attributes for the page table and the actual cache attributes
TUint cacheL1=(aMapAttr & EMapAttrL1CacheMask)>>12;
TUint cacheL2=(aMapAttr & EMapAttrL2CacheMask)>>16;
TInt r = GetCacheAttr(cacheL1, cacheL2, cbatt);
aPde=PT_PDE(EDomainClient);
aMapAttr=read|(write<<4)|(read<<8)|(cacheL1<<12)|(cacheL2<<16);
__KTRACE_OPT(KMMU,Kern::Printf("<ArmMmu::PdePtePermissions, r=%d, mapattr=%08x, pde=%08x, pte=%08x",
r,aMapAttr,aPde,aPte));
void ArmMmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
// Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
// Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
// Assume any page tables required are already assigned.
// aLinAddr, aPhysAddr, aSize must be page-aligned.
__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
__KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
TPde pt_pde=aPdePerm;
TPte sp_pte=aPtePerm;
TPde section_pde=SECTION_PDE_FROM_PDEPTE(pt_pde, sp_pte);
TPte lp_pte=LP_PTE_FROM_SP_PTE(sp_pte);
TLinAddr la=aLinAddr;
TPhysAddr pa=aPhysAddr;
if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
TInt npdes = remain>>KChunkShift;
TPde* p_pde = PageDirectory + (la>>KChunkShift);
TPde* p_pde_E = p_pde + npdes;
TPde pde = pa|section_pde;
NKern::LockSystem();
for (; p_pde < p_pde_E; pde+=KChunkSize)
__ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
__KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
NKern::UnlockSystem();
npdes<<=KChunkShift;
la+=npdes, pa+=npdes, remain-=npdes;
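// npdes has just been scaled from a PDE count to a byte count (1MB per PDE), so the
// linear address, physical address and remaining size all advance by whole sections.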
TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
TPte pa_mask=~KPageMask;
TPte pte_perm=sp_pte;
if (aMapShift>=KLargePageShift && block_size>=KLargePageSize)
if ((la & KLargePageMask)==0)
// use 64K large pages
pa_mask=~KLargePageMask;
block_size = Min(remain, KLargePageSize-(la&KLargePageMask));
block_size &= pa_mask;
// use pages (large or small)
TInt id = PageTableId(la);
__ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
TPte* p_pte = PageTable(id) + ((la&KChunkMask)>>KPageShift);
TPte* p_pte_E = p_pte + (block_size>>KPageShift);
SPageTableInfo& ptinfo = iPtInfo[id];
NKern::LockSystem();
for (; p_pte < p_pte_E; pa+=KPageSize)
__ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
TPte pte = (pa & pa_mask) | pte_perm;
__KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
NKern::FlashSystem();
NKern::UnlockSystem();
la+=block_size, remain-=block_size;
void ArmMmu::Unmap(TLinAddr aLinAddr, TInt aSize)
// Remove all mappings in the specified range of addresses.
// Assumes there are only global mappings involved.
// Don't free page tables.
// aLinAddr, aSize must be page-aligned.
__KTRACE_OPT(KMMU, Kern::Printf("ArmMmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
TLinAddr a=aLinAddr;
TLinAddr end=a+aSize;
__KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
NKern::LockSystem();
TInt pdeIndex=a>>KChunkShift;
TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
TInt to_do = Min(TInt(end-a), TInt(next-a))>>KPageShift;
__KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
TPde pde = PageDirectory[pdeIndex];
if ( (pde&KPdePresentMask)==KArmV45PdeSection )
__ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
PageDirectory[pdeIndex]=0;
InvalidateTLBForPage(a);
NKern::FlashSystem();
TInt ptid = GetPageTableId(a);
SPageTableInfo& ptinfo=iPtInfo[ptid];
TPte* ppte = PageTable(ptid) + ((a&KChunkMask)>>KPageShift);
TPte* ppte_End = ppte + to_do;
for (; ppte<ppte_End; ++ppte, a+=KPageSize)
TUint pte_type = *ppte & KPteTypeMask;
if (pte_type && pte_type != KArmV45PteLargePage)
InvalidateTLBForPage(a);
__ASSERT_DEBUG(!(a&KLargePageMask), MM::Panic(MM::EUnmapBadAlignment));
ptinfo.iCount-=KLargeSmallPageRatio;
memclr(ppte, KLargeSmallPageRatio*sizeof(TPte));
InvalidateTLBForPage(a);
a+=(KLargePageSize-KPageSize);
ppte+=(KLargeSmallPageRatio-1);
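// A 64K mapping occupies KLargeSmallPageRatio consecutive PTEs, so all of them were
// cleared in one memclr above and the loop variables are advanced past the remaining
// 15 entries before the next iteration.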
NKern::FlashSystem();
a += (to_do<<KPageShift);
NKern::UnlockSystem();
TInt ArmMmu::AllocDomain()
NKern::FMWait(&DomainLock);
r=__e32_find_ls1_32(Domains);
NKern::FMSignal(&DomainLock);
void ArmMmu::FreeDomain(TInt aDomain)
__ASSERT_ALWAYS(aDomain>=0 && aDomain<ENumDomains, MM::Panic(MM::EFreeInvalidDomain));
TUint32 m=1<<aDomain;
NKern::FMWait(&DomainLock);
__ASSERT_ALWAYS(!(Domains&m), MM::Panic(MM::EFreeDomainNotAllocated));
NKern::FMSignal(&DomainLock);
void ArmMmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
// map the pages at a temporary address, clear them and unmap
__ASSERT_MUTEX(RamAllocatorMutex);
while (--aNumPages >= 0)
if((TInt)aPageList&1)
pa = (TPhysAddr)aPageList&~1;
*(TPhysAddr*)&aPageList += iPageSize;
*iTempPte = pa | SP_PTE(KArmV45PermRWNO, KMemAttBuf);
__DRAIN_WRITE_BUFFER;
InvalidateTLBForPage(iTempAddr);
memset((TAny*)iTempAddr, aClearByte, iPageSize);
__DRAIN_WRITE_BUFFER;
InvalidateTLBForPage(iTempAddr);
TLinAddr DoMapTemp(TPhysAddr aPage, TBool aCached, TLinAddr aTempAddr, TPte* aTempPte)
__ASSERT_DEBUG(!*aTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
*aTempPte = (aPage&~KPageMask) | SP_PTE(KArmV45PermRWNO, aCached?KDefaultCaching:KMemAttBuf);
__DRAIN_WRITE_BUFFER;
1662 Create a temporary mapping of a physical page.
1663 The RamAllocatorMutex must be held before this function is called and not released
1664 until after UnmapTemp has been called.
1666 @param aPage The physical address of the page to be mapped.
1667 @param aCached Whether to map the page cached or not.
1669 @return The linear address of where the page has been mapped.
1671 TLinAddr ArmMmu::MapTemp(TPhysAddr aPage, TBool aCached)
1673 __ASSERT_MUTEX(RamAllocatorMutex);
1674 return DoMapTemp(aPage, aCached, iTempAddr, iTempPte);
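// Minimal usage sketch for the temporary mapping API. The MmuBase::Wait()/Signal()
// helpers for taking and releasing the RamAllocatorMutex, and the way the ArmMmu
// instance is obtained, are assumptions, not shown in this file:
//
//     ArmMmu& m = /* the ArmMmu singleton */;
//     MmuBase::Wait();                              // hold RamAllocatorMutex
//     TLinAddr va = m.MapTemp(physAddr, ETrue);     // cached temporary mapping
//     memclr((TAny*)va, m.iPageSize);               // ...use the mapping...
//     m.UnmapTemp();
//     MmuBase::Signal();                            // release RamAllocatorMutex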
1678 Create a temporary mapping of a physical page, distinct from that created by MapTemp.
1679 The RamAllocatorMutex must be held before this function is called and not released
1680 until after UnmapSecondTemp has been called.
1682 @param aPage The physical address of the page to be mapped.
1683 @param aCached Whether to map the page cached or not.
1685 @return The linear address of where the page has been mapped.
1687 TLinAddr ArmMmu::MapSecondTemp(TPhysAddr aPage, TBool aCached)
1689 __ASSERT_MUTEX(RamAllocatorMutex);
1690 return DoMapTemp(aPage, aCached, iSecondTempAddr, iSecondTempPte);
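// The second slot exists so that two physical pages can be mapped at the same time.
// A plausible (assumed) use, under the same mutex rules as above, is copying one
// page to another while moving its contents:
//
//     TLinAddr src = m.MapTemp(oldPhys, ETrue);
//     TLinAddr dst = m.MapSecondTemp(newPhys, ETrue);
//     memcpy((TAny*)dst, (const TAny*)src, m.iPageSize);
//     m.UnmapSecondTemp();
//     m.UnmapTemp();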
1693 void DoUnmapTemp(TLinAddr aTempAddr, TPte* aTempPte)
1696 __DRAIN_WRITE_BUFFER;
1697 InvalidateTLBForPage(aTempAddr);
1701 Remove the temporary mapping created with MapTemp.
1703 void ArmMmu::UnmapTemp()
1705 __ASSERT_MUTEX(RamAllocatorMutex);
1706 DoUnmapTemp(iTempAddr, iTempPte);
1710 Remove the temporary mapping created with MapSecondTemp.
1712 void ArmMmu::UnmapSecondTemp()
1714 __ASSERT_MUTEX(RamAllocatorMutex);
1715 DoUnmapTemp(iSecondTempAddr, iSecondTempPte);
1719 * Performs cache maintenance on the physical cache (VIPT & PIPT) for a page that is about to be reused.
1721 void ArmMmu::CacheMaintenanceOnDecommit(TPhysAddr aAddr)
1723 CacheMaintenance::PageToReusePhysicalCache(aAddr);
1726 void ArmMmu::CacheMaintenanceOnDecommit(const TPhysAddr* aAddr, TInt aCount)
1729 ArmMmu::CacheMaintenanceOnDecommit(*aAddr++);
1732 void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr, TUint)
1734 //Not required for moving memory model
1735 __ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
1738 void ArmMmu::CacheMaintenanceOnPreserve(const TPhysAddr*, TInt, TUint)
1740 //Not required for moving memory model
1741 __ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
1744 void ArmMmu::CacheMaintenanceOnPreserve(TPhysAddr , TInt , TLinAddr , TUint )
1746 //Not required for moving memory model
1747 __ASSERT_ALWAYS(0, Panic(ECacheMaintenance));
1751 TInt ArmMmu::UnlockRamCachePages(TUint8* volatile & aBase, TInt aStartPage, TInt aNumPages)
1753 NKern::LockSystem();
1756 TInt page = ((TLinAddr)aBase>>KPageShift)+aStartPage;
1757 TPde* pd = PageDirectory+(page>>(KChunkShift-KPageShift));
1758 TPte* pt = SafePageTableFromPde(*pd++);
1759 TInt pteIndex = page&(KChunkMask>>KPageShift);
1762 // whole page table has gone, so skip all pages in it...
1763 TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
1764 aNumPages -= pagesInPt;
1765 aStartPage += pagesInPt;
1768 NKern::UnlockSystem();
1774 TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
1775 if(pagesInPt>aNumPages)
1776 pagesInPt = aNumPages;
1777 if(pagesInPt>KMaxPages)
1778 pagesInPt = KMaxPages;
1780 aNumPages -= pagesInPt;
1781 aStartPage += pagesInPt;
1786 if(pte!=KPteNotPresentEntry) // pte may be 'not present' if the page has already been unlocked and reclaimed by the system
1787 iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
1793 NKern::UnlockSystem();
1797 pteIndex = aStartPage&(KChunkMask>>KPageShift);
1799 while(!NKern::FlashSystem() && pteIndex);
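// Locking note for the loop above: at most KMaxPages PTEs are examined per hold of
// the system lock, after which NKern::FlashSystem() gives other threads a chance to
// run; when pteIndex wraps to zero the enclosing loop moves on to the next page table.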
1804 TInt ArmMmu::LockRamCachePages(TUint8* volatile & aBase, TInt aStartPage, TInt aNumPages)
1806 NKern::LockSystem();
1809 TInt page = ((TLinAddr)aBase>>KPageShift)+aStartPage;
1810 TPde* pd = PageDirectory+(page>>(KChunkShift-KPageShift));
1811 TPte* pt = SafePageTableFromPde(*pd++);
1812 TInt pteIndex = page&(KChunkMask>>KPageShift);
1818 TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
1819 if(pagesInPt>aNumPages)
1820 pagesInPt = aNumPages;
1821 if(pagesInPt>KMaxPages)
1822 pagesInPt = KMaxPages;
1824 aNumPages -= pagesInPt;
1825 aStartPage += pagesInPt;
1830 if(pte==KPteNotPresentEntry)
1832 if(!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
1839 NKern::UnlockSystem();
1843 pteIndex = aStartPage&(KChunkMask>>KPageShift);
1845 while(!NKern::FlashSystem() && pteIndex);
1848 NKern::UnlockSystem();
1849 return KErrNotFound;
1853 void RamCache::SetFree(SPageInfo* aPageInfo)
1856 SPageInfo::TType type = aPageInfo->Type();
1857 if(type==SPageInfo::EPagedCache)
1859 TInt offset = aPageInfo->Offset()<<KPageShift;
1860 DArmPlatChunk* chunk = (DArmPlatChunk*)aPageInfo->Owner();
1861 __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
1862 TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
1863 TPte* pt = PtePtrFromLinAddr(lin);
1864 *pt = KPteNotPresentEntry;
1865 __DRAIN_WRITE_BUFFER;
1866 InvalidateTLBForPage(lin);
1867 ((ArmMmu*)iMmu)->SyncCodeMappings();
1868 CacheMaintenance::PageToReuseVirtualCache(lin);
1869 // actually decommit it from chunk...
1870 TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
1871 SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
1872 if(!--ptinfo.iCount)
1874 ((ArmMmu*)iMmu)->DoUnassignPageTable(lin);
1875 chunk->RemovePde(offset);
1876 NKern::UnlockSystem();
1877 ((ArmMmu*)iMmu)->FreePageTable(ptid);
1878 NKern::LockSystem();
1883 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
1884 Panic(EUnexpectedPageType);
1890 // MemModelDemandPaging
1893 class MemModelDemandPaging : public DemandPaging
1896 // From RamCacheBase
1897 virtual void Init2();
1898 virtual TInt Init3();
1899 virtual TBool PageUnmapped(SPageInfo* aPageInfo);
1900 // From DemandPaging
1901 virtual TInt Fault(TAny* aExceptionInfo);
1902 virtual void SetOld(SPageInfo* aPageInfo);
1903 virtual void SetFree(SPageInfo* aPageInfo);
1904 virtual void NotifyPageFree(TPhysAddr aPage);
1905 virtual TInt EnsurePagePresent(TLinAddr aPage, DProcess* aProcess);
1906 virtual TPhysAddr LinearToPhysical(TLinAddr aPage, DProcess* aProcess);
1907 virtual void AllocLoadAddress(DPagingRequest& aReq, TInt aDeviceId);
1908 virtual TInt PageState(TLinAddr aAddr);
1909 virtual TBool NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength);
1911 inline ArmMmu& Mmu() { return (ArmMmu&)*iMmu; }
1912 void InitRomPaging();
1913 void InitCodePaging();
1914 TInt HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TBool aInRom);
1915 TInt PageIn(TLinAddr aAddress, DMemModelCodeSegMemory* aCodeSegMemory);
1917 TLinAddr GetLinearAddress(SPageInfo* aPageInfo);
1922 // MemModelDemandPaging
1926 DemandPaging* DemandPaging::New()
1928 return new MemModelDemandPaging();
1932 void MemModelDemandPaging::Init2()
1934 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">MemModelDemandPaging::Init2"));
1935 DemandPaging::Init2();
1936 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init2"));
1940 void MemModelDemandPaging::AllocLoadAddress(DPagingRequest& aReq, TInt aReqId)
1942 aReq.iLoadAddr = iTempPages + aReqId * KPageSize;
1943 aReq.iLoadPte = PtePtrFromLinAddr(aReq.iLoadAddr);
1947 TInt MemModelDemandPaging::Init3()
1949 TInt r=DemandPaging::Init3();
1953 // Create a region for mapping pages during page in
1954 DPlatChunkHw* chunk;
1955 TInt chunkSize = KMaxPagingDevices * KPagingRequestsPerDevice * KPageSize;
1956 DPlatChunkHw::DoNew(chunk, KPhysAddrInvalid, chunkSize, EMapAttrSupRw|EMapAttrFullyBlocking);
1958 Panic(EInitialiseFailed);
1959 iTempPages = chunk->iLinAddr;
1961 if(RomPagingRequested())
1964 if (CodePagingRequested())
1967 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<MemModelDemandPaging::Init3"));
1972 void MemModelDemandPaging::InitRomPaging()
1974 // Make page tables for demand paged part of ROM...
1975 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("MemModelDemandPaging::Init3 making page tables for paged ROM"));
1976 TLinAddr lin = iRomPagedLinearBase&~KChunkMask; // first chunk with paged ROM in
1977 TLinAddr linEnd = iRomLinearBase+iRomSize;
1981 TInt ptid = Mmu().PageTableId(lin);
1985 ptid = Mmu().AllocPageTable();
1987 __NK_ASSERT_DEBUG(ptid>=0);
1988 Mmu().PtInfo(ptid).SetGlobal(lin >> KChunkShift);
1991 // Get new page table addresses
1992 TPte* pt = PageTable(ptid);
1993 TPhysAddr ptPhys=Mmu().LinearToPhysical((TLinAddr)pt);
1995 // Pointer to page directory entry
1996 TPde* ppde = PageDirectory + (lin>>KChunkShift);
1998 // Fill in Page Table
1999 TPte* ptEnd = pt+(1<<(KChunkShift-KPageShift));
2000 pt += (lin&KChunkMask)>>KPageShift;
2003 if(lin<iRomPagedLinearBase)
2004 *pt++ = Mmu().LinearToPhysical(lin) | KRomPtePermissions;
2006 *pt++ = KPteNotPresentEntry;
2009 while(pt<ptEnd && lin<=linEnd);
2010 __DRAIN_WRITE_BUFFER;
2012 // Add new Page Table to the Page Directory
2013 TPde newpde = ptPhys | KShadowPdePerm;
2014 __KTRACE_OPT2(KPAGING,KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
2015 TInt irq=NKern::DisableAllInterrupts();
2017 __DRAIN_WRITE_BUFFER;
2019 NKern::RestoreInterrupts(irq);
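// Net effect of the loop above: within each chunk the leading, unpaged part of the
// ROM (lin < iRomPagedLinearBase) is mapped immediately with KRomPtePermissions,
// while the demand-paged remainder is left as KPteNotPresentEntry so that the first
// access takes a paging fault and is paged in on demand.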
2024 void MemModelDemandPaging::InitCodePaging()
2026 // Initialise code paging info
2027 iCodeLinearBase = Mmu().iUserCodeBase;
2028 iCodeSize = Mmu().iMaxUserCodeSize;
2032 @return ETrue when the unmapped page should be freed, EFalse otherwise
2034 TBool MemModelDemandPaging::PageUnmapped(SPageInfo* aPageInfo)
2036 SPageInfo::TType type = aPageInfo->Type();
2038 if(type!=SPageInfo::EPagedCache && type!=SPageInfo::EPagedCode)
2040 __NK_ASSERT_DEBUG(type!=SPageInfo::EPagedData); // not supported yet
2044 RemovePage(aPageInfo);
2045 AddAsFreePage(aPageInfo);
2046 // Return false to stop DMemModelChunk::DoDecommit from freeing this page
2051 TLinAddr MemModelDemandPaging::GetLinearAddress(SPageInfo* aPageInfo)
2053 TInt offset = aPageInfo->Offset()<<KPageShift;
2054 SPageInfo::TType type = aPageInfo->Type();
2055 __NK_ASSERT_DEBUG(TUint(offset)<(type==SPageInfo::EPagedROM ? iRomSize : iCodeSize));
2056 TLinAddr base = type==SPageInfo::EPagedROM ? iRomLinearBase : iCodeLinearBase;
2057 return base + offset;
2061 void MemModelDemandPaging::SetOld(SPageInfo* aPageInfo)
2063 __NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedOld);
2064 SPageInfo::TType type = aPageInfo->Type();
2066 if(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode)
2068 START_PAGING_BENCHMARK;
2070 // get linear address of page...
2071 TLinAddr lin = GetLinearAddress(aPageInfo);
2073 // make page inaccessible...
2074 TPte* pt = PtePtrFromLinAddr(lin);
2075 *pt &= ~KPtePresentMask;
2076 __DRAIN_WRITE_BUFFER;
2077 InvalidateTLBForPage(lin);
2078 Mmu().SyncCodeMappings();
2080 if (type==SPageInfo::EPagedCode)
2081 END_PAGING_BENCHMARK(this, EPagingBmSetCodePageOld);
2083 else if(type==SPageInfo::EPagedCache)
2085 // leave page accessible
2087 else if(type!=SPageInfo::EPagedFree)
2089 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetOld() with bad page type = %d",aPageInfo->Type()));
2090 Panic(EUnexpectedPageType);
2092 NKern::FlashSystem();
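// Note that SetOld() only clears KPtePresentMask and leaves the rest of the PTE
// (physical address and permissions) intact; HandleFault() below can therefore
// rejuvenate an old page cheaply by OR-ing the small-page type back in, without
// re-reading anything from the backing store.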
2096 void MemModelDemandPaging::SetFree(SPageInfo* aPageInfo)
2098 __ASSERT_SYSTEM_LOCK;
2099 __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
2100 __NK_ASSERT_DEBUG(aPageInfo->State() == SPageInfo::EStatePagedDead);
2101 if(aPageInfo->LockCount())
2102 Panic(ERamPageLocked);
2104 SPageInfo::TType type = aPageInfo->Type();
2106 if(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode)
2108 START_PAGING_BENCHMARK;
2110 // get linear address of page...
2111 TLinAddr lin = GetLinearAddress(aPageInfo);
2114 TPte* pt = PtePtrFromLinAddr(lin);
2115 *pt = KPteNotPresentEntry;
2116 __DRAIN_WRITE_BUFFER;
2117 InvalidateTLBForPage(lin);
2118 Mmu().SyncCodeMappings();
2120 if (type==SPageInfo::EPagedCode)
2121 END_PAGING_BENCHMARK(this, EPagingBmSetCodePageFree);
2122 #ifdef BTRACE_PAGING
2123 TInt subCat = type==SPageInfo::EPagedCode ? BTrace::EPagingPageOutCode : BTrace::EPagingPageOutROM;
2124 TPhysAddr phys = aPageInfo->PhysAddr();
2125 BTraceContext8(BTrace::EPaging,subCat,phys,lin);
2128 else if(type==SPageInfo::EPagedCache)
2130 // get linear address of page...
2131 TInt offset = aPageInfo->Offset()<<KPageShift;
2132 DArmPlatChunk* chunk = (DArmPlatChunk*)aPageInfo->Owner();
2133 __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iMaxSize));
2134 TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
2137 TPte* pt = PtePtrFromLinAddr(lin);
2138 *pt = KPteNotPresentEntry;
2139 __DRAIN_WRITE_BUFFER;
2140 InvalidateTLBForPage(lin);
2141 Mmu().SyncCodeMappings();
2142 NKern::UnlockSystem();
2143 CacheMaintenance::PageToReuseVirtualCache(lin);
2144 NKern::LockSystem();
2146 // actually decommit it from chunk...
2147 TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
2148 SPageTableInfo& ptinfo=((ArmMmu*)iMmu)->iPtInfo[ptid];
2149 if(!--ptinfo.iCount)
2151 ((ArmMmu*)iMmu)->DoUnassignPageTable(lin);
2152 chunk->RemovePde(offset);
2153 NKern::UnlockSystem();
2154 ((ArmMmu*)iMmu)->FreePageTable(ptid);
2155 NKern::LockSystem();
2158 #ifdef BTRACE_PAGING
2159 TPhysAddr phys = aPageInfo->PhysAddr();
2160 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageOutCache,phys,lin);
2163 else if(type==SPageInfo::EPagedFree)
2166 #ifdef BTRACE_PAGING
2167 TPhysAddr phys = aPageInfo->PhysAddr();
2168 BTraceContext4(BTrace::EPaging,BTrace::EPagingPageOutFree,phys);
2170 // the external cache may not have been cleaned if PageUnmapped() was called
2171 CacheMaintenance::PageToReusePhysicalCache(aPageInfo->PhysAddr());
2175 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
2176 Panic(EUnexpectedPageType);
2178 NKern::FlashSystem();
2182 void MemModelDemandPaging::NotifyPageFree(TPhysAddr aPage)
2184 MM::Panic(MM::EOperationNotImplemented);
2189 Return ETrue if the exception was caused by a memory write access.
2190 This function can itself cause a paging exception!
2192 static TBool FaultDuringWrite(TArmExcInfo& aExc)
2194 // We can't decode Jazelle instructions to determine whether they faulted during a read.
2195 // Therefore we treat them as writes (which will panic the thread)...
2196 if(aExc.iCpsr&(1<<24))
2199 if(aExc.iCpsr&(1<<5))
2202 TUint32 op = *(TUint16*)aExc.iR15;
2206 if((op&0xfa00)==0x5000)
2207 return ETrue; // STR (2) and STRB (2)
2208 if((op&0xfe00)==0x5200)
2209 return ETrue; // STRH (2)
2212 return !(op&(1<<11)); // STR (1) and STRB (1)
2214 return !(op&(1<<11)); // STR (3) and STRH (1)
2216 return (op&0xfe00)==0xb400; // PUSH
2218 return (op&0xf800)==0xc000; // STMIA
2224 TUint32 op = *(TUint32*)aExc.iR15;
2230 if((op&0xf0)==(0xb0))
2231 return !(op&(1<<20)); // load/store halfword
2232 else if((op&0x0e1000f0)==(0x000000f0))
2233 return ETrue; // store double
2234 else if((op&0x0fb000f0) == 0x010000f0)
2235 return ETrue; // swap instruction
2236 else if((op&0x0ff000f0) == 0x01800090)
2237 return ETrue; // strex
2240 return !(op&(1<<20)); // load/store immediate
2243 return !(op&(1<<20)); // load/store register offset
2246 return !(op&(1<<20)); // load/store multiple
2248 return !(op&(1<<20)); // coproc store
2256 if((op&0xfe5f0f00)==(0xf84d0500))
2257 return ETrue; // SRS instructions
2260 return !(op&(1<<20)); // coproc store (STC2)
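// Two illustrative decodes as a sanity check of the masks above (standard ARM/Thumb
// encodings; the opcodes are example values, not taken from this file):
//   Thumb 0x6018 (STR r0,[r3,#0]): bit 11 (the load bit) is clear, so the
//   "STR (1) and STRB (1)" case returns ETrue.
//   ARM 0xE5801004 (STR r1,[r0,#4]): single data transfer with bit 20 (L) clear,
//   so the "load/store immediate" case returns ETrue.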
2268 TInt MemModelDemandPaging::Fault(TAny* aExceptionInfo)
2270 TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
2272 // Get faulting address
2273 TLinAddr faultAddress = exc.iFaultAddress;
2274 if(exc.iExcCode==EArmExceptionDataAbort)
2276 // Only handle page translation faults
2277 if((exc.iFaultStatus&0xf)!=0x7)
2279 // Let writes take an exception rather than page in any memory...
2280 if(FaultDuringWrite(exc))
2283 else if (exc.iExcCode != EArmExceptionPrefetchAbort)
2284 return KErrUnknown; // Not prefetch or data abort
2286 DThread* thread = TheCurrentThread;
2288 // check which region the fault occurred in...
2290 if(TUint(faultAddress-iRomPagedLinearBase)<iRomPagedSize)
2294 else if(TUint(faultAddress-iCodeLinearBase)<iCodeSize)
2300 return KErrUnknown; // Not in pageable region
2302 // Check if thread holds fast mutex and claim system lock
2303 NFastMutex* fm = NKern::HeldFastMutex();
2304 TPagingExcTrap* trap = thread->iPagingExcTrap;
2306 NKern::LockSystem();
2309 if(!trap || fm!=&TheScheduler.iLock)
2311 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Fault with FM Held! %x (%O pc=%x)",faultAddress,&Kern::CurrentThread(),exc.iR15));
2312 Panic(EPageFaultWhilstFMHeld); // Not allowed to hold mutexes
2315 // Current thread already has the system lock...
2316 NKern::FlashSystem(); // Let someone else have a go with the system lock.
2319 // System locked here
2322 if(thread->IsRealtime())
2323 r = CheckRealtimeThreadFault(thread, aExceptionInfo);
2325 r = HandleFault(exc, faultAddress, inRom);
2327 // Restore system lock state
2328 if (fm != NKern::HeldFastMutex())
2331 NKern::LockSystem();
2333 NKern::UnlockSystem();
2336 // Deal with XTRAP_PAGING
2337 if(r == KErrNone && trap)
2339 trap->Exception(1); // Return from exception trap with result '1' (value>0)
2340 // code doesn't continue beyond this point.
2347 TInt MemModelDemandPaging::HandleFault(TArmExcInfo& aExc, TLinAddr aFaultAddress, TBool aInRom)
2349 ++iEventInfo.iPageFaultCount;
2351 // get page table entry...
2352 TPte* pt = SafePtePtrFromLinAddr(aFaultAddress);
2354 return KErrNotFound;
2357 // Do what is required to make page accessible...
2359 if(pte&KPtePresentMask)
2361 // PTE is present, so assume it has already been dealt with
2362 #ifdef BTRACE_PAGING
2363 BTraceContext12(BTrace::EPaging,BTrace::EPagingPageNop,pte&~KPageMask,aFaultAddress,aExc.iR15);
2368 if(pte!=KPteNotPresentEntry)
2370 // PTE already has a page
2371 SPageInfo* pageInfo = SPageInfo::FromPhysAddr(pte);
2372 if(pageInfo->State()==SPageInfo::EStatePagedDead)
2374 // page currently being unmapped, so do that here...
2375 *pt = KPteNotPresentEntry; // Update page table
2376 __DRAIN_WRITE_BUFFER;
2380 // page just needs making young again...
2381 *pt = TPte(pte|KArmPteSmallPage); // Update page table
2382 __DRAIN_WRITE_BUFFER;
2383 Rejuvenate(pageInfo);
2384 #ifdef BTRACE_PAGING
2385 BTraceContext12(BTrace::EPaging,BTrace::EPagingRejuvenate,pte&~KPageMask,aFaultAddress,aExc.iR15);
2391 // PTE not present, so page it in...
2392 // check if fault in a CodeSeg...
2393 DMemModelCodeSegMemory* codeSegMemory = NULL;
2395 NKern::ThreadEnterCS();
2399 DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aFaultAddress);
2401 return KErrNotFound;
2402 codeSegMemory = codeSeg->Memory();
2403 if (codeSegMemory==0 || !codeSegMemory->iIsDemandPaged)
2404 return KErrNotFound;
2405 // open reference on CodeSegMemory
2406 NKern::ThreadEnterCS();
2410 codeSegMemory->Open();
2411 __NK_ASSERT_DEBUG(r==KErrNone);
2412 NKern::FlashSystem();
2415 #ifdef BTRACE_PAGING
2416 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageInBegin,aFaultAddress,aExc.iR15);
2419 TInt r = PageIn(aFaultAddress,codeSegMemory);
2421 NKern::UnlockSystem();
2424 codeSegMemory->Close();
2426 NKern::ThreadLeaveCS();
2432 TInt MemModelDemandPaging::PageIn(TLinAddr aAddress, DMemModelCodeSegMemory* aCodeSegMemory)
2434 // Get a request object - this may block until one is available
2435 DPagingRequest* req = AcquireRequestObject();
2437 // Get page table entry
2438 TPte* pt = SafePtePtrFromLinAddr(aAddress);
2440 // Check page is still required...
2441 if(!pt || *pt!=KPteNotPresentEntry)
2443 #ifdef BTRACE_PAGING
2444 BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
2446 ReleaseRequestObject(req);
2447 return pt ? KErrNone : KErrNotFound;
2450 ++iEventInfo.iPageInReadCount;
2453 SPageInfo* pageInfo = AllocateNewPage();
2454 __NK_ASSERT_DEBUG(pageInfo);
2456 // Get physical address of free page
2457 TPhysAddr phys = pageInfo->PhysAddr();
2458 __NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
2460 // Temporarily map free page
2461 TLinAddr loadAddr = req->iLoadAddr;
2463 *pt = phys | SP_PTE(KArmV45PermRWNO, KMemAttTempDemandPaging);
2464 __DRAIN_WRITE_BUFFER;
2466 // Read page from backing store
2467 aAddress &= ~KPageMask;
2468 NKern::UnlockSystem();
2471 if (!aCodeSegMemory)
2472 r = ReadRomPage(req, aAddress);
2475 r = ReadCodePage(req, aCodeSegMemory, aAddress);
2477 aCodeSegMemory->ApplyCodeFixups((TUint32*)loadAddr, aAddress);
2480 Panic(EPageInFailed);
2482 // make caches consistent (uncached memory is used for page loading)
2483 __DRAIN_WRITE_BUFFER;
2484 NKern::LockSystem();
2486 // Invalidate temporary mapping
2487 *pt = KPteNotPresentEntry;
2488 __DRAIN_WRITE_BUFFER;
2489 InvalidateTLBForPage(loadAddr);
2491 ReleaseRequestObject(req);
2493 // Get page table entry
2494 pt = SafePtePtrFromLinAddr(aAddress);
2496 // Check page still needs updating
2497 TBool notNeeded = pt==0 || *pt!=KPteNotPresentEntry;
2500 // We don't need the new page after all, so put it on the active list as a free page
2501 __KTRACE_OPT(KPAGING,Kern::Printf("DP: PageIn (New page not used)"));
2502 #ifdef BTRACE_PAGING
2503 BTraceContext0(BTrace::EPaging,BTrace::EPagingPageInUnneeded);
2505 AddAsFreePage(pageInfo);
2506 return pt ? KErrNone : KErrNotFound;
2510 if (!aCodeSegMemory)
2511 pageInfo->SetPagedROM((aAddress-iRomLinearBase)>>KPageShift);
2513 pageInfo->SetPagedCode(aCodeSegMemory,(aAddress-Mmu().iUserCodeBase)>>KPageShift);
2515 // Map page into final location
2516 *pt = phys | (aCodeSegMemory ? KUserCodeLoadPte : KRomPtePermissions);
2517 __DRAIN_WRITE_BUFFER;
2518 #ifdef BTRACE_PAGING
2519 TInt subCat = aCodeSegMemory ? BTrace::EPagingPageInCode : BTrace::EPagingPageInROM;
2520 BTraceContext8(BTrace::EPaging,subCat,phys,aAddress);
2523 AddAsYoungest(pageInfo);
2530 inline TUint8 ReadByte(TLinAddr aAddress)
2531 { return *(volatile TUint8*)aAddress; }
2534 TInt MemModelDemandPaging::EnsurePagePresent(TLinAddr aPage, DProcess* aProcess)
2536 XTRAPD(exc,XT_DEFAULT,XTRAP_PAGING_RETRY(CHECK_PAGING_SAFE; ReadByte(aPage);));
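// Reading a single byte under XTRAP_PAGING_RETRY is enough to force the page to be
// present: if the read faults, Fault() above pages the memory in and returns through
// trap->Exception(1), after which the trap machinery retries the read.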
2541 TPhysAddr MemModelDemandPaging::LinearToPhysical(TLinAddr aPage, DProcess* aProcess)
2543 return Mmu().LinearToPhysical(aPage);
2547 TInt MemModelDemandPaging::PageState(TLinAddr aAddr)
2552 SPageInfo* pageInfo = NULL;
2554 NKern::LockSystem();
2556 DMemModelCodeSegMemory* codeSegMemory = 0;
2557 if(TUint(aAddr-iRomPagedLinearBase)<iRomPagedSize)
2558 r |= EPageStateInRom;
2559 else if (TUint(aAddr-iCodeLinearBase)<iCodeSize)
2561 DMemModelCodeSeg* codeSeg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aAddr);
2563 codeSegMemory = codeSeg->Memory();
2566 r |= EPageStateInRamCode;
2567 if (codeSegMemory->iIsDemandPaged)
2568 r |= EPageStatePaged;
2572 ptePtr = SafePtePtrFromLinAddr(aAddr);
2575 r |= EPageStatePageTablePresent;
2577 if (pte == KPteNotPresentEntry)
2579 r |= EPageStatePtePresent;
2580 if (pte & KPtePresentMask)
2581 r |= EPageStatePteValid;
2583 pageInfo = SPageInfo::FromPhysAddr(pte);
2584 r |= pageInfo->Type();
2585 r |= pageInfo->State()<<8;
2588 NKern::UnlockSystem();
2593 TBool MemModelDemandPaging::NeedsMutexOrderCheck(TLinAddr aStartAddr, TUint aLength)
2595 // Don't check mutex order for reads from unpaged rom, kernel data area and kernel stack chunk
2596 TLinAddr endAddr = aStartAddr + aLength;
2597 TLinAddr stackBase = (TLinAddr)MM::SvStackChunk->Base();
2598 TLinAddr stackEnd = stackBase + MM::SvStackChunk->iMaxSize;
2599 TLinAddr unpagedRomEnd = iRomPagedLinearBase ? iRomPagedLinearBase : iRomLinearBase + iRomSize;
2600 TBool rangeInUnpagedRom = aStartAddr >= iRomLinearBase && endAddr <= unpagedRomEnd;
2601 TBool rangeInKernelData = aStartAddr >= KKernelDataBase && endAddr <= KKernelDataEnd;
2602 TBool rangeInKernelStack = aStartAddr >= stackBase && endAddr <= stackEnd;
2603 return !rangeInUnpagedRom && !rangeInKernelData && !rangeInKernelStack;
2607 EXPORT_C TBool DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
2609 MemModelDemandPaging* pager = (MemModelDemandPaging*)iThePager;
2612 ArmMmu& m = pager->Mmu();
2613 TLinAddr end = aStart+aSize;
2615 if ((aStart < TUint(pager->iRomPagedLinearBase+pager->iRomPagedSize) && end > pager->iRomPagedLinearBase) ||
2616 (aStart < TUint(m.iUserCodeBase + m.iMaxUserCodeSize) && end > m.iUserCodeBase))
2617 return pager->ReserveLock(aThread,aStart,aSize,*this);
2623 void ArmMmu::DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)
2625 // Mark the page at aOffset in aChunk inaccessible to prevent it being
2626 // modified while defrag is in progress. Save the required information
2627 // to allow the fault handler to deal with this.
2628 // Flush the cache for the page so that it can be safely aliased elsewhere during the move.
2630 // Call this with the system unlocked.
2633 __KTRACE_OPT(KMMU,Kern::Printf("ArmMmu::DisablePageModification() offset=%08x", aOffset));
2635 // Acquire the system lock here for atomic access to aChunk->iBase as moving
2636 // between the home and run addresses (a reschedule) may update aChunk->iBase.
2637 NKern::LockSystem();
2639 iDisabledAddr = (TLinAddr)(aChunk->iBase) + aOffset;
2640 TInt ptid=GetPageTableId(iDisabledAddr);
2642 Panic(EDefragDisablePageFailed);
2644 TPte* pPte = PageTable(ptid) + ((aOffset&KChunkMask)>>KPageShift);
2646 if ((pte & KPteTypeMask) != KArmPteSmallPage)
2647 Panic(EDefragDisablePageFailed);
2649 iDisabledPte = pPte;
2650 iDisabledOldVal = pte;
2653 __DRAIN_WRITE_BUFFER;
2654 InvalidateTLBForPage(iDisabledAddr);
2655 NKern::UnlockSystem();
2657 CacheMaintenance::PageToPreserveAndReuseVirtualCache(iDisabledAddr);
2658 __DRAIN_WRITE_BUFFER;
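// Lifecycle sketch of the protection set up here: the original PTE is saved in
// iDisabledPte/iDisabledOldVal and the entry is left inaccessible, so any thread
// that touches the page while defrag has it disabled takes a translation fault;
// RamDefragFault() below restores iDisabledOldVal, invalidates the TLB entry and
// lets the faulting instruction retry.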
2661 TBool FaultStatusFromLinAddr(TLinAddr aAddr, TBool aKernel, TUint32& aFaultStatus)
2662 // Walk the page tables looking for the given linear address. If access
2663 // would have caused a fault, return ETrue and fill in aFaultStatus with an
2664 // FSR value. Otherwise, return EFalse. Assumes the access was a read.
2666 TPde pde = PageDirectory[aAddr>>KChunkShift];
2667 TPde pdetype = pde & KPdeTypeMask;
2670 // section translation fault
2676 TInt domain = (pde >> 5) & 0xf;
2677 TUint32 dacr = Arm::Dacr();
2678 TInt domaccess = (dacr >> (domain<<1)) & 0x3;
2679 TInt ispage = (pdetype == KArmV45PdeSection) ? 0 : 0x2;
2683 pte = *PtePtrFromLinAddr(aAddr);
2684 if ((pte & KPteTypeMask) == 0)
2686 // page translation fault
2692 if (domaccess == 0x3)
2700 aFaultStatus = 0x9 | ispage;
2706 perms = (pte >> 4) & 0x3;
2708 perms = (pde >> 10) & 0x3;
2710 if (aKernel || perms != 0x1)
2714 aFaultStatus = 0xd | ispage;
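// For reference, the synthesised FSR values follow the standard ARM encodings:
// 0x5/0x7 section/page translation fault, 0x9/0xB section/page domain fault and
// 0xD/0xF section/page permission fault, with the "ispage" bit supplying the +2
// for the page-level variants.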
2718 TInt ArmMmu::RamDefragFault(TAny* aExceptionInfo)
2720 TArmExcInfo& exc=*(TArmExcInfo*)aExceptionInfo;
2722 // Get faulting address
2723 TLinAddr faultAddress;
2724 TBool prefetch=EFalse;
2725 if(exc.iExcCode==EArmExceptionDataAbort)
2727 // Only handle page translation faults
2728 if((exc.iFaultStatus & 0xf) != 0x7)
2730 faultAddress = exc.iFaultAddress;
2732 else if(exc.iExcCode==EArmExceptionPrefetchAbort)
2735 faultAddress = exc.iR15;
2738 return KErrUnknown; // Not data/prefetch abort
2740 TBool kernelmode = (exc.iCpsr&EMaskMode) != EUserMode; // parentheses needed: != binds more tightly than &
2742 // Take system lock if not already held
2743 NFastMutex* fm = NKern::HeldFastMutex();
2745 NKern::LockSystem();
2746 else if(fm!=&TheScheduler.iLock)
2748 __KTRACE_OPT2(KMMU,KPANIC,Kern::Printf("Defrag: Fault with FM Held! %x (%O pc=%x)",faultAddress,TheCurrentThread,exc.iR15));
2749 Panic(EDefragFaultWhilstFMHeld); // Not allowed to hold mutexes
2752 TInt r = KErrUnknown;
2754 // check if the mapping of the page has already been restored and retry if so
2758 if (!FaultStatusFromLinAddr(faultAddress, kernelmode, fsr))
2766 TPte* pt = SafePtePtrFromLinAddr(faultAddress);
2772 if ((*pt & 0x3) != 0)
2779 // check if the fault occurred in the page we are moving
2780 if (iDisabledPte && TUint(faultAddress - iDisabledAddr) < TUint(KPageSize))
2782 // restore access to the page
2783 *iDisabledPte = iDisabledOldVal;
2784 __DRAIN_WRITE_BUFFER;
2785 InvalidateTLBForPage(iDisabledAddr);
2787 iDisabledPte = NULL;
2788 iDisabledOldVal = 0;
2793 // Restore system lock state
2795 NKern::UnlockSystem();