Update contrib.
// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// e32\memmodel\epoc\multiple\x86\xmmu.cpp
//

#include "x86_mem.h"
#include <mmubase.inl>

extern "C" void DoTotalInvalidateTLB();
// Constants for X86 MMU
const TUint32 KPdePtePresent=0x01;
const TUint32 KPdePteWrite=0x02;
const TUint32 KPdePteUser=0x04;
const TUint32 KPdePteWriteThrough=0x08;
const TUint32 KPdePteUncached=0x10;
const TUint32 KPdePteAccessed=0x20;
const TUint32 KPdePteDirty=0x40;
const TUint32 KPdeLargePage=0x80;               // Pentium and above, not 486
const TUint32 KPdePteGlobal=0x100;              // P6 and above, not 486 or Pentium
const TUint32 KPdePtePhysAddrMask=0xfffff000u;
const TUint32 KPdeLargePagePhysAddrMask=0xffc00000u;    // Pentium and above, not 486
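// Worked example (illustrative, not from the original source): a present,
// writable, global kernel mapping of the physical page at 0x00123000 is
//   TPte pte = 0x00123000 | KPdePtePresent | KPdePteWrite | KPdePteGlobal;
// giving pte==0x00123103: bits 31..12 hold the page frame base address and
// the low bits hold the x86 P, R/W and G flags.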
const TPde KPdPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPdPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KPtPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPtPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KPtInfoPdePerm=KPdePtePresent|KPdePteWrite;
const TPte KPtInfoPtePerm=KPdePtePresent|KPdePteWrite;
const TPde KRomPdePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KRomPtePerm=KPdePtePresent|KPdePteUser;
const TPde KShadowPdePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KShadowPtePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;  // unfortunately there's no RWRO
// Permissions for each chunk type

const TPde KStandardPtePerm=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KPdePermNONO=KPdePtePresent|KPdePteWrite|KPdePteUser;
const TPte KPdePermRONO=KPdePtePresent;
const TPte KPdePermRORO=KPdePtePresent|KPdePteUser;
const TPte KPdePermRWNO=KPdePtePresent|KPdePteWrite;
const TPte KPdePermRWRW=KPdePtePresent|KPdePteWrite|KPdePteUser;
LOCAL_D const TPte ChunkPtePermissions[ENumChunkTypes] =
    {
    KStandardPtePerm|KPdePteGlobal, // EKernelData
    KStandardPtePerm|KPdePteGlobal, // EKernelStack
    KPdePermRWNO|KPdePteGlobal,     // EKernelCode - loading
    KPdePermRWNO,                   // EDll (used for global code) - loading
    KPdePermRORO,                   // EUserCode
    KStandardPtePerm,               // ERamDrive
    KStandardPtePerm,               // EUserData
    KStandardPtePerm,               // EDllData
    KStandardPtePerm,               // EUserSelfModCode
    KStandardPtePerm,               // ESharedKernelSingle
    KStandardPtePerm,               // ESharedKernelMultiple
    KStandardPtePerm,               // ESharedIo
    KStandardPtePerm|KPdePteGlobal, // ESharedKernelMirror
    KStandardPtePerm|KPdePteGlobal, // EKernelMessage
    };
LOCAL_D const TPde ChunkPdePermissions[ENumChunkTypes] =
    {
    KPdePermRWNO,   // EKernelData
    KPdePermRWNO,   // EKernelStack
    KPdePermRWNO,   // EKernelCode
    KPdePermRWRW,   // EDll
    KPdePermRWRW,   // EUserCode
    KPdePermRWRW,   // ERamDrive
    KPdePermRWRW,   // EUserData
    KPdePermRWRW,   // EDllData
    KPdePermRWRW,   // EUserSelfModCode
    KPdePermRWRW,   // ESharedKernelSingle
    KPdePermRWRW,   // ESharedKernelMultiple
    KPdePermRWRW,   // ESharedIo
    KPdePermRWNO,   // ESharedKernelMirror
    KPdePermRWNO,   // EKernelMessage
    };
extern "C" void __DebugMsgFlushTLB()
    {
    __KTRACE_OPT(KMMU,Kern::Printf("FlushTLB"));
    }

extern "C" void __DebugMsgLocalFlushTLB()
    {
    __KTRACE_OPT(KMMU,Kern::Printf("LocalFlushTLB"));
    }

extern "C" void __DebugMsgTotalFlushTLB()
    {
    __KTRACE_OPT(KMMU,Kern::Printf("TotalFlushTLB"));
    }

extern "C" void __DebugMsgINVLPG(int a)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("INVLPG(%08x)",a));
    }
// Inline functions for simple transformations
inline TLinAddr PageTableLinAddr(TInt aId)
    {
    return (KPageTableBase+(aId<<KPageTableShift));
    }

inline TPte* PageTable(TInt aId)
    {
    return (TPte*)(KPageTableBase+(aId<<KPageTableShift));
    }

inline TLinAddr PageDirectoryLinAddr(TInt aOsAsid)
    {
    return (KPageDirectoryBase+(aOsAsid<<KPageTableShift));
    }
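// Worked example (illustrative): with KPageTableShift==12 these helpers pack
// one 4KB table per page, so PageTable(5) addresses the PTE array at
// KPageTableBase+0x5000 and PageDirectoryLinAddr(3) yields
// KPageDirectoryBase+0x3000, one 4KB page directory per OS ASID.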
void __fastcall DoInvalidateTLBForPage(TLinAddr /*aLinAddr*/);
void DoInvalidateTLB();
void DoLocalInvalidateTLB();

#ifdef __SMP__
TSpinLock ShadowSpinLock(TSpinLock::EOrderGenericPreHigh0);  // Used when stopping other CPUs

class TTLBIPI : public TGenericIPI
    {
public:
    TTLBIPI();
    static void InvalidateForPagesIsr(TGenericIPI*);
    static void LocalInvalidateIsr(TGenericIPI*);
    static void TotalInvalidateIsr(TGenericIPI*);
    static void InvalidateIsr(TGenericIPI*);
    static void WaitAndInvalidateIsr(TGenericIPI*);
    void AddAddress(TLinAddr aAddr);
    void InvalidateList();
public:
    volatile TInt iFlag;
    volatile TInt iCount;
    TLinAddr iAddr[KMaxPages];
    };

TTLBIPI::TTLBIPI()
    :   iFlag(0), iCount(0)
    {
    }
void TTLBIPI::LocalInvalidateIsr(TGenericIPI*)
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("TLBLocInv"));
    DoLocalInvalidateTLB();
    }

void TTLBIPI::TotalInvalidateIsr(TGenericIPI*)
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("TLBTotInv"));
    DoTotalInvalidateTLB();
    }

void TTLBIPI::InvalidateIsr(TGenericIPI*)
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("TLBInv"));
    DoInvalidateTLB();
    }

void TTLBIPI::WaitAndInvalidateIsr(TGenericIPI* aTLBIPI)
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("TLBWtInv"));
    TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
    while (!a.iFlag)
        {}
    if (a.iCount == 1)
        DoInvalidateTLBForPage(a.iAddr[0]);
    else
        DoInvalidateTLB();
    }

void TTLBIPI::InvalidateForPagesIsr(TGenericIPI* aTLBIPI)
    {
    TTLBIPI& a = *(TTLBIPI*)aTLBIPI;
    TInt i;
    for (i=0; i<a.iCount; ++i)
        {
        __KTRACE_OPT(KMMU2,Kern::Printf("TLBInv %08x", a.iAddr[i]));
        DoInvalidateTLBForPage(a.iAddr[i]);
        }
    }

void TTLBIPI::AddAddress(TLinAddr aAddr)
    {
    iAddr[iCount] = aAddr;
    if (++iCount == KMaxPages)
        InvalidateList();
    }

void TTLBIPI::InvalidateList()
    {
    NKern::Lock();
    InvalidateForPagesIsr(this);
    QueueAllOther(&InvalidateForPagesIsr);
    NKern::Unlock();
    WaitCompletion();
    iCount = 0;
    }
void LocalInvalidateTLB()
    {
    TTLBIPI ipi;
    NKern::Lock();
    DoLocalInvalidateTLB();
    ipi.QueueAllOther(&TTLBIPI::LocalInvalidateIsr);
    NKern::Unlock();
    ipi.WaitCompletion();
    }

void TotalInvalidateTLB()
    {
    TTLBIPI ipi;
    NKern::Lock();
    DoTotalInvalidateTLB();
    ipi.QueueAllOther(&TTLBIPI::TotalInvalidateIsr);
    NKern::Unlock();
    ipi.WaitCompletion();
    }

void InvalidateTLB()
    {
    TTLBIPI ipi;
    NKern::Lock();
    DoInvalidateTLB();
    ipi.QueueAllOther(&TTLBIPI::InvalidateIsr);
    NKern::Unlock();
    ipi.WaitCompletion();
    }

void InvalidateTLBForPage(TLinAddr aAddr)
    {
    TTLBIPI ipi;
    ipi.AddAddress(aAddr);
    ipi.InvalidateList();
    }
#else
#define InvalidateTLBForPage(a)     DoInvalidateTLBForPage(a)
#define LocalInvalidateTLB()        DoLocalInvalidateTLB()
#define TotalInvalidateTLB()        DoTotalInvalidateTLB()
#define InvalidateTLB()             DoInvalidateTLB()
#endif
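// Illustrative usage (not from the original source): on SMP the wrappers above
// broadcast an IPI so every CPU invalidates its own TLB; on UP they collapse to
// the plain Do*() routines. The typical caller pattern after clearing a PTE is:
//   *pPte = 0;
//   __DRAIN_WRITE_BUFFER;
//   InvalidateTLBForPage(addr);   // IPI broadcast on SMP, single INVLPG on UP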
TPte* SafePageTableFromPde(TPde aPde)
    {
    if (aPde&KPdePtePresent)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aPde);
        if (pi)
            {
            TInt id=pi->Offset();   // assumes page table size = page size
            return PageTable(id);
            }
        }
    return 0;
    }

TPte* SafePtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
    {
    TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
    TPte* pt = SafePageTableFromPde(pde);
    if (pt)
        pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
    return pt;
    }
TPte* PtePtrFromLinAddr(TLinAddr aAddress, TInt aOsAsid=0)
    {
    TPde pde = PageDirectory(aOsAsid)[aAddress>>KChunkShift];
    SPageInfo* pi = SPageInfo::FromPhysAddr(pde);
    TInt id = (pi->Offset()<<KPtClusterShift) | ((pde>>KPageTableShift)&KPtClusterMask);
    TPte* pt = PageTable(id);
    pt += (aAddress>>KPageShift)&(KChunkMask>>KPageShift);
    return pt;
    }
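// Worked example (illustrative): for aAddress==0x40123456 with 4MB chunks and
// 4KB pages the two-level walk uses
//   PDE index:  aAddress>>KChunkShift                           == 0x100
//   PTE index:  (aAddress>>KPageShift)&(KChunkMask>>KPageShift) == 0x123
//   byte offset within the page: aAddress&KPageMask             == 0x456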
TInt X86Mmu::LinearToPhysical(TLinAddr aAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList, TInt aOsAsid)
    {
    TPhysAddr physStart = LinearToPhysical(aAddr,aOsAsid);

    TInt pageShift = iPageShift;
    TUint32 page = aAddr>>pageShift<<pageShift;
    TUint32 lastPage = (aAddr+aSize-1)>>pageShift<<pageShift;
    TUint32* pageList = aPhysicalPageList;
    TUint32 nextPhys = LinearToPhysical(page,aOsAsid);
    TUint32 pageSize = 1<<pageShift;
    while(page<=lastPage)
        {
        TPhysAddr phys = LinearToPhysical(page,aOsAsid);
        if (pageList)
            *pageList++ = phys;
        if (phys!=nextPhys)
            nextPhys = KPhysAddrInvalid;
        else
            nextPhys += pageSize;
        page += pageSize;
        }
    if (nextPhys==KPhysAddrInvalid)
        {
        // Memory is discontiguous...
        aPhysicalAddress = KPhysAddrInvalid;
        return 1;
        }
    else
        {
        // Memory is contiguous...
        aPhysicalAddress = physStart;
        return KErrNone;
        }
    }
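// Illustrative usage (hypothetical caller, for exposition only): checking
// whether a three-page buffer is physically contiguous:
//   TPhysAddr base;
//   TPhysAddr pages[3];
//   TInt r = ::TheMmu.LinearToPhysical(addr, 3*KPageSize, base, pages, asid);
//   // KErrNone => contiguous (base valid); a positive return means
//   // discontiguous, with pages[] still holding the per-page addresses.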
TPhysAddr X86Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
//
// Find the physical address corresponding to a given linear address in a specified OS
// address space. Call with system locked.
//
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("X86Mmu::LinearToPhysical(%08x,%d)",aLinAddr,aOsAsid));
    TInt pdeIndex=aLinAddr>>KChunkShift;
    TPde pde=PageDirectory(aOsAsid)[pdeIndex];
    TPhysAddr pa=KPhysAddrInvalid;
    if (pde & KPdePtePresent)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            {
            TInt id=pi->Offset();   // assumes page table size = page size
            TPte* pPte=PageTable(id);
            TPte pte=pPte[(aLinAddr&KChunkMask)>>KPageShift];
            if (pte & KPdePtePresent)
                {
                pa=(pte&KPdePtePhysAddrMask)+(aLinAddr&KPageMask);
                __KTRACE_OPT(KMMU2,Kern::Printf("Mapped with page table - returning %08x",pa));
                }
            }
        }
    return pa;
    }
TInt X86Mmu::PreparePagesForDMA(TLinAddr /*aLinAddr*/, TInt /*aSize*/, TInt /*aOsAsid*/, TPhysAddr* /*aPhysicalPageList*/)
    {
    return KErrNotSupported;
    }

TInt X86Mmu::ReleasePagesFromDMA(TPhysAddr* /*aPhysicalPageList*/, TInt /*aPageCount*/)
    {
    return KErrNotSupported;
    }
static const TInt PermissionLookup[8]=
    {
    0,                                                      // not present
    EMapAttrReadSup|EMapAttrExecSup,                        // present, supervisor read-only
    0,                                                      // not present
    EMapAttrWriteSup|EMapAttrReadSup|EMapAttrExecSup,       // present, supervisor r/w
    0,                                                      // not present
    EMapAttrReadUser|EMapAttrExecUser,                      // present, user read-only
    0,                                                      // not present
    EMapAttrWriteUser|EMapAttrReadUser|EMapAttrExecUser     // present, user r/w
    };
TInt X86Mmu::PageTableId(TLinAddr aAddr, TInt aOsAsid)
    {
    TInt id=KErrNotFound;
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::PageTableId(%08x,%d)",aAddr,aOsAsid));
    TInt pdeIndex=aAddr>>KChunkShift;
    TPde pde=PageDirectory(aOsAsid)[pdeIndex];
    if (pde & KPdePtePresent)
        {
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            id=pi->Offset();    // assumes page table size = page size
        }
    __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
    return id;
    }
// Used only during boot for recovery of RAM drive
TInt X86Mmu::BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)
    {
    TInt id=KErrNotFound;
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:BootPageTableId(%08x,&)",aAddr));
    TPde* kpd=(TPde*)KPageDirectoryBase;    // kernel page directory
    TInt pdeIndex=aAddr>>KChunkShift;
    TPde pde = kpd[pdeIndex];
    if (pde & KPdePtePresent)
        {
        aPtPhys = pde & KPdePtePhysAddrMask;
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pde);
        if (pi)
            {
            SPageInfo::TType type = pi->Type();
            if (type == SPageInfo::EPageTable)
                id=pi->Offset();    // assumes page table size = page size
            else if (type == SPageInfo::EUnused)
                id = KErrUnknown;
            }
        }
    __KTRACE_OPT(KMMU,Kern::Printf("ID=%d",id));
    return id;
    }
TBool X86Mmu::PteIsPresent(TPte aPte)
    {
    return aPte & KPdePtePresent;
    }

TPhysAddr X86Mmu::PtePhysAddr(TPte aPte, TInt /*aPteIndex*/)
    {
    return aPte & KPdePtePhysAddrMask;
    }

TPhysAddr X86Mmu::PdePhysAddr(TLinAddr aAddr)
    {
    TPde* kpd = (TPde*)KPageDirectoryBase;  // kernel page directory
    TPde pde = kpd[aAddr>>KChunkShift];
    if ((pde & (KPdePtePresent|KPdeLargePage)) == (KPdePtePresent|KPdeLargePage))
        return pde & KPdeLargePagePhysAddrMask;
    return KPhysAddrInvalid;
    }
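// Worked example (illustrative): a 4MB large-page PDE mapping physical
// 0x00800000 reads 0x00800000|KPdePtePresent|KPdePteWrite|KPdeLargePage
// == 0x00800083, from which PdePhysAddr() recovers 0x00800000 via
// KPdeLargePagePhysAddrMask.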
void X86Mmu::Init1()
    {
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("X86Mmu::Init1"));

    TUint pge = TheSuperPage().iCpuId & EX86Feat_PGE;
    iPteGlobal = pge ? KPdePteGlobal : 0;
    X86_UseGlobalPTEs = pge!=0;
    iPageSize=KPageSize;
    iPageMask=KPageMask;
    iPageShift=KPageShift;
    iChunkSize=KChunkSize;
    iChunkMask=KChunkMask;
    iChunkShift=KChunkShift;
    iPageTableSize=KPageTableSize;
    iPageTableMask=KPageTableMask;
    iPageTableShift=KPageTableShift;
    iPtClusterSize=KPtClusterSize;
    iPtClusterMask=KPtClusterMask;
    iPtClusterShift=KPtClusterShift;
    iPtBlockSize=KPtBlockSize;
    iPtBlockMask=KPtBlockMask;
    iPtBlockShift=KPtBlockShift;
    iPtGroupSize=KChunkSize/KPageTableSize;
    iPtGroupMask=iPtGroupSize-1;
    iPtGroupShift=iChunkShift-iPageTableShift;
    //TInt* iPtBlockCount;      // dynamically allocated - Init2
    //TInt* iPtGroupCount;      // dynamically allocated - Init2
    iPtInfo=(SPageTableInfo*)KPageTableInfoBase;
    iPageTableLinBase=KPageTableBase;
    //iRamPageAllocator;        // dynamically allocated - Init2
    //iAsyncFreeList;           // dynamically allocated - Init2
    //iPageTableAllocator;      // dynamically allocated - Init2
    //iPageTableLinearAllocator;// dynamically allocated - Init2
    iPtInfoPtePerm=KPtInfoPtePerm|iPteGlobal;
    iPtPtePerm=KPtPtePerm|iPteGlobal;
    iPtPdePerm=KPtPdePerm;
    iUserCodeLoadPtePerm=KPdePermRWNO;
    iKernelCodePtePerm=KPdePermRONO|iPteGlobal;
    iTempAddr=KTempAddr;
    iSecondTempAddr=KSecondTempAddr;
    TUint pse = TheSuperPage().iCpuId & EX86Feat_PSE;
    iMapSizes = pse ? KPageSize|KChunkSize : KPageSize;

    iDecommitThreshold=0;   // no cache consistency issues on decommit
    iRomLinearBase = ::RomHeaderAddress;
    iRomLinearEnd = KRomLinearEnd;
    iShadowPtePerm = KShadowPtePerm;
    iShadowPdePerm = KShadowPdePerm;

    TInt total_ram=TheSuperPage().iTotalRamSize;

    iNumGlobalPageDirs=1;
    //iOsAsidAllocator;         // dynamically allocated - Init2
    iGlobalPdSize=KPageTableSize;
    iGlobalPdShift=KPageTableShift;

    iAsidGroupSize=KChunkSize/KPageTableSize;
    iAsidGroupMask=iAsidGroupSize-1;
    iAsidGroupShift=iChunkShift-iGlobalPdShift;
    iAliasSize=KPageSize;
    iAliasMask=KPageMask;
    iAliasShift=KPageShift;
    iUserLocalBase=KUserLocalDataBase;
    iUserSharedBase=KUserSharedDataBase;
    iAsidInfo=(TUint32*)KAsidInfoBase;
    iPdeBase=KPageDirectoryBase;
    iPdPtePerm=KPdPtePerm|iPteGlobal;
    iPdPdePerm=KPdPdePerm;
    iRamDriveMask=0x00f00000;
    iGlobalCodePtePerm=KPdePermRORO|iPteGlobal;

    iMaxDllDataSize=Min(total_ram/2, 0x08000000);               // phys RAM/2 up to 128Mb
    iMaxDllDataSize=(iMaxDllDataSize+iChunkMask)&~iChunkMask;   // round up to chunk size
    iMaxUserCodeSize=Min(total_ram, 0x10000000);                // phys RAM up to 256Mb
    iMaxUserCodeSize=(iMaxUserCodeSize+iChunkMask)&~iChunkMask; // round up to chunk size
    iUserLocalEnd=iUserSharedBase-iMaxDllDataSize;
    iUserSharedEnd=KUserSharedDataEnd-iMaxUserCodeSize;
    iDllDataBase=iUserLocalEnd;
    iUserCodeBase=iUserSharedEnd;
    __KTRACE_OPT(KMMU,Kern::Printf("ULB %08x ULE %08x USB %08x USE %08x",iUserLocalBase,iUserLocalEnd,
                                                                        iUserSharedBase,iUserSharedEnd));
    __KTRACE_OPT(KMMU,Kern::Printf("DDB %08x UCB %08x",iDllDataBase,iUserCodeBase));
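    // Worked example (illustrative): with 64MB of RAM, iMaxDllDataSize is
    // Min(32MB,128MB)=32MB and iMaxUserCodeSize is Min(64MB,256MB)=64MB, so
    // iUserLocalEnd sits 32MB below iUserSharedBase and iUserCodeBase 64MB
    // below KUserSharedDataEnd.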
    PP::MaxUserThreadStack=0x14000;         // 80K - STDLIB asks for 64K for PosixServer!!!!
    PP::UserThreadStackGuard=0x2000;        // 8K
    PP::MaxStackSpacePerProcess=0x200000;   // 2Mb
    K::SupervisorThreadStackSize=0x1000;    // 4K
    PP::SupervisorThreadStackGuard=0x1000;  // 4K
    K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
    PP::RamDriveStartAddress=KRamDriveStartAddress;
    PP::RamDriveRange=KRamDriveMaxSize;
    PP::RamDriveMaxSize=KRamDriveMaxSize;   // may be reduced later
    K::MemModelAttributes=EMemModelTypeMultiple|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
                        EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
                        EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;
#ifdef __SMP__
    ApTrampolinePage = KApTrampolinePageLin;

    TInt i;
    for (i=0; i<KMaxCpus; ++i)
        {
        TSubScheduler& ss = TheSubSchedulers[i];
        TLinAddr a = KIPCAlias + (i<<KChunkShift);
        ss.i_AliasLinAddr = (TAny*)a;
        ss.i_AliasPdePtr = (TAny*)(KPageDirectoryBase + (a>>KChunkShift)*sizeof(TPde));
        }
#endif
    }
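// Worked example (illustrative): on CPU 2 the IPC alias chunk starts at
// KIPCAlias+0x800000 (i<<KChunkShift with KChunkShift==22), and i_AliasPdePtr
// is the kernel page directory slot covering that address, i.e.
// KPageDirectoryBase + ((KIPCAlias+0x800000)>>22)*sizeof(TPde).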
void X86Mmu::DoInit2()
    {
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("X86Mmu::DoInit2"));
    iTempPte=PageTable(PageTableId(iTempAddr,0))+((iTempAddr&KChunkMask)>>KPageShift);
    iSecondTempPte=PageTable(PageTableId(iSecondTempAddr,0))+((iSecondTempAddr&KChunkMask)>>KPageShift);
    __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iTempAddr=%08x, iTempPte=%08x, iSecondTempAddr=%08x, iSecondTempPte=%08x",
            iTempAddr, iTempPte, iSecondTempAddr, iSecondTempPte));
    CreateKernelSection(KKernelSectionEnd, iAliasShift);
    CreateUserGlobalSection(KUserGlobalDataBase, KUserGlobalDataEnd);
    iUserHwChunkAllocator=THwChunkAddressAllocator::New(0, iUserGlobalSection);
    __ASSERT_ALWAYS(iUserHwChunkAllocator, Panic(ECreateUserGlobalSectionFailed));
    }
#ifndef __MMU_MACHINE_CODED__
void X86Mmu::MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)
//
// Map a list of physical RAM pages into a specified page table with specified PTE permissions.
// Update the page information array.
// Call this with the system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::MapRamPages() id=%d type=%d ptr=%08x off=%08x n=%d perm=%08x",
            aId, aType, aPtr, aOffset, aNumPages, aPtePerm));

    SPageTableInfo& ptinfo=iPtInfo[aId];
    ptinfo.iCount+=aNumPages;
    aOffset>>=KPageShift;
    TInt ptOffset=aOffset & KPagesInPDEMask;    // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;         // address of first PTE
    while(aNumPages--)
        {
        TPhysAddr pa = *aPageList++;
        *pPte++ = pa | aPtePerm;                // insert PTE
        __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
        if (aType!=SPageInfo::EInvalid)
            {
            SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
            if (pi)
                {
                pi->Set(aType,aPtr,aOffset);
                __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
                ++aOffset;  // increment offset for next page
                }
            }
        }
    __DRAIN_WRITE_BUFFER;
    }
void X86Mmu::MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)
//
// Map consecutive physical pages into a specified page table with specified PTE permissions.
// Update the page information array if RAM pages are being mapped.
// Call this with the system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::MapPhysicalPages() id=%d type=%d ptr=%08x off=%08x phys=%08x n=%d perm=%08x",
            aId, aType, aPtr, aOffset, aPhysAddr, aNumPages, aPtePerm));
    SPageTableInfo& ptinfo=iPtInfo[aId];
    ptinfo.iCount+=aNumPages;
    aOffset>>=KPageShift;
    TInt ptOffset=aOffset & KPagesInPDEMask;            // entry number in page table
    TPte* pPte=(TPte*)(PageTableLinAddr(aId))+ptOffset; // address of first PTE
    SPageInfo* pi;
    if (aType==SPageInfo::EInvalid)
        pi = NULL;
    else
        pi = SPageInfo::SafeFromPhysAddr(aPhysAddr);
    while(aNumPages--)
        {
        *pPte++ = aPhysAddr|aPtePerm;   // insert PTE
        aPhysAddr+=KPageSize;
        __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x",pPte[-1],pPte-1));
        if (pi)
            {
            pi->Set(aType,aPtr,aOffset);
            ++aOffset;  // increment offset for next page
            __KTRACE_OPT(KMMU,Kern::Printf("I: %d %08x %08x",aType,aPtr,aOffset));
            pi++;
            }
        }
    __DRAIN_WRITE_BUFFER;
    }
void X86Mmu::MapVirtual(TInt /*aId*/, TInt /*aNumPages*/)
//
// Used in the implementation of demand paging - not supported on x86
//
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPage(TInt /*aId*/, TUint32 /*aAddr*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/, TPte /*aPtePerm*/, DProcess* /*aProcess*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPageByAsid(TBitMapAllocator* /*aOsAsids*/, TLinAddr /*aLinAddr*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/, TPte /*aPtePerm*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }
TInt X86Mmu::UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess*)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::UnmapPages() id=%d off=%08x n=%d pl=%08x set-free=%08x",aId,aAddr,aNumPages,aPageList,aSetPagesFree));
    TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;   // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;             // address of first PTE
    TInt np=0;
    TInt nf=0;
#ifdef __SMP__
    TTLBIPI ipi;
#endif
    while(aNumPages--)
        {
        TPte pte=*pPte;                 // get original PTE
        *pPte++=0;                      // clear PTE
        if (pte & KPdePtePresent)
            {
#ifdef __SMP__
            ipi.AddAddress(aAddr);
#else
            InvalidateTLBForPage(aAddr);    // flush any corresponding TLB entry
#endif
            ++np;                       // count unmapped pages
            TPhysAddr pa=pte & KPdePtePhysAddrMask; // physical address of unmapped page
            if (aSetPagesFree)
                {
                SPageInfo* pi = SPageInfo::FromPhysAddr(pa);
                if (iRamCache->PageUnmapped(pi))
                    {
                    pi->SetUnused();            // mark page as unused
                    if (pi->LockCount()==0)
                        {
                        *aPageList++=pa;        // store in page list
                        ++nf;                   // count free pages
                        }
                    }
                }
            else
                *aPageList++=pa;                // store in page list
            }
        aAddr+=KPageSize;
        }
#ifdef __SMP__
    ipi.InvalidateList();
#endif
    aNumPtes=np;
    aNumFree=nf;
    SPageTableInfo& ptinfo=iPtInfo[aId];
    TInt r=(ptinfo.iCount-=np);
    __DRAIN_WRITE_BUFFER;
    __KTRACE_OPT(KMMU,Kern::Printf("Pages recovered %d Pages remaining %d NF=%d",np,r,nf));
    return r;                           // return number of pages remaining in this page table
    }
TInt X86Mmu::UnmapUnownedPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TLinAddr* aLAPageList, TInt& aNumPtes, TInt& aNumFree, DProcess*)
//
// Unmap a specified area at address aAddr in page table aId. Place physical addresses of unmapped
// pages into aPageList, and count of unmapped pages into aNumPtes.
// Return number of pages still mapped using this page table.
// Call this with the system locked.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::UnmapUnownedPages() id=%d off=%08x n=%d pl=%08x",aId,aAddr,aNumPages,aPageList));
    TInt ptOffset=(aAddr&KChunkMask)>>KPageShift;   // entry number in page table
    TPte* pPte=PageTable(aId)+ptOffset;             // address of first PTE
    TInt np=0;
    TInt nf=0;
#ifdef __SMP__
    TTLBIPI ipi;
#endif
    while(aNumPages--)
        {
        TPte pte=*pPte;                 // get original PTE
        *pPte++=0;                      // clear PTE
        if (pte & KPdePtePresent)
            {
#ifdef __SMP__
            ipi.AddAddress(aAddr);
#else
            InvalidateTLBForPage(aAddr);    // flush any corresponding TLB entry
#endif
            ++np;                       // count unmapped pages
            TPhysAddr pa=pte & KPdePtePhysAddrMask; // physical address of unmapped page
            ++nf;
            *aPageList++=pa;            // store in page list
            *aLAPageList++ = aAddr;
            }
        aAddr+=KPageSize;
        }
#ifdef __SMP__
    ipi.InvalidateList();
#endif
    aNumPtes=np;
    aNumFree=nf;
    SPageTableInfo& ptinfo=iPtInfo[aId];
    TInt r=(ptinfo.iCount-=np);
    __DRAIN_WRITE_BUFFER;
    __KTRACE_OPT(KMMU,Kern::Printf("Pages recovered %d Pages remaining %d NF=%d",np,r,nf));
    return r;                           // return number of pages remaining in this page table
    }
TInt X86Mmu::UnmapVirtual(TInt /*aId*/, TUint32 /*aAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TBool /*aSetPagesFree*/, TInt& /*aNumPtes*/, TInt& /*aNumFree*/, DProcess* /*aProcess*/)
//
// Used in the implementation of demand paging - not supported on x86
//
    {
    MM::Panic(MM::EOperationNotSupported);
    return 0; // keep compiler happy
    }

TInt X86Mmu::UnmapUnownedVirtual(TInt /*aId*/, TUint32 /*aAddr*/, TInt /*aNumPages*/, TPhysAddr* /*aPageList*/, TLinAddr* /*aLALinAddr*/, TInt& /*aNumPtes*/, TInt& /*aNumFree*/, DProcess* /*aProcess*/)
//
// Used in the implementation of demand paging - not supported on x86
//
    {
    MM::Panic(MM::EOperationNotSupported);
    return 0; // keep compiler happy
    }
void X86Mmu::DoAssignPageTable(TInt aId, TLinAddr aAddr, TPde aPdePerm, const TAny* aOsAsids)
//
// Assign an allocated page table to map a given linear address with specified permissions.
// This should be called with the system unlocked and the MMU mutex held.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::DoAssignPageTable %d to %08x perm %08x asid %08x",aId,aAddr,aPdePerm,aOsAsids));
    TLinAddr ptLin=PageTableLinAddr(aId);
    TPhysAddr ptPhys=LinearToPhysical(ptLin,0);
    TInt pdeIndex=TInt(aAddr>>KChunkShift);
    TInt os_asid=(TInt)aOsAsids;
    if (TUint32(os_asid)<TUint32(iNumOsAsids))
        {
        // single OS ASID
        TPde* pageDir=PageDirectory(os_asid);
        NKern::LockSystem();
        pageDir[pdeIndex]=ptPhys|aPdePerm;
        NKern::UnlockSystem();
        __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
        }
    else
        {
        // selection of OS ASIDs or all OS ASIDs
        const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
        if (os_asid==-1)
            pB=iOsAsidAllocator;    // 0's in positions which exist
        TInt num_os_asids=pB->iSize-pB->iAvail;
        for (os_asid=0; num_os_asids; ++os_asid)
            {
            if (pB->NotAllocated(os_asid,1))
                continue;           // os_asid is not needed
            TPde* pageDir=PageDirectory(os_asid);
            NKern::LockSystem();
            pageDir[pdeIndex]=ptPhys|aPdePerm;
            NKern::UnlockSystem();
            __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x",ptPhys|aPdePerm,pageDir+pdeIndex));
            --num_os_asids;
            }
        }
    __DRAIN_WRITE_BUFFER;
    }
void X86Mmu::RemapPageTableSingle(TPhysAddr /*aOld*/, TPhysAddr /*aNew*/, TLinAddr /*aAddr*/, TInt /*aOsAsid*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPageTableGlobal(TPhysAddr /*aOld*/, TPhysAddr /*aNew*/, TLinAddr /*aAddr*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPageTableMultiple(TPhysAddr /*aOld*/, TPhysAddr /*aNew*/, TLinAddr /*aAddr*/, const TAny* /*aOsAsids*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

void X86Mmu::RemapPageTableAliases(TPhysAddr /*aOld*/, TPhysAddr /*aNew*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }
void X86Mmu::DoUnassignPageTable(TLinAddr aAddr, const TAny* aOsAsids)
//
// Unassign a now-empty page table currently mapping the specified linear address.
// We assume that TLB and/or cache flushing has been done when any RAM pages were unmapped.
// This should be called with the system unlocked and the MMU mutex held.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::DoUnassignPageTable at %08x a=%08x",aAddr,aOsAsids));
    TInt pdeIndex=TInt(aAddr>>KChunkShift);
    TInt os_asid=(TInt)aOsAsids;
    TPde pde=0;

    SDblQue checkedList;
    SDblQueLink* next;

    if (TUint32(os_asid)<TUint32(iNumOsAsids))
        {
        // single OS ASID
        TPde* pageDir=PageDirectory(os_asid);
        NKern::LockSystem();
        pde = pageDir[pdeIndex];
        pageDir[pdeIndex]=0;
        __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));

        // remove any aliases of the page table...
        TUint ptId = pde>>KPageTableShift;
        while(!iAliasList.IsEmpty())
            {
            next = iAliasList.First()->Deque();
            checkedList.Add(next);
            DMemModelThread* thread = _LOFF(next, DMemModelThread, iAliasLink);
            if (thread->iAliasOsAsid==os_asid && (thread->iAliasPde>>KPageTableShift)==ptId)
                {
                // the page table is being aliased by the thread, so remove it...
                thread->iAliasPde = 0;
                }
            NKern::FlashSystem();
            }
        }
    else
        {
        // selection of OS ASIDs or all OS ASIDs
        const TBitMapAllocator* pB=(const TBitMapAllocator*)aOsAsids;
        if (os_asid==-1)
            pB=iOsAsidAllocator;    // 0's in positions which exist
        TInt num_os_asids=pB->iSize-pB->iAvail;
        for (os_asid=0; num_os_asids; ++os_asid)
            {
            if (pB->NotAllocated(os_asid,1))
                continue;           // os_asid is not needed
            TPde* pageDir=PageDirectory(os_asid);
            NKern::LockSystem();
            pde = pageDir[pdeIndex];
            pageDir[pdeIndex]=0;
            NKern::UnlockSystem();
            __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x",pageDir+pdeIndex));
            --num_os_asids;
            }

        // remove any aliases of the page table...
        TUint ptId = pde>>KPageTableShift;
        NKern::LockSystem();
        while(!iAliasList.IsEmpty())
            {
            next = iAliasList.First()->Deque();
            checkedList.Add(next);
            DMemModelThread* thread = _LOFF(next, DMemModelThread, iAliasLink);
            if ((thread->iAliasPde>>KPageTableShift)==ptId && !pB->NotAllocated(thread->iAliasOsAsid,1))
                {
                // the page table is being aliased by the thread, so remove it...
                thread->iAliasPde = 0;
                }
            NKern::FlashSystem();
            }
        }

    // copy checkedList back to iAliasList
    iAliasList.MoveFrom(&checkedList);

    NKern::UnlockSystem();

    __DRAIN_WRITE_BUFFER;   // because page tables have been updated
    }
945 // Initialise page table at physical address aXptPhys to be used as page table aXptId
946 // to expand the virtual address range used for mapping page tables. Map the page table
947 // at aPhysAddr as page table aId using the expanded range.
948 // Assign aXptPhys to kernel's Page Directory.
949 // Called with system unlocked and MMU mutex held.
950 void X86Mmu::BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)
952 __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::BootstrapPageTable xptid=%04x, xptphys=%08x, id=%04x, phys=%08x",
953 aXptId, aXptPhys, aId, aPhysAddr));
955 // put in a temporary mapping for aXptPhys
956 *iTempPte = aXptPhys | KPtPtePerm | iPteGlobal;
957 __DRAIN_WRITE_BUFFER;
960 TPte* xpt=(TPte*)iTempAddr;
961 memclr(xpt, KPageSize);
964 xpt[aXptId & KPagesInPDEMask] = aXptPhys | KPtPtePerm | iPteGlobal;
966 // map other page table
967 xpt[aId & KPagesInPDEMask] = aPhysAddr | KPtPtePerm | iPteGlobal;
969 // remove temporary mapping
971 __DRAIN_WRITE_BUFFER;
972 InvalidateTLBForPage(iTempAddr);
974 // initialise PtInfo...
975 TLinAddr xptAddr = PageTableLinAddr(aXptId);
976 iPtInfo[aXptId].SetGlobal(xptAddr>>KChunkShift);
979 TInt pdeIndex=TInt(xptAddr>>KChunkShift);
980 TPde* pageDir=PageDirectory(0);
982 pageDir[pdeIndex]=aXptPhys|KPtPdePerm;
983 __DRAIN_WRITE_BUFFER;
984 NKern::UnlockSystem();
void X86Mmu::FixupXPageTable(TInt /*aId*/, TLinAddr /*aTempMap*/, TPhysAddr /*aOld*/, TPhysAddr /*aNew*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }
TInt X86Mmu::NewPageDirectory(TInt aOsAsid, TBool aSeparateGlobal, TPhysAddr& aPhysAddr, TInt& aNumPages)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::NewPageDirectory(%d,%d)",aOsAsid,aSeparateGlobal));
    TInt r=AllocRamPages(&aPhysAddr,1, EPageFixed);
    if (r!=KErrNone)
        return r;
#ifdef BTRACE_KERNEL_MEMORY
    BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
    Epoc::KernelMiscPages += 1;
#endif
    SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
    NKern::LockSystem();
    pi->SetPageDir(aOsAsid,0);
    NKern::UnlockSystem();
    aNumPages=1;
    return KErrNone;
    }
inline void CopyPdes(TPde* aDest, const TPde* aSrc, TLinAddr aBase, TLinAddr aEnd)
    {
    memcpy(aDest+(aBase>>KChunkShift), aSrc+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
    }

inline void ZeroPdes(TPde* aDest, TLinAddr aBase, TLinAddr aEnd)
    {
    memclr(aDest+(aBase>>KChunkShift), ((aEnd-aBase)>>KChunkShift)*sizeof(TPde));
    }
void X86Mmu::InitPageDirectory(TInt aOsAsid, TBool)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::InitPageDirectory(%d)",aOsAsid));
    TPde* newpd=PageDirectory(aOsAsid);                 // new page directory
    const TPde* kpd=(const TPde*)KPageDirectoryBase;    // kernel page directory
    ZeroPdes(newpd, 0x00000000, KUserSharedDataEnd);    // clear user mapping area
    ZeroPdes(newpd, KRamDriveStartAddress, KRamDriveEndAddress);    // don't copy RAM drive
    CopyPdes(newpd, kpd, KRomLinearBase, KUserGlobalDataEnd);       // copy ROM + user global
    CopyPdes(newpd, kpd, KRamDriveEndAddress, 0x00000000);          // copy kernel mappings
    __DRAIN_WRITE_BUFFER;
    }
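// Note (illustrative): passing 0x00000000 as aEnd relies on unsigned 32-bit
// wrap-around, so the second CopyPdes call above copies every kernel PDE from
// KRamDriveEndAddress up to the top of the 4GB address space.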
void X86Mmu::ClearPageTable(TInt aId, TInt aFirstIndex)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:ClearPageTable(%d,%d)",aId,aFirstIndex));
    TPte* pte=PageTable(aId);
    memclr(pte+aFirstIndex, KPageSize-aFirstIndex*sizeof(TPte));
    __DRAIN_WRITE_BUFFER;
    }
void X86Mmu::ApplyTopLevelPermissions(TLinAddr aAddr, TInt aOsAsid, TInt aNumPdes, TPde aPdePerm)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::ApplyTopLevelPermissions %04x:%08x->%08x count %d",
            aOsAsid, aAddr, aPdePerm, aNumPdes));
    TInt ix=aAddr>>KChunkShift;
    TPde* pPde=PageDirectory(aOsAsid)+ix;
    TPde* pPdeEnd=pPde+aNumPdes;
    NKern::LockSystem();
    for (; pPde<pPdeEnd; ++pPde)
        {
        TPde pde=*pPde;
        if (pde)
            *pPde = (pde&KPdePtePhysAddrMask)|aPdePerm;
        }
    NKern::UnlockSystem();
    (aAddr>=KUserSharedDataEnd) ? InvalidateTLB() : LocalInvalidateTLB();
    __DRAIN_WRITE_BUFFER;
    }
void X86Mmu::ApplyPagePermissions(TInt aId, TInt aPageOffset, TInt aNumPages, TPte aPtePerm)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu::ApplyPagePermissions %04x:%03x+%03x perm %08x",
            aId, aPageOffset, aNumPages, aPtePerm));
    TPte* pPte=PageTable(aId)+aPageOffset;
    TPde* pPteEnd=pPte+aNumPages;
    TPte g=0;
    NKern::LockSystem();
    for (; pPte<pPteEnd; ++pPte)
        {
        TPte pte=*pPte;
        g |= pte;
        if (pte)
            *pPte = (pte&KPdePtePhysAddrMask)|aPtePerm;
        }
    NKern::UnlockSystem();
    (g & KPdePteGlobal) ? InvalidateTLB() : LocalInvalidateTLB();
    __DRAIN_WRITE_BUFFER;
    }
// Set up a page table (specified by aId) to map a 4Mb section of ROM containing aRomAddr
// using ROM at aOrigPhys.
void X86Mmu::InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
    {
    (void)aId, (void)aRomAddr, (void)aOrigPhys;
    FAULT();    // Never used
#if 0
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:InitShadowPageTable id=%04x aRomAddr=%08x aOrigPhys=%08x",
            aId, aRomAddr, aOrigPhys));
    TPte* ppte = PageTable(aId);
    TPte* ppte_End = ppte + KChunkSize/KPageSize;
    TPhysAddr phys = aOrigPhys - (aRomAddr & KChunkMask);
    for (; ppte<ppte_End; ++ppte, phys+=KPageSize)
        *ppte = phys | KRomPtePerm;
    __DRAIN_WRITE_BUFFER;
#endif
    }
// Copy the contents of ROM at aRomAddr to a shadow page at physical address aShadowPhys
void X86Mmu::InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:InitShadowPage aShadowPhys=%08x aRomAddr=%08x",
            aShadowPhys, aRomAddr));

    // put in a temporary mapping for aShadowPhys
    // make it noncacheable
    *iTempPte = aShadowPhys | KPtPtePerm | iPteGlobal;
    __DRAIN_WRITE_BUFFER;

    // copy contents of ROM
    wordmove( (TAny*)iTempAddr, (const TAny*)aRomAddr, KPageSize );
    __DRAIN_WRITE_BUFFER;   // make sure contents are written to memory

    // remove temporary mapping
    *iTempPte=0;
    __DRAIN_WRITE_BUFFER;
    InvalidateTLBForPage(iTempAddr);
    }
// Assign a shadow page table to replace a ROM section mapping
// Enter and return with system locked
void X86Mmu::AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:AssignShadowPageTable aId=%04x aRomAddr=%08x",
            aId, aRomAddr));
    TLinAddr ptLin=PageTableLinAddr(aId);
    TPhysAddr ptPhys=LinearToPhysical(ptLin, 0);
    TPde* ppde = ::InitPageDirectory + (aRomAddr>>KChunkShift);
    TPde newpde = ptPhys | KShadowPdePerm;
    __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", newpde, ppde));
#ifdef __SMP__
    TTLBIPI ipi;
    NKern::Lock();              // stop other processors passing this point
    ShadowSpinLock.LockOnly();
    ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
    ipi.WaitEntry();            // wait for other processors to stop in the ISR
#endif
    TInt irq=NKern::DisableAllInterrupts();
    *ppde = newpde;             // map in the page table
    __DRAIN_WRITE_BUFFER;       // make sure new PDE written to main memory
    DoInvalidateTLB();          // completely flush TLB
    NKern::RestoreInterrupts(irq);
#ifdef __SMP__
    ipi.iFlag = 1;              // release other processors so they can flush their TLBs
    ipi.WaitCompletion();       // wait for other processors to flush their TLBs
    ShadowSpinLock.UnlockOnly();
    NKern::Unlock();
#endif
    }
void X86Mmu::DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:DoUnmapShadowPage, id=%04x lin=%08x origphys=%08x", aId, aRomAddr, aOrigPhys));
    TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
    TPte newpte = aOrigPhys | KRomPtePerm;
    __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
#ifdef __SMP__
    TTLBIPI ipi;
    ipi.AddAddress(aRomAddr);
    NKern::Lock();              // stop other processors passing this point
    ShadowSpinLock.LockOnly();
    ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
    ipi.WaitEntry();            // wait for other processors to stop
#endif
    TInt irq=NKern::DisableAllInterrupts();
    *ppte = newpte;
    __DRAIN_WRITE_BUFFER;
    DoInvalidateTLBForPage(aRomAddr);
    NKern::RestoreInterrupts(irq);
#ifdef __SMP__
    ipi.iFlag = 1;              // release other processors so they can flush their TLBs
    ipi.WaitCompletion();       // wait for other processors to flush their TLBs
    ShadowSpinLock.UnlockOnly();
    NKern::Unlock();
#endif
    }
TInt X86Mmu::UnassignShadowPageTable(TLinAddr /*aRomAddr*/, TPhysAddr /*aOrigPhys*/)
    {
    // not used since we use page mappings for the ROM
    return KErrGeneral;
    }
TInt X86Mmu::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:CopyToShadowMemory aDest=%08x aSrc=%08x aLength=%08x", aDest, aSrc, aLength));

    // Check that destination is ROM
    if (aDest<iRomLinearBase || (aDest+aLength) > iRomLinearEnd)
        {
        __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:CopyToShadowMemory: Destination not entirely in ROM"));
        return KErrArgument;
        }

    // do operation with RamAlloc mutex held (to prevent shadow pages from being released from under us)
    Kern::MutexWait(*RamAllocatorMutex);

    TInt r = KErrNone;
    while (aLength)
        {
        // Calculate memory size to copy in this loop. A single page region will be copied per loop
        TInt copySize = Min(aLength, iPageSize - (aDest&iPageMask));

        // Get physical address
        TPhysAddr physAddr = LinearToPhysical(aDest&~iPageMask, 0);
        if (KPhysAddrInvalid==physAddr)
            {
            r = KErrArgument;
            break;
            }

        //check whether it is shadowed rom
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
        if (pi==0 || pi->Type()!=SPageInfo::EShadow)
            {
            __KTRACE_OPT(KMMU,Kern::Printf("X86Mmu:CopyToShadowMemory: No shadow page at this address"));
            r = KErrArgument;
            break;
            }

        //Temporarily map into writable memory and copy data. RamAllocator DMutex is required
        TLinAddr tempAddr = MapTemp (physAddr, aDest&~iPageMask);
        __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:CopyToShadowMemory Copy aDest=%08x aSrc=%08x aSize=%08x", tempAddr+(aDest&iPageMask), aSrc, copySize));
        memcpy ((TAny*)(tempAddr+(aDest&iPageMask)), (const TAny*)aSrc, copySize); //Kernel-to-Kernel copy is presumed
        UnmapTemp();

        //Update variables for the next loop/page
        aDest+=copySize;
        aSrc+=copySize;
        aLength-=copySize;
        }

    Kern::MutexSignal(*RamAllocatorMutex);
    return r;
    }
void X86Mmu::DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu:DoFreezeShadowPage aId=%04x aRomAddr=%08x",
            aId, aRomAddr));
    TPte* ppte = PageTable(aId) + ((aRomAddr & KChunkMask)>>KPageShift);
    TPte newpte = (*ppte & KPdePtePhysAddrMask) | KRomPtePerm;
    __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", newpte, ppte));
    *ppte = newpte;
    __DRAIN_WRITE_BUFFER;
    InvalidateTLBForPage(aRomAddr);
    }
void X86Mmu::FlushShadow(TLinAddr aRomAddr)
    {
#ifdef __SMP__
    TTLBIPI ipi;
    ipi.AddAddress(aRomAddr);
    NKern::Lock();              // stop other processors passing this point
    ShadowSpinLock.LockOnly();
    ipi.QueueAllOther(&TTLBIPI::WaitAndInvalidateIsr);
    ipi.WaitEntry();            // wait for other processors to stop
    DoInvalidateTLBForPage(aRomAddr);
    ipi.iFlag = 1;              // release other processors so they can flush their TLBs
    ipi.WaitCompletion();       // wait for other processors to flush their TLBs
    ShadowSpinLock.UnlockOnly();
    NKern::Unlock();
#else
    InvalidateTLBForPage(aRomAddr);     // remove all TLB references to original ROM page
#endif
    }
void X86Mmu::Pagify(TInt /*aId*/, TLinAddr /*aLinAddr*/)
    {
    // Nothing to do on x86
    }

void X86Mmu::ClearRamDrive(TLinAddr aStart)
    {
    // clear the page directory entries corresponding to the RAM drive
    TPde* kpd=(TPde*)KPageDirectoryBase;    // kernel page directory
    ZeroPdes(kpd, aStart, KRamDriveEndAddress);
    __DRAIN_WRITE_BUFFER;
    }
// Generic cache/TLB flush function.
// Which things are flushed is determined by aMask.
void X86Mmu::GenericFlush(TUint32 aMask)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("GenericFlush %x",aMask));
    if (aMask&(EFlushDPermChg|EFlushIPermChg))
        InvalidateTLB();
    }
TPde X86Mmu::PdePermissions(TChunkType aChunkType, TBool aRO)
    {
    if (aChunkType==EUserData && aRO)
        return KPdePtePresent|KPdePteUser;
    return ChunkPdePermissions[aChunkType];
    }

TPte X86Mmu::PtePermissions(TChunkType aChunkType)
    {
    TPte pte=ChunkPtePermissions[aChunkType];
    return (pte&~KPdePteGlobal)|(pte&iPteGlobal);
    }
const TUint FBLK=(EMapAttrFullyBlocking>>12);
const TUint BFNC=(EMapAttrBufferedNC>>12);
const TUint BUFC=(EMapAttrBufferedC>>12);
const TUint L1UN=(EMapAttrL1Uncached>>12);
const TUint WTRA=(EMapAttrCachedWTRA>>12);
const TUint WTWA=(EMapAttrCachedWTWA>>12);
const TUint WBRA=(EMapAttrCachedWBRA>>12);
const TUint WBWA=(EMapAttrCachedWBWA>>12);
const TUint AWTR=(EMapAttrAltCacheWTRA>>12);
const TUint AWTW=(EMapAttrAltCacheWTWA>>12);
const TUint AWBR=(EMapAttrAltCacheWBRA>>12);
const TUint AWBW=(EMapAttrAltCacheWBWA>>12);

const TUint16 UNS=0xffffu;  // Unsupported attribute
const TUint16 SPE=0xfffeu;  // Special processing required
static const TUint16 CacheBuffAttributes[16]=
    {0x10,0x10,0x10,0x10,0x08,0x08,0x00,0x00, UNS, UNS, UNS, UNS, UNS, UNS, UNS,0x00};
static const TUint8 CacheBuffActual[16]=
    {FBLK,FBLK,FBLK,FBLK,WTRA,WTRA,WBWA,WBWA,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,FBLK,WBWA};

static const TUint8 ActualReadPrivilegeLevel[4]={1,1,4,4};  // RONO,RWNO,RORO,RWRW
static const TUint8 ActualWritePrivilegeLevel[4]={0,1,0,4}; // RONO,RWNO,RORO,RWRW
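// Illustrative decode (not from the original source): the table index is the
// EMapAttr L1 cache nibble; 0x10 sets PCD (uncached), 0x08 sets PWT
// (write-through), 0x00 leaves the page fully cacheable write-back. So
// WTRA/WTWA requests map to PWT, WBRA/WBWA to plain write-back, and the
// alternate cache attributes come back as UNS (unsupported).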
TInt X86Mmu::PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)
    {
    __KTRACE_OPT(KMMU,Kern::Printf(">X86Mmu::PdePtePermissions, mapattr=%08x",aMapAttr));
    TUint read=aMapAttr & EMapAttrReadMask;
    TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
    TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
    TUint cache=(aMapAttr & EMapAttrL1CacheMask)>>12;
    // ignore L2 cache attributes for now - downgrade to L2 uncached

    // if execute access is greater than read, adjust read (since there are no separate execute permissions on X86)
    if (exec>read)
        read=exec;
    TPte pte;
    if (write==0)
        {
        // read-only
        if (read>=4)
            pte=KPdePermRORO;       // user and supervisor read-only
        else
            pte=KPdePermRONO;       // supervisor r/o user no access
        }
    else if (write<4)
        {
        // only supervisor can write
        if (read>=4)
            pte=KPdePermRWRW;       // full access since no RWRO
        else
            pte=KPdePermRWNO;       // sup rw user no access
        }
    else
        pte=KPdePermRWRW;           // sup rw user rw
    read=ActualReadPrivilegeLevel[pte>>1];
    write=ActualWritePrivilegeLevel[pte>>1];
    TUint cbatt=CacheBuffAttributes[cache];
    TInt r=KErrNone;
    if (cbatt==UNS)
        r=KErrNotSupported;
    if (r==KErrNone)
        {
        cache=CacheBuffActual[cache];
        aPde=KPdePtePresent|KPdePteWrite|KPdePteUser;
        aPte=pte|cbatt|iPteGlobal;  // HW chunks can always be global
        aMapAttr=read|(write<<4)|(read<<8)|(cache<<12);
        }
    __KTRACE_OPT(KMMU,Kern::Printf("<X86Mmu::PdePtePermissions, r=%d, mapattr=%08x, pde=%08x, pte=%08x",
            r,aMapAttr,aPde,aPte));
    return r;
    }
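// Worked example (illustrative): a request for user r/w fully-cached memory
// (read>=4, write>=4, cache==WBWA) selects pte=KPdePermRWRW;
// CacheBuffAttributes[WBWA]==0x00 keeps the page write-back cacheable, so the
// returned aPte is KPdePermRWRW|iPteGlobal and aMapAttr is rewritten to the
// permissions actually granted.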
THwChunkAddressAllocator* X86Mmu::MappingRegion(TUint aMapAttr)
    {
    TUint read=aMapAttr & EMapAttrReadMask;
    TUint write=(aMapAttr & EMapAttrWriteMask)>>4;
    TUint exec=(aMapAttr & EMapAttrExecMask)>>8;
    if (read>=4 || write>=4 || exec>=4)
        return iUserHwChunkAllocator;   // if any access in user mode, must put it in user global section
    return iHwChunkAllocator;
    }
void X86Mmu::Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)
//
// Map a region of physical addresses aPhysAddr to aPhysAddr+aSize-1 to virtual address aLinAddr.
// Use permissions specified by aPdePerm and aPtePerm. Use mapping sizes up to and including (1<<aMapShift).
// Assume any page tables required are already assigned.
// aLinAddr, aPhysAddr, aSize must be page-aligned.
//
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu::Map lin=%08x phys=%08x size=%08x", aLinAddr, aPhysAddr, aSize));
    __KTRACE_OPT(KMMU, Kern::Printf("pde=%08x pte=%08x mapshift=%d", aPdePerm, aPtePerm, aMapShift));
    TPde lp_pde=aPtePerm|KPdeLargePage;
    TLinAddr la=aLinAddr;
    TPhysAddr pa=aPhysAddr;
    TInt remain=aSize;
    while (remain)
        {
        if (aMapShift>=KChunkShift && (la & KChunkMask)==0 && remain>=KChunkSize)
            {
            // use large pages
            TInt npdes=remain>>KChunkShift;
            const TBitMapAllocator& b=*iOsAsidAllocator;
            TInt num_os_asids=b.iSize-b.iAvail;
            TInt os_asid=0;
            for (; num_os_asids; ++os_asid)
                {
                if (b.NotAllocated(os_asid,1))
                    continue;           // os_asid is not needed
                TPde* p_pde=PageDirectory(os_asid)+(la>>KChunkShift);
                TPde* p_pde_E=p_pde+npdes;
                TPde pde=pa|lp_pde;
                NKern::LockSystem();
                for (; p_pde < p_pde_E; pde+=KChunkSize)
                    {
                    __ASSERT_DEBUG(*p_pde==0, MM::Panic(MM::EPdeAlreadyInUse));
                    __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, p_pde));
                    *p_pde++=pde;
                    }
                NKern::UnlockSystem();
                --num_os_asids;
                }
            npdes<<=KChunkShift;
            la+=npdes, pa+=npdes, remain-=npdes;
            continue;
            }
        // use normal pages
        TInt block_size = Min(remain, KChunkSize-(la&KChunkMask));
        TInt id=PageTableId(la, 0);
        __ASSERT_DEBUG(id>=0, MM::Panic(MM::EMmuMapNoPageTable));
        TPte* p_pte=PageTable(id)+((la&KChunkMask)>>KPageShift);
        TPte* p_pte_E = p_pte + (block_size>>KPageShift);
        TPte pte=pa|aPtePerm;
        SPageTableInfo& ptinfo=iPtInfo[id];
        NKern::LockSystem();
        for (; p_pte < p_pte_E; pte+=KPageSize)
            {
            __ASSERT_DEBUG(*p_pte==0, MM::Panic(MM::EPteAlreadyInUse));
            __KTRACE_OPT(KMMU,Kern::Printf("Writing PTE %08x to %08x", pte, p_pte));
            *p_pte++=pte;
            ++ptinfo.iCount;
            NKern::FlashSystem();
            }
        NKern::UnlockSystem();
        la+=block_size, pa+=block_size, remain-=block_size;
        }
    }
void X86Mmu::Unmap(TLinAddr aLinAddr, TInt aSize)
//
// Remove all mappings in the specified range of addresses.
// Don't free page tables.
// aLinAddr, aSize must be page-aligned.
//
    {
    __KTRACE_OPT(KMMU, Kern::Printf("X86Mmu::Unmap lin=%08x size=%08x", aLinAddr, aSize));
#ifdef __SMP__
    TTLBIPI ipi;
#endif
    TLinAddr a=aLinAddr;
    TLinAddr end=a+aSize;
    __KTRACE_OPT(KMMU,Kern::Printf("a=%08x end=%08x",a,end));
    NKern::LockSystem();
    while(a!=end)
        {
        TInt pdeIndex=a>>KChunkShift;
        TLinAddr next=(pdeIndex<<KChunkShift)+KChunkSize;
        TInt to_do=Min(TInt(end-a), TInt(next-a))>>KPageShift;
        __KTRACE_OPT(KMMU,Kern::Printf("a=%08x next=%08x to_do=%d",a,next,to_do));
        TPde pde=::InitPageDirectory[pdeIndex];
        if ( (pde&(KPdePtePresent|KPdeLargePage))==(KPdePtePresent|KPdeLargePage) )
            {
            __ASSERT_DEBUG(!(a&KChunkMask), MM::Panic(MM::EUnmapBadAlignment));
            ::InitPageDirectory[pdeIndex]=0;
#ifdef __SMP__
            ipi.AddAddress(a);
#else
            InvalidateTLBForPage(a);        // flush any corresponding TLB entry
#endif
            a=next;
            NKern::FlashSystem();
            continue;
            }
        TInt ptid=PageTableId(a,0);
        SPageTableInfo& ptinfo=iPtInfo[ptid];
        if (ptid>=0)
            {
            TPte* ppte=PageTable(ptid)+((a&KChunkMask)>>KPageShift);
            TPte* ppte_End=ppte+to_do;
            for (; ppte<ppte_End; ++ppte, a+=KPageSize)
                {
                if (*ppte & KPdePtePresent)
                    --ptinfo.iCount;
                *ppte=0;
#ifdef __SMP__
                ipi.AddAddress(a);
#else
                InvalidateTLBForPage(a);    // flush any corresponding TLB entry
#endif
                NKern::FlashSystem();
                }
            }
        else
            a += (to_do<<KPageShift);
        }
#ifdef __SMP__
    ipi.InvalidateList();
#endif
    NKern::UnlockSystem();
    }
void X86Mmu::ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte)
    {
    //map the pages at a temporary address, clear them and unmap
    __ASSERT_MUTEX(RamAllocatorMutex);
    while (--aNumPages >= 0)
        {
        TPhysAddr pa;
        if ((TInt)aPageList&1)
            {
            pa = (TPhysAddr)aPageList&~1;
            *(TPhysAddr*)&aPageList += iPageSize;
            }
        else
            pa = *aPageList++;
        *iTempPte = pa | KPdePtePresent | KPdePteWrite | iPteGlobal;
        __DRAIN_WRITE_BUFFER;
        InvalidateTLBForPage(iTempAddr);
        memset((TAny*)iTempAddr, aClearByte, iPageSize);
        }
    *iTempPte=0;
    __DRAIN_WRITE_BUFFER;
    InvalidateTLBForPage(iTempAddr);
    }
TLinAddr X86Mmu::MapTemp(TPhysAddr aPage,TLinAddr /*aLinAddr*/,TInt aPages)
    {
    __ASSERT_MUTEX(RamAllocatorMutex);
    __ASSERT_DEBUG(!*iTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
    __ASSERT_DEBUG(aPages<=4,MM::Panic(MM::ETempMappingNoRoom));
    iTempMapCount = aPages;
    for (TInt i=0; i<aPages; i++)
        {
        iTempPte[i] = ((aPage&~KPageMask)+(i<<KPageShift)) | KPdePtePresent | KPdePteWrite | iPteGlobal;
        __DRAIN_WRITE_BUFFER;
        InvalidateTLBForPage(iTempAddr+(i<<KPageShift));
        }
    return iTempAddr;
    }

TLinAddr X86Mmu::MapTemp(TPhysAddr aPage,TLinAddr aLinAddr,TInt aPages, TMemoryType)
    {
    return MapTemp(aPage, aLinAddr, aPages);
    }
TLinAddr X86Mmu::MapSecondTemp(TPhysAddr aPage,TLinAddr /*aLinAddr*/,TInt aPages)
    {
    __ASSERT_MUTEX(RamAllocatorMutex);
    __ASSERT_DEBUG(!*iSecondTempPte,MM::Panic(MM::ETempMappingAlreadyInUse));
    __ASSERT_DEBUG(aPages<=4,MM::Panic(MM::ETempMappingNoRoom));
    iSecondTempMapCount = aPages;
    for (TInt i=0; i<aPages; i++)
        {
        iSecondTempPte[i] = ((aPage&~KPageMask)+(i<<KPageShift)) | KPdePtePresent | KPdePteWrite | iPteGlobal;
        __DRAIN_WRITE_BUFFER;
        InvalidateTLBForPage(iSecondTempAddr+(i<<KPageShift));
        }
    return iSecondTempAddr;
    }
void X86Mmu::UnmapTemp()
    {
    __ASSERT_MUTEX(RamAllocatorMutex);
    for (TInt i=0; i<iTempMapCount; i++)
        {
        iTempPte[i] = 0;
        __DRAIN_WRITE_BUFFER;
        InvalidateTLBForPage(iTempAddr+(i<<KPageShift));
        }
    }

void X86Mmu::UnmapSecondTemp()
    {
    __ASSERT_MUTEX(RamAllocatorMutex);
    for (TInt i=0; i<iSecondTempMapCount; i++)
        {
        iSecondTempPte[i] = 0;
        __DRAIN_WRITE_BUFFER;
        InvalidateTLBForPage(iSecondTempAddr+(i<<KPageShift));
        }
    }
void ExecHandler::UnlockRamDrive()
    {
    }

EXPORT_C void TInternalRamDrive::Unlock()
    {
    }

EXPORT_C void TInternalRamDrive::Lock()
    {
    }
TBool X86Mmu::ValidateLocalIpcAddress(TLinAddr aAddr,TInt aSize,TBool aWrite)
    {
    __NK_ASSERT_DEBUG(aSize<=KChunkSize);
    TLinAddr end = aAddr+aSize-1;
    if (end<aAddr)
        end = ~0u;

    if (TUint(aAddr^KIPCAlias)<TUint(KChunkSize) || TUint(end^KIPCAlias)<TUint(KChunkSize))
        {
        // local address is in alias region.
        // remove alias...
        NKern::LockSystem();
        ((DMemModelThread*)TheCurrentThread)->RemoveAlias();
        NKern::UnlockSystem();
        // access memory, which will cause an exception...
        if (!(TUint(aAddr^KIPCAlias)<TUint(KChunkSize)))
            aAddr = end;
        DoInvalidateTLBForPage(aAddr);  // only need to do this processor since alias range is owned by the thread
        if (aWrite)
            *(volatile TUint8*)aAddr = 0;
        else
            aWrite = *(volatile TUint8*)aAddr;
        // can't get here
        __NK_ASSERT_DEBUG(0);
        }

    TUint32 local_mask;
    DMemModelProcess* process=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
    if (aWrite)
        local_mask = process->iAddressCheckMaskW;
    else
        local_mask = process->iAddressCheckMaskR;
    TInt mask = 2<<(end>>27);
    mask -= 1<<(aAddr>>27);
    if ((local_mask&mask)!=mask)
        return EFalse;

    return ETrue;
    }
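// Worked example (illustrative): the check masks keep one bit per 128MB
// (1<<27) slice of the address space. For aAddr==0x10000000 with
// end==0x10000fff:
//   mask = (2<<(end>>27)) - (1<<(aAddr>>27)) = 8 - 4 = 4
// so only bit 2 (the 0x10000000..0x17ffffff slice) must be set in local_mask.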
TInt DMemModelThread::Alias(TLinAddr aAddr, DMemModelProcess* aProcess, TInt aSize, TInt aPerm, TLinAddr& aAliasAddr, TInt& aAliasSize)
//
// Set up an alias mapping starting at address aAddr in specified process.
// Check permissions aPerm.
// Enter and return with system locked.
// Note: Alias is removed if an exception is trapped by DThread::IpcExcHandler.
//
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("Thread %O Alias %08x+%x Process %O perm %x",this,aAddr,aSize,aProcess,aPerm));
    __ASSERT_SYSTEM_LOCK;

    if (TUint(aAddr^KIPCAlias)<TUint(KIPCAliasAreaSize))
        return KErrBadDescriptor; // prevent access to alias region

    // check if memory is in region which is safe to access with supervisor permissions...
    TBool okForSupervisorAccess = aPerm&(EMapAttrReadSup|EMapAttrWriteSup) ? 1 : 0;
    if (!okForSupervisorAccess)
        {
        if (aAddr>=0xc0000000)          // address in kernel area (top 1GB)?
            return KErrBadDescriptor;   // don't have permission
        TUint32 local_mask;
        if (aPerm&EMapAttrWriteUser)
            local_mask = aProcess->iAddressCheckMaskW;
        else
            local_mask = aProcess->iAddressCheckMaskR;
        okForSupervisorAccess = (local_mask>>(aAddr>>27))&1;
        }

    if (aAddr>=KUserSharedDataEnd)  // if address is in global section, don't bother aliasing it...
        {
        if (iAliasLinAddr)
            RemoveAlias();
        aAliasAddr = aAddr;
        TInt maxSize = KChunkSize-(aAddr&KChunkMask);
        aAliasSize = aSize<maxSize ? aSize : maxSize;
        return okForSupervisorAccess;
        }

    TInt asid = aProcess->iOsAsid;
    TPde* pd = PageDirectory(asid);
    TPde pde = pd[aAddr>>KChunkShift];
#ifdef __SMP__
    TLinAddr aliasAddr;
#else
    TLinAddr aliasAddr = KIPCAlias+(aAddr&(KChunkMask & ~KPageMask));
#endif
    if (pde==iAliasPde && iAliasLinAddr)
        {
        // pde already aliased, so just update linear address...
#ifdef __SMP__
        __NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
        aliasAddr = iAliasLinAddr & ~KChunkMask;
        aliasAddr |= (aAddr & (KChunkMask & ~KPageMask));
#endif
        iAliasLinAddr = aliasAddr;
        }
    else
        {
        // alias PDE changed...
        if (!iAliasLinAddr)
            {
            ::TheMmu.iAliasList.Add(&iAliasLink); // add to list if not already aliased
#ifdef __SMP__
            __NK_ASSERT_DEBUG(iCpuRestoreCookie==-1);
            iCpuRestoreCookie = NKern::FreezeCpu(); // temporarily lock current thread to this processor
#endif
            }
        iAliasPde = pde;
        iAliasOsAsid = asid;
#ifdef __SMP__
        TSubScheduler& ss = SubScheduler();     // OK since we are locked to this CPU
        aliasAddr = TLinAddr(ss.i_AliasLinAddr) + (aAddr & (KChunkMask & ~KPageMask));
        iAliasPdePtr = (TPde*)(TLinAddr(ss.i_AliasPdePtr) + (((DMemModelProcess*)iOwningProcess)->iOsAsid << KPageTableShift));
#endif
        iAliasLinAddr = aliasAddr;
        }

    __KTRACE_OPT(KMMU,Kern::Printf("Writing PDE %08x to %08x", pde, iAliasPdePtr));
    *iAliasPdePtr = pde;
    __DRAIN_WRITE_BUFFER;
    DoInvalidateTLBForPage(aliasAddr);  // only need to do this processor
    TInt offset = aAddr&KPageMask;
    aAliasAddr = aliasAddr | offset;
    TInt maxSize = KPageSize - offset;
    aAliasSize = aSize<maxSize ? aSize : maxSize;
    return okForSupervisorAccess;
    }
void DMemModelThread::RemoveAlias()
//
// Remove alias mapping (if present)
// Enter and return with system locked.
//
    {
    __KTRACE_OPT(KMMU2,Kern::Printf("Thread %O RemoveAlias", this));
    __ASSERT_SYSTEM_LOCK;
    TLinAddr addr = iAliasLinAddr;
    if (addr)
        {
        iAliasLinAddr = 0;
        iAliasPde = 0;
        __KTRACE_OPT(KMMU,Kern::Printf("Clearing PDE at %08x", iAliasPdePtr));
        *iAliasPdePtr = 0;
        __DRAIN_WRITE_BUFFER;
        DoInvalidateTLBForPage(addr);   // only need to do it for this processor
        iAliasLink.Deque();
#ifdef __SMP__
        __NK_ASSERT_DEBUG(iCpuRestoreCookie>=0);
        NKern::EndFreezeCpu(iCpuRestoreCookie);
        iCpuRestoreCookie = -1;
#endif
        }
    }
void X86Mmu::CacheMaintenanceOnDecommit(TPhysAddr)
    {
    // no cache operations required on freeing memory
    }

void X86Mmu::CacheMaintenanceOnDecommit(const TPhysAddr*, TInt)
    {
    // no cache operations required on freeing memory
    }

void X86Mmu::CacheMaintenanceOnPreserve(TPhysAddr, TUint)
    {
    // no cache operations required on freeing memory
    }

void X86Mmu::CacheMaintenanceOnPreserve(const TPhysAddr*, TInt, TUint)
    {
    // no cache operations required on freeing memory
    }

void X86Mmu::CacheMaintenanceOnPreserve(TPhysAddr , TInt , TLinAddr , TUint )
    {
    // no cache operations required on freeing memory
    }
TInt X86Mmu::UnlockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
    {
    TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
    TInt page = aLinAddr>>KPageShift;
    NKern::LockSystem();
    for(;;)
        {
        TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
        TPte* pt = SafePageTableFromPde(*pd++);
        __NK_ASSERT_DEBUG(pt);
        TInt pteIndex = page&(KChunkMask>>KPageShift);
        pt += pteIndex;
        do
            {
            TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
            if (pagesInPt>aNumPages)
                pagesInPt = aNumPages;
            if (pagesInPt>KMaxPages)
                pagesInPt = KMaxPages;

            aNumPages -= pagesInPt;
            page += pagesInPt;

            do
                {
                TPte pte = *pt++;
                if (pte) // pte may be null if page has already been unlocked and reclaimed by system
                    iRamCache->DonateRamCachePage(SPageInfo::FromPhysAddr(pte));
                }
            while(--pagesInPt);

            if (!aNumPages)
                {
                NKern::UnlockSystem();
                return KErrNone;
                }

            pteIndex = page&(KChunkMask>>KPageShift);
            }
        while(!NKern::FlashSystem() && pteIndex);
        }
    }
TInt X86Mmu::LockRamCachePages(TLinAddr aLinAddr, TInt aNumPages, DProcess* aProcess)
    {
    TInt asid = ((DMemModelProcess*)aProcess)->iOsAsid;
    TInt page = aLinAddr>>KPageShift;
    NKern::LockSystem();
    for(;;)
        {
        TPde* pd = PageDirectory(asid)+(page>>(KChunkShift-KPageShift));
        TPte* pt = SafePageTableFromPde(*pd++);
        __NK_ASSERT_DEBUG(pt);
        TInt pteIndex = page&(KChunkMask>>KPageShift);
        pt += pteIndex;
        do
            {
            TInt pagesInPt = (KChunkSize>>KPageShift)-pteIndex;
            if (pagesInPt>aNumPages)
                pagesInPt = aNumPages;
            if (pagesInPt>KMaxPages)
                pagesInPt = KMaxPages;

            aNumPages -= pagesInPt;
            page += pagesInPt;

            do
                {
                TPte pte = *pt++;
                if (pte==0)
                    goto not_found;
                if (!iRamCache->ReclaimRamCachePage(SPageInfo::FromPhysAddr(pte)))
                    goto not_found;
                }
            while(--pagesInPt);

            if (!aNumPages)
                {
                NKern::UnlockSystem();
                return KErrNone;
                }

            pteIndex = page&(KChunkMask>>KPageShift);
            }
        while(!NKern::FlashSystem() && pteIndex);
        }
not_found:
    NKern::UnlockSystem();
    return KErrNotFound;
    }
void RamCache::SetFree(SPageInfo* aPageInfo)
    {
    TInt type = aPageInfo->Type();
    if (type==SPageInfo::EPagedCache)
        {
        TInt offset = aPageInfo->Offset()<<KPageShift;
        DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
        __NK_ASSERT_DEBUG(TUint(offset)<TUint(chunk->iSize));
        TLinAddr lin = ((TLinAddr)chunk->iBase)+offset;
        TInt asid = ((DMemModelProcess*)chunk->iOwningProcess)->iOsAsid;
        TPte* pt = PtePtrFromLinAddr(lin,asid);
        *pt = 0;
        InvalidateTLBForPage(lin);

        // actually decommit it from chunk...
        TInt ptid = ((TLinAddr)pt-KPageTableBase)>>KPageTableShift;
        SPageTableInfo& ptinfo=((X86Mmu*)iMmu)->iPtInfo[ptid];
        if (!--ptinfo.iCount)
            {
            chunk->iPageTables[offset>>KChunkShift] = 0xffff;
            NKern::UnlockSystem();
            ((X86Mmu*)iMmu)->DoUnassignPageTable(lin, (TAny*)asid);
            ((X86Mmu*)iMmu)->FreePageTable(ptid);
            NKern::LockSystem();
            }
        }
    else
        {
        __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: SetFree() with bad page type = %d",aPageInfo->Type()));
        Panic(EUnexpectedPageType);
        }
    }
// Not supported on x86 - no defrag yet
void X86Mmu::DisablePageModification(DMemModelChunk* /*aChunk*/, TInt /*aOffset*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    }

TInt X86Mmu::RamDefragFault(TAny* /*aExceptionInfo*/)
    {
    MM::Panic(MM::EOperationNotSupported);
    return 0; // keep compiler happy
    }