1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
17 #include "kernel/cache_maintenance.inl"
18 #include <kernel/cache.h>
27 #include "mpagearray.h"
34 // check enough space for page infos...
35 __ASSERT_COMPILE((KPageInfoLinearEnd-KPageInfoLinearBase)/sizeof(SPageInfo)==(1<<(32-KPageShift)));
37 // check KPageInfoShift...
38 __ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift));
41 SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
43 __NK_ASSERT_DEBUG((aAddress&KPageMask)==0);
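// Note (descriptive comment, not in the original source): each page of SPageInfo
// structures covers 2^(KPageShift-KPageInfoShift) physical pages, so the group index
// computed below is (aAddress>>KPageShift)>>(KPageShift-KPageInfoShift). KPageInfoMap
// is a bitmap with one bit per such group indicating whether its page of SPageInfo
// structures exists; pages in groups whose bit is clear have no SPageInfo.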
44 TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift);
45 TUint flags = ((TUint8*)KPageInfoMap)[index>>3];
46 TUint mask = 1<<(index&7);
48 return 0; // no SPageInfo for aAddress
49 SPageInfo* info = FromPhysAddr(aAddress);
50 if(info->iType==SPageInfo::EInvalid)
58 void SPageInfo::CheckAccess(const char* aMessage, TUint aFlags)
60 if(K::Initialising || NKern::Crashed())
63 if((aFlags&ECheckNotAllocated) && (iType!=EUnknown))
65 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
70 if((aFlags&ECheckNotUnused) && (iType==EUnused))
72 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
77 if((aFlags&ECheckUnused) && (iType!=EUnused))
79 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
84 if((aFlags&ECheckNotPaged) && (iPagedState!=EUnpaged))
86 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iPagedState=%d : %s",this,PhysAddr(),iPagedState,aMessage);
91 if((aFlags&ECheckRamAllocLock) && !RamAllocLock::IsHeld())
93 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
98 if((aFlags&ENoCheckMmuLock) || MmuLock::IsHeld())
101 Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x : %s",this,PhysAddr(),aMessage);
102 Mmu::Panic(Mmu::EUnsafePageInfoAccess);
106 void SPageInfo::Dump()
108 Kern::Printf("SPageInfo for page %x = %d,%d,%02x,0x%08x,0x%x,%d",PhysAddr(),iType,iPagedState,iFlags,iOwner,iIndex,iPinCount);
119 // check enough space for page table infos...
120 __ASSERT_COMPILE((KPageTableInfoEnd-KPageTableInfoBase)/sizeof(SPageTableInfo)
121 >=(KPageTableEnd-KPageTableBase)/KPageTableSize);
123 // check KPtBlockShift...
124 __ASSERT_COMPILE((sizeof(SPageTableInfo)<<KPtBlockShift)==KPageSize);
129 TBool SPageTableInfo::CheckPageCount()
131 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
132 TPte* pt = PageTable();
134 do if(*pt++) ++realCount;
135 while(TLinAddr(pt)&(KPageTableMask/sizeof(TPte)*sizeof(TPte)));
136 if(iPageCount==realCount)
138 Kern::Printf("CheckPageCount Failed: pt=0x%08x count=%d realCount=%d",TLinAddr(pt)-KPageTableSize,iPageCount,realCount);
143 void SPageTableInfo::CheckChangeUse(const char* aName)
147 if(PageTablesLockIsHeld() && MmuLock::IsHeld())
149 Kern::Printf("SPageTableInfo::CheckChangeUse failed : %s",aName);
150 Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
154 void SPageTableInfo::CheckCheckUse(const char* aName)
158 if(PageTablesLockIsHeld() || MmuLock::IsHeld())
160 Kern::Printf("SPageTableInfo::CheckCheckUse failed : %s",aName);
161 Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
165 void SPageTableInfo::CheckAccess(const char* aName)
169 if(MmuLock::IsHeld())
171 Kern::Printf("SPageTableInfo::CheckAccess failed : %s",aName);
172 Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
176 void SPageTableInfo::CheckInit(const char* aName)
180 if(PageTablesLockIsHeld() && iType==EUnused)
182 Kern::Printf("SPageTableInfo::CheckInit failed : %s",aName);
183 Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
194 _LIT(KLitRamAlloc,"RamAlloc");
195 _LIT(KLitPhysMemSync,"PhysMemSync");
197 void RamAllocLock::Lock()
200 Kern::MutexWait(*m.iRamAllocatorMutex);
201 if(!m.iRamAllocLockCount++)
203 // first lock, so setup memory fail data...
204 m.iRamAllocFailed = EFalse;
205 __NK_ASSERT_DEBUG(m.iRamAllocInitialFreePages==m.FreeRamInPages()); // free RAM shouldn't have changed whilst lock was held
210 void RamAllocLock::Unlock()
213 if(--m.iRamAllocLockCount)
215 Kern::MutexSignal(*m.iRamAllocatorMutex);
218 TBool failed = m.iRamAllocFailed;
219 TUint initial = m.iRamAllocInitialFreePages;
220 TUint final = m.FreeRamInPages();
221 m.iRamAllocInitialFreePages = final; // new baseline value
222 TUint changes = K::CheckFreeMemoryLevel(initial*KPageSize,final*KPageSize,failed);
225 __KTRACE_OPT(KMMU,Kern::Printf("RamAllocLock::Unlock() changes=%x",changes));
227 Kern::MutexSignal(*m.iRamAllocatorMutex);
231 TBool RamAllocLock::Flash()
235 return true; // lock was released
239 TBool RamAllocLock::IsHeld()
242 return m.iRamAllocatorMutex->iCleanup.iThread == &Kern::CurrentThread() && m.iRamAllocLockCount;
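/**
Illustrative sketch (not part of the original source): the usual pattern for code
which allocates RAM through the Mmu. The RamAllocLock is held for the duration of
the allocation; the variable names are hypothetical and the memory attribute flag
follows the casts used elsewhere in this file.

@code
	RamAllocLock::Lock();
	TPhysAddr page = KPhysAddrInvalid;
	// allocate one physically addressed page...
	TInt r = TheMmu.AllocPhysicalRam(&page, 1, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
	if(r==KErrNone)
		{
		// ... use the page, then release it ...
		TheMmu.FreePhysicalRam(&page, 1);
		}
	RamAllocLock::Unlock();
@endcode
*/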
252 TUint MmuLock::UnlockGuardNest =0;
253 TUint MmuLock::UnlockGuardFail =0;
256 NFastMutex MmuLock::iLock;
260 NKern::FMWait(&iLock);
263 void MmuLock::Unlock()
266 NKern::FMSignal(&iLock);
269 TBool MmuLock::Flash()
272 return NKern::FMFlash(&iLock);
275 TBool MmuLock::IsHeld()
277 NFastMutex& m = iLock;
278 return m.HeldByCurrentThread();
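/**
Illustrative sketch (not part of the original source): the MmuLock protects page info
and page table state, so lookups such as SPageInfo::SafeFromPhysAddr are performed
with it held; long running updates periodically flash the lock to bound hold time.
'physAddr' is a hypothetical page-aligned physical address.

@code
	MmuLock::Lock();
	SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
	if(pi && pi->Type()==SPageInfo::EUnused)
		{
		// the address is RAM and the page is currently unused...
		}
	MmuLock::Unlock();
@endcode
*/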
289 void Mmu::Init1Common()
291 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init1Common"));
294 TUint pteType = PteType(ESupervisorReadWrite,true);
295 iTempPteCached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalCached|EMemoryAttributeDefaultShareable),pteType);
296 iTempPteUncached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable),pteType);
297 iTempPteCacheMaintenance = BlankPte((TMemoryAttributes)(CacheMaintenance::TemporaryMapping()|EMemoryAttributeDefaultShareable),pteType);
300 PP::MaxUserThreadStack=0x14000; // 80K - STDLIB asks for 64K for PosixServer!!!!
301 PP::UserThreadStackGuard=0x2000; // 8K
302 PP::MaxStackSpacePerProcess=0x200000; // 2Mb
303 K::SupervisorThreadStackSize=0x1000; // 4K
304 PP::SupervisorThreadStackGuard=0x1000; // 4K
305 K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
306 PP::RamDriveStartAddress=0;
308 PP::RamDriveMaxSize=0x20000000; // 512MB, probably will be reduced later
309 K::MemModelAttributes=EMemModelTypeFlexible|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
310 EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
311 EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;
316 void Mmu::VerifyRam()
318 Kern::Printf("Mmu::VerifyRam() pass 1");
319 RamAllocLock::Lock();
324 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p);
327 Kern::Printf("%08x %d",p,pi->Type());
328 if(pi->Type()==SPageInfo::EUnused)
330 volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0);
333 __NK_ASSERT_DEBUG(b[0]==p);
334 __NK_ASSERT_DEBUG(b[1]==~p);
343 Kern::Printf("Mmu::VerifyRam() pass 2");
346 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p);
349 if(pi->Type()==SPageInfo::EUnused)
351 volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0);
352 if(b[0]!=p || b[1]!=~p)
355 Kern::Printf("%08x FAILED %x %x",p,b[0],b[1]);
364 __NK_ASSERT_DEBUG(!fail);
365 RamAllocLock::Unlock();
370 void Mmu::Init2Common()
372 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2Common"));
374 // create allocator...
375 const SRamInfo& info = *(const SRamInfo*)TheSuperPage().iRamBootData;
376 iRamPageAllocator = DRamAllocator::New(info, iRamZones, iRamZoneCallback);
378 // initialise all pages in banks as unused...
379 const SRamBank* bank = info.iBanks;
382 TUint32 base = bank->iBase;
383 TUint32 size = bank->iSize;
384 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found RAM bank 0x%08x size %d",base,size));
385 if(base+size<=base || ((base|size)&KPageMask))
386 Panic(EInvalidRamBankAtBoot);
388 SPageInfo* pi = SPageInfo::FromPhysAddr(base);
389 SPageInfo* piEnd = pi+(size>>KPageShift);
394 // step over the last bank to get to the reserved banks.
396 // mark any reserved regions as allocated...
399 TUint32 base = bank->iBase;
400 TUint32 size = bank->iSize;
401 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found reserved bank 0x%08x size %d",base,size));
402 if(base+size<=base || ((base|size)&KPageMask))
403 Panic(EInvalidReservedBankAtBoot);
405 SPageInfo* pi = SPageInfo::FromPhysAddr(base);
406 SPageInfo* piEnd = pi+(size>>KPageShift);
408 (pi++)->SetPhysAlloc();
412 // Clear the initial (and only so far) page table info page so all unused
413 // page table infos will be marked as unused.
414 __ASSERT_COMPILE(SPageTableInfo::EUnused == 0);
415 memclr((TAny*)KPageTableInfoBase, KPageSize);
417 // look for page tables - assume first page table maps page tables
418 TPte* pPte = (TPte*)KPageTableBase;
420 for(i=0; i<KChunkSize/KPageSize; ++i)
423 if(pte==KPteUnallocatedEntry) // after boot, page tables are contiguous
425 TPhysAddr ptpgPhys = Mmu::PtePhysAddr(pte,i);
426 __KTRACE_OPT(KBOOT,Kern::Printf("Page Table Group %08x -> Phys %08x", KPageTableBase+i*KPageSize, ptpgPhys));
427 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys);
428 __ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot));
429 pi->SetFixed(i); // this also sets the SPageInfo::iOffset so that linear-to-physical works
432 // look for mapped pages
433 TPde* pd = Mmu::PageDirectory(KKernelOsAsid);
434 for(i=0; i<(1<<(32-KChunkShift)); ++i)
437 if(pde==KPdeUnallocatedEntry)
439 TPhysAddr pdePhys = Mmu::PdePhysAddr(pde);
441 if(pdePhys!=KPhysAddrInvalid)
443 __KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", i<<KChunkShift, pdePhys));
447 pt = Mmu::PageTableFromPde(pde);
448 __KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> page table %08x", i<<KChunkShift, pt));
449 __ASSERT_ALWAYS(pt,Panic(EInvalidPdeAtBoot)); // bad PDE
454 for(j=0; j<KChunkSize/KPageSize; ++j)
456 TBool present = ETrue; // all pages present if whole PDE mapping
461 present = pte!=KPteUnallocatedEntry;
466 TPhysAddr pa = pt ? Mmu::PtePhysAddr(pte,j) : (pdePhys + (j<<KPageShift));
467 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
468 __KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x PA=%08x",
469 (i<<KChunkShift)+(j<<KPageShift), pa));
470 if(pi) // ignore non-RAM mappings
472 TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed);
473 // allow KErrAlreadyExists since it's possible that a page is doubly mapped
474 __ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
475 if(pi->Type()==SPageInfo::EUnused)
480 __KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x #PTEs=%d",(i<<KChunkShift),np));
483 SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
488 TInt r = K::MutexCreate(iRamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc);
490 Panic(ERamAllocMutexCreateFailed);
491 iRamAllocLockCount = 0;
492 iRamAllocInitialFreePages = FreeRamInPages();
494 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::DoInit2"));
496 for(i=0; i<KNumTempMappingSlots; ++i)
497 iTempMap[i].Alloc(1);
499 iPhysMemSyncTemp.Alloc(1);
500 r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem);
502 Panic(EPhysMemSyncMutexCreateFailed);
507 void Mmu::Init2FinalCommon()
509 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon"));
510 // hack, reduce free memory to <2GB...
511 while(FreeRamInPages()>=0x80000000/KPageSize)
514 TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed);
515 __NK_ASSERT_ALWAYS(r==KErrNone);
517 // hack, reduce total RAM to <2GB...
518 if(TheSuperPage().iTotalRamSize<0)
519 TheSuperPage().iTotalRamSize = 0x80000000-KPageSize;
521 // Save current free RAM size - there can never be more free RAM than this
522 TUint maxFreePages = FreeRamInPages();
523 K::MaxFreeRam = maxFreePages*KPageSize;
524 if(maxFreePages < (TUint(PP::RamDriveMaxSize)>>KPageShift))
525 PP::RamDriveMaxSize = maxFreePages*KPageSize;
527 // update this to stop assert triggering in RamAllocLock::Lock()
528 iRamAllocInitialFreePages = maxFreePages;
534 iDefrag = new Defrag;
536 Panic(EDefragAllocFailed);
537 iDefrag->Init3(TheMmu.iRamPageAllocator);
544 void Mmu::Panic(TPanic aPanic)
546 Kern::Fault("MMU",aPanic);
550 TUint Mmu::FreeRamInPages()
552 return iRamPageAllocator->FreeRamInPages()+ThePager.NumberOfFreePages();
556 TUint Mmu::TotalPhysicalRamPages()
558 return iRamPageAllocator->TotalPhysicalRamPages();
562 const SRamZone* Mmu::RamZoneConfig(TRamZoneCallback& aCallback) const
564 aCallback = iRamZoneCallback;
569 void Mmu::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
572 iRamZoneCallback = aCallback;
576 TInt Mmu::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
578 return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask);
582 TInt Mmu::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
584 return iRamPageAllocator->GetZonePageCount(aId, aPageData);
588 TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign)
590 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aAlign));
591 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
593 TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign);
595 iRamAllocFailed = ETrue;
598 TUint pages = MM::RoundToPageCount(aBytes);
599 AllocatedPhysicalRam(aPhysAddr, pages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
601 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
606 TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
608 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?)", aZoneIdCount, aNumPages));
609 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
611 TInt r = iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed);
613 iRamAllocFailed = ETrue;
616 PagesAllocated(aPageList, aNumPages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
618 // update page infos...
620 TPhysAddr* pageEnd = aPageList + aNumPages;
622 TPhysAddr* page = aPageList;
623 while (page < pageEnd)
625 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
626 TPhysAddr pagePhys = *page++;
627 __NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
628 SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
632 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d",r));
637 TInt Mmu::RamHalFunction(TInt aFunction, TAny* a1, TAny* a2)
639 // This function should only be registered with hal and therefore can only
640 // be invoked after the ram allocator has been created.
641 __NK_ASSERT_DEBUG(iRamPageAllocator);
642 return iRamPageAllocator->HalFunction(aFunction, a1, a2);
646 void Mmu::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType)
648 iRamPageAllocator->ChangePageType(aPageInfo, aOldPageType, aNewPageType);
651 TInt Mmu::HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo)
653 TRACE(("Mmu::HandlePageFault(0x%08x,0x%08x,%d)",aPc,aFaultAddress,aAccessPermissions));
655 DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
656 // Get the os asid of the process taking the fault; no need to open a reference
657 // as it is the current thread's process, so it can't be freed.
658 TUint faultOsAsid = ((DMemModelProcess*)thread->iNThread.iAddressSpace)->OsAsid();
660 // check if any fast mutexes held...
661 NFastMutex* fm = NKern::HeldFastMutex();
662 TPagingExcTrap* trap = thread->iPagingExcTrap;
665 // check there is an XTRAP_PAGING in effect...
668 // oops, kill system...
669 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with FM Held! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc));
670 Exc::Fault(aExceptionInfo);
673 // release the fast mutex...
677 NKern::ThreadEnterCS();
679 // work out address space for aFaultAddress...
680 TUint osAsid = faultOsAsid;
681 TLinAddr addr = aFaultAddress;
682 if(thread->iAliasLinAddr && TUint(addr - thread->iAliasLinAddr) < TUint(KPageSize))
684 // Address in aliased memory...
685 addr = (addr - thread->iAliasLinAddr) + thread->iAliasTarget;
686 // Get the os asid of the process the thread is aliasing; no need to open
687 // a reference on it as one was already opened when the alias was created.
688 osAsid = thread->iAliasProcess->OsAsid();
690 else if(addr>=KGlobalMemoryBase)
692 // Address in global region, so look it up in kernel's address space...
693 osAsid = KKernelOsAsid;
696 // NOTE, osAsid will remain valid for duration of this function because it is either
697 // - The current thread's address space, which can't go away whilst the thread
699 // - The address space of another thread which we are aliasing memory from,
700 // and we would only do this if we have a reference on this other thread,
701 // which has a reference on its process, which should own the address space!
703 #ifdef __BROADCAST_CACHE_MAINTENANCE__
705 if (thread->iAliasLinAddr)
707 // If an alias is in effect, the thread will be locked to the current CPU,
708 // but we need to be able to migrate between CPUs for cache maintenance. This
709 // must be dealt with by removing the alias and restoring it with a paging trap
713 // oops, kill system...
714 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with thread locked to current CPU! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc));
715 Exc::Fault(aExceptionInfo);
717 // Open a reference on the aliased process's os asid before removing the alias
718 // so that the address space can't be freed while we try to access its members.
719 aliasAsid = thread->iAliasProcess->TryOpenOsAsid();
720 // This should never fail as until we remove the alias there will
721 // always be at least one reference on the os asid.
722 __NK_ASSERT_DEBUG(aliasAsid >= 0);
723 thread->RemoveAlias();
728 TUint offsetInMapping;
729 TUint mapInstanceCount;
730 DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, addr, 1, offsetInMapping, mapInstanceCount);
731 // TRACE(("%O mapping=0x%08x",TheCurrentThread,mapping));
732 TInt r = KErrNotFound;
738 // check if we need to process page fault...
739 if(!Mmu::CheckPteTypePermissions(mapping->PteType(),aAccessPermissions) ||
740 mapInstanceCount != mapping->MapInstanceCount())
742 // Invalid access to the page.
748 // Should not be able to take a fault on a pinned mapping if accessing it
749 // with the correct permissions.
750 __NK_ASSERT_DEBUG(!mapping->IsPinned());
752 // we do need to handle the fault, so determine whether this is a demand paging or page moving fault
753 DMemoryObject* memory = mapping->Memory();
758 TUint faultIndex = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
761 // This is safe as we have the instance count so can detect the mapping
762 // being reused and we have a reference to the memory object so it can't
766 if(memory->IsDemandPaged())
768 // Let the pager handle the fault...
769 r = ThePager.HandlePageFault( aPc, aFaultAddress, faultOsAsid, faultIndex,
770 aAccessPermissions, memory, mapping, mapInstanceCount,
771 thread, aExceptionInfo);
774 {// The page could be being moved so verify that with its manager.
775 DMemoryManager* manager = memory->iManager;
776 r = manager->HandleFault(memory, faultIndex, mapping, mapInstanceCount, aAccessPermissions);
779 {// alias PDE needs updating because page tables have changed...
780 thread->RefreshAlias();
790 // restore address space (because the trap will bypass any code
791 // which would have done this.)...
792 DMemModelThread::RestoreAddressSpace();
795 #ifdef __BROADCAST_CACHE_MAINTENANCE__
796 // Close any reference on the aliased process's os asid before we leave the
800 thread->iAliasProcess->CloseOsAsid();
804 NKern::ThreadLeaveCS(); // thread will die now if CheckRealtimeThreadFault caused a panic
806 // deal with XTRAP_PAGING...
809 // re-acquire any fast mutex which was held before the page fault...
814 trap->Exception(1); // return from exception trap with result '1' (value>0)
815 // code doesn't continue beyond this point.
816 __NK_ASSERT_DEBUG(0);
828 TInt Mmu::AllocRam( TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType,
829 TUint aBlockZoneId, TBool aBlockRest)
831 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam(?,%d,%x)",aCount,aFlags));
832 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
834 if(K::CheckForSimulatedAllocFail())
836 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns simulated OOM %d",KErrNoMemory));
840 TInt missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
841 if(missing && !(aFlags&EAllocNoPagerReclaim) && ThePager.GetFreePages(missing))
842 missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
843 TInt r = missing ? KErrNoMemory : KErrNone;
845 iRamAllocFailed = ETrue;
847 PagesAllocated(aPages,aCount,aFlags);
848 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns %d",r));
853 void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType)
855 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount));
856 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
858 // update page infos...
859 TPhysAddr* pages = aPages;
860 TPhysAddr* pagesEnd = pages+aCount;
861 TPhysAddr* pagesOut = aPages;
864 while(pages<pagesEnd)
866 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
867 TPhysAddr pagePhys = *pages++;
868 __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
869 SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
872 // If this is an old page of a page being moved that was previously pinned,
873 // then make sure it is freed as discardable; otherwise, despite DPager::DonatePages()
874 // having marked it as discardable, it would be freed as movable.
875 __NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1);
876 if (pi->PagedState() == SPageInfo::EPagedPinnedMoved)
877 aZonePageType = EPageDiscard;
879 if(ThePager.PageFreed(pi)==KErrNone)
880 --aCount; // pager has dealt with this page, so one less for us
883 // All paged pages should have been dealt with by the pager above.
884 __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
885 *pagesOut++ = pagePhys; // store page address for freeing later
890 iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType);
894 TInt Mmu::AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
896 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
897 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
899 if(K::CheckForSimulatedAllocFail())
901 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory));
904 // Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
905 __NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim));
907 TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
908 if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages)
910 // flush paging cache and retry...
912 r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
915 iRamAllocFailed = ETrue;
917 PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
918 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
923 void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount)
925 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeContiguousRam(0x%08x,0x%x)",aPhysAddr,aCount));
926 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
927 __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
929 TUint pageCount = aCount;
931 // update page infos...
932 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
933 SPageInfo* piEnd = pi+pageCount;
938 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
946 iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed);
947 aPhysAddr += KPageSize;
953 TInt Mmu::AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags)
955 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,%d,%x)",aCount,aFlags));
956 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
957 // Allocate fixed pages as physically allocated pages aren't movable or discardable.
958 TInt r = AllocRam(aPages, aCount, aFlags, EPageFixed);
962 // update page infos...
963 TPhysAddr* pages = aPages;
964 TPhysAddr* pagesEnd = pages+aCount;
967 while(pages<pagesEnd)
969 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
970 TPhysAddr pagePhys = *pages++;
971 __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
972 SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
981 void Mmu::FreePhysicalRam(TPhysAddr* aPages, TUint aCount)
983 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(?,%d)",aCount));
984 __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
986 // update page infos...
987 TPhysAddr* pages = aPages;
988 TPhysAddr* pagesEnd = pages+aCount;
991 while(pages<pagesEnd)
993 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
994 TPhysAddr pagePhys = *pages++;
995 __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
996 SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
997 __ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
998 __ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
1003 iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed);
1007 TInt Mmu::AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
1009 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
1010 TInt r = AllocContiguousRam(aPhysAddr,aCount,aAlign,aFlags);
1014 // update page infos...
1015 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1016 SPageInfo* piEnd = pi+aCount;
1021 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1031 void Mmu::FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount)
1033 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(0x%08x,0x%x)",aPhysAddr,aCount));
1035 // update page infos...
1036 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1037 SPageInfo* piEnd = pi+aCount;
1042 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1043 __ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
1044 __ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
1050 iRamPageAllocator->FreePhysicalRam(aPhysAddr, aCount << KPageShift);
1054 TInt Mmu::ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
1056 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags));
1057 aPhysAddr &= ~KPageMask;
1058 TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr,(aCount << KPageShift));
1062 PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
1064 // update page infos...
1065 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1066 SPageInfo* piEnd = pi+aCount;
1071 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1081 void Mmu::AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
1083 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocatedPhysicalRam(0x%08x,0x%x,%x)",aPhysAddr,aCount,aFlags));
1085 PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
1087 // update page infos...
1088 SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1089 SPageInfo* piEnd = pi+aCount;
1094 MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1108 Perform a page table walk to return the physical address of
1109 the memory mapped at virtual address \a aLinAddr in the
1110 address space \a aOsAsid.
1112 If the page table used was not one allocated by the kernel
1113 then the results are unpredictable and may cause a system fault.
1117 TPhysAddr Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
1119 __NK_ASSERT_DEBUG(MmuLock::IsHeld() || K::Initialising);
1120 return UncheckedLinearToPhysical(aLinAddr,aOsAsid);
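/**
Illustrative sketch (not part of the original source): a lookup using
Mmu::LinearToPhysical, which requires the MmuLock to be held except during boot.
'linAddr' is a hypothetical kernel virtual address.

@code
	MmuLock::Lock();
	TPhysAddr pa = TheMmu.LinearToPhysical(linAddr, KKernelOsAsid);
	MmuLock::Unlock();
	if(pa!=KPhysAddrInvalid)
		{
		// linAddr is mapped and pa is the physical address backing it
		}
@endcode
*/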
1126 Next virtual address available for allocation by TTempMapping.
1127 This is initialised to #KTempAddr and addresses may be allocated
1128 until they reach #KTempAddrEnd.
1130 TLinAddr Mmu::TTempMapping::iNextLinAddr = KTempAddr;
1134 Allocate virtual address space required to map a given number of memory pages.
1136 The actual size of the allocated virtual address space needs to accommodate \a aNumPages
1137 number of pages of any colour. For example: if \a aNumPages == 4 and #KPageColourCount == 4,
1138 then at least 7 pages are required.
1140 @param aNumPages Maximum number of pages that can be mapped into this temporary mapping.
1142 @pre Called in single threaded context (boot) only.
1144 @pre #iNextLinAddr points to virtual page with zero colour.
1145 @post #iNextLinAddr points to virtual page with zero colour.
1147 void Mmu::TTempMapping::Alloc(TUint aNumPages)
1149 __NK_ASSERT_DEBUG(aNumPages<=(KTempAddrEnd-KTempAddr)/KPageSize);
1151 // This runs during the boot only (single threaded context) so the access to iNextLinAddr is not guarded by any mutex.
1152 TLinAddr tempAddr = iNextLinAddr;
1153 TUint numPages = (KPageColourMask+aNumPages+KPageColourMask)&~KPageColourMask;
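	// Worked example (descriptive comment, not in the original source): with
	// KPageColourCount==4 (KPageColourMask==3) and aNumPages==4, the worst case is
	// 3 leading slots to reach any required colour plus the 4 pages themselves,
	// i.e. 7 slots; the expression above rounds this up to 8 so that iNextLinAddr
	// stays on a zero-colour boundary, as required by the postcondition.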
1154 iNextLinAddr = tempAddr+numPages*KPageSize;
1156 __NK_ASSERT_ALWAYS(iNextLinAddr<=KTempAddrEnd);
1158 __NK_ASSERT_DEBUG(iSize==0);
1159 iLinAddr = tempAddr;
1161 iPtePtr = Mmu::PtePtrFromLinAddr(tempAddr,KKernelOsAsid);
1162 __NK_ASSERT_DEBUG(iPtePtr);
1164 iBlankPte = TheMmu.iTempPteCached;
1168 TRACEB(("Mmu::TTempMapping::Alloc(%d) iLinAddr=0x%08x, iPtePtr=0x%08x",aNumPages,iLinAddr,iPtePtr));
1173 Map a single physical page into this temporary mapping.
1175 Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.
1177 @param aPage The physical page to map.
1178 @param aColour The required colour for the mapping.
1180 @return The linear address at which the page is mapped.
1182 TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour)
1184 __NK_ASSERT_DEBUG(iSize>=1);
1185 __NK_ASSERT_DEBUG(iCount==0);
1187 TUint colour = aColour&KPageColourMask;
1188 TLinAddr addr = iLinAddr+(colour<<KPageShift);
1189 TPte* pPte = iPtePtr+colour;
1192 __ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
1193 *pPte = (aPage&~KPageMask) | iBlankPte;
1194 CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1195 InvalidateTLBForPage(addr|KKernelOsAsid);
1202 Map a single physical page into this temporary mapping using the given page table entry (PTE) value.
1204 @param aPage The physical page to map.
1205 @param aColour The required colour for the mapping.
1206 @param aBlankPte The PTE value to use for mapping the page,
1207 with the physical address component equal to zero.
1209 @return The linear address at which the page is mapped.
1211 TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte)
1213 __NK_ASSERT_DEBUG(iSize>=1);
1214 __NK_ASSERT_DEBUG(iCount==0);
1216 TUint colour = aColour&KPageColourMask;
1217 TLinAddr addr = iLinAddr+(colour<<KPageShift);
1218 TPte* pPte = iPtePtr+colour;
1221 __ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
1222 *pPte = (aPage&~KPageMask) | aBlankPte;
1223 CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1224 InvalidateTLBForPage(addr|KKernelOsAsid);
1232 Map a number of physical pages into this temporary mapping.
1234 Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.
1236 @param aPages The array of physical pages to map.
1237 @param aCount The number of pages to map.
1238 @param aColour The required colour for the first page.
1239 Consecutive pages will be coloured accordingly.
1241 @return The linear address at which the first page is mapped.
1243 TLinAddr Mmu::TTempMapping::Map(TPhysAddr* aPages, TUint aCount, TUint aColour)
1245 __NK_ASSERT_DEBUG(iSize>=aCount);
1246 __NK_ASSERT_DEBUG(iCount==0);
1248 TUint colour = aColour&KPageColourMask;
1249 TLinAddr addr = iLinAddr+(colour<<KPageShift);
1250 TPte* pPte = iPtePtr+colour;
1253 for(TUint i=0; i<aCount; ++i)
1255 __ASSERT_DEBUG(pPte[i]==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
1256 pPte[i] = (aPages[i]&~KPageMask) | iBlankPte;
1257 CacheMaintenance::SinglePteUpdated((TLinAddr)&pPte[i]);
1258 InvalidateTLBForPage((addr+i*KPageSize)|KKernelOsAsid);
1267 Unmap all pages from this temporary mapping.
1269 @param aIMBRequired True if an instruction memory barrier (IMB) is required prior to unmapping.
1271 void Mmu::TTempMapping::Unmap(TBool aIMBRequired)
1273 __NK_ASSERT_DEBUG(iSize>=1);
1275 CacheMaintenance::CodeChanged(iLinAddr+iColour*KPageSize,iCount*KPageSize);
1281 Unmap all pages from this temporary mapping.
1283 void Mmu::TTempMapping::Unmap()
1285 __NK_ASSERT_DEBUG(iSize>=1);
1287 TUint colour = iColour;
1288 TLinAddr addr = iLinAddr+(colour<<KPageShift);
1289 TPte* pPte = iPtePtr+colour;
1290 TUint count = iCount;
1294 *pPte = KPteUnallocatedEntry;
1295 CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1296 InvalidateTLBForPage(addr|KKernelOsAsid);
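/**
Illustrative sketch (not part of the original source): using a TTempMapping to gain
temporary kernel access to a physical page. 'tempMap' is assumed to be a TTempMapping
whose Alloc() was called at boot (e.g. one of TheMmu.iTempMap[]); 'physAddr' and the
colour value are hypothetical.

@code
	TLinAddr va = tempMap.Map(physAddr, 0);		// colour 0 assumed here
	memclr((TAny*)va, KPageSize);				// access the page through 'va'
	tempMap.Unmap();
@endcode
*/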
1307 Dummy IPI to be invoked when a thread's alias pde members are updated remotely
1312 class TAliasIPI : public TGenericIPI
1315 static void RefreshIsr(TGenericIPI*);
1316 void RefreshAlias();
1323 void TAliasIPI::RefreshIsr(TGenericIPI*)
1325 TRACE2(("TAliasIPI"));
1330 Queue the dummy IPI on all other processors. This ensures that DoProcessSwitch will
1331 have completed updating iAliasPdePtr once this method returns.
1333 void TAliasIPI::RefreshAlias()
1336 QueueAllOther(&RefreshIsr);
1343 Perform a dummy IPI on all the other processors to ensure that, if any of them are
1344 executing DoProcessSwitch, they will see the new value of iAliasPde before they
1345 update iAliasPdePtr, or will finish updating iAliasPdePtr before we continue.
1346 This works as DoProcessSwitch() has interrupts disabled while reading iAliasPde
1347 and updating iAliasPdePtr.
1349 void BroadcastAliasRefresh()
1357 Remove any thread IPC aliases which use the specified page table.
1358 This is used by the page table allocator when a page table is freed.
1360 @pre #PageTablesLockIsHeld
1362 void Mmu::RemoveAliasesForPageTable(TPhysAddr aPageTable)
1364 __NK_ASSERT_DEBUG(PageTablesLockIsHeld());
1368 SDblQue checkedList;
1370 TUint ptId = aPageTable>>KPageTableShift;
1371 while(!iAliasList.IsEmpty())
1373 SDblQueLink* next = iAliasList.First()->Deque();
1374 checkedList.Add(next);
1375 DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
1376 if((thread->iAliasPde>>KPageTableShift)==ptId)
1378 // the page table is being aliased by the thread, so remove it...
1379 TRACE2(("Thread %O RemoveAliasesForPageTable", thread));
1380 thread->iAliasPde = KPdeUnallocatedEntry;
1381 #ifdef __SMP__ // we need to also unmap the page table in case thread is running on another core...
1383 // Ensure other processors see the update to iAliasPde.
1384 BroadcastAliasRefresh();
1386 *thread->iAliasPdePtr = KPdeUnallocatedEntry;
1388 SinglePdeUpdated(thread->iAliasPdePtr);
1389 __NK_ASSERT_DEBUG((thread->iAliasLinAddr&KPageMask)==0);
1391 // Invalidate the TLB for the page using the os asid of the process that created the alias.
1392 // This is safe as the os asid will be valid because the thread must be running, otherwise
1393 // the alias would have been removed.
1393 InvalidateTLBForPage(thread->iAliasLinAddr | ((DMemModelProcess*)thread->iOwningProcess)->OsAsid());
1394 // note, the race condition with 'thread' updating its iAliasLinAddr is
1395 // not a problem because 'thread' will not then be accessing the aliased
1396 // region and will take care of invalidating the TLB.
1402 // copy checkedList back to iAliasList
1403 iAliasList.MoveFrom(&checkedList);
1409 void DMemModelThread::RefreshAlias()
1413 TRACE2(("Thread %O RefreshAlias", this));
1414 // Get the os asid, this is the current thread so no need to open a reference.
1415 TUint thisAsid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
1417 TInt osAsid = iAliasProcess->OsAsid();
1418 TPde pde = *Mmu::PageDirectoryEntry(osAsid,iAliasTarget);
1420 *iAliasPdePtr = pde;
1421 SinglePdeUpdated(iAliasPdePtr);
1422 InvalidateTLBForPage(iAliasLinAddr|thisAsid);
1430 // Mapping/unmapping functions
1435 Modify page table entries (PTEs) so they map the given memory pages.
1436 Entries are only updated if the current state of the corresponding page
1437 is RPageArray::ECommitted.
1439 @param aPtePtr Pointer into a page table for the PTE of the first page.
1440 @param aCount The number of pages to modify.
1441 @param aPages Pointer to the entry for the first page in a memory object's #RPageArray.
1442 Each entry contains the physical address of a page together with its
1443 current state (RPageArray::TState).
1444 @param aBlankPte The value to use for each PTE, with the physical address component equal
1447 @return False, if the page table no longer maps any entries and may be freed.
1448 True otherwise, to indicate that the page table is still needed.
1451 @post #MmuLock held and has not been released by this function.
1453 TBool Mmu::MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
1455 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1456 __NK_ASSERT_DEBUG(aCount);
1457 __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
1462 // get page to map...
1463 TPhysAddr pagePhys = *aPages;
1464 TPte pte = *aPtePtr;
1465 if(!RPageArray::TargetStateIsCommitted(pagePhys))
1466 goto done; // page no longer needs mapping
1468 // clear type flags...
1469 pagePhys &= ~KPageMask;
1471 // check nobody has already mapped the page...
1472 if(pte!=KPteUnallocatedEntry)
1474 // already mapped...
1476 if((pte^pagePhys)>=TPte(KPageSize))
1479 Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
1480 __NK_ASSERT_DEBUG(0);
1483 return true; // return true to keep page table (it already had at least one page mapped)
1487 pte = pagePhys|aBlankPte;
1488 TRACE2(("!PTE %x=%x",aPtePtr,pte));
1493 CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1497 // check we are only updating a single page table...
1498 __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1501 TPte* pPte = aPtePtr;
1502 TPte* pPteEnd = aPtePtr+aCount;
1506 TPhysAddr pagePhys = *aPages++;
1508 if(RPageArray::TargetStateIsCommitted(pagePhys))
1510 // clear type flags...
1511 pagePhys &= ~KPageMask;
1513 // page not being freed, so try and map it...
1514 if(pte!=KPteUnallocatedEntry)
1516 // already mapped...
1518 if((pte^pagePhys)>=TPte(KPageSize))
1521 Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
1522 __NK_ASSERT_DEBUG(0);
1529 pte = pagePhys|aBlankPte;
1530 TRACE2(("!PTE %x=%x",pPte-1,pte));
1536 while(pPte!=pPteEnd);
1539 CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1543 // update page counts...
1544 SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
1545 count = pti->IncPageCount(count);
1546 TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
1547 __NK_ASSERT_DEBUG(pti->CheckPageCount());
1549 // see if page table needs freeing...
1550 TUint keepPt = count | pti->PermanenceCount();
1552 __NK_ASSERT_DEBUG(!pti->IsDemandPaged()); // check not demand paged page table
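/**
Illustrative sketch (not part of the original source): how a mapping object might use
Mmu::MapPages to write the PTEs for a run of committed pages, under the MmuLock.
'ptePtr', 'pageArrayEntries', 'n' and 'blankPte' are hypothetical; the PTEs must all
lie within a single page table.

@code
	MmuLock::Lock();
	TBool keepPt = TheMmu.MapPages(ptePtr, n, pageArrayEntries, blankPte);
	MmuLock::Unlock();
	if(!keepPt)
		{
		// the page table maps nothing and may be freed by its owner
		}
@endcode
*/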
1559 Modify page table entries (PTEs) so they map a new page.
1560 Entries are only updated if the current state of the corresponding page
1561 is RPageArray::ECommitted or RPageArray::EMoving.
1563 @param aPtePtr Pointer into a page table for the PTE of the page.
1564 @param aPage Pointer to the entry for the page in a memory object's #RPageArray.
1565 The entry contains the physical address of a page together with its
1566 current state (RPageArray::TState).
1567 @param aBlankPte The value to use for each PTE, with the physical address component equal
1571 @post #MmuLock held and has not been released by this function.
1573 void Mmu::RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte)
1575 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1576 __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
1578 // get page to remap...
1579 TPhysAddr pagePhys = aPage;
1581 // Only remap the page if it is committed or it is being moved and
1582 // no other operation has been performed on the page.
1583 if(!RPageArray::TargetStateIsCommitted(pagePhys))
1584 return; // page no longer needs mapping
1586 // Only remap the page if it is currently mapped, i.e. doesn't have an unallocated pte.
1587 // This will only be true if a new mapping is being added but it hasn't yet updated
1588 // all the ptes for the pages that it maps.
1589 TPte pte = *aPtePtr;
1590 if (pte == KPteUnallocatedEntry)
1593 // clear type flags...
1594 pagePhys &= ~KPageMask;
1596 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
1599 SPageInfo::TPagedState pagedState = pi->PagedState();
1600 if (pagedState != SPageInfo::EUnpaged)
1602 // The page is demand paged. Only remap the page if it is pinned or is currently
1603 // accessible but mapped to the old physical page.
1604 if (pagedState != SPageInfo::EPagedPinned &&
1605 (Mmu::IsPteInaccessible(pte) || (pte^pagePhys) < TPte(KPageSize)))
1609 // Ensure that the page is mapped as read only to prevent pages being marked dirty
1610 // by page moving despite not having been written to
1611 Mmu::MakePteInaccessible(aBlankPte, EFalse);
1616 // Map the page in the page array entry as this is always the physical
1617 // page that the memory object's page should be mapped to.
1618 pte = pagePhys|aBlankPte;
1619 TRACE2(("!PTE %x=%x",aPtePtr,pte));
1623 CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1628 Modify page table entries (PTEs) so they no longer map any memory pages.
1630 @param aPtePtr Pointer into a page table for the PTE of the first page.
1631 @param aCount The number of pages to modify.
1633 @return False, if the page table no longer maps any entries and may be freed.
1634 True otherwise, to indicate that the page table is still needed.
1637 @post #MmuLock held and has not been released by this function.
1639 TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount)
1641 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1642 __NK_ASSERT_DEBUG(aCount);
1647 if(*aPtePtr==KPteUnallocatedEntry)
1648 return true; // page already unmapped
1652 TPte pte = KPteUnallocatedEntry;
1653 TRACE2(("!PTE %x=%x",aPtePtr,pte));
1657 CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1661 // check we are only updating a single page table...
1662 __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1665 TPte* pPte = aPtePtr;
1666 TPte* pPteEnd = aPtePtr+aCount;
1669 if(*pPte!=KPteUnallocatedEntry)
1673 TPte pte = KPteUnallocatedEntry;
1674 TRACE2(("!PTE %x=%x",pPte,pte));
1678 while(++pPte<pPteEnd);
1681 return true; // no PTEs changed, so nothing more to do
1684 CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1687 // update page table info...
1688 SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
1689 count = pti->DecPageCount(count);
1690 TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
1691 __NK_ASSERT_DEBUG(pti->CheckPageCount());
1693 // see if page table needs freeing...
1694 TUint keepPt = count | pti->PermanenceCount();
1701 Modify page table entries (PTEs) so they no longer map the given memory pages.
1702 Entries are only updated if the current state of the corresponding page
1703 is 'decommitted' i.e. RPageArray::TargetStateIsDecommitted returns true.
1705 @param aPtePtr Pointer into a page table for the PTE of the first page.
1706 @param aCount The number of pages to modify.
1707 @param aPages Pointer to the entry for the first page in a memory object's #RPageArray.
1708 Each entry contains the physical address of a page together with its
1709 current state (RPageArray::TState).
1711 @return False, if the page table no longer maps any entries and may be freed.
1712 True otherwise, to indicate that the page table is still needed.
1715 @post #MmuLock held and has not been released by this function.
1717 TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
1719 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1720 __NK_ASSERT_DEBUG(aCount);
1725 if(*aPtePtr==KPteUnallocatedEntry)
1726 return true; // page already unmapped
1728 if(!RPageArray::TargetStateIsDecommitted(*aPages))
1729 return true; // page has been reallocated
1733 TPte pte = KPteUnallocatedEntry;
1734 TRACE2(("!PTE %x=%x",aPtePtr,pte));
1738 CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1742 // check we are only updating a single page table...
1743 __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1746 TPte* pPte = aPtePtr;
1747 TPte* pPteEnd = aPtePtr+aCount;
1750 if(RPageArray::TargetStateIsDecommitted(*aPages++) && *pPte!=KPteUnallocatedEntry)
1754 TPte pte = KPteUnallocatedEntry;
1755 TRACE2(("!PTE %x=%x",pPte,pte));
1759 while(++pPte<pPteEnd);
1762 return true; // no PTEs changed, so nothing more to do
1765 CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1768 // update page table info...
1769 SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
1770 count = pti->DecPageCount(count);
1771 TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
1772 __NK_ASSERT_DEBUG(pti->CheckPageCount());
1774 // see if page table needs freeing...
1775 TUint keepPt = count | pti->PermanenceCount();
1782 Modify page table entries (PTEs) so the given memory pages are not accessible.
1783 Entries are only updated if the current state of the corresponding page
1784 is RPageArray::ERestrictingNA.
1786 @param aPtePtr Pointer into a page table for the PTE of the first page.
1787 @param aCount The number of pages to modify.
1788 @param aPages Pointer to the entry for the first page in a memory object's #RPageArray.
1789 Each entry contains the physical address of a page together with its
1790 current state (RPageArray::TState).
1793 @post #MmuLock held and has not been released by this function.
1795 void Mmu::RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
1797 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1798 __NK_ASSERT_DEBUG(aCount);
1802 TPhysAddr page = *aPages;
1803 TPte pte = *aPtePtr;
1804 RPageArray::TState state = RPageArray::State(page);
1805 if(state != RPageArray::ERestrictingNA && state != RPageArray::EMoving)
1806 return; // page no longer needs restricting
1808 if(pte==KPteUnallocatedEntry)
1809 return; // page gone
1812 pte = Mmu::MakePteInaccessible(pte,false);
1813 TRACE2(("!PTE %x=%x",aPtePtr,pte));
1817 CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1821 // check we are only updating a single page table...
1822 __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1824 // restrict pages...
1825 TPte* pPte = aPtePtr;
1826 TPte* pPteEnd = aPtePtr+aCount;
1829 TPhysAddr page = *aPages++;
1831 if(RPageArray::State(page)==RPageArray::ERestrictingNA && pte!=KPteUnallocatedEntry)
1833 pte = Mmu::MakePteInaccessible(pte,false);
1834 TRACE2(("!PTE %x=%x",pPte-1,pte));
1838 while(pPte<pPteEnd);
1841 CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1847 Modify page table entries (PTEs) so they map the given demand paged memory pages.
1849 Entries are only updated if the current state of the corresponding page
1850 is RPageArray::ECommitted.
1852 This function is used for demand paged memory when handling a page fault or
1853 memory pinning operation. It will widen the access permission of existing entries
1854 if required to match \a aBlankPte and will 'rejuvenate' the page table.
1856 @param aPtePtr Pointer into a page table for the PTE of the first page.
1857 @param aCount The number of pages to modify.
1858 @param aPages Pointer to the entry for the first page in a memory object's #RPageArray.
1859 Each entry contains the physical address of a page together with its
1860 current state (RPageArray::TState).
1861 @param aBlankPte The value to use for each PTE, with the physical address component equal
1864 @return False, if the page table no longer maps any entries and may be freed.
1865 True otherwise, to indicate that the page table is still needed.
1868 @post MmuLock held (but may have been released by this function)
1870 TBool Mmu::PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
1872 __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1873 __NK_ASSERT_DEBUG(aCount);
1874 __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
1880 // get page to map...
1881 TPhysAddr page = *aPages;
1882 TPte pte = *aPtePtr;
1883 if(!RPageArray::TargetStateIsCommitted(page))
1884 goto done; // page no longer needs mapping
1887 if(pte!=KPteUnallocatedEntry)
1889 if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
1890 !Mmu::IsPteReadOnly(pte))
1892 // Page has been mapped before but the physical address is different
1893 // and the page hasn't been moved as it is not inaccessible.
1894 Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
1895 __NK_ASSERT_DEBUG(0);
1899 if(!Mmu::IsPteMoreAccessible(aBlankPte,pte))
1900 return true; // return true to keep page table (it already had at least one page mapped)
1902 // remap page with new increased permissions...
1903 if(pte==KPteUnallocatedEntry)
1904 count = 1; // we'll be adding a new pte entry, count it
1905 if(!Mmu::IsPteReadOnly(aBlankPte))
1906 ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
1907 pte = (page&~KPageMask)|aBlankPte;
1908 TRACE2(("!PTE %x=%x",aPtePtr,pte));
1912 CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1916 // check we are only updating a single page table...
1917 __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1920 TPte* pPte = aPtePtr;
1921 TPte* pPteEnd = aPtePtr+aCount;
1925 TPhysAddr page = *aPages++;
1927 if(RPageArray::TargetStateIsCommitted(page))
1930 if(pte!=KPteUnallocatedEntry)
1932 if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
1933 !Mmu::IsPteReadOnly(pte))
1935 // Page has been mapped before but the physical address is different
1936 // and the page hasn't been moved as it is not inaccessible.
1937 Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
1938 __NK_ASSERT_DEBUG(0);
1942 if(Mmu::IsPteMoreAccessible(aBlankPte,pte))
1944 // remap page with new increased permissions...
1945 if(pte==KPteUnallocatedEntry)
1946 ++count; // we'll be adding a new pte entry, count it
1947 if(!Mmu::IsPteReadOnly(aBlankPte))
1948 ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
1949 pte = (page&~KPageMask)|aBlankPte;
1950 TRACE2(("!PTE %x=%x",pPte-1,pte));
1955 while(pPte!=pPteEnd);
1958 CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1962 // update page counts...
1963 SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
1964 count = pti->IncPageCount(count);
1965 TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
1966 __NK_ASSERT_DEBUG(pti->CheckPageCount());
1968 // see if page table needs freeing...
1969 TUint keepPt = count | pti->PermanenceCount();
1971 // rejuvenate demand paged page tables...
1972 ThePager.RejuvenatePageTable(aPtePtr);
1982 #ifdef __DEBUGGER_SUPPORT__
1984 void DoWriteCode(TUint32* aAddress, TUint32 aValue);
1988 extern "C" void __e32_instruction_barrier();
1990 class TCodeModifierBroadcast : public TGenericIPI
1993 TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue);
1994 static void Isr(TGenericIPI*);
1999 volatile TInt iFlag;
2002 TCodeModifierBroadcast::TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue)
2003 : iAddress(aAddress), iValue(aValue), iFlag(0)
2007 void TCodeModifierBroadcast::Isr(TGenericIPI* aPtr)
2009 TCodeModifierBroadcast& a = *(TCodeModifierBroadcast*)aPtr;
2010 while (!__e32_atomic_load_acq32(&a.iFlag))
2012 #ifdef __BROADCAST_CACHE_MAINTENANCE__
2013 CacheMaintenance::CodeChanged((TLinAddr)a.iAddress, sizeof (TInt), CacheMaintenance::ECodeModifier); // need to do separate Clean-D, Purge-I on each core
2015 __e32_instruction_barrier(); // synchronize instruction execution
2019 void TCodeModifierBroadcast::Go()
2022 QueueAllOther(&Isr);
2023 WaitEntry(); // wait for other cores to stop
2024 DoWriteCode(iAddress, iValue);
2026 __e32_instruction_barrier(); // synchronize instruction execution
2027 WaitCompletion(); // wait for other cores to resume
2033 @pre Calling thread must be in critical section
2034 @pre CodeSeg mutex held
2036 TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
2040 RamAllocLock::Lock();
2042 __UNLOCK_GUARD_START(MmuLock);
2044 // Check aProcess is still alive by opening a reference on its os asid.
2045 TInt osAsid = ((DMemModelProcess*)aProcess)->TryOpenOsAsid();
2048 __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - zombie process"));
2049 __UNLOCK_GUARD_END(MmuLock);
2051 RamAllocLock::Unlock();
2052 return KErrBadDescriptor;
2055 // Find the physical address of the page the breakpoint belongs to
2056 TPhysAddr physAddr = Mmu::LinearToPhysical(aAddress, osAsid);
2057 __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));
2060 if (physAddr==KPhysAddrInvalid)
2062 __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
2063 __UNLOCK_GUARD_END(MmuLock);
2065 RamAllocLock::Unlock();
2066 // The os asid is no longer required.
2067 ((DMemModelProcess*)aProcess)->CloseOsAsid();
2068 return KErrBadDescriptor;
2071 // Temporarily map the physical page
2072 TLinAddr tempAddr = m.MapTemp(physAddr&~KPageMask, aAddress>>KPageShift);
2073 tempAddr |= aAddress & KPageMask;
2074 __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));
2076 TInt r = KErrBadDescriptor;
2077 TUint32* ptr = (TUint32*)(tempAddr&~3);
2080 if(Kern::SafeRead(ptr,&oldWord,sizeof(oldWord))==0 // safely read the original value...
2081 && Kern::SafeWrite(ptr,&oldWord,sizeof(oldWord))==0 ) // and write it back
2083 // We have successfully probed the memory by reading and writing to it
2084 // so we assume it is now safe to access without generating exceptions.
2085 // If this is wrong it will kill the system horribly.
2089 TUint shift = (aAddress&3)*8;
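	// Note (descriptive comment, not in the original source): the new value is merged
	// into the aligned word. E.g. for a 1 byte write where (aAddress&3)==2, shift==16,
	// so the expression below clears bits 16..23 of oldWord and ORs in the new byte,
	// leaving the other three bytes unchanged.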
2093 case 1: // 1 byte value
2095 *(TUint8*)aOldValue = oldWord>>shift;
2096 newWord = (oldWord&~(0xff<<shift)) | ((aValue&0xff)<<shift);
2099 case 2: // 2 byte value
2100 badAlign = tempAddr&1;
2102 *(TUint16*)aOldValue = oldWord>>shift;
2103 newWord = (oldWord&~(0xffff<<shift)) | ((aValue&0xffff)<<shift);
2106 default: // 4 byte value
2107 badAlign = tempAddr&3;
2109 *(TUint32*)aOldValue = oldWord;
2116 // write the new value...
2118 TCodeModifierBroadcast b(ptr, newWord);
2121 DoWriteCode(ptr, newWord);
2127 __UNLOCK_GUARD_END(MmuLock);
2130 RamAllocLock::Unlock();
2131 // The os asid is no longer required.
2132 ((DMemModelProcess*)aProcess)->CloseOsAsid();
2137 @pre Calling thread must be in critical section
2138 @pre CodeSeg mutex held
2140 void DoWriteCode(TUint32* aAddress, TUint32 aValue)
2142 // We do not want to be interrupted by e.g. ISR that will run altered code before IMB-Range.
2143 // Therefore, copy data and clean/invalidate caches with interrupts disabled.
2144 TInt irq = NKern::DisableAllInterrupts();
2146 CacheMaintenance::CodeChanged((TLinAddr)aAddress, sizeof(TUint32), CacheMaintenance::ECodeModifier);
2147 NKern::RestoreInterrupts(irq);
2150 #endif //__DEBUGGER_SUPPORT__
2158 TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
2160 aPinObject = (TVirtualPinObject*)new DVirtualPinMapping;
2161 return aPinObject != NULL ? KErrNone : KErrNoMemory;
2164 TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
2167 TUint offsetInMapping;
2168 TUint mapInstanceCount;
2169 DMemoryMapping* mapping = MM::FindMappingInThread( (DMemModelThread*)aThread,
2174 TInt r = KErrBadDescriptor;
2177 TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
2178 if(mapping->IsPinned())
2180 // Mapping for specified virtual address is pinned so we don't need to
2181 // do anything. Also, we can't safely pin the memory in this case
2182 // anyway, as pinned mappings may move between memory objects
2188 DMemoryObject* memory = mapping->Memory();
2189 if (mapInstanceCount != mapping->MapInstanceCount() ||
2190 !memory || !memory->IsDemandPaged())
2192 // mapping has been reused, no memory, or it's not paged, so no need to pin...
2198 // paged memory needs pinning...
2199 // Open a reference on the memory so it doesn't get deleted.
2203 TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
2204 r = ((DVirtualPinMapping*)aPinObject)->Pin( memory, startInMemory, count, mapping->Permissions(),
2205 mapping, mapInstanceCount);
2214 TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
2218 TUint offsetInMapping;
2219 TUint mapInstanceCount;
2220 DMemoryMapping* mapping = MM::FindMappingInThread( (DMemModelThread*)&Kern::CurrentThread(),
2225 TInt r = KErrBadDescriptor;
2228 TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
2229 if(mapping->IsPinned())
2231 // The mapping for the specified virtual address is already pinned, so we don't
2232 // need to do anything. We couldn't safely pin the memory in this case anyway,
2233 // as pinned mappings may move between memory objects.
2239 DMemoryObject* memory = mapping->Memory();
2240 if (mapInstanceCount != mapping->MapInstanceCount() ||
2241 !memory || !memory->IsDemandPaged())
2243 // The mapping has been reused, has no memory, or is not demand paged, so there is no need to pin...
2248 {// The memory is demand paged so create a pin object and pin it.
2249 // Open a reference on the memory so it doesn't get deleted.
2252 r = CreateVirtualPinObject(aPinObject);
2255 TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
2256 r = ((DVirtualPinMapping*)aPinObject)->Pin( memory, startInMemory, count, mapping->Permissions(),
2257 mapping, mapInstanceCount);
2259 {// Failed to pin the memory so pin object is not required.
2260 DestroyVirtualPinObject(aPinObject);
2271 void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
2273 DVirtualPinMapping* mapping = (DVirtualPinMapping*)aPinObject;
2274 if (mapping->IsAttached())
2278 void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
2280 DVirtualPinMapping* mapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
2283 if (mapping->IsAttached())
2285 mapping->AsyncClose();
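// Illustrative sketch: the typical driver-side sequence for the virtual pinning API.
// Drivers normally reach the M:: routines above through exported Kern:: wrappers; the
// wrapper names and the locals 'bufAddr', 'bufSize' and 'clientThread' are assumptions
// for the example.
//
//	TVirtualPinObject* pin = NULL;
//	TInt r = Kern::CreateVirtualPinObject(pin);				// allocate the pin object
//	if (r == KErrNone)
//		{
//		// Page in and pin the client buffer so it cannot be paged out while accessed.
//		r = Kern::PinVirtualMemory(pin, bufAddr, bufSize, clientThread);
//		if (r == KErrNone)
//			{
//			// ...access the buffer without risk of paging faults...
//			Kern::UnpinVirtualMemory(pin);
//			}
//		Kern::DestroyVirtualPinObject(pin);					// sets pin back to NULL
//		}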
2293 TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
2295 aPinObject = (TPhysicalPinObject*)new DPhysicalPinMapping;
2296 return aPinObject != NULL ? KErrNone : KErrNoMemory;
2299 TInt M::PinPhysicalMemory(TPhysicalPinObject* aPinObject, TLinAddr aStart, TUint aSize, TBool aReadOnly,
2300 TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour, DThread* aThread)
2303 TUint offsetInMapping;
2304 TUint mapInstanceCount;
2305 DMemoryMapping* mapping = MM::FindMappingInThread( (DMemModelThread*)aThread,
2310 TInt r = KErrBadDescriptor;
2313 TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
2316 DMemoryObject* memory = mapping->Memory();
2317 if (mapInstanceCount == mapping->MapInstanceCount() && memory)
2322 TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
2323 TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
2324 r = ((DPhysicalPinMapping*)aPinObject)->Pin(memory, startInMemory, count, permissions);
2327 r = ((DPhysicalPinMapping*)aPinObject)->PhysAddr(0, count, aAddress, aPages);
2330 r = KErrNone; //Do not report discontiguous memory in return value.
2331 const TMappingAttributes2& mapAttr2 =
2332 MM::LegacyMappingAttributes(memory->Attributes(), mapping->Permissions());
2333 *(TMappingAttributes2*)&aMapAttr = mapAttr2;
2336 UnpinPhysicalMemory(aPinObject);
2340 else // mapping has been reused or no memory...
2346 aColour = (aStart >> KPageShift) & KPageColourMask;
2350 void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
2352 DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)aPinObject;
2353 if (mapping->IsAttached())
2357 void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
2359 DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
2362 if (mapping->IsAttached())
2364 mapping->AsyncClose();
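// Illustrative sketch: physical pinning as used by, e.g., a DMA driver. The parameters
// mirror M::PinPhysicalMemory() above; the Kern:: wrapper names and the locals
// ('bufAddr', 'bufSize', 'clientThread', KMaxExamplePages) are assumptions for the example.
//
//	TPhysicalPinObject* pin = NULL;
//	TInt r = Kern::CreatePhysicalPinObject(pin);
//	if (r == KErrNone)
//		{
//		TPhysAddr physAddr;						// set if the region is physically contiguous
//		TPhysAddr pages[KMaxExamplePages];		// one entry per pinned page
//		TUint32 mapAttr;
//		TUint colour;
//		r = Kern::PinPhysicalMemory(pin, bufAddr, bufSize, EFalse /*writable*/,
//		                            physAddr, pages, mapAttr, colour, clientThread);
//		if (r == KErrNone)
//			{
//			// ...program the DMA controller using 'pages'...
//			Kern::UnpinPhysicalMemory(pin);
//			}
//		Kern::DestroyPhysicalPinObject(pin);
//		}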
2370 // Kernel map and pin.
2373 TInt M::CreateKernelMapObject(TKernelMapObject*& aMapObject, TUint aMaxReserveSize)
2375 DKernelPinMapping* pinObject = new DKernelPinMapping();
2376 aMapObject = (TKernelMapObject*) pinObject;
2377 if (pinObject == NULL)
2379 return KErrNoMemory;
2381 // Ensure we reserve enough bytes for all possible alignments of the start and
2382 // end of the region to map.
2383 TUint reserveBytes = aMaxReserveSize ? ((aMaxReserveSize + KPageMask) & ~KPageMask) + KPageSize : 0;
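	// For example, with 4KB pages an aMaxReserveSize of 8192 bytes reserves 12288 bytes
	// (3 pages), because an 8KB region that starts part-way through a page spans three
	// pages in the worst case.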
2384 TInt r = pinObject->Construct(reserveBytes);
2386 {// Failed so delete the kernel mapping object.
2394 TInt M::MapAndPinMemory(TKernelMapObject* aMapObject, DThread* aThread, TLinAddr aStart,
2395 TUint aSize, TUint aMapAttributes, TLinAddr& aKernelAddr, TPhysAddr* aPages)
2398 TUint offsetInMapping;
2399 TUint mapInstanceCount;
2400 DMemoryMapping* mapping = MM::FindMappingInThread( (DMemModelThread*)aThread,
2405 TInt r = KErrBadDescriptor;
2408 DKernelPinMapping* kernelMap = (DKernelPinMapping*)aMapObject;
2409 TInt count = (((aStart + aSize + KPageMask) & ~KPageMask) - (aStart & ~KPageMask)) >> KPageShift;
2410 if (kernelMap->iReservePages && kernelMap->iReservePages < count)
2413 return KErrArgument;
2417 DMemoryObject* memory = mapping->Memory();
2418 if (mapInstanceCount == mapping->MapInstanceCount() && memory)
2423 TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
2424 TBool readOnly = aMapAttributes & Kern::EKernelMap_ReadOnly;
2425 TMappingPermissions permissions = readOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
2426 r = kernelMap->MapAndPin(memory, startInMemory, count, permissions);
2429 __NK_ASSERT_DEBUG(!kernelMap->IsUserMapping());
2430 aKernelAddr = kernelMap->Base();
2431 TPhysAddr contigAddr; // Ignore this value as aPages will be populated
2432 // whether the memory is contiguous or not.
2433 r = kernelMap->PhysAddr(0, count, contigAddr, aPages);
2436 r = KErrNone; //Do not report discontiguous memory in return value.
2440 UnmapAndUnpinMemory((TKernelMapObject*)kernelMap);
2445 else // mapping has been reused or no memory...
2455 void M::UnmapAndUnpinMemory(TKernelMapObject* aMapObject)
2457 DKernelPinMapping* mapping = (DKernelPinMapping*)aMapObject;
2458 if (mapping->IsAttached())
2459 mapping->UnmapAndUnpin();
2463 void M::DestroyKernelMapObject(TKernelMapObject*& aMapObject)
2465 DKernelPinMapping* mapping = (DKernelPinMapping*)__e32_atomic_swp_ord_ptr(&aMapObject, 0);
2468 if (mapping->IsAttached())
2469 mapping->UnmapAndUnpin();
2470 mapping->AsyncClose();
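// Illustrative sketch: mapping a client buffer into the kernel with the kernel-map API
// so it can be accessed from any context via a kernel address. The Kern:: wrapper names
// and the locals ('maxTransferSize', 'bufAddr', 'bufSize', 'clientThread',
// KMaxExamplePages) are assumptions for the example; the parameters mirror
// M::CreateKernelMapObject() and M::MapAndPinMemory() above.
//
//	TKernelMapObject* kernelMap = NULL;
//	TInt r = Kern::CreateKernelMapObject(kernelMap, maxTransferSize);	// pre-reserve resources
//	if (r == KErrNone)
//		{
//		TLinAddr kernAddr;
//		TPhysAddr pages[KMaxExamplePages];
//		r = Kern::MapAndPinMemory(kernelMap, clientThread, bufAddr, bufSize,
//		                          0 /*aMapAttributes*/, kernAddr, pages);
//		if (r == KErrNone)
//			{
//			// ...copy to or from the buffer through kernAddr...
//			Kern::UnmapAndUnpinMemory(kernelMap);
//			}
//		Kern::DestroyKernelMapObject(kernelMap);
//		}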
2476 // Cache sync operations
2479 //@pre As for MASK_THREAD_STANDARD
2480 void Mmu::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
2482 //Jump over the whole pages we do not have to sync, advancing the page colour past the skipped pages before aOffset is masked down to an in-page offset.
2483 aPages += aOffset>>KPageShift;
2484 aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
2485 aOffset &= KPageMask;
2487 //Calculate page table entry for the temporary mapping.
2488 TUint pteType = PteType(ESupervisorReadWrite,true);
2489 TMappingAttributes2 mapAttr2(aMapAttr);
2490 TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
2492 while (aSize) //A single pass of loop operates within page boundaries.
2494 TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
2496 NKern::ThreadEnterCS();
2497 Kern::MutexWait(*iPhysMemSyncMutex);
2499 TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
2500 CacheMaintenance::MakeCPUChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
2501 iPhysMemSyncTemp.Unmap();
2503 Kern::MutexSignal(*iPhysMemSyncMutex);
2504 NKern::ThreadLeaveCS();
2506 aSize-=sizeInLoopPass; // Remaining bytes to sync
2507 aOffset=0; // In all the pages after the first, sync will always start with zero offset.
2508 aPages++; // Point to the next page
2509 aColour = (aColour+1) & KPageColourMask;
2513 //@pre As for MASK_THREAD_STANDARD
2514 void Mmu::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
2516 //Jump over the whole pages we do not have to sync, advancing the page colour past the skipped pages before aOffset is masked down to an in-page offset.
2517 aPages += aOffset>>KPageShift;
2518 aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
2519 aOffset &= KPageMask;
2521 //Calculate page table entry for the temporary mapping.
2522 TUint pteType = PteType(ESupervisorReadWrite,true);
2523 TMappingAttributes2 mapAttr2(aMapAttr);
2524 TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
2526 while (aSize) //A single pass of loop operates within page boundaries.
2528 TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
2530 NKern::ThreadEnterCS();
2531 Kern::MutexWait(*iPhysMemSyncMutex);
2533 TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
2534 CacheMaintenance::PrepareMemoryForExternalWrites(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
2535 iPhysMemSyncTemp.Unmap();
2537 Kern::MutexSignal(*iPhysMemSyncMutex);
2538 NKern::ThreadLeaveCS();
2540 aSize-=sizeInLoopPass; // Remaining bytes to sync
2541 aOffset=0; // In all the pages after the first, sync will always start with zero offset.
2542 aPages++; // Point to the next page
2543 aColour = (aColour+1) & KPageColourMask;
2547 //@pre As for MASK_THREAD_STANDARD
2548 void Mmu::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
2550 //Jump over the whole pages we do not have to sync, advancing the page colour past the skipped pages before aOffset is masked down to an in-page offset.
2551 aPages += aOffset>>KPageShift;
2552 aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
2553 aOffset &= KPageMask;
2555 //Calculate page table entry for the temporary mapping.
2556 TUint pteType = PteType(ESupervisorReadWrite,true);
2557 TMappingAttributes2 mapAttr2(aMapAttr);
2558 TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
2560 while (aSize) //A single pass of loop operates within page boundaries.
2562 TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
2564 NKern::ThreadEnterCS();
2565 Kern::MutexWait(*iPhysMemSyncMutex);
2567 TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
2568 CacheMaintenance::MakeExternalChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
2569 iPhysMemSyncTemp.Unmap();
2571 Kern::MutexSignal(*iPhysMemSyncMutex);
2572 NKern::ThreadLeaveCS();
2574 aSize-=sizeInLoopPass; // Remaining bytes to sync
2575 aOffset=0; // In all the pages after the first, sync will always start with zero offset.
2576 aPages++; // Point to the next page
2577 aColour = (aColour+1) & KPageColourMask;
2581 EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
2583 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
2584 TheMmu.SyncPhysicalMemoryBeforeDmaWrite(aPages, aColour, aOffset, aSize, aMapAttr);
2588 EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
2590 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
2591 TheMmu.SyncPhysicalMemoryBeforeDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
2595 EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
2597 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
2598 TheMmu.SyncPhysicalMemoryAfterDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
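// Illustrative sketch: the order in which a driver is expected to call these exports
// around DMA transfers on memory it has pinned (variable names are assumptions; the
// parameters match the exports above).
//
//	// Outgoing transfer (memory to peripheral): make CPU-side writes visible to the DMA engine.
//	Cache::SyncPhysicalMemoryBeforeDmaWrite(pages, colour, offset, size, mapAttr);
//	// ...start and complete the DMA write...
//
//	// Incoming transfer (peripheral to memory): prepare beforehand, then discard stale
//	// cache contents once the data has arrived.
//	Cache::SyncPhysicalMemoryBeforeDmaRead(pages, colour, offset, size, mapAttr);
//	// ...start and complete the DMA read...
//	Cache::SyncPhysicalMemoryAfterDmaRead(pages, colour, offset, size, mapAttr);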