// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\mmubase\mmubase.cpp
//
//

#include
#include
#include
#include
#include "cache_maintenance.h"
#include "highrestimer.h"
#include
#include


__ASSERT_COMPILE(sizeof(SPageInfo)==(1<iChunkMask;
	return (aSize+mask)&~mask;
	}

TInt MmuBase::RoundUpRangeToPageSize(TUint32& aBase, TUint32& aSize)
	{
	TUint32 mask=KPageMask;
	TUint32 shift=KPageShift;
	TUint32 offset=aBase&mask;
	aBase&=~mask;
	aSize=(aSize+offset+mask)&~mask;
	return TInt(aSize>>shift);
	}

void MmuBase::Wait()
	{
	Kern::MutexWait(*RamAllocatorMutex);
	if (RamAllocatorMutex->iHoldCount==1)
		{
		MmuBase& m=*TheMmu;
		m.iInitialFreeMemory=Kern::FreeRamInBytes();
		m.iAllocFailed=EFalse;
		}
	}

void MmuBase::Signal()
	{
	if (RamAllocatorMutex->iHoldCount>1)
		{
		Kern::MutexSignal(*RamAllocatorMutex);
		return;
		}
	MmuBase& m=*TheMmu;
	TInt initial=m.iInitialFreeMemory;
	TBool failed=m.iAllocFailed;
	TInt final=Kern::FreeRamInBytes();
	Kern::MutexSignal(*RamAllocatorMutex);
	K::CheckFreeMemoryLevel(initial,final,failed);
	}

void MmuBase::WaitHwChunk()
	{
	Kern::MutexWait(*HwChunkMutex);
	}

void MmuBase::SignalHwChunk()
	{
	Kern::MutexSignal(*HwChunkMutex);
	}


void MmuBase::MapRamPage(TLinAddr aAddr, TPhysAddr aPage, TPte aPtePerm)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapRamPage %08x@%08x perm %08x", aPage, aAddr, aPtePerm));
	TInt ptid=PageTableId(aAddr);
	NKern::LockSystem();
	MapRamPages(ptid,SPageInfo::EInvalid,0,aAddr,&aPage,1,aPtePerm);
	NKern::UnlockSystem();
	}

//
// Unmap and free pages from a global area
//
void MmuBase::UnmapAndFree(TLinAddr aAddr, TInt aNumPages)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MmuBase::UnmapAndFree(%08x,%d)",aAddr,aNumPages));
	while(aNumPages)
		{
		TInt pt_np=(iChunkSize-(aAddr&iChunkMask))>>iPageShift;
		TInt np=Min(aNumPages,pt_np);
		aNumPages-=np;
		TInt id=PageTableId(aAddr);
		if (id>=0)
			{
			while(np)
				{
				TInt np2=Min(np,KFreePagesStepSize);
				TPhysAddr phys[KFreePagesStepSize];
				TInt nptes;
				TInt nfree;
				NKern::LockSystem();
				UnmapPages(id,aAddr,np2,phys,true,nptes,nfree,NULL);
				NKern::UnlockSystem();
				if (nfree)
					{
					if (iDecommitThreshold)
						CacheMaintenanceOnDecommit(phys, nfree);
					iRamPageAllocator->FreeRamPages(phys,nfree,EPageFixed);
					}
				np-=np2;
				aAddr+=(np2<SetUnused();
		if (pi->LockCount())
			ppa[-1]=KPhysAddrInvalid;	// don't free page if it's locked down
		else if (sync_decommit)
			{
			NKern::UnlockSystem();
			CacheMaintenanceOnDecommit(pa);
			NKern::LockSystem();
} sl@0: } sl@0: if (!sync_decommit) sl@0: NKern::FlashSystem(); sl@0: } sl@0: NKern::UnlockSystem(); sl@0: if (iDecommitThreshold && !sync_decommit) sl@0: CacheMaintenance::SyncPhysicalCache_All(); sl@0: iRamPageAllocator->FreeRamPages(aPageList,aCount, aPageType); sl@0: } sl@0: sl@0: TInt MmuBase::InitPageTableInfo(TInt aId) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::InitPageTableInfo(%x)",aId)); sl@0: TInt ptb=aId>>iPtBlockShift; sl@0: if (++iPtBlockCount[ptb]==1) sl@0: { sl@0: // expand page table info array sl@0: TPhysAddr pagePhys; sl@0: if (AllocRamPages(&pagePhys,1, EPageFixed)!=KErrNone) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page")); sl@0: iPtBlockCount[ptb]=0; sl@0: iAllocFailed=ETrue; sl@0: return KErrNoMemory; sl@0: } sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<SetPtInfo(ptb); sl@0: NKern::UnlockSystem(); sl@0: MapRamPage(pil, pagePhys, iPtInfoPtePerm); sl@0: memclr((TAny*)pil, iPageSize); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: TInt MmuBase::DoAllocPageTable(TPhysAddr& aPhysAddr) sl@0: // sl@0: // Allocate a new page table but don't map it. sl@0: // Return page table id and page number/phys address of new page if any. sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoAllocPageTable()")); sl@0: #ifdef _DEBUG sl@0: if(K::CheckForSimulatedAllocFail()) sl@0: return KErrNoMemory; sl@0: #endif sl@0: TInt id=iPageTableAllocator?iPageTableAllocator->Alloc():-1; sl@0: if (id<0) sl@0: { sl@0: // need to allocate a new page sl@0: if (AllocRamPages(&aPhysAddr,1, EPageFixed)!=KErrNone) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page")); sl@0: iAllocFailed=ETrue; sl@0: return KErrNoMemory; sl@0: } sl@0: sl@0: // allocate an ID for the new page sl@0: id=iPageTableLinearAllocator->Alloc(); sl@0: if (id>=0) sl@0: { sl@0: id<<=iPtClusterShift; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Allocated ID %04x",id)); sl@0: } sl@0: if (id<0 || InitPageTableInfo(id)!=KErrNone) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page table info")); sl@0: iPageTableLinearAllocator->Free(id>>iPtClusterShift); sl@0: if (iDecommitThreshold) sl@0: CacheMaintenanceOnDecommit(aPhysAddr); sl@0: sl@0: iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed); sl@0: iAllocFailed=ETrue; sl@0: return KErrNoMemory; sl@0: } sl@0: sl@0: // Set up page info for new page sl@0: NKern::LockSystem(); sl@0: SPageInfo::FromPhysAddr(aPhysAddr)->SetPageTable(id>>iPtClusterShift); sl@0: NKern::UnlockSystem(); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<1) sl@0: iPageTableAllocator->Free(id+1,iPtClusterSize-1); sl@0: } sl@0: else sl@0: aPhysAddr=KPhysAddrInvalid; sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("DoAllocPageTable returns %d (%08x)",id,aPhysAddr)); sl@0: PtInfo(id).SetUnused(); sl@0: return id; sl@0: } sl@0: sl@0: TInt MmuBase::MapPageTable(TInt aId, TPhysAddr aPhysAddr, TBool aAllowExpand) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapPageTable(%d,%08x)",aId,aPhysAddr)); sl@0: TLinAddr ptLin=PageTableLinAddr(aId); sl@0: TInt ptg=aId>>iPtGroupShift; sl@0: if (++iPtGroupCount[ptg]==1) sl@0: { sl@0: // need to allocate a new page table sl@0: __ASSERT_ALWAYS(aAllowExpand, Panic(EMapPageTableBadExpand)); sl@0: TPhysAddr xptPhys; sl@0: TInt xptid=DoAllocPageTable(xptPhys); sl@0: if (xptid<0) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate extra page table")); sl@0: iPtGroupCount[ptg]=0; 
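			// the group count was incremented above; reset it so a later
			// MapPageTable() call will retry allocating the extra page table
			// needed to map this group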
sl@0: return KErrNoMemory; sl@0: } sl@0: if (xptPhys==KPhysAddrInvalid) sl@0: xptPhys=aPhysAddr + ((xptid-aId)<SetUnused(); sl@0: NKern::UnlockSystem(); sl@0: if (iDecommitThreshold) sl@0: CacheMaintenanceOnDecommit(ptPhys); sl@0: sl@0: iRamPageAllocator->FreeRamPage(ptPhys, EPageFixed); sl@0: return r; sl@0: } sl@0: } sl@0: ClearPageTable(id); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocPageTable returns %d",id)); sl@0: return id; sl@0: } sl@0: sl@0: TBool MmuBase::DoFreePageTable(TInt aId) sl@0: // sl@0: // Free an empty page table. We assume that all pages mapped by the page table have sl@0: // already been unmapped and freed. sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoFreePageTable(%d)",aId)); sl@0: SPageTableInfo& s=PtInfo(aId); sl@0: __NK_ASSERT_DEBUG(!s.iCount); // shouldn't have any pages mapped sl@0: s.SetUnused(); sl@0: sl@0: TInt id=aId &~ iPtClusterMask; sl@0: if (iPageTableAllocator) sl@0: { sl@0: iPageTableAllocator->Free(aId); sl@0: if (iPageTableAllocator->NotFree(id,iPtClusterSize)) sl@0: { sl@0: // some subpages still in use sl@0: return ETrue; sl@0: } sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Freeing whole page, id=%d",id)); sl@0: // whole page is now free sl@0: // remove it from the page table allocator sl@0: iPageTableAllocator->Alloc(id,iPtClusterSize); sl@0: } sl@0: sl@0: TInt ptb=aId>>iPtBlockShift; sl@0: if (--iPtBlockCount[ptb]==0) sl@0: { sl@0: // shrink page table info array sl@0: TLinAddr pil=PtInfoBlockLinAddr(ptb); sl@0: UnmapAndFree(pil,1); // remove PTE, null page info, free page sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<Free(id>>iPtClusterShift); sl@0: return EFalse; sl@0: } sl@0: sl@0: void MmuBase::FreePageTable(TInt aId) sl@0: // sl@0: // Free an empty page table. We assume that all pages mapped by the page table have sl@0: // already been unmapped and freed. sl@0: // sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePageTable(%d)",aId)); sl@0: if (DoFreePageTable(aId)) sl@0: return; sl@0: sl@0: TInt id=aId &~ iPtClusterMask; sl@0: sl@0: // calculate linear address of page sl@0: TLinAddr ptLin=PageTableLinAddr(id); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Page lin %08x",ptLin)); sl@0: sl@0: // unmap and free the page sl@0: UnmapAndFree(ptLin,1); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<>iPtGroupShift; sl@0: --iPtGroupCount[ptg]; sl@0: // don't shrink the page table mapping for now sl@0: } sl@0: sl@0: TInt MmuBase::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() size=%x align=%d",aSize,aAlign)); sl@0: TInt r=AllocContiguousRam(aSize, aPhysAddr, EPageFixed, aAlign); sl@0: if (r!=KErrNone) sl@0: { sl@0: iAllocFailed=ETrue; sl@0: return r; sl@0: } sl@0: TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift); sl@0: SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr); sl@0: SPageInfo* pE=pI+n; sl@0: for (; pIType()==SPageInfo::EUnused); sl@0: pI->Lock(); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: /** Attempt to allocate a contiguous block of RAM from the specified zone. sl@0: sl@0: @param aZoneIdList An array of the IDs of the RAM zones to allocate from. sl@0: @param aZoneIdCount The number of RAM zone IDs listed in aZoneIdList. 
sl@0: @param aSize The number of contiguous bytes to allocate sl@0: @param aPhysAddr The physical address of the start of the contiguous block of sl@0: memory allocated sl@0: @param aAlign Required alignment sl@0: @return KErrNone on success, KErrArgument if zone doesn't exist or aSize is larger than the sl@0: size of the RAM zone or KErrNoMemory when the RAM zone is too full. sl@0: */ sl@0: TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() size=0x%x align=%d", aSize, aAlign)); sl@0: TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, EPageFixed, aAlign); sl@0: if (r!=KErrNone) sl@0: { sl@0: iAllocFailed=ETrue; sl@0: return r; sl@0: } sl@0: TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift); sl@0: SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr); sl@0: SPageInfo* pE=pI+n; sl@0: for (; pIType()==SPageInfo::EUnused); sl@0: pI->Lock(); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: /** Attempt to allocate discontiguous RAM pages. sl@0: sl@0: @param aNumPages The number of pages to allocate. sl@0: @param aPageList Pointer to an array where each element will be the physical sl@0: address of each page allocated. sl@0: @return KErrNone on success, KErrNoMemory otherwise sl@0: */ sl@0: TInt MmuBase::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() numpages=%x", aNumPages)); sl@0: TInt r = AllocRamPages(aPageList, aNumPages, EPageFixed); sl@0: if (r!=KErrNone) sl@0: { sl@0: iAllocFailed=ETrue; sl@0: return r; sl@0: } sl@0: TPhysAddr* pageEnd = aPageList + aNumPages; sl@0: for (TPhysAddr* page = aPageList; page < pageEnd; page++) sl@0: { sl@0: SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page); sl@0: NKern::LockSystem(); sl@0: __NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused); sl@0: pageInfo->Lock(); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: /** Attempt to allocate discontiguous RAM pages from the specified RAM zones. sl@0: sl@0: @param aZoneIdList An array of the IDs of the RAM zones to allocate from. sl@0: @param aZoneIdCount The number of RAM zone IDs listed in aZoneIdList. sl@0: @param aNumPages The number of pages to allocate. sl@0: @param aPageList Pointer to an array where each element will be the physical sl@0: address of each page allocated. sl@0: @return KErrNone on success, KErrArgument if zone doesn't exist or aNumPages is sl@0: larger than the total number of pages in the RAM zone or KErrNoMemory when the RAM sl@0: zone is too full. 
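
A minimal usage sketch (the zone IDs and size below are illustrative assumptions,
not values defined by this source); the RAM allocator mutex is held around the
call, as the exported Epoc::ZoneAllocPhysicalRam() wrapper in this file does:
@code
	TUint zones[2] = { 5, 6 };		// hypothetical RAM zone IDs
	TPhysAddr base;
	MmuBase& m = *MmuBase::TheMmu;
	MmuBase::Wait();
	TInt r = m.ZoneAllocPhysicalRam(zones, 2, 4*KPageSize, base, 0);
	MmuBase::Signal();
	if (r == KErrNone)
		{
		// a contiguous block of 4 pages starts at physical address 'base';
		// release it later with FreePhysicalRam(base, 4*KPageSize)
		}
@endcode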
sl@0: */ sl@0: TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() numpages 0x%x zones 0x%x", aNumPages, aZoneIdCount)); sl@0: TInt r = ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed); sl@0: if (r!=KErrNone) sl@0: { sl@0: iAllocFailed=ETrue; sl@0: return r; sl@0: } sl@0: sl@0: TPhysAddr* pageEnd = aPageList + aNumPages; sl@0: for (TPhysAddr* page = aPageList; page < pageEnd; page++) sl@0: { sl@0: SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page); sl@0: NKern::LockSystem(); sl@0: __NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused); sl@0: pageInfo->Lock(); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: TInt MmuBase::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%x)",aPhysAddr,aSize)); sl@0: sl@0: TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift); sl@0: SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr); sl@0: SPageInfo* pE=pI+n; sl@0: for (; pIType()==SPageInfo::EUnused && pI->Unlock()==0, Panic(EBadFreePhysicalRam)); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: TInt r=iRamPageAllocator->FreePhysicalRam(aPhysAddr, aSize); sl@0: return r; sl@0: } sl@0: sl@0: /** Free discontiguous RAM pages that were previously allocated using discontiguous sl@0: overload of MmuBase::AllocPhysicalRam() or MmuBase::ZoneAllocPhysicalRam(). sl@0: sl@0: Specifying one of the following may cause the system to panic: sl@0: a) an invalid physical RAM address. sl@0: b) valid physical RAM addresses where some had not been previously allocated. sl@0: c) an adrress not aligned to a page boundary. sl@0: sl@0: @param aNumPages Number of pages to free sl@0: @param aPageList Array of the physical address of each page to free sl@0: sl@0: @return KErrNone if the operation was successful. sl@0: sl@0: */ sl@0: TInt MmuBase::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%08x)", aNumPages, aPageList)); sl@0: sl@0: TPhysAddr* pageEnd = aPageList + aNumPages; sl@0: TInt r = KErrNone; sl@0: sl@0: for (TPhysAddr* page = aPageList; page < pageEnd && r == KErrNone; page++) sl@0: { sl@0: SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page); sl@0: NKern::LockSystem(); sl@0: __ASSERT_ALWAYS(pageInfo->Type()==SPageInfo::EUnused && pageInfo->Unlock()==0, Panic(EBadFreePhysicalRam)); sl@0: NKern::UnlockSystem(); sl@0: sl@0: // Free the page sl@0: r = iRamPageAllocator->FreePhysicalRam(*page, KPageSize); sl@0: } sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TInt MmuBase::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(%08x,%x)",aPhysAddr,aSize)); sl@0: TUint32 pa=aPhysAddr; sl@0: TUint32 size=aSize; sl@0: TInt n=RoundUpRangeToPageSize(pa,size); sl@0: TInt r=iRamPageAllocator->ClaimPhysicalRam(pa, size); sl@0: if (r==KErrNone) sl@0: { sl@0: SPageInfo* pI=SPageInfo::FromPhysAddr(pa); sl@0: SPageInfo* pE=pI+n; sl@0: for (; pIType()==SPageInfo::EUnused && pI->LockCount()==0); sl@0: pI->Lock(); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: } sl@0: return r; sl@0: } sl@0: sl@0: /** sl@0: Allocate a set of discontiguous RAM pages from the specified zone. sl@0: sl@0: @param aZoneIdList The array of IDs of the RAM zones to allocate from. sl@0: @param aZoneIdCount The number of RAM zone IDs in aZoneIdList. 
sl@0: @param aPageList Preallocated array of TPhysAddr elements that will receive the sl@0: physical address of each page allocated. sl@0: @param aNumPages The number of pages to allocate. sl@0: @param aPageType The type of the pages being allocated. sl@0: sl@0: @return KErrNone on success, KErrArgument if a zone of aZoneIdList doesn't exist, sl@0: KErrNoMemory if there aren't enough free pages in the zone sl@0: */ sl@0: TInt MmuBase::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType) sl@0: { sl@0: #ifdef _DEBUG sl@0: if(K::CheckForSimulatedAllocFail()) sl@0: return KErrNoMemory; sl@0: #endif sl@0: __NK_ASSERT_DEBUG(aPageType == EPageFixed); sl@0: sl@0: return iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, aPageType); sl@0: } sl@0: sl@0: sl@0: TInt MmuBase::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType, TUint aBlockedZoneId, TBool aBlockRest) sl@0: { sl@0: #ifdef _DEBUG sl@0: if(K::CheckForSimulatedAllocFail()) sl@0: return KErrNoMemory; sl@0: #endif sl@0: TInt missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest); sl@0: sl@0: // If missing some pages, ask the RAM cache to donate some of its pages. sl@0: // Don't ask it for discardable pages as those are intended for itself. sl@0: if(missing && aPageType != EPageDiscard && iRamCache->GetFreePages(missing)) sl@0: missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest); sl@0: return missing ? KErrNoMemory : KErrNone; sl@0: } sl@0: sl@0: sl@0: TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest) sl@0: { sl@0: #ifdef _DEBUG sl@0: if(K::CheckForSimulatedAllocFail()) sl@0: return KErrNoMemory; sl@0: #endif sl@0: __NK_ASSERT_DEBUG(aPageType == EPageFixed); sl@0: TUint contigPages = (aSize + KPageSize - 1) >> KPageShift; sl@0: TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest); sl@0: if (r == KErrNoMemory && contigPages > KMaxFreeableContiguousPages) sl@0: {// Allocation failed but as this is a large allocation flush the RAM cache sl@0: // and reattempt the allocation as large allocation wouldn't discard pages. sl@0: iRamCache->FlushAll(); sl@0: r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest); sl@0: } sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Allocate contiguous RAM from the specified RAM zones. 
sl@0: @param aZoneIdList An array of IDs of the RAM zones to allocate from sl@0: @param aZoneIdCount The number of IDs listed in aZoneIdList sl@0: @param aSize The number of bytes to allocate sl@0: @param aPhysAddr Will receive the physical base address of the allocated RAM sl@0: @param aPageType The type of the pages being allocated sl@0: @param aAlign The log base 2 alginment required sl@0: */ sl@0: TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign) sl@0: { sl@0: #ifdef _DEBUG sl@0: if(K::CheckForSimulatedAllocFail()) sl@0: return KErrNoMemory; sl@0: #endif sl@0: return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aPageType, aAlign); sl@0: } sl@0: sl@0: SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress) sl@0: { sl@0: TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift); sl@0: TUint flags = ((TUint8*)KPageInfoMap)[index>>3]; sl@0: TUint mask = 1<<(index&7); sl@0: if(!(flags&mask)) sl@0: return 0; // no SPageInfo for aAddress sl@0: SPageInfo* info = FromPhysAddr(aAddress); sl@0: if(info->Type()==SPageInfo::EInvalid) sl@0: return 0; sl@0: return info; sl@0: } sl@0: sl@0: /** HAL Function wrapper for the RAM allocator. sl@0: */ sl@0: sl@0: TInt RamHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2) sl@0: { sl@0: DRamAllocator *pRamAlloc = MmuBase::TheMmu->iRamPageAllocator; sl@0: sl@0: if (pRamAlloc) sl@0: return pRamAlloc->HalFunction(aFunction, a1, a2); sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: sl@0: /****************************************************************************** sl@0: * Initialisation sl@0: ******************************************************************************/ sl@0: sl@0: void MmuBase::Init1() sl@0: { sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init1")); sl@0: iInitialFreeMemory=0; sl@0: iAllocFailed=EFalse; sl@0: } sl@0: sl@0: void MmuBase::Init2() sl@0: { sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init2")); sl@0: TInt total_ram=TheSuperPage().iTotalRamSize; sl@0: TInt total_ram_pages=total_ram>>iPageShift; sl@0: iNumPages = total_ram_pages; sl@0: const SRamInfo& info=*(const SRamInfo*)TheSuperPage().iRamBootData; sl@0: iRamPageAllocator=DRamAllocator::New(info, RamZoneConfig, RamZoneCallback); sl@0: sl@0: TInt max_pt=total_ram>>iPageTableShift; sl@0: if (max_pt>iPtClusterShift; sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptpg=%d",max_ptpg)); sl@0: iPageTableLinearAllocator=TBitMapAllocator::New(max_ptpg,ETrue); sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableLinearAllocator=%08x",iPageTableLinearAllocator)); sl@0: __ASSERT_ALWAYS(iPageTableLinearAllocator,Panic(EPtLinAllocCreateFailed)); sl@0: if (iPtClusterShift) // if more than one page table per page sl@0: { sl@0: iPageTableAllocator=TBitMapAllocator::New(iMaxPageTables,EFalse); sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableAllocator=%08x",iPageTableAllocator)); sl@0: __ASSERT_ALWAYS(iPageTableAllocator,Panic(EPtAllocCreateFailed)); sl@0: } sl@0: TInt max_ptb=(iMaxPageTables+iPtBlockMask)>>iPtBlockShift; sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptb=%d",max_ptb)); sl@0: iPtBlockCount=(TInt*)Kern::AllocZ(max_ptb*sizeof(TInt)); sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtBlockCount=%08x",iPtBlockCount)); sl@0: __ASSERT_ALWAYS(iPtBlockCount,Panic(EPtBlockCountCreateFailed)); sl@0: TInt max_ptg=(iMaxPageTables+iPtGroupMask)>>iPtGroupShift; sl@0: 
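	// Bookkeeping note: iPtBlockCount[] (allocated above) tracks how many page
	// tables have their SPageTableInfo in each page of the page table info area,
	// so InitPageTableInfo()/DoFreePageTable() know when to map or free that page.
	// iPtGroupCount[] (allocated below) tracks how many page tables are mapped in
	// each group of the page table address space, so MapPageTable() knows when an
	// extra page table is needed to map them.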
__KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ptg_shift=%d, max_ptg=%d",iPtGroupShift,max_ptg)); sl@0: iPtGroupCount=(TInt*)Kern::AllocZ(max_ptg*sizeof(TInt)); sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtGroupCount=%08x",iPtGroupCount)); sl@0: __ASSERT_ALWAYS(iPtGroupCount,Panic(EPtGroupCountCreateFailed)); sl@0: sl@0: sl@0: // Clear the inital (and only so far) page table info page so all unused sl@0: // page tables will be marked as unused. sl@0: memclr((TAny*)KPageTableInfoBase, KPageSize); sl@0: sl@0: // look for page tables - assume first page table (id=0) maps page tables sl@0: TPte* pPte=(TPte*)iPageTableLinBase; sl@0: TInt i; sl@0: for (i=0; iAlloc(i,1); sl@0: TPhysAddr ptpgPhys=PtePhysAddr(pte, i); sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys); sl@0: __ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot)); sl@0: pi->SetPageTable(i); sl@0: pi->Lock(); sl@0: TInt id=i<>iPtBlockShift; sl@0: ++iPtBlockCount[ptb]; sl@0: TInt ptg=id>>iPtGroupShift; sl@0: ++iPtGroupCount[ptg]; sl@0: } sl@0: sl@0: // look for mapped pages sl@0: TInt npdes=1<<(32-iChunkShift); sl@0: TInt npt=0; sl@0: for (i=0; i=PP::RamDriveStartAddress && TUint32(cAddr-PP::RamDriveStartAddress)=0) sl@0: { sl@0: ++npt; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> page table %d", cAddr, ptid)); sl@0: pPte=(TPte*)PageTableLinAddr(ptid); sl@0: } sl@0: #ifdef KMMU sl@0: if (pdePhys != KPhysAddrInvalid) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", cAddr, pdePhys)); sl@0: } sl@0: #endif sl@0: if (ptid>=0 || pdePhys != KPhysAddrInvalid) sl@0: { sl@0: TInt j; sl@0: TInt np=0; sl@0: for (j=0; jMarkPageAllocated(pa, EPageFixed); sl@0: // allow KErrAlreadyExists since it's possible that a page is doubly mapped sl@0: __ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot)); sl@0: SetupInitialPageInfo(pi,cAddr,j); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: if(r==KErrNone) sl@0: ++Epoc::KernelMiscPages; sl@0: #endif sl@0: } sl@0: } sl@0: } sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x #PTEs=%d",cAddr,np)); sl@0: if (ptid>=0) sl@0: SetupInitialPageTableInfo(ptid,cAddr,np); sl@0: } sl@0: } sl@0: sl@0: TInt oddpt=npt & iPtClusterMask; sl@0: if (oddpt) sl@0: oddpt=iPtClusterSize-oddpt; sl@0: __KTRACE_OPT(KBOOT,Kern::Printf("Total page tables %d, left over subpages %d",npt,oddpt)); sl@0: if (oddpt) sl@0: iPageTableAllocator->Free(npt,oddpt); sl@0: sl@0: DoInit2(); sl@0: sl@0: // Save current free RAM size - there can never be more free RAM than this sl@0: TInt max_free = Kern::FreeRamInBytes(); sl@0: K::MaxFreeRam = max_free; sl@0: if (max_free < PP::RamDriveMaxSize) sl@0: PP::RamDriveMaxSize = max_free; sl@0: sl@0: if (K::ColdStart) sl@0: ClearRamDrive(PP::RamDriveStartAddress); sl@0: else sl@0: RecoverRamDrive(); sl@0: sl@0: TInt r=K::MutexCreate((DMutex*&)RamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc); sl@0: if (r!=KErrNone) sl@0: Panic(ERamAllocMutexCreateFailed); sl@0: r=K::MutexCreate((DMutex*&)HwChunkMutex, KLitHwChunk, NULL, EFalse, KMutexOrdHwChunk); sl@0: if (r!=KErrNone) sl@0: Panic(EHwChunkMutexCreateFailed); sl@0: sl@0: #ifdef __DEMAND_PAGING__ sl@0: if (DemandPaging::RomPagingRequested() || DemandPaging::CodePagingRequested()) sl@0: iRamCache = DemandPaging::New(); sl@0: else sl@0: iRamCache = new RamCache; sl@0: #else sl@0: iRamCache = new RamCache; sl@0: #endif sl@0: if (!iRamCache) sl@0: Panic(ERamCacheAllocFailed); sl@0: iRamCache->Init2(); sl@0: RamCacheBase::TheRamCache = iRamCache; sl@0: sl@0: // Get the allocator to signal to 
the variant which RAM zones are in use so far sl@0: iRamPageAllocator->InitialCallback(); sl@0: } sl@0: sl@0: void MmuBase::Init3() sl@0: { sl@0: __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init3")); sl@0: sl@0: // Initialise demand paging sl@0: #ifdef __DEMAND_PAGING__ sl@0: M::DemandPagingInit(); sl@0: #endif sl@0: sl@0: // Register a HAL Function for the Ram allocator. sl@0: TInt r = Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0); sl@0: __NK_ASSERT_ALWAYS(r==KErrNone); sl@0: sl@0: // sl@0: // Perform the intialisation for page moving and RAM defrag object. sl@0: // sl@0: sl@0: // allocate a page to use as an alt stack sl@0: MmuBase::Wait(); sl@0: TPhysAddr stackpage; sl@0: r = AllocPhysicalRam(KPageSize, stackpage); sl@0: MmuBase::Signal(); sl@0: if (r!=KErrNone) sl@0: Panic(EDefragStackAllocFailed); sl@0: sl@0: // map it at a predetermined address sl@0: TInt ptid = PageTableId(KDefragAltStackAddr); sl@0: TPte perm = PtePermissions(EKernelStack); sl@0: NKern::LockSystem(); sl@0: MapRamPages(ptid, SPageInfo::EFixed, NULL, KDefragAltStackAddr, &stackpage, 1, perm); sl@0: NKern::UnlockSystem(); sl@0: iAltStackBase = KDefragAltStackAddr + KPageSize; sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Allocated defrag alt stack page at %08x, mapped to %08x, base is now %08x", stackpage, KDefragAltStackAddr, iAltStackBase)); sl@0: sl@0: // Create the actual defrag object and initialise it. sl@0: iDefrag = new Defrag; sl@0: if (!iDefrag) sl@0: Panic(EDefragAllocFailed); sl@0: iDefrag->Init3(iRamPageAllocator); sl@0: } sl@0: sl@0: void MmuBase::CreateKernelSection(TLinAddr aEnd, TInt aHwChunkAlign) sl@0: { sl@0: TLinAddr base=(TLinAddr)TheRomHeader().iKernelLimit; sl@0: iKernelSection=TLinearSection::New(base, aEnd); sl@0: __ASSERT_ALWAYS(iKernelSection!=NULL, Panic(ECreateKernelSectionFailed)); sl@0: iHwChunkAllocator=THwChunkAddressAllocator::New(aHwChunkAlign, iKernelSection); sl@0: __ASSERT_ALWAYS(iHwChunkAllocator!=NULL, Panic(ECreateHwChunkAllocFailed)); sl@0: } sl@0: sl@0: // Recover RAM drive contents after a reset sl@0: TInt MmuBase::RecoverRamDrive() sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::RecoverRamDrive()")); sl@0: TLinAddr ptlin; sl@0: TLinAddr chunk = PP::RamDriveStartAddress; sl@0: TLinAddr end = chunk + (TLinAddr)PP::RamDriveRange; sl@0: TInt size = 0; sl@0: TInt limit = RoundToPageSize(TheSuperPage().iRamDriveSize); sl@0: for( ; chunkMarkPageAllocated(ptpgphys, EPageMovable); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MPA: r=%d",r)); sl@0: if (r==KErrArgument) sl@0: break; // page table address was invalid - stop here and clear to end of range sl@0: if (r==KErrNone) sl@0: { sl@0: // this page was currently unallocated sl@0: if (ptid>=0) sl@0: break; // ID has been allocated - bad news - bail here sl@0: ptid = iPageTableLinearAllocator->Alloc(); sl@0: __ASSERT_ALWAYS(ptid>=0, Panic(ERecoverRamDriveAllocPTIDFailed)); sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgphys); sl@0: __ASSERT_ALWAYS(pi, Panic(ERecoverRamDriveBadPageTable)); sl@0: pi->SetPageTable(ptid); // id = cluster number here sl@0: ptid <<= iPtClusterShift; sl@0: MapPageTable(ptid, ptpgphys, EFalse); sl@0: if (iPageTableAllocator) sl@0: iPageTableAllocator->Free(ptid, iPtClusterSize); sl@0: ptid |= ((ptphys>>iPageTableShift)&iPtClusterMask); sl@0: ptlin = PageTableLinAddr(ptid); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Page table ID %d lin %08x", ptid, ptlin)); sl@0: if (iPageTableAllocator) sl@0: iPageTableAllocator->Alloc(ptid, 1); sl@0: } sl@0: else sl@0: { sl@0: // this page was already allocated 
sl@0: if (ptid<0) sl@0: break; // ID not allocated - bad news - bail here sl@0: ptlin = PageTableLinAddr(ptid); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Page table lin %08x", ptlin)); sl@0: if (iPageTableAllocator) sl@0: iPageTableAllocator->Alloc(ptid, 1); sl@0: } sl@0: TInt pte_index; sl@0: TBool chunk_inc = 0; sl@0: TPte* page_table = (TPte*)ptlin; sl@0: for (pte_index=0; pte_index<(iChunkSize>>iPageSize); ++pte_index) sl@0: { sl@0: if (size==limit) // have reached end of ram drive sl@0: break; sl@0: TPte pte = page_table[pte_index]; sl@0: if (PteIsPresent(pte)) sl@0: { sl@0: TPhysAddr pa=PtePhysAddr(pte, pte_index); sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa); sl@0: if (!pi) sl@0: break; sl@0: TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageMovable); sl@0: __ASSERT_ALWAYS(r==KErrNone, Panic(ERecoverRamDriveBadPage)); sl@0: size+=iPageSize; sl@0: chunk_inc = iChunkSize; sl@0: } sl@0: } sl@0: if (pte_index < (iChunkSize>>iPageSize) ) sl@0: { sl@0: // if we recovered pages in this page table, leave it in place sl@0: chunk += chunk_inc; sl@0: sl@0: // clear from here on sl@0: ClearPageTable(ptid, pte_index); sl@0: break; sl@0: } sl@0: } sl@0: if (chunk < end) sl@0: ClearRamDrive(chunk); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Recovered RAM drive size %08x",size)); sl@0: if (size=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize)) sl@0: orig_phys = LinearToPhysical(aRomAddr); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("OrigPhys = %08x",orig_phys)); sl@0: if (orig_phys == KPhysAddrInvalid) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address")); sl@0: return KErrArgument; sl@0: } sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(orig_phys); sl@0: if (pi && pi->Type()==SPageInfo::EShadow) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ROM address already shadowed")); sl@0: return KErrAlreadyExists; sl@0: } sl@0: TInt ptid = PageTableId(aRomAddr); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Shadow PTID %d", ptid)); sl@0: TInt newptid = -1; sl@0: if (ptid<0) sl@0: { sl@0: newptid = AllocPageTable(); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("New shadow PTID %d", newptid)); sl@0: if (newptid<0) sl@0: return KErrNoMemory; sl@0: ptid = newptid; sl@0: PtInfo(ptid).SetShadow( (aRomAddr-iRomLinearBase)>>iChunkShift ); sl@0: InitShadowPageTable(ptid, aRomAddr, orig_phys); sl@0: } sl@0: TPhysAddr shadow_phys; sl@0: sl@0: if (AllocRamPages(&shadow_phys, 1, EPageFixed) != KErrNone) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page")); sl@0: iAllocFailed=ETrue; sl@0: if (newptid>=0) sl@0: { sl@0: FreePageTable(newptid); sl@0: } sl@0: return KErrNoMemory; sl@0: } sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<=0) sl@0: { sl@0: NKern::LockSystem(); sl@0: AssignShadowPageTable(newptid, aRomAddr); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: FlushShadow(aRomAddr); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocShadowPage successful")); sl@0: return KErrNone; sl@0: } sl@0: sl@0: TInt MmuBase::FreeShadowPage(TLinAddr aRomAddr) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreeShadowPage(%08x)", aRomAddr)); sl@0: aRomAddr &= ~iPageMask; sl@0: TPhysAddr shadow_phys = KPhysAddrInvalid; sl@0: if (aRomAddr>=iRomLinearBase || aRomAddr<=(iRomLinearEnd-iPageSize)) sl@0: shadow_phys = LinearToPhysical(aRomAddr); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys)); sl@0: if (shadow_phys == KPhysAddrInvalid) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address")); sl@0: return KErrArgument; sl@0: } 
sl@0: TInt ptid = PageTableId(aRomAddr); sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys); sl@0: if (ptid<0 || !pi || pi->Type()!=SPageInfo::EShadow) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address")); sl@0: return KErrGeneral; sl@0: } sl@0: TPhysAddr orig_phys = (TPhysAddr)pi->Owner(); sl@0: DoUnmapShadowPage(ptid, aRomAddr, orig_phys); sl@0: SPageTableInfo& pti = PtInfo(ptid); sl@0: if (pti.Attribs()==SPageTableInfo::EShadow && --pti.iCount==0) sl@0: { sl@0: TInt r = UnassignShadowPageTable(aRomAddr, orig_phys); sl@0: if (r==KErrNone) sl@0: FreePageTable(ptid); sl@0: else sl@0: pti.SetGlobal(aRomAddr>>iChunkShift); sl@0: } sl@0: sl@0: FreePages(&shadow_phys, 1, EPageFixed); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("FreeShadowPage successful")); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<=iRomLinearBase || aRomAddr<=(iRomLinearEnd-iPageSize)) sl@0: shadow_phys = LinearToPhysical(aRomAddr); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys)); sl@0: if (shadow_phys == KPhysAddrInvalid) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address")); sl@0: return KErrArgument; sl@0: } sl@0: TInt ptid = PageTableId(aRomAddr); sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys); sl@0: if (ptid<0 || pi==0) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address")); sl@0: return KErrGeneral; sl@0: } sl@0: DoFreezeShadowPage(ptid, aRomAddr); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("FreezeShadowPage successful")); sl@0: return KErrNone; sl@0: } sl@0: sl@0: TInt MmuBase::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength) sl@0: { sl@0: memcpy ((TAny*)aDest, (const TAny*)aSrc, aLength); sl@0: return KErrNone; sl@0: } sl@0: sl@0: void M::BTracePrime(TUint aCategory) sl@0: { sl@0: (void)aCategory; sl@0: sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: // Must check for -1 as that is the default value of aCategory for sl@0: // BTrace::Prime() which is intended to prime all categories that are sl@0: // currently enabled via a single invocation of BTrace::Prime(). sl@0: if(aCategory==BTrace::EKernelMemory || (TInt)aCategory == -1) sl@0: { sl@0: NKern::ThreadEnterCS(); sl@0: Mmu::Wait(); sl@0: BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryInitialFree,TheSuperPage().iTotalRamSize); sl@0: BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryCurrentFree,Kern::FreeRamInBytes()); sl@0: BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, Epoc::KernelMiscPages<iMinimumPageCount << KPageShift); sl@0: #endif sl@0: BTrace8(BTrace::EKernelMemory,BTrace::EKernelMemoryDrvPhysAlloc, Epoc::DriverAllocdPhysRam, -1); sl@0: Mmu::Signal(); sl@0: NKern::ThreadLeaveCS(); sl@0: } sl@0: #endif sl@0: sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: // Must check for -1 as that is the default value of aCategroy for sl@0: // BTrace::Prime() which is intended to prime all categories that are sl@0: // currently enabled via a single invocation of BTrace::Prime(). 
sl@0: if(aCategory==BTrace::ERamAllocator || (TInt)aCategory == -1) sl@0: { sl@0: NKern::ThreadEnterCS(); sl@0: Mmu::Wait(); sl@0: Mmu::Get().iRamPageAllocator->SendInitialBtraceLogs(); sl@0: Mmu::Signal(); sl@0: NKern::ThreadLeaveCS(); sl@0: } sl@0: #endif sl@0: } sl@0: sl@0: sl@0: /****************************************************************************** sl@0: * Code common to all virtual memory models sl@0: ******************************************************************************/ sl@0: sl@0: void RHeapK::Mutate(TInt aOffset, TInt aMaxLength) sl@0: // sl@0: // Used by the kernel to mutate a fixed heap into a chunk heap. sl@0: // sl@0: { sl@0: iMinLength += aOffset; sl@0: iMaxLength = aMaxLength + aOffset; sl@0: iOffset = aOffset; sl@0: iChunkHandle = (TInt)K::HeapInfo.iChunk; sl@0: iPageSize = M::PageSizeInBytes(); sl@0: iGrowBy = iPageSize; sl@0: iFlags = 0; sl@0: } sl@0: sl@0: TInt M::PageSizeInBytes() sl@0: { sl@0: return KPageSize; sl@0: } sl@0: sl@0: TInt MmuBase::FreeRamInBytes() sl@0: { sl@0: TInt free = iRamPageAllocator->FreeRamInBytes(); sl@0: if(iRamCache) sl@0: free += iRamCache->NumberOfFreePages()<FreeRamInBytes(); sl@0: } sl@0: sl@0: sl@0: /** Rounds up the argument to the size of a MMU page. sl@0: sl@0: To find out the size of a MMU page: sl@0: @code sl@0: size = Kern::RoundToPageSize(1); sl@0: @endcode sl@0: sl@0: @param aSize Value to round up sl@0: @pre any context sl@0: */ sl@0: EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize) sl@0: { sl@0: return MmuBase::RoundToPageSize(aSize); sl@0: } sl@0: sl@0: sl@0: /** Rounds up the argument to the amount of memory mapped by a MMU page sl@0: directory entry. sl@0: sl@0: Chunks occupy one or more consecutive page directory entries (PDE) and sl@0: therefore the amount of linear and physical memory allocated to a chunk is sl@0: always a multiple of the amount of memory mapped by a page directory entry. sl@0: */ sl@0: EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize) sl@0: { sl@0: return MmuBase::RoundToChunkSize(aSize); sl@0: } sl@0: sl@0: sl@0: /** sl@0: Allows the variant to specify the details of the RAM zones. This should be invoked sl@0: by the variant in its implementation of the pure virtual function Asic::Init1(). sl@0: sl@0: There are some limitations to how the RAM zones can be specified: sl@0: - Each RAM zone's address space must be distinct and not overlap with any sl@0: other RAM zone's address space sl@0: - Each RAM zone's address space must have a size that is multiples of the sl@0: ASIC's MMU small page size and be aligned to the ASIC's MMU small page size, sl@0: usually 4KB on ARM MMUs. sl@0: - When taken together all of the RAM zones must cover the whole of the physical RAM sl@0: address space as specified by the bootstrap in the SuperPage members iTotalRamSize sl@0: and iRamBootData;. sl@0: - There can be no more than KMaxRamZones RAM zones specified by the base port sl@0: sl@0: Note the verification of the RAM zone data is not performed here but by the ram sl@0: allocator later in the boot up sequence. This is because it is only possible to sl@0: verify the zone data once the physical RAM configuration has been read from sl@0: the super page. Any verification errors result in a "RAM-ALLOC" panic sl@0: faulting the kernel during initialisation. sl@0: sl@0: @param aZones Pointer to an array of SRamZone structs containing the details for all sl@0: the zones. The end of the array is specified by an element with an iSize of zero. 
The array must sl@0: remain in memory at least until the kernel has successfully booted. sl@0: sl@0: @param aCallback Pointer to a call back function that the kernel may invoke to request sl@0: one of the operations specified by TRamZoneOp. sl@0: sl@0: @return KErrNone if successful, otherwise one of the system wide error codes sl@0: sl@0: @see TRamZoneOp sl@0: @see SRamZone sl@0: @see TRamZoneCallback sl@0: */ sl@0: EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback) sl@0: { sl@0: // Ensure this is only called once and only while we are initialising the kernel sl@0: if (!K::Initialising || MmuBase::RamZoneConfig != NULL) sl@0: {// fault kernel, won't return sl@0: K::Fault(K::EBadSetRamZoneConfig); sl@0: } sl@0: sl@0: if (NULL == aZones) sl@0: { sl@0: return KErrArgument; sl@0: } sl@0: MmuBase::RamZoneConfig=aZones; sl@0: MmuBase::RamZoneCallback=aCallback; sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Modify the specified RAM zone's flags. sl@0: sl@0: This allows the BSP or device driver to configure which type of pages, if any, sl@0: can be allocated into a RAM zone by the system. sl@0: sl@0: Note: updating a RAM zone's flags can result in sl@0: 1 - memory allocations failing despite there being enough free RAM in the system. sl@0: 2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone() sl@0: or TRamDefragRequest::DefragRam() never succeeding. sl@0: sl@0: The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc sl@0: are intended to be used with this method. sl@0: sl@0: @param aId The ID of the RAM zone to modify. sl@0: @param aClearMask The bit mask to clear, each flag of which must already be set on the RAM zone. sl@0: @param aSetMask The bit mask to set. sl@0: sl@0: @return KErrNone on success, KErrArgument if the RAM zone of aId not found or if sl@0: aSetMask contains invalid flag bits. sl@0: sl@0: @see TRamDefragRequest::EmptyRamZone() sl@0: @see TRamDefragRequest::ClaimRamZone() sl@0: @see TRamDefragRequest::DefragRam() sl@0: sl@0: @see KRamZoneFlagDiscardOnly sl@0: @see KRamZoneFlagMovAndDisOnly sl@0: @see KRamZoneFlagNoAlloc sl@0: */ sl@0: EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask) sl@0: { sl@0: MmuBase& m = *MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: sl@0: TInt ret = m.ModifyRamZoneFlags(aId, aClearMask, aSetMask); sl@0: sl@0: MmuBase::Signal(); sl@0: return ret; sl@0: } sl@0: sl@0: TInt MmuBase::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask) sl@0: { sl@0: return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask); sl@0: } sl@0: sl@0: sl@0: /** sl@0: Gets the current count of a particular RAM zone's pages by type. sl@0: sl@0: @param aId The ID of the RAM zone to enquire about sl@0: @param aPageData If successful, on return this contains the page count sl@0: sl@0: @return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or sl@0: one of the system wide error codes sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. 
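
A short usage sketch (the zone ID is an illustrative assumption):
@code
	SRamZonePageCount counts;
	NKern::ThreadEnterCS();
	TInt r = Epoc::GetRamZonePageCount(1 /*hypothetical zone ID*/, counts);
	NKern::ThreadLeaveCS();
	// on KErrNone, 'counts' holds the zone's current page counts by type
@endcode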
sl@0: sl@0: @see SRamZonePageCount sl@0: */ sl@0: EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount"); sl@0: sl@0: MmuBase& m = *MmuBase::TheMmu; sl@0: MmuBase::Wait(); // Gets RAM alloc mutex sl@0: sl@0: TInt r = m.GetRamZonePageCount(aId, aPageData); sl@0: sl@0: MmuBase::Signal(); sl@0: sl@0: return r; sl@0: } sl@0: sl@0: TInt MmuBase::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData) sl@0: { sl@0: return iRamPageAllocator->GetZonePageCount(aId, aPageData); sl@0: } sl@0: sl@0: /** sl@0: Replace a page of the system's execute-in-place (XIP) ROM image with a page of sl@0: RAM having the same contents. This RAM can subsequently be written to in order sl@0: to apply patches to the XIP ROM or to insert software breakpoints for debugging sl@0: purposes. sl@0: Call Epoc::FreeShadowPage() when you wish to revert to the original ROM page. sl@0: sl@0: @param aRomAddr The virtual address of the ROM page to be replaced. sl@0: @return KErrNone if the operation completed successfully. sl@0: KErrArgument if the specified address is not a valid XIP ROM address. sl@0: KErrNoMemory if the operation failed due to insufficient free RAM. sl@0: KErrAlreadyExists if the XIP ROM page at the specified address has sl@0: already been shadowed by a RAM page. sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: */ sl@0: EXPORT_C TInt Epoc::AllocShadowPage(TLinAddr aRomAddr) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocShadowPage"); sl@0: sl@0: TInt r; sl@0: r=M::LockRegion(aRomAddr,1); sl@0: if(r!=KErrNone && r!=KErrNotFound) sl@0: return r; sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: r=m.AllocShadowPage(aRomAddr); sl@0: MmuBase::Signal(); sl@0: if(r!=KErrNone) sl@0: M::UnlockRegion(aRomAddr,1); sl@0: return r; sl@0: } sl@0: sl@0: /** sl@0: Copies data into shadow memory. Source data is presumed to be in Kernel memory. sl@0: sl@0: @param aSrc Data to copy from. sl@0: @param aDest Address to copy into. sl@0: @param aLength Number of bytes to copy. Maximum of 32 bytes of data can be copied. sl@0: . sl@0: @return KErrNone if the operation completed successfully. sl@0: KErrArgument if any part of destination region is not shadow page or sl@0: if aLength is greater then 32 bytes. sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: */ sl@0: EXPORT_C TInt Epoc::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::CopyToShadowMemory"); sl@0: sl@0: if (aLength>32) sl@0: return KErrArgument; sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: // This is a simple copy operation except on platforms with __CPU_MEMORY_TYPE_REMAPPING defined, sl@0: // where shadow page is read-only and it has to be remapped before it is written into. sl@0: return m.CopyToShadowMemory(aDest, aSrc, aLength); sl@0: } sl@0: /** sl@0: Revert an XIP ROM address which has previously been shadowed to the original sl@0: page of ROM. sl@0: sl@0: @param aRomAddr The virtual address of the ROM page to be reverted. sl@0: @return KErrNone if the operation completed successfully. 
sl@0: KErrArgument if the specified address is not a valid XIP ROM address. sl@0: KErrGeneral if the specified address has not previously been shadowed sl@0: using Epoc::AllocShadowPage(). sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: */ sl@0: EXPORT_C TInt Epoc::FreeShadowPage(TLinAddr aRomAddr) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreeShadowPage"); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r=m.FreeShadowPage(aRomAddr); sl@0: MmuBase::Signal(); sl@0: if(r==KErrNone) sl@0: M::UnlockRegion(aRomAddr,1); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Change the permissions on an XIP ROM address which has previously been shadowed sl@0: by a RAM page so that the RAM page may no longer be written to. sl@0: sl@0: Note: Shadow page on the latest platforms (that use the reduced set of access permissions: sl@0: arm11mpcore, arm1176, cortex) is implemented with read only permissions. Therefore, calling sl@0: this function in not necessary, as shadow page is already created as 'frozen'. sl@0: sl@0: @param aRomAddr The virtual address of the shadow RAM page to be frozen. sl@0: @return KErrNone if the operation completed successfully. sl@0: KErrArgument if the specified address is not a valid XIP ROM address. sl@0: KErrGeneral if the specified address has not previously been shadowed sl@0: using Epoc::AllocShadowPage(). sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: */ sl@0: EXPORT_C TInt Epoc::FreezeShadowPage(TLinAddr aRomAddr) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreezeShadowPage"); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r=m.FreezeShadowPage(aRomAddr); sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Allocate a block of physically contiguous RAM with a physical address aligned sl@0: to a specified power of 2 boundary. sl@0: When the RAM is no longer required it should be freed using sl@0: Epoc::FreePhysicalRam() sl@0: sl@0: @param aSize The size in bytes of the required block. The specified size sl@0: is rounded up to the page size, since only whole pages of sl@0: physical RAM can be allocated. sl@0: @param aPhysAddr Receives the physical address of the base of the block on sl@0: successful allocation. sl@0: @param aAlign Specifies the number of least significant bits of the sl@0: physical address which are required to be zero. If a value sl@0: less than log2(page size) is specified, page alignment is sl@0: assumed. Pass 0 for aAlign if there are no special alignment sl@0: constraints (other than page alignment). sl@0: @return KErrNone if the allocation was successful. sl@0: KErrNoMemory if a sufficiently large physically contiguous block of free sl@0: RAM with the specified alignment could not be found. sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. 
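
A typical (hypothetical) driver-side sequence, pairing the allocation with
Epoc::FreePhysicalRam() as recommended above:
@code
	TPhysAddr phys;
	NKern::ThreadEnterCS();
	TInt r = Epoc::AllocPhysicalRam(0x10000, phys, 0);	// 64KB, page aligned
	if (r == KErrNone)
		{
		// ... map the block into a chunk or hardware chunk and use it ...
		Epoc::FreePhysicalRam(phys, 0x10000);
		}
	NKern::ThreadLeaveCS();
@endcode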
sl@0: */ sl@0: EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam"); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r=m.AllocPhysicalRam(aSize,aPhysAddr,aAlign); sl@0: if (r == KErrNone) sl@0: { sl@0: // For the sake of platform security we have to clear the memory. E.g. the driver sl@0: // could assign it to a chunk visible to user side. sl@0: m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1)); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: TUint size = Kern::RoundToPageSize(aSize); sl@0: BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr); sl@0: Epoc::DriverAllocdPhysRam += size; sl@0: #endif sl@0: } sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: sl@0: /** sl@0: Allocate a block of physically contiguous RAM with a physical address aligned sl@0: to a specified power of 2 boundary from the specified zone. sl@0: When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). sl@0: sl@0: Note that this method only repsects the KRamZoneFlagNoAlloc flag and will always attempt sl@0: to allocate regardless of whether the other flags are set for the specified RAM zones sl@0: or not. sl@0: sl@0: When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). sl@0: sl@0: @param aZoneId The ID of the zone to attempt to allocate from. sl@0: @param aSize The size in bytes of the required block. The specified size sl@0: is rounded up to the page size, since only whole pages of sl@0: physical RAM can be allocated. sl@0: @param aPhysAddr Receives the physical address of the base of the block on sl@0: successful allocation. sl@0: @param aAlign Specifies the number of least significant bits of the sl@0: physical address which are required to be zero. If a value sl@0: less than log2(page size) is specified, page alignment is sl@0: assumed. Pass 0 for aAlign if there are no special alignment sl@0: constraints (other than page alignment). sl@0: @return KErrNone if the allocation was successful. sl@0: KErrNoMemory if a sufficiently large physically contiguous block of free sl@0: RAM with the specified alignment could not be found within the specified sl@0: zone. sl@0: KErrArgument if a RAM zone of the specified ID can't be found or if the sl@0: RAM zone has a total number of physical pages which is less than those sl@0: requested for the allocation. sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. sl@0: */ sl@0: EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) sl@0: { sl@0: return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign); sl@0: } sl@0: sl@0: sl@0: /** sl@0: Allocate a block of physically contiguous RAM with a physical address aligned sl@0: to a specified power of 2 boundary from the specified RAM zones. sl@0: When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). sl@0: sl@0: RAM will be allocated into the RAM zones in the order they are specified in the sl@0: aZoneIdList parameter. If the contiguous allocations are intended to span RAM zones sl@0: when required then aZoneIdList should be listed with the RAM zones in ascending sl@0: physical address order. 
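
For example, two (hypothetical) zones that are physically adjacent and listed in
ascending address order allow a single block to span them when neither zone alone
can satisfy the request; the calling thread must also satisfy the preconditions
listed below:
@code
	TUint zones[2] = { 3, 4 };	// illustrative IDs, zone 3 physically below zone 4
	TPhysAddr base;
	TInt r = Epoc::ZoneAllocPhysicalRam(zones, 2, 0x100000, base, 0);	// 1MB block
@endcode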
sl@0: sl@0: Note that this method only repsects the KRamZoneFlagNoAlloc flag and will always attempt sl@0: to allocate regardless of whether the other flags are set for the specified RAM zones sl@0: or not. sl@0: sl@0: When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). sl@0: sl@0: @param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to sl@0: attempt to allocate from. sl@0: @param aZoneIdCount The number of RAM zone IDs contained in aZoneIdList. sl@0: @param aSize The size in bytes of the required block. The specified size sl@0: is rounded up to the page size, since only whole pages of sl@0: physical RAM can be allocated. sl@0: @param aPhysAddr Receives the physical address of the base of the block on sl@0: successful allocation. sl@0: @param aAlign Specifies the number of least significant bits of the sl@0: physical address which are required to be zero. If a value sl@0: less than log2(page size) is specified, page alignment is sl@0: assumed. Pass 0 for aAlign if there are no special alignment sl@0: constraints (other than page alignment). sl@0: @return KErrNone if the allocation was successful. sl@0: KErrNoMemory if a sufficiently large physically contiguous block of free sl@0: RAM with the specified alignment could not be found within the specified sl@0: zone. sl@0: KErrArgument if a RAM zone of a specified ID can't be found or if the sl@0: RAM zones have a total number of physical pages which is less than those sl@0: requested for the allocation. sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. sl@0: */ sl@0: EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam"); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign); sl@0: if (r == KErrNone) sl@0: { sl@0: // For the sake of platform security we have to clear the memory. E.g. the driver sl@0: // could assign it to a chunk visible to user side. sl@0: m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1)); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: TUint size = Kern::RoundToPageSize(aSize); sl@0: BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr); sl@0: Epoc::DriverAllocdPhysRam += size; sl@0: #endif sl@0: } sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Attempt to allocate discontiguous RAM pages. sl@0: sl@0: When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). sl@0: sl@0: @param aNumPages The number of discontiguous pages required to be allocated sl@0: @param aPageList This should be a pointer to a previously allocated array of sl@0: aNumPages TPhysAddr elements. On a succesful allocation it sl@0: will receive the physical addresses of each page allocated. sl@0: sl@0: @return KErrNone if the allocation was successful. sl@0: KErrNoMemory if the requested number of pages can't be allocated sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. 
sl@0: @pre Can be used in a device driver. sl@0: */ sl@0: EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam"); sl@0: MmuBase& m = *MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r = m.AllocPhysicalRam(aNumPages, aPageList); sl@0: if (r == KErrNone) sl@0: { sl@0: // For the sake of platform security we have to clear the memory. E.g. the driver sl@0: // could assign it to a chunk visible to user side. sl@0: m.ClearPages(aNumPages, aPageList); sl@0: sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: if (BTrace::CheckFilter(BTrace::EKernelMemory)) sl@0: {// Only loop round each page if EKernelMemory tracing is enabled sl@0: TPhysAddr* pAddr = aPageList; sl@0: TPhysAddr* pAddrEnd = aPageList + aNumPages; sl@0: while (pAddr < pAddrEnd) sl@0: { sl@0: BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++); sl@0: Epoc::DriverAllocdPhysRam += KPageSize; sl@0: } sl@0: } sl@0: #endif sl@0: } sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Attempt to allocate discontiguous RAM pages from the specified zone. sl@0: sl@0: Note that this method only repsects the KRamZoneFlagNoAlloc flag and will always attempt sl@0: to allocate regardless of whether the other flags are set for the specified RAM zones sl@0: or not. sl@0: sl@0: When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). sl@0: sl@0: @param aZoneId The ID of the zone to attempt to allocate from. sl@0: @param aNumPages The number of discontiguous pages required to be allocated sl@0: from the specified zone. sl@0: @param aPageList This should be a pointer to a previously allocated array of sl@0: aNumPages TPhysAddr elements. On a succesful sl@0: allocation it will receive the physical addresses of each sl@0: page allocated. sl@0: @return KErrNone if the allocation was successful. sl@0: KErrNoMemory if the requested number of pages can't be allocated from the sl@0: specified zone. sl@0: KErrArgument if a RAM zone of the specified ID can't be found or if the sl@0: RAM zone has a total number of physical pages which is less than those sl@0: requested for the allocation. sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. sl@0: */ sl@0: EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList) sl@0: { sl@0: return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList); sl@0: } sl@0: sl@0: sl@0: /** sl@0: Attempt to allocate discontiguous RAM pages from the specified RAM zones. sl@0: The RAM pages will be allocated into the RAM zones in the order that they are specified sl@0: in the aZoneIdList parameter, the RAM zone preferences will be ignored. sl@0: sl@0: Note that this method only repsects the KRamZoneFlagNoAlloc flag and will always attempt sl@0: to allocate regardless of whether the other flags are set for the specified RAM zones sl@0: or not. sl@0: sl@0: When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam(). sl@0: sl@0: @param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to sl@0: attempt to allocate from. sl@0: @param aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList. 
sl@0: @param aNumPages The number of discontiguous pages required to be allocated sl@0: from the specified zone. sl@0: @param aPageList This should be a pointer to a previously allocated array of sl@0: aNumPages TPhysAddr elements. On a succesful sl@0: allocation it will receive the physical addresses of each sl@0: page allocated. sl@0: @return KErrNone if the allocation was successful. sl@0: KErrNoMemory if the requested number of pages can't be allocated from the sl@0: specified zone. sl@0: KErrArgument if a RAM zone of a specified ID can't be found or if the sl@0: RAM zones have a total number of physical pages which is less than those sl@0: requested for the allocation. sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. sl@0: */ sl@0: EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam"); sl@0: MmuBase& m = *MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList); sl@0: if (r == KErrNone) sl@0: { sl@0: // For the sake of platform security we have to clear the memory. E.g. the driver sl@0: // could assign it to a chunk visible to user side. sl@0: m.ClearPages(aNumPages, aPageList); sl@0: sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: if (BTrace::CheckFilter(BTrace::EKernelMemory)) sl@0: {// Only loop round each page if EKernelMemory tracing is enabled sl@0: TPhysAddr* pAddr = aPageList; sl@0: TPhysAddr* pAddrEnd = aPageList + aNumPages; sl@0: while (pAddr < pAddrEnd) sl@0: { sl@0: BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++); sl@0: Epoc::DriverAllocdPhysRam += KPageSize; sl@0: } sl@0: } sl@0: #endif sl@0: } sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: sl@0: /** sl@0: Free a previously-allocated block of physically contiguous RAM. sl@0: sl@0: Specifying one of the following may cause the system to panic: sl@0: a) an invalid physical RAM address. sl@0: b) valid physical RAM addresses where some had not been previously allocated. sl@0: c) an adrress not aligned to a page boundary. sl@0: sl@0: @param aPhysAddr The physical address of the base of the block to be freed. sl@0: This must be the address returned by a previous call to sl@0: Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(), sl@0: Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone(). sl@0: @param aSize The size in bytes of the required block. The specified size sl@0: is rounded up to the page size, since only whole pages of sl@0: physical RAM can be allocated. sl@0: @return KErrNone if the operation was successful. sl@0: sl@0: sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. 
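A minimal allocate/use/free sketch (the DMA use is only an illustrative placeholder,
and the caller must already satisfy the preconditions listed above):

    TPhysAddr physAddr;
    TInt r = Epoc::AllocPhysicalRam(4*KPageSize, physAddr, 0);
    if (r == KErrNone)
        {
        // ... hand physAddr to hardware, e.g. as a DMA buffer ...
        r = Epoc::FreePhysicalRam(physAddr, 4*KPageSize);
        }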
sl@0: */ sl@0: EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam"); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r=m.FreePhysicalRam(aPhysAddr,aSize); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: if (r == KErrNone) sl@0: { sl@0: TUint size = Kern::RoundToPageSize(aSize); sl@0: BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, size, aPhysAddr); sl@0: Epoc::DriverAllocdPhysRam -= size; sl@0: } sl@0: #endif sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Free a number of physical RAM pages that were previously allocated using sl@0: Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam(). sl@0: sl@0: Specifying one of the following may cause the system to panic: sl@0: a) an invalid physical RAM address. sl@0: b) valid physical RAM addresses where some had not been previously allocated. sl@0: c) an adrress not aligned to a page boundary. sl@0: sl@0: @param aNumPages The number of pages to be freed. sl@0: @param aPhysAddr An array of aNumPages TPhysAddr elements. Where each element sl@0: should contain the physical address of each page to be freed. sl@0: This must be the same set of addresses as those returned by a sl@0: previous call to Epoc::AllocPhysicalRam() or sl@0: Epoc::ZoneAllocPhysicalRam(). sl@0: @return KErrNone if the operation was successful. sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. sl@0: sl@0: */ sl@0: EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam"); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r=m.FreePhysicalRam(aNumPages, aPageList); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: if (r == KErrNone && BTrace::CheckFilter(BTrace::EKernelMemory)) sl@0: {// Only loop round each page if EKernelMemory tracing is enabled sl@0: TPhysAddr* pAddr = aPageList; sl@0: TPhysAddr* pAddrEnd = aPageList + aNumPages; sl@0: while (pAddr < pAddrEnd) sl@0: { sl@0: BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, KPageSize, *pAddr++); sl@0: Epoc::DriverAllocdPhysRam -= KPageSize; sl@0: } sl@0: } sl@0: #endif sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Allocate a specific block of physically contiguous RAM, specified by physical sl@0: base address and size. sl@0: If and when the RAM is no longer required it should be freed using sl@0: Epoc::FreePhysicalRam() sl@0: sl@0: @param aPhysAddr The physical address of the base of the required block. sl@0: @param aSize The size in bytes of the required block. The specified size sl@0: is rounded up to the page size, since only whole pages of sl@0: physical RAM can be allocated. sl@0: @return KErrNone if the operation was successful. sl@0: KErrArgument if the range of physical addresses specified included some sl@0: which are not valid physical RAM addresses. sl@0: KErrInUse if the range of physical addresses specified are all valid sl@0: physical RAM addresses but some of them have already been sl@0: allocated for other purposes. sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. 
sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. sl@0: */ sl@0: EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam"); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::Wait(); sl@0: TInt r=m.ClaimPhysicalRam(aPhysAddr,aSize); sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: if(r==KErrNone) sl@0: { sl@0: TUint32 pa=aPhysAddr; sl@0: TUint32 size=aSize; sl@0: m.RoundUpRangeToPageSize(pa,size); sl@0: BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, pa); sl@0: Epoc::DriverAllocdPhysRam += size; sl@0: } sl@0: #endif sl@0: MmuBase::Signal(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Translate a virtual address to the corresponding physical address. sl@0: sl@0: @param aLinAddr The virtual address to be translated. sl@0: @return The physical address corresponding to the given virtual address, or sl@0: KPhysAddrInvalid if the specified virtual address is unmapped. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. sl@0: @pre Hold system lock if there is any possibility that the virtual address is sl@0: unmapped, may become unmapped, or may be remapped during the operation. sl@0: This will potentially be the case unless the virtual address refers to a sl@0: hardware chunk or shared chunk under the control of the driver calling this sl@0: function. sl@0: */ sl@0: EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr) sl@0: { sl@0: // This precondition is violated by various parts of the system under some conditions, sl@0: // e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by sl@0: // a higher-level RTOS for which these conditions are meaningless. Thus, it's been sl@0: // disabled for now. 
sl@0: // CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical"); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: TPhysAddr pa=m.LinearToPhysical(aLinAddr); sl@0: return pa; sl@0: } sl@0: sl@0: sl@0: EXPORT_C TInt TInternalRamDrive::MaxSize() sl@0: { sl@0: return TheSuperPage().iRamDriveSize+Kern::FreeRamInBytes(); sl@0: } sl@0: sl@0: sl@0: /****************************************************************************** sl@0: * Address allocator sl@0: ******************************************************************************/ sl@0: TLinearSection* TLinearSection::New(TLinAddr aBase, TLinAddr aEnd) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("TLinearSection::New(%08x,%08x)", aBase, aEnd)); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: TUint npdes=(aEnd-aBase)>>m.iChunkShift; sl@0: TInt nmapw=(npdes+31)>>5; sl@0: TInt memsz=sizeof(TLinearSection)+(nmapw-1)*sizeof(TUint32); sl@0: TLinearSection* p=(TLinearSection*)Kern::Alloc(memsz); sl@0: if (p) sl@0: { sl@0: new(&p->iAllocator) TBitMapAllocator(npdes, ETrue); sl@0: p->iBase=aBase; sl@0: p->iEnd=aEnd; sl@0: } sl@0: __KTRACE_OPT(KMMU,Kern::Printf("TLinearSection at %08x", p)); sl@0: return p; sl@0: } sl@0: sl@0: /****************************************************************************** sl@0: * Address allocator for HW chunks sl@0: ******************************************************************************/ sl@0: THwChunkPageTable::THwChunkPageTable(TInt aIndex, TInt aSize, TPde aPdePerm) sl@0: : THwChunkRegion(aIndex, 0, aPdePerm), sl@0: iAllocator(aSize, ETrue) sl@0: { sl@0: } sl@0: sl@0: THwChunkPageTable* THwChunkPageTable::New(TInt aIndex, TPde aPdePerm) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable::New(%03x,%08x)",aIndex,aPdePerm)); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: TInt pdepages=m.iChunkSize>>m.iPageShift; sl@0: TInt nmapw=(pdepages+31)>>5; sl@0: TInt memsz=sizeof(THwChunkPageTable)+(nmapw-1)*sizeof(TUint32); sl@0: THwChunkPageTable* p=(THwChunkPageTable*)Kern::Alloc(memsz); sl@0: if (p) sl@0: new (p) THwChunkPageTable(aIndex, pdepages, aPdePerm); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable at %08x",p)); sl@0: return p; sl@0: } sl@0: sl@0: THwChunkAddressAllocator::THwChunkAddressAllocator() sl@0: { sl@0: } sl@0: sl@0: THwChunkAddressAllocator* THwChunkAddressAllocator::New(TInt aAlign, TLinearSection* aSection) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator::New(%d,%08x)",aAlign,aSection)); sl@0: THwChunkAddressAllocator* p=new THwChunkAddressAllocator; sl@0: if (p) sl@0: { sl@0: p->iAlign=aAlign; sl@0: p->iSection=aSection; sl@0: } sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator at %08x",p)); sl@0: return p; sl@0: } sl@0: sl@0: THwChunkRegion* THwChunkAddressAllocator::NewRegion(TInt aIndex, TInt aSize, TPde aPdePerm) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion(index=%x, size=%x, pde=%08x)",aIndex,aSize,aPdePerm)); sl@0: THwChunkRegion* p=new THwChunkRegion(aIndex, aSize, aPdePerm); sl@0: if (p) sl@0: { sl@0: TInt r=InsertInOrder(p, Order); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r)); sl@0: if (r<0) sl@0: delete p, p=NULL; sl@0: } sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion ret %08x)",p)); sl@0: return p; sl@0: } sl@0: sl@0: THwChunkPageTable* THwChunkAddressAllocator::NewPageTable(TInt aIndex, TPde aPdePerm, TInt aInitB, TInt aInitC) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable(index=%x, pde=%08x, iB=%d, 
iC=%d)",aIndex,aPdePerm,aInitB,aInitC)); sl@0: THwChunkPageTable* p=THwChunkPageTable::New(aIndex, aPdePerm); sl@0: if (p) sl@0: { sl@0: TInt r=InsertInOrder(p, Order); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r)); sl@0: if (r<0) sl@0: delete p, p=NULL; sl@0: else sl@0: p->iAllocator.Alloc(aInitB, aInitC); sl@0: } sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable ret %08x)",p)); sl@0: return p; sl@0: } sl@0: sl@0: TLinAddr THwChunkAddressAllocator::SearchExisting(TInt aNumPages, TInt aPageAlign, TInt aPageOffset, TPde aPdePerm) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx np=%03x align=%d offset=%03x pdeperm=%08x", sl@0: aNumPages, aPageAlign, aPageOffset, aPdePerm)); sl@0: TInt c=Count(); sl@0: if (c==0) sl@0: return 0; // don't try to access [0] if array empty! sl@0: THwChunkPageTable** pp=(THwChunkPageTable**)&(*this)[0]; sl@0: THwChunkPageTable** ppE=pp+c; sl@0: while(ppiRegionSize!=0 || p->iPdePerm!=aPdePerm) sl@0: continue; // if not page table or PDE permissions wrong, we can't use it sl@0: TInt r=p->iAllocator.AllocAligned(aNumPages, aPageAlign, -aPageOffset, EFalse); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("r=%d", r)); sl@0: if (r<0) sl@0: continue; // not enough space in this page table sl@0: sl@0: // got enough space in existing page table, so use it sl@0: p->iAllocator.Alloc(r, aNumPages); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: TLinAddr a = iSection->iBase + (TLinAddr(p->iIndex)<>m.iPageShift; sl@0: TInt align=Max(aAlign,iAlign); sl@0: if (align>m.iChunkShift) sl@0: return 0; sl@0: TInt aligns=1<>m.iPageShift; sl@0: TInt pdepages=m.iChunkSize>>m.iPageShift; sl@0: TInt pdepageshift=m.iChunkShift-m.iPageShift; sl@0: MmuBase::WaitHwChunk(); sl@0: if (npages>pdepageshift; sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Allocate %d PDEs", npdes)); sl@0: MmuBase::Wait(); sl@0: TInt ix=iSection->iAllocator.AllocConsecutive(npdes, EFalse); sl@0: if (ix>=0) sl@0: iSection->iAllocator.Alloc(ix, npdes); sl@0: MmuBase::Signal(); sl@0: TLinAddr a=0; sl@0: if (ix>=0) sl@0: a = iSection->iBase + (TLinAddr(ix)<=pdepages) sl@0: { sl@0: // next need whole-PDE-block placeholder sl@0: TInt whole_pdes=remain>>pdepageshift; sl@0: middle=NewRegion(nix, whole_pdes, aPdePerm); sl@0: nix+=whole_pdes; sl@0: remain-=(whole_pdes<=0) sl@0: { sl@0: MmuBase::Wait(); sl@0: iSection->iAllocator.Free(ix, npdes); sl@0: MmuBase::Signal(); sl@0: } sl@0: } sl@0: MmuBase::SignalHwChunk(); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc returns %08x", a)); sl@0: return a; sl@0: } sl@0: sl@0: void THwChunkAddressAllocator::Discard(THwChunkRegion* aRegion) sl@0: { sl@0: // remove a region from the array and destroy it sl@0: TInt r=FindInOrder(aRegion, Order); sl@0: if (r>=0) sl@0: Remove(r); sl@0: Kern::Free(aRegion); sl@0: } sl@0: sl@0: TInt THwChunkAddressAllocator::Order(const THwChunkRegion& a1, const THwChunkRegion& a2) sl@0: { sl@0: // order two regions by address sl@0: return a1.iIndex-a2.iIndex; sl@0: } sl@0: sl@0: THwChunkRegion* THwChunkAddressAllocator::Free(TLinAddr aAddr, TInt aSize) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free addr=%08x size=%08x", aAddr, aSize)); sl@0: __ASSERT_ALWAYS(aAddr>=iSection->iBase && (aAddr+aSize)<=iSection->iEnd, sl@0: MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid)); sl@0: THwChunkRegion* list=NULL; sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: TInt ix=(aAddr - iSection->iBase)>>m.iChunkShift; sl@0: TInt remain=(aSize+m.iPageMask)>>m.iPageShift; sl@0: TInt pdepageshift=m.iChunkShift-m.iPageShift; sl@0: TInt 
offset=(aAddr&m.iChunkMask)>>m.iPageShift; sl@0: THwChunkRegion find(ix, 0, 0); sl@0: MmuBase::WaitHwChunk(); sl@0: TInt r=FindInOrder(&find, Order); sl@0: __ASSERT_ALWAYS(r>=0, MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid)); sl@0: while (remain) sl@0: { sl@0: THwChunkPageTable* p=(THwChunkPageTable*)(*this)[r]; sl@0: __ASSERT_ALWAYS(p->iIndex==ix, MmuBase::Panic(MmuBase::EFreeHwChunkIndexInvalid)); sl@0: if (p->iRegionSize) sl@0: { sl@0: // multiple-whole-PDE region sl@0: TInt rsz=p->iRegionSize; sl@0: remain-=(rsz<iAllocator.Free(offset, n); sl@0: remain-=n; sl@0: ++ix; sl@0: if (p->iAllocator.iAvail < p->iAllocator.iSize) sl@0: { sl@0: // bitmap still in use sl@0: offset=0; sl@0: ++r; // r indexes following array entry sl@0: continue; sl@0: } sl@0: Remove(r); // r now indexes following array entry sl@0: } sl@0: offset=0; sl@0: p->iNext=list; sl@0: list=p; // chain free region descriptors together sl@0: } sl@0: MmuBase::SignalHwChunk(); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free returns %08x", list)); sl@0: return list; sl@0: } sl@0: sl@0: /******************************************** sl@0: * Hardware chunk abstraction sl@0: ********************************************/ sl@0: THwChunkAddressAllocator* MmuBase::MappingRegion(TUint) sl@0: { sl@0: return iHwChunkAllocator; sl@0: } sl@0: sl@0: TInt MmuBase::AllocateAllPageTables(TLinAddr aLinAddr, TInt aSize, TPde aPdePerm, TInt aMapShift, SPageTableInfo::TAttribs aAttrib) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocateAllPageTables lin=%08x, size=%x, pde=%08x, mapshift=%d attribs=%d", sl@0: aLinAddr, aSize, aPdePerm, aMapShift, aAttrib)); sl@0: TInt offset=aLinAddr&iChunkMask; sl@0: TInt remain=aSize; sl@0: TLinAddr a=aLinAddr&~iChunkMask; sl@0: TInt newpts=0; sl@0: for (; remain>0; a+=iChunkSize) sl@0: { sl@0: // don't need page table if a whole PDE mapping is permitted here sl@0: if (aMapShiftClose(0); sl@0: Note that closing a chunk does not free any RAM pages which were mapped by the sl@0: chunk - these must be freed separately using Epoc::FreePhysicalRam(). sl@0: sl@0: @param aChunk Upon successful completion this parameter receives a pointer to sl@0: the newly created chunk. Upon unsuccessful completion it is sl@0: written with a NULL pointer. The virtual address of the mapping sl@0: can subsequently be discovered using the LinearAddress() sl@0: function on the chunk. sl@0: @param aAddr The base address of the physical region to be mapped. This will sl@0: be rounded down to a multiple of the hardware page size before sl@0: being used. sl@0: @param aSize The size of the physical address region to be mapped. This will sl@0: be rounded up to a multiple of the hardware page size before sl@0: being used; the rounding is such that the entire range from sl@0: aAddr to aAddr+aSize-1 inclusive is mapped. For example if sl@0: aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an sl@0: 8KB range of physical addresses from 0xB0001000 to 0xB0002FFF sl@0: inclusive will be mapped. sl@0: @param aMapAttr Mapping attributes required for the mapping. This is formed sl@0: by ORing together values from the TMappingAttributes enumeration sl@0: to specify the access permissions and caching policy. sl@0: sl@0: @pre Calling thread must be in a critical section. sl@0: @pre Interrupts must be enabled. sl@0: @pre Kernel must be unlocked. sl@0: @pre No fast mutex can be held. sl@0: @pre Call in a thread context. sl@0: @pre Can be used in a device driver. 
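As an illustrative sketch only (the physical address and the attribute combination
EMapAttrSupRw|EMapAttrFullyBlocking are assumed values suitable for a simple
register-style mapping):

    DPlatChunkHw* hwChunk = NULL;
    TInt r = DPlatChunkHw::New(hwChunk, 0xB0001000, 0x1000, EMapAttrSupRw|EMapAttrFullyBlocking);
    if (r == KErrNone)
        {
        volatile TUint32* regs = (volatile TUint32*)hwChunk->LinearAddress();
        // ... access the device through 'regs' ...
        hwChunk->Close(0);    // unmaps the region; does not free any RAM pages
        }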
sl@0: @see TMappingAttributes sl@0: */ sl@0: EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr) sl@0: { sl@0: if (aAddr == KPhysAddrInvalid) sl@0: return KErrNotSupported; sl@0: return DoNew(aChunk, aAddr, aSize, aMapAttr); sl@0: } sl@0: sl@0: TInt DPlatChunkHw::DoNew(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New"); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr)); sl@0: if (aSize<=0) sl@0: return KErrArgument; sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: aChunk=NULL; sl@0: TPhysAddr pa=aAddr!=KPhysAddrInvalid ? aAddr&~m.iPageMask : 0; sl@0: TInt size=((aAddr+aSize+m.iPageMask)&~m.iPageMask)-pa; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Rounded %08x+%x", pa, size)); sl@0: DMemModelChunkHw* pC=new DMemModelChunkHw; sl@0: if (!pC) sl@0: return KErrNoMemory; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunkHw created at %08x",pC)); sl@0: pC->iPhysAddr=aAddr; sl@0: pC->iSize=size; sl@0: TUint mapattr=aMapAttr; sl@0: TPde pdePerm=0; sl@0: TPte ptePerm=0; sl@0: TInt r=m.PdePtePermissions(mapattr, pdePerm, ptePerm); sl@0: if (r==KErrNone) sl@0: { sl@0: pC->iAllocator=m.MappingRegion(mapattr); sl@0: pC->iAttribs=mapattr; // save actual mapping attributes sl@0: r=pC->AllocateLinearAddress(pdePerm); sl@0: if (r>=0) sl@0: { sl@0: TInt map_shift=r; sl@0: MmuBase::Wait(); sl@0: r=m.AllocateAllPageTables(pC->iLinAddr, size, pdePerm, map_shift, SPageTableInfo::EGlobal); sl@0: if (r==KErrNone && aAddr!=KPhysAddrInvalid) sl@0: m.Map(pC->iLinAddr, pa, size, pdePerm, ptePerm, map_shift); sl@0: MmuBase::Signal(); sl@0: } sl@0: } sl@0: if (r==KErrNone) sl@0: aChunk=pC; sl@0: else sl@0: pC->Close(NULL); sl@0: return r; sl@0: } sl@0: sl@0: TInt DMemModelChunkHw::AllocateLinearAddress(TPde aPdePerm) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::AllocateLinearAddress(%08x)", aPdePerm)); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("iAllocator=%08x iPhysAddr=%08x iSize=%08x", iAllocator, iPhysAddr, iSize)); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: TInt map_shift = (iPhysAddr<0xffffffffu) ? 30 : m.iPageShift; sl@0: for (; map_shift>=m.iPageShift; --map_shift) sl@0: { sl@0: TUint32 map_size = 1<m.iPageShift) sl@0: continue; // region not big enough to use this mapping size sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Try map size %08x", map_size)); sl@0: iLinAddr=iAllocator->Alloc(iSize, map_shift, iPhysAddr, aPdePerm); sl@0: if (iLinAddr) sl@0: break; // done sl@0: } sl@0: TInt r=iLinAddr ? 
map_shift : KErrNoMemory; sl@0: __KTRACE_OPT(KMMU, Kern::Printf("iLinAddr=%08x, returning %d", iLinAddr, r)); sl@0: return r; sl@0: } sl@0: sl@0: void DMemModelChunkHw::DeallocateLinearAddress() sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::DeallocateLinearAddress %O", this)); sl@0: MmuBase& m=*MmuBase::TheMmu; sl@0: MmuBase::WaitHwChunk(); sl@0: THwChunkRegion* rgn=iAllocator->Free(iLinAddr, iSize); sl@0: iLinAddr=0; sl@0: MmuBase::SignalHwChunk(); sl@0: TLinAddr base = iAllocator->iSection->iBase; sl@0: TBitMapAllocator& section_allocator = iAllocator->iSection->iAllocator; sl@0: while (rgn) sl@0: { sl@0: MmuBase::Wait(); sl@0: if (rgn->iRegionSize) sl@0: { sl@0: // free address range sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Freeing range %03x+%03x", rgn->iIndex, rgn->iRegionSize)); sl@0: section_allocator.Free(rgn->iIndex, rgn->iRegionSize); sl@0: sl@0: // Though this is large region, it still can be made up of page tables (not sections). sl@0: // Check each chunk and remove tables in neccessary sl@0: TInt i = 0; sl@0: TLinAddr a = base + (TLinAddr(rgn->iIndex)<iRegionSize ; i++,a+=m.iChunkSize) sl@0: { sl@0: TInt id = m.UnassignPageTable(a); sl@0: if (id>=0) sl@0: m.FreePageTable(id); sl@0: } sl@0: } sl@0: else sl@0: { sl@0: // free address and page table if it exists sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Freeing index %03x", rgn->iIndex)); sl@0: section_allocator.Free(rgn->iIndex); sl@0: TLinAddr a = base + (TLinAddr(rgn->iIndex)<=0) sl@0: m.FreePageTable(id); sl@0: } sl@0: MmuBase::Signal(); sl@0: THwChunkRegion* free=rgn; sl@0: rgn=rgn->iNext; sl@0: Kern::Free(free); sl@0: } sl@0: } sl@0: sl@0: sl@0: // sl@0: // RamCacheBase sl@0: // sl@0: sl@0: sl@0: RamCacheBase* RamCacheBase::TheRamCache = NULL; sl@0: sl@0: sl@0: RamCacheBase::RamCacheBase() sl@0: { sl@0: } sl@0: sl@0: sl@0: void RamCacheBase::Init2() sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">RamCacheBase::Init2")); sl@0: iMmu = MmuBase::TheMmu; sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("SetUnused(); sl@0: --iNumberOfFreePages; sl@0: __NK_ASSERT_DEBUG(iNumberOfFreePages>=0); sl@0: // Release system lock before using the RAM allocator. sl@0: NKern::UnlockSystem(); sl@0: iMmu->iRamPageAllocator->FreeRamPage(aPageInfo->PhysAddr(), EPageDiscard); sl@0: NKern::LockSystem(); sl@0: } sl@0: sl@0: sl@0: SPageInfo* RamCacheBase::GetPageFromSystem(TUint aBlockedZoneId, TBool aBlockRest) sl@0: { sl@0: __ASSERT_MUTEX(MmuBase::RamAllocatorMutex); sl@0: SPageInfo* pageInfo; sl@0: TPhysAddr pagePhys; sl@0: TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard, aBlockedZoneId, aBlockRest); sl@0: if(r==KErrNone) sl@0: { sl@0: NKern::LockSystem(); sl@0: pageInfo = SPageInfo::FromPhysAddr(pagePhys); sl@0: pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead); sl@0: ++iNumberOfFreePages; sl@0: NKern::UnlockSystem(); sl@0: } sl@0: else sl@0: pageInfo = NULL; sl@0: return pageInfo; sl@0: } sl@0: sl@0: sl@0: // sl@0: // RamCache sl@0: // sl@0: sl@0: sl@0: void RamCache::Init2() sl@0: { sl@0: __KTRACE_OPT(KBOOT,Kern::Printf(">RamCache::Init2")); sl@0: RamCacheBase::Init2(); sl@0: __KTRACE_OPT(KBOOT,Kern::Printf("GetFreePages %d",aNumPages)); sl@0: NKern::LockSystem(); sl@0: sl@0: while(aNumPages>0 && NumberOfFreePages()>=aNumPages) sl@0: { sl@0: // steal a page from cache list and return it to the free pool... 
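			// Pages come off the head of iPageList; each one is marked EStatePagedDead,
			// released via SetFree() and then handed back to the RAM allocator by
			// ReturnToSystem(), which also decrements the free page count.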
sl@0: SPageInfo* pageInfo = SPageInfo::FromLink(iPageList.First()->Deque()); sl@0: pageInfo->SetState(SPageInfo::EStatePagedDead); sl@0: SetFree(pageInfo); sl@0: ReturnToSystem(pageInfo); sl@0: --aNumPages; sl@0: } sl@0: sl@0: NKern::UnlockSystem(); sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: Type(); sl@0: if(type==SPageInfo::EChunk) sl@0: { sl@0: //Must not donate locked page. An example is DMA trasferred memory. sl@0: __NK_ASSERT_DEBUG(0 == aPageInfo->LockCount()); sl@0: sl@0: aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung); sl@0: iPageList.Add(&aPageInfo->iLink); sl@0: ++iNumberOfFreePages; sl@0: // Update ram allocator counts as this page has changed its type sl@0: DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner(); sl@0: iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard); sl@0: sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkDonatePage, chunk, aPageInfo->Offset()); sl@0: #endif sl@0: return; sl@0: } sl@0: // allow already donated pages... sl@0: __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache); sl@0: } sl@0: sl@0: sl@0: TBool RamCache::ReclaimRamCachePage(SPageInfo* aPageInfo) sl@0: { sl@0: SPageInfo::TType type = aPageInfo->Type(); sl@0: // Kern::Printf("DemandPaging::ReclaimRamCachePage %x %d free=%d",aPageInfo,type,iNumberOfFreePages); sl@0: sl@0: if(type==SPageInfo::EChunk) sl@0: return ETrue; // page already reclaimed sl@0: sl@0: __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache); sl@0: __NK_ASSERT_DEBUG(aPageInfo->State()==SPageInfo::EStatePagedYoung); sl@0: // Update ram allocator counts as this page has changed its type sl@0: DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner(); sl@0: iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType()); sl@0: aPageInfo->iLink.Deque(); sl@0: --iNumberOfFreePages; sl@0: aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal); sl@0: sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkReclaimPage, chunk, aPageInfo->Offset()); sl@0: #endif sl@0: return ETrue; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Discard the specified page. sl@0: Should only be called on a page if a previous call to IsPageDiscardable() sl@0: returned ETrue and the system lock hasn't been released between the calls. sl@0: sl@0: @param aPageInfo The page info of the page to be discarded sl@0: @param aBlockedZoneId Not used by this overload. sl@0: @param aBlockRest Not used by this overload. sl@0: @return ETrue if page succesfully discarded sl@0: sl@0: @pre System lock held. sl@0: @post System lock held. sl@0: */ sl@0: TBool RamCache::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest) sl@0: { sl@0: __NK_ASSERT_DEBUG(iNumberOfFreePages > 0); sl@0: RemovePage(aPageInfo); sl@0: SetFree(&aPageInfo); sl@0: ReturnToSystem(&aPageInfo); sl@0: return ETrue; sl@0: } sl@0: sl@0: sl@0: /** sl@0: First stage in discarding a list of pages. sl@0: sl@0: Must ensure that the pages will still be discardable even if system lock is released. sl@0: To be used in conjunction with RamCacheBase::DoDiscardPages1(). sl@0: sl@0: @param aPageList A NULL terminated list of the pages to be discarded sl@0: @return KErrNone on success. 
sl@0: sl@0: @pre System lock held sl@0: @post System lock held sl@0: */ sl@0: TInt RamCache::DoDiscardPages0(SPageInfo** aPageList) sl@0: { sl@0: __ASSERT_SYSTEM_LOCK; sl@0: sl@0: SPageInfo* pageInfo; sl@0: while((pageInfo = *aPageList++) != 0) sl@0: { sl@0: RemovePage(*pageInfo); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Final stage in discarding a list of page sl@0: Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0(). sl@0: This overload doesn't actually need to do anything. sl@0: sl@0: @param aPageList A NULL terminated list of the pages to be discarded sl@0: @return KErrNone on success. sl@0: sl@0: @pre System lock held sl@0: @post System lock held sl@0: */ sl@0: TInt RamCache::DoDiscardPages1(SPageInfo** aPageList) sl@0: { sl@0: __ASSERT_SYSTEM_LOCK; sl@0: SPageInfo* pageInfo; sl@0: while((pageInfo = *aPageList++) != 0) sl@0: { sl@0: SetFree(pageInfo); sl@0: ReturnToSystem(pageInfo); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Check whether the specified page can be discarded by the RAM cache. sl@0: sl@0: @param aPageInfo The page info of the page being queried. sl@0: @return ETrue when the page can be discarded, EFalse otherwise. sl@0: @pre System lock held. sl@0: @post System lock held. sl@0: */ sl@0: TBool RamCache::IsPageDiscardable(SPageInfo& aPageInfo) sl@0: { sl@0: SPageInfo::TType type = aPageInfo.Type(); sl@0: SPageInfo::TState state = aPageInfo.State(); sl@0: return (type == SPageInfo::EPagedCache && state == SPageInfo::EStatePagedYoung); sl@0: } sl@0: sl@0: sl@0: /** sl@0: @return ETrue when the unmapped page should be freed, EFalse otherwise sl@0: */ sl@0: TBool RamCache::PageUnmapped(SPageInfo* aPageInfo) sl@0: { sl@0: SPageInfo::TType type = aPageInfo->Type(); sl@0: // Kern::Printf("DemandPaging::PageUnmapped %x %d",aPageInfo,type); sl@0: if(type!=SPageInfo::EPagedCache) sl@0: return ETrue; sl@0: SPageInfo::TState state = aPageInfo->State(); sl@0: if(state==SPageInfo::EStatePagedYoung) sl@0: { sl@0: // This page will be freed by DChunk::DoDecommit as it was originally sl@0: // allocated so update page counts in ram allocator sl@0: DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner(); sl@0: iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType()); sl@0: aPageInfo->iLink.Deque(); sl@0: --iNumberOfFreePages; sl@0: } sl@0: return ETrue; sl@0: } sl@0: sl@0: sl@0: void RamCache::Panic(TFault aFault) sl@0: { sl@0: Kern::Fault("RamCache",aFault); sl@0: } sl@0: sl@0: /** sl@0: Flush all cache pages. 
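All pages currently donated to the cache are handed back to the free pool via
GetFreePages(); since only the cache's own pages are requested, the call is
expected to always succeed (this is asserted in debug builds).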
sl@0: sl@0: @pre RAM allocator mutex held sl@0: @post RAM allocator mutex held sl@0: */ sl@0: void RamCache::FlushAll() sl@0: { sl@0: __ASSERT_MUTEX(MmuBase::RamAllocatorMutex); sl@0: #ifdef _DEBUG sl@0: // Should always succeed sl@0: __NK_ASSERT_DEBUG(GetFreePages(iNumberOfFreePages)); sl@0: #else sl@0: GetFreePages(iNumberOfFreePages); sl@0: #endif sl@0: } sl@0: sl@0: sl@0: // sl@0: // Demand Paging sl@0: // sl@0: sl@0: #ifdef __DEMAND_PAGING__ sl@0: sl@0: DemandPaging* DemandPaging::ThePager = 0; sl@0: TBool DemandPaging::PseudoRandInitialised = EFalse; sl@0: volatile TUint32 DemandPaging::PseudoRandSeed = 0; sl@0: sl@0: sl@0: void M::DemandPagingInit() sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">M::DemandPagingInit")); sl@0: TInt r = RamCacheBase::TheRamCache->Init3(); sl@0: if (r != KErrNone) sl@0: DemandPaging::Panic(DemandPaging::EInitialiseFailed); sl@0: sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("Fault(aExceptionInfo); sl@0: return KErrAbort; sl@0: } sl@0: sl@0: #ifdef _DEBUG sl@0: extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength) sl@0: { sl@0: if(M::CheckPagingSafe(EFalse, aStartAddres, aLength)) sl@0: return; sl@0: Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR); sl@0: __NK_ASSERT_ALWAYS(0); sl@0: } sl@0: sl@0: extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength) sl@0: { sl@0: if(M::CheckPagingSafe(ETrue, aStartAddres, aLength)) sl@0: return; sl@0: __KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR)); sl@0: } sl@0: #endif sl@0: sl@0: sl@0: TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength) sl@0: { sl@0: DemandPaging* pager = DemandPaging::ThePager; sl@0: if(!pager || K::Initialising) sl@0: return ETrue; sl@0: sl@0: NThread* nt = NCurrentThread(); sl@0: if(!nt) sl@0: return ETrue; // We've not booted properly yet! 
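	// From here on: addresses that never require the mutex-order check are allowed
	// immediately; otherwise paging is refused while a fast mutex is held (unless the
	// thread is inside its paging exception trap and the mutex is the scheduler lock)
	// or while CheckMutexOrder() reports an ordering violation. For data-paging faults
	// these cases only emit a warning trace, as data paging is not supported on the
	// moving/multiple memory models.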
sl@0: sl@0: if (!pager->NeedsMutexOrderCheck(aStartAddr, aLength)) sl@0: return ETrue; sl@0: sl@0: TBool dataPagingEnabled = EFalse; // data paging not supported on moving or multiple models sl@0: sl@0: DThread* thread = _LOFF(nt,DThread,iNThread); sl@0: NFastMutex* fm = NKern::HeldFastMutex(); sl@0: if(fm) sl@0: { sl@0: if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock) sl@0: { sl@0: if (!aDataPaging) sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held")); sl@0: return EFalse; sl@0: } sl@0: else sl@0: { sl@0: __KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held")); sl@0: return !dataPagingEnabled; sl@0: } sl@0: } sl@0: } sl@0: sl@0: DMutex* m = pager->CheckMutexOrder(); sl@0: if (m) sl@0: { sl@0: if (!aDataPaging) sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m)); sl@0: return EFalse; sl@0: } sl@0: else sl@0: { sl@0: __KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O",m)); sl@0: return !dataPagingEnabled; sl@0: } sl@0: } sl@0: sl@0: return ETrue; sl@0: } sl@0: sl@0: sl@0: TInt M::LockRegion(TLinAddr aStart,TInt aSize) sl@0: { sl@0: DemandPaging* pager = DemandPaging::ThePager; sl@0: if(pager) sl@0: return pager->LockRegion(aStart,aSize,NULL); sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: TInt M::UnlockRegion(TLinAddr aStart,TInt aSize) sl@0: { sl@0: DemandPaging* pager = DemandPaging::ThePager; sl@0: if(pager) sl@0: return pager->UnlockRegion(aStart,aSize,NULL); sl@0: return KErrNone; sl@0: } sl@0: sl@0: #else // !__DEMAND_PAGING__ sl@0: sl@0: TInt M::LockRegion(TLinAddr /*aStart*/,TInt /*aSize*/) sl@0: { sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: TInt M::UnlockRegion(TLinAddr /*aStart*/,TInt /*aSize*/) sl@0: { sl@0: return KErrNone; sl@0: } sl@0: sl@0: #endif // __DEMAND_PAGING__ sl@0: sl@0: sl@0: sl@0: sl@0: // sl@0: // DemandPaging sl@0: // sl@0: sl@0: #ifdef __DEMAND_PAGING__ sl@0: sl@0: sl@0: const TUint16 KDefaultYoungOldRatio = 3; sl@0: const TUint KDefaultMinPages = 256; sl@0: const TUint KDefaultMaxPages = KMaxTUint >> KPageShift; sl@0: sl@0: /* Need at least 4 mapped pages to guarentee to be able to execute all ARM instructions. sl@0: (Worst case is a THUMB2 STM instruction with both instruction and data stradling page sl@0: boundaries.) sl@0: */ sl@0: const TUint KMinYoungPages = 4; sl@0: const TUint KMinOldPages = 1; sl@0: sl@0: /* A minimum young/old ratio of 1 means that we need at least twice KMinYoungPages pages... sl@0: */ sl@0: const TUint KAbsoluteMinPageCount = 2*KMinYoungPages; sl@0: sl@0: __ASSERT_COMPILE(KMinOldPages<=KAbsoluteMinPageCount/2); sl@0: sl@0: class DMissingPagingDevice : public DPagingDevice sl@0: { sl@0: TInt Read(TThreadMessage* /*aReq*/,TLinAddr /*aBuffer*/,TUint /*aOffset*/,TUint /*aSize*/,TInt /*aDrvNumber*/) sl@0: { DemandPaging::Panic(DemandPaging::EDeviceMissing); return 0; } sl@0: }; sl@0: sl@0: sl@0: TBool DemandPaging::RomPagingRequested() sl@0: { sl@0: return TheRomHeader().iPageableRomSize != 0; sl@0: } sl@0: sl@0: sl@0: TBool DemandPaging::CodePagingRequested() sl@0: { sl@0: return (TheSuperPage().KernelConfigFlags() & EKernelConfigCodePagingPolicyDefaultPaged) != EKernelConfigCodePagingPolicyNoPaging; sl@0: } sl@0: sl@0: sl@0: DemandPaging::DemandPaging() sl@0: { sl@0: } sl@0: sl@0: sl@0: void DemandPaging::Init2() sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init2")); sl@0: sl@0: RamCacheBase::Init2(); sl@0: sl@0: // initialise live list... 
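	// Worked example: with the defaults defined above (KDefaultMinPages==256,
	// KDefaultYoungOldRatio==3, KMinYoungPages==4) and no overrides in the ROM header's
	// SDemandPagingConfig, this selects a 256-page minimum live list, a young/old ratio
	// of 3, and a raw iMinimumPageLimit of (4 * (1 + 3)) / 3 == 5 pages before any
	// later adjustment.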
sl@0: SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig; sl@0: sl@0: iMinimumPageCount = KDefaultMinPages; sl@0: if(config.iMinPages) sl@0: iMinimumPageCount = config.iMinPages; sl@0: if(iMinimumPageCountratioLimit) sl@0: iYoungOldRatio = ratioLimit; sl@0: sl@0: iMinimumPageLimit = (KMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio; sl@0: if(iMinimumPageLimitDemandPaging::InitialiseLiveList min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio)); sl@0: sl@0: if(iMaximumPageCountiRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard); sl@0: if(r!=0) sl@0: Panic(EInitialiseFailed); sl@0: AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys)); sl@0: } sl@0: sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::Init3")); sl@0: TInt r; sl@0: sl@0: // construct iBufferChunk sl@0: iDeviceBufferSize = 2*KPageSize; sl@0: TChunkCreateInfo info; sl@0: info.iType = TChunkCreateInfo::ESharedKernelMultiple; sl@0: info.iMaxSize = iDeviceBufferSize*KMaxPagingDevices; sl@0: info.iMapAttr = EMapAttrCachedMax; sl@0: info.iOwnsMemory = ETrue; sl@0: TUint32 mapAttr; sl@0: r = Kern::ChunkCreate(info,iDeviceBuffersChunk,iDeviceBuffers,mapAttr); sl@0: if(r!=KErrNone) sl@0: return r; sl@0: sl@0: // Install 'null' paging devices which panic if used... sl@0: DMissingPagingDevice* missingPagingDevice = new DMissingPagingDevice; sl@0: for(TInt i=0; iRoundToPageSize(romHeader.iUncompressedSize); sl@0: if(romHeader.iRomPageIndex) sl@0: iRomPageIndex = (SRomPageInfo*)((TInt)&romHeader+romHeader.iRomPageIndex); sl@0: sl@0: TLinAddr pagedStart = romHeader.iPageableRomSize ? (TLinAddr)&romHeader+romHeader.iPageableRomStart : 0; sl@0: if(pagedStart) sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("ROM=%x+%x PagedStart=%x",iRomLinearBase,iRomSize,pagedStart)); sl@0: __NK_ASSERT_ALWAYS(TUint(pagedStart-iRomLinearBase)RoundToPageSize(iRomSize)>>KPageShift; sl@0: iOriginalRomPages = new TPhysAddr[iOriginalRomPageCount]; sl@0: __NK_ASSERT_ALWAYS(iOriginalRomPages); sl@0: TPhysAddr romPhysAddress; sl@0: iMmu->LinearToPhysical(iRomLinearBase,iRomSize,romPhysAddress,iOriginalRomPages); sl@0: #endif sl@0: } sl@0: sl@0: r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0); sl@0: __NK_ASSERT_ALWAYS(r==KErrNone); sl@0: sl@0: #ifdef __DEMAND_PAGING_BENCHMARKS__ sl@0: for (TInt i = 0 ; i < EMaxPagingBm ; ++i) sl@0: ResetBenchmarkData((TPagingBenchmark)i); sl@0: #endif sl@0: sl@0: // Initialisation now complete sl@0: ThePager = this; sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: DemandPaging::~DemandPaging() sl@0: { sl@0: #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__ sl@0: delete[] iOriginalRomPages; sl@0: #endif sl@0: for (TUint i = 0 ; i < iPagingRequestCount ; ++i) sl@0: delete iPagingRequests[i]; sl@0: } sl@0: sl@0: sl@0: TInt DemandPaging::InstallPagingDevice(DPagingDevice* aDevice) sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InstallPagingDevice name='%s' type=%d",aDevice->iName,aDevice->iType)); sl@0: sl@0: if(aDevice->iReadUnitShift>KPageShift) sl@0: Panic(EInvalidPagingDevice); sl@0: sl@0: TInt i; sl@0: TInt r = KErrNone; sl@0: TBool createRequestObjects = EFalse; sl@0: sl@0: if ((aDevice->iType & DPagingDevice::ERom) && RomPagingRequested()) sl@0: { sl@0: r = DoInstallPagingDevice(aDevice, 0); sl@0: if (r != KErrNone) sl@0: goto done; sl@0: K::MemModelAttributes|=EMemModelAttrRomPaging; sl@0: createRequestObjects = ETrue; sl@0: } sl@0: sl@0: if ((aDevice->iType & DPagingDevice::ECode) && CodePagingRequested()) sl@0: { sl@0: for (i = 0 ; i < 
KMaxLocalDrives ; ++i) sl@0: { sl@0: if (aDevice->iDrivesSupported & (1<iInstalled) sl@0: { sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("**** Attempt to install more than one ROM paging device !!!!!!!! ****")); sl@0: //Panic(EDeviceAlreadyExists); sl@0: NKern::UnlockSystem(); sl@0: return KErrNone; sl@0: } sl@0: sl@0: aDevice->iDeviceId = aId; sl@0: device->iDevice = aDevice; sl@0: device->iInstalled = ETrue; sl@0: NKern::UnlockSystem(); sl@0: sl@0: __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::InstallPagingDevice id=%d, device=%08x",aId,device)); sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: DemandPaging::DPagingRequest::~DPagingRequest() sl@0: { sl@0: if (iMutex) sl@0: iMutex->Close(NULL); sl@0: } sl@0: sl@0: TInt DemandPaging::CreateRequestObject() sl@0: { sl@0: _LIT(KLitPagingRequest,"PagingRequest-"); sl@0: sl@0: TInt index; sl@0: TInt id = (TInt)__e32_atomic_add_ord32(&iNextPagingRequestCount, 1); sl@0: TLinAddr offset = id * iDeviceBufferSize; sl@0: TUint32 physAddr = 0; sl@0: TInt r = Kern::ChunkCommitContiguous(iDeviceBuffersChunk,offset,iDeviceBufferSize, physAddr); sl@0: if(r != KErrNone) sl@0: return r; sl@0: sl@0: DPagingRequest* req = new DPagingRequest(); sl@0: if (!req) sl@0: return KErrNoMemory; sl@0: sl@0: req->iBuffer = iDeviceBuffers + offset; sl@0: AllocLoadAddress(*req, id); sl@0: sl@0: TBuf<16> mutexName(KLitPagingRequest); sl@0: mutexName.AppendNum(id); sl@0: r = K::MutexCreate(req->iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn); sl@0: if (r!=KErrNone) sl@0: goto done; sl@0: sl@0: // Ensure there are enough young pages to cope with new request object sl@0: r = ResizeLiveList(iMinimumPageCount, iMaximumPageCount); sl@0: if (r!=KErrNone) sl@0: goto done; sl@0: sl@0: NKern::LockSystem(); sl@0: index = iPagingRequestCount++; sl@0: __NK_ASSERT_ALWAYS(index < KMaxPagingRequests); sl@0: iPagingRequests[index] = req; sl@0: iFreeRequestPool.AddHead(req); sl@0: NKern::UnlockSystem(); sl@0: sl@0: done: sl@0: if (r != KErrNone) sl@0: delete req; sl@0: sl@0: return r; sl@0: } sl@0: sl@0: DemandPaging::DPagingRequest* DemandPaging::AcquireRequestObject() sl@0: { sl@0: __ASSERT_SYSTEM_LOCK; sl@0: __NK_ASSERT_DEBUG(iPagingRequestCount > 0); sl@0: sl@0: DPagingRequest* req = NULL; sl@0: sl@0: // System lock used to serialise access to our data strucures as we have to hold it anyway when sl@0: // we wait on the mutex sl@0: sl@0: req = (DPagingRequest*)iFreeRequestPool.GetFirst(); sl@0: if (req != NULL) sl@0: __NK_ASSERT_DEBUG(req->iUsageCount == 0); sl@0: else sl@0: { sl@0: // Pick a random request object to wait on sl@0: TUint index = (FastPseudoRand() * TUint64(iPagingRequestCount)) >> 32; sl@0: __NK_ASSERT_DEBUG(index < iPagingRequestCount); sl@0: req = iPagingRequests[index]; sl@0: __NK_ASSERT_DEBUG(req->iUsageCount > 0); sl@0: } sl@0: sl@0: #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__ sl@0: ++iWaitingCount; sl@0: if (iWaitingCount > iMaxWaitingCount) sl@0: iMaxWaitingCount = iWaitingCount; sl@0: #endif sl@0: sl@0: ++req->iUsageCount; sl@0: TInt r = req->iMutex->Wait(); sl@0: __NK_ASSERT_ALWAYS(r == KErrNone); sl@0: sl@0: #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__ sl@0: --iWaitingCount; sl@0: ++iPagingCount; sl@0: if (iPagingCount > iMaxPagingCount) sl@0: iMaxPagingCount = iPagingCount; sl@0: #endif sl@0: sl@0: return req; sl@0: } sl@0: sl@0: void DemandPaging::ReleaseRequestObject(DPagingRequest* aReq) sl@0: { sl@0: __ASSERT_SYSTEM_LOCK; sl@0: sl@0: #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__ sl@0: --iPagingCount; sl@0: #endif sl@0: sl@0: // If there are no 
threads waiting on the mutex then return it to the free pool sl@0: __NK_ASSERT_DEBUG(aReq->iUsageCount > 0); sl@0: if (--aReq->iUsageCount == 0) sl@0: iFreeRequestPool.AddHead(aReq); sl@0: sl@0: aReq->iMutex->Signal(); sl@0: NKern::LockSystem(); sl@0: } sl@0: sl@0: TInt DemandPaging::ReadRomPage(const DPagingRequest* aReq, TLinAddr aRomAddress) sl@0: { sl@0: START_PAGING_BENCHMARK; sl@0: sl@0: TInt pageSize = KPageSize; sl@0: TInt dataOffset = aRomAddress-iRomLinearBase; sl@0: TInt pageNumber = dataOffset>>KPageShift; sl@0: TInt readUnitShift = RomPagingDevice().iDevice->iReadUnitShift; sl@0: TInt r; sl@0: if(!iRomPageIndex) sl@0: { sl@0: // ROM not broken into pages, so just read it in directly sl@0: START_PAGING_BENCHMARK; sl@0: r = RomPagingDevice().iDevice->Read(const_cast(&aReq->iMessage),aReq->iLoadAddr,dataOffset>>readUnitShift,pageSize>>readUnitShift,-1/*token for ROM paging*/); sl@0: END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia); sl@0: } sl@0: else sl@0: { sl@0: // Work out where data for page is located sl@0: SRomPageInfo* romPageInfo = iRomPageIndex+pageNumber; sl@0: dataOffset = romPageInfo->iDataStart; sl@0: TInt dataSize = romPageInfo->iDataSize; sl@0: if(!dataSize) sl@0: { sl@0: // empty page, fill it with 0xff... sl@0: memset((void*)aReq->iLoadAddr,-1,pageSize); sl@0: r = KErrNone; sl@0: } sl@0: else sl@0: { sl@0: __NK_ASSERT_ALWAYS(romPageInfo->iPagingAttributes&SRomPageInfo::EPageable); sl@0: sl@0: // Read data for page... sl@0: TThreadMessage* msg= const_cast(&aReq->iMessage); sl@0: TLinAddr buffer = aReq->iBuffer; sl@0: TUint readStart = dataOffset>>readUnitShift; sl@0: TUint readSize = ((dataOffset+dataSize-1)>>readUnitShift)-readStart+1; sl@0: __NK_ASSERT_DEBUG((readSize<Read(msg,buffer,readStart,readSize,-1/*token for ROM paging*/); sl@0: END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia); sl@0: if(r==KErrNone) sl@0: { sl@0: // Decompress data... 
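				// The media read above starts at 'readStart' whole read units, so the page's
				// bytes begin at offset dataOffset-(readStart<<readUnitShift) within the buffer.
				// For example, with an assumed readUnitShift of 9 (512-byte read units) and
				// dataOffset==0x1234: readStart==0x9 and the data starts 0x34 bytes into the buffer.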
sl@0: TLinAddr data = buffer+dataOffset-(readStart<iCompressionType,aReq->iLoadAddr,data,dataSize); sl@0: if(r>=0) sl@0: { sl@0: __NK_ASSERT_ALWAYS(r==pageSize); sl@0: r = KErrNone; sl@0: } sl@0: } sl@0: } sl@0: } sl@0: sl@0: END_PAGING_BENCHMARK(this, EPagingBmReadRomPage); sl@0: return r; sl@0: } sl@0: sl@0: TInt ReadFunc(TAny* aArg1, TAny* aArg2, TLinAddr aBuffer, TInt aBlockNumber, TInt aBlockCount) sl@0: { sl@0: START_PAGING_BENCHMARK; sl@0: TInt drive = (TInt)aArg1; sl@0: TThreadMessage* msg= (TThreadMessage*)aArg2; sl@0: DemandPaging::SPagingDevice& device = DemandPaging::ThePager->CodePagingDevice(drive); sl@0: TInt r = device.iDevice->Read(msg, aBuffer, aBlockNumber, aBlockCount, drive); sl@0: END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia); sl@0: return r; sl@0: } sl@0: sl@0: TInt DemandPaging::ReadCodePage(const DPagingRequest* aReq, DMmuCodeSegMemory* aCodeSegMemory, TLinAddr aCodeAddress) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("ReadCodePage buffer = %08x, csm == %08x, addr == %08x", aReq->iLoadAddr, aCodeSegMemory, aCodeAddress)); sl@0: sl@0: START_PAGING_BENCHMARK; sl@0: sl@0: // Get the paging device for this drive sl@0: SPagingDevice& device = CodePagingDevice(aCodeSegMemory->iCodeLocalDrive); sl@0: sl@0: // Work out which bit of the file to read sl@0: SRamCodeInfo& ri = aCodeSegMemory->iRamInfo; sl@0: TInt codeOffset = aCodeAddress - ri.iCodeRunAddr; sl@0: TInt pageNumber = codeOffset >> KPageShift; sl@0: TBool compressed = aCodeSegMemory->iCompressionType != SRomPageInfo::ENoCompression; sl@0: TInt dataOffset, dataSize; sl@0: if (compressed) sl@0: { sl@0: dataOffset = aCodeSegMemory->iCodePageOffsets[pageNumber]; sl@0: dataSize = aCodeSegMemory->iCodePageOffsets[pageNumber + 1] - dataOffset; sl@0: __KTRACE_OPT(KPAGING,Kern::Printf(" compressed, file offset == %x, size == %d", dataOffset, dataSize)); sl@0: } sl@0: else sl@0: { sl@0: dataOffset = codeOffset + aCodeSegMemory->iCodeStartInFile; sl@0: dataSize = Min(KPageSize, aCodeSegMemory->iBlockMap.DataLength() - dataOffset); sl@0: __NK_ASSERT_DEBUG(dataSize >= 0); sl@0: __KTRACE_OPT(KPAGING,Kern::Printf(" uncompressed, file offset == %x, size == %d", dataOffset, dataSize)); sl@0: } sl@0: sl@0: TInt bufferStart = aCodeSegMemory->iBlockMap.Read(aReq->iBuffer, sl@0: dataOffset, sl@0: dataSize, sl@0: device.iDevice->iReadUnitShift, sl@0: ReadFunc, sl@0: (TAny*)aCodeSegMemory->iCodeLocalDrive, sl@0: (TAny*)&aReq->iMessage); sl@0: sl@0: sl@0: TInt r = KErrNone; sl@0: if(bufferStart<0) sl@0: { sl@0: r = bufferStart; // return error sl@0: __NK_ASSERT_DEBUG(0); sl@0: } sl@0: else sl@0: { sl@0: TLinAddr data = aReq->iBuffer + bufferStart; sl@0: if (compressed) sl@0: { sl@0: TInt r = Decompress(aCodeSegMemory->iCompressionType, aReq->iLoadAddr, data, dataSize); sl@0: if(r>=0) sl@0: { sl@0: dataSize = Min(KPageSize, ri.iCodeSize - codeOffset); sl@0: if(r!=dataSize) sl@0: { sl@0: __NK_ASSERT_DEBUG(0); sl@0: r = KErrCorrupt; sl@0: } sl@0: else sl@0: r = KErrNone; sl@0: } sl@0: else sl@0: { sl@0: __NK_ASSERT_DEBUG(0); sl@0: } sl@0: } sl@0: else sl@0: { sl@0: #ifdef BTRACE_PAGING_VERBOSE sl@0: BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,SRomPageInfo::ENoCompression); sl@0: #endif sl@0: memcpy((TAny*)aReq->iLoadAddr, (TAny*)data, dataSize); sl@0: #ifdef BTRACE_PAGING_VERBOSE sl@0: BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd); sl@0: #endif sl@0: } sl@0: } sl@0: sl@0: if(r==KErrNone) sl@0: if (dataSize < KPageSize) sl@0: memset((TAny*)(aReq->iLoadAddr + dataSize), KPageSize - 
dataSize, 0x03); sl@0: sl@0: END_PAGING_BENCHMARK(this, EPagingBmReadCodePage); sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: #include "decompress.h" sl@0: sl@0: sl@0: TInt DemandPaging::Decompress(TInt aCompressionType,TLinAddr aDst,TLinAddr aSrc,TUint aSrcSize) sl@0: { sl@0: #ifdef BTRACE_PAGING_VERBOSE sl@0: BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,aCompressionType); sl@0: #endif sl@0: TInt r; sl@0: switch(aCompressionType) sl@0: { sl@0: case SRomPageInfo::ENoCompression: sl@0: memcpy((void*)aDst,(void*)aSrc,aSrcSize); sl@0: r = aSrcSize; sl@0: break; sl@0: sl@0: case SRomPageInfo::EBytePair: sl@0: { sl@0: START_PAGING_BENCHMARK; sl@0: TUint8* srcNext=0; sl@0: r=BytePairDecompress((TUint8*)aDst,KPageSize,(TUint8*)aSrc,aSrcSize,srcNext); sl@0: if (r == KErrNone) sl@0: __NK_ASSERT_ALWAYS((TLinAddr)srcNext == aSrc + aSrcSize); sl@0: END_PAGING_BENCHMARK(this, EPagingBmDecompress); sl@0: } sl@0: break; sl@0: sl@0: default: sl@0: r = KErrNotSupported; sl@0: break; sl@0: } sl@0: #ifdef BTRACE_PAGING_VERBOSE sl@0: BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd); sl@0: #endif sl@0: return r; sl@0: } sl@0: sl@0: sl@0: void DemandPaging::BalanceAges() sl@0: { sl@0: if(iOldCount*iYoungOldRatio>=iYoungCount) sl@0: return; // We have enough old pages sl@0: sl@0: // make one young page into an old page... sl@0: sl@0: __NK_ASSERT_DEBUG(!iYoungList.IsEmpty()); sl@0: __NK_ASSERT_DEBUG(iYoungCount); sl@0: SDblQueLink* link = iYoungList.Last()->Deque(); sl@0: --iYoungCount; sl@0: sl@0: SPageInfo* pageInfo = SPageInfo::FromLink(link); sl@0: pageInfo->SetState(SPageInfo::EStatePagedOld); sl@0: sl@0: iOldList.AddHead(link); sl@0: ++iOldCount; sl@0: sl@0: SetOld(pageInfo); sl@0: sl@0: #ifdef BTRACE_PAGING_VERBOSE sl@0: BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,pageInfo->PhysAddr()); sl@0: #endif sl@0: } sl@0: sl@0: sl@0: void DemandPaging::AddAsYoungest(SPageInfo* aPageInfo) sl@0: { sl@0: #ifdef _DEBUG sl@0: SPageInfo::TType type = aPageInfo->Type(); sl@0: __NK_ASSERT_DEBUG(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode || type==SPageInfo::EPagedData || type==SPageInfo::EPagedCache); sl@0: #endif sl@0: aPageInfo->SetState(SPageInfo::EStatePagedYoung); sl@0: iYoungList.AddHead(&aPageInfo->iLink); sl@0: ++iYoungCount; sl@0: } sl@0: sl@0: sl@0: void DemandPaging::AddAsFreePage(SPageInfo* aPageInfo) sl@0: { sl@0: #ifdef BTRACE_PAGING sl@0: TPhysAddr phys = aPageInfo->PhysAddr(); sl@0: BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,phys); sl@0: #endif sl@0: aPageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedOld); sl@0: iOldList.Add(&aPageInfo->iLink); sl@0: ++iOldCount; sl@0: } sl@0: sl@0: sl@0: void DemandPaging::RemovePage(SPageInfo* aPageInfo) sl@0: { sl@0: switch(aPageInfo->State()) sl@0: { sl@0: case SPageInfo::EStatePagedYoung: sl@0: __NK_ASSERT_DEBUG(iYoungCount); sl@0: aPageInfo->iLink.Deque(); sl@0: --iYoungCount; sl@0: break; sl@0: sl@0: case SPageInfo::EStatePagedOld: sl@0: __NK_ASSERT_DEBUG(iOldCount); sl@0: aPageInfo->iLink.Deque(); sl@0: --iOldCount; sl@0: break; sl@0: sl@0: case SPageInfo::EStatePagedLocked: sl@0: break; sl@0: sl@0: default: sl@0: __NK_ASSERT_DEBUG(0); sl@0: } sl@0: aPageInfo->SetState(SPageInfo::EStatePagedDead); sl@0: } sl@0: sl@0: sl@0: SPageInfo* DemandPaging::GetOldestPage() sl@0: { sl@0: // remove oldest from list... 
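	// Old pages are evicted in preference; the tail of the young list is used only
	// when the old list is empty.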
sl@0: SDblQueLink* link; sl@0: if(iOldCount) sl@0: { sl@0: __NK_ASSERT_DEBUG(!iOldList.IsEmpty()); sl@0: link = iOldList.Last()->Deque(); sl@0: --iOldCount; sl@0: } sl@0: else sl@0: { sl@0: __NK_ASSERT_DEBUG(iYoungCount); sl@0: __NK_ASSERT_DEBUG(!iYoungList.IsEmpty()); sl@0: link = iYoungList.Last()->Deque(); sl@0: --iYoungCount; sl@0: } sl@0: SPageInfo* pageInfo = SPageInfo::FromLink(link); sl@0: pageInfo->SetState(SPageInfo::EStatePagedDead); sl@0: sl@0: // put page in a free state... sl@0: SetFree(pageInfo); sl@0: pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead); sl@0: sl@0: // keep live list balanced... sl@0: BalanceAges(); sl@0: sl@0: return pageInfo; sl@0: } sl@0: sl@0: sl@0: TBool DemandPaging::GetFreePages(TInt aNumPages) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages)); sl@0: NKern::LockSystem(); sl@0: sl@0: while(aNumPages>0 && NumberOfFreePages()>=aNumPages) sl@0: { sl@0: // steal a page from live page list and return it to the free pool... sl@0: ReturnToSystem(GetOldestPage()); sl@0: --aNumPages; sl@0: } sl@0: sl@0: NKern::UnlockSystem(); sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: Type(); sl@0: if(type==SPageInfo::EChunk) sl@0: { sl@0: //Must not donate locked page. An example is DMA trasferred memory. sl@0: __NK_ASSERT_DEBUG(0 == aPageInfo->LockCount()); sl@0: sl@0: aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung); sl@0: sl@0: // Update ram allocator counts as this page has changed its type sl@0: DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner(); sl@0: iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard); sl@0: sl@0: AddAsYoungest(aPageInfo); sl@0: ++iNumberOfFreePages; sl@0: if (iMinimumPageCount + iNumberOfFreePages > iMaximumPageCount) sl@0: ReturnToSystem(GetOldestPage()); sl@0: BalanceAges(); sl@0: return; sl@0: } sl@0: // allow already donated pages... sl@0: __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache); sl@0: } sl@0: sl@0: sl@0: TBool DemandPaging::ReclaimRamCachePage(SPageInfo* aPageInfo) sl@0: { sl@0: SPageInfo::TType type = aPageInfo->Type(); sl@0: if(type==SPageInfo::EChunk) sl@0: return ETrue; // page already reclaimed sl@0: sl@0: __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache); sl@0: sl@0: if(!iNumberOfFreePages) sl@0: return EFalse; sl@0: --iNumberOfFreePages; sl@0: sl@0: RemovePage(aPageInfo); sl@0: aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal); sl@0: sl@0: // Update ram allocator counts as this page has changed its type sl@0: DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner(); sl@0: iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType()); sl@0: return ETrue; sl@0: } sl@0: sl@0: sl@0: SPageInfo* DemandPaging::AllocateNewPage() sl@0: { sl@0: __ASSERT_SYSTEM_LOCK sl@0: SPageInfo* pageInfo; sl@0: sl@0: NKern::UnlockSystem(); sl@0: MmuBase::Wait(); sl@0: NKern::LockSystem(); sl@0: sl@0: // Try getting a free page from our active page list sl@0: if(iOldCount) sl@0: { sl@0: pageInfo = SPageInfo::FromLink(iOldList.Last()); sl@0: if(pageInfo->Type()==SPageInfo::EPagedFree) sl@0: { sl@0: pageInfo = GetOldestPage(); sl@0: goto done; sl@0: } sl@0: } sl@0: sl@0: // Try getting a free page from the system pool sl@0: if(iMinimumPageCount+iNumberOfFreePagesState(); sl@0: if(state==SPageInfo::EStatePagedOld) sl@0: { sl@0: // move page from old list to head of young list... 
sl@0: __NK_ASSERT_DEBUG(iOldCount); sl@0: aPageInfo->iLink.Deque(); sl@0: --iOldCount; sl@0: AddAsYoungest(aPageInfo); sl@0: BalanceAges(); sl@0: } sl@0: else if(state==SPageInfo::EStatePagedYoung) sl@0: { sl@0: // page was already young, move it to the start of the list (make it the youngest) sl@0: aPageInfo->iLink.Deque(); sl@0: iYoungList.AddHead(&aPageInfo->iLink); sl@0: } sl@0: else sl@0: { sl@0: // leave locked pages alone sl@0: __NK_ASSERT_DEBUG(state==SPageInfo::EStatePagedLocked); sl@0: } sl@0: } sl@0: sl@0: sl@0: TInt DemandPaging::CheckRealtimeThreadFault(DThread* aThread, TAny* aContext) sl@0: { sl@0: TInt r = KErrNone; sl@0: DThread* client = aThread->iIpcClient; sl@0: sl@0: // If iIpcClient is set then we are accessing the address space of a remote thread. If we are sl@0: // in an IPC trap, this will contain information about the local and remote addresses being accessed. sl@0: // If this is not set then we assume that any fault must be caused by a bad remote address. sl@0: TIpcExcTrap* ipcTrap = (TIpcExcTrap*)aThread->iExcTrap; sl@0: if (ipcTrap && !ipcTrap->IsTIpcExcTrap()) sl@0: ipcTrap = 0; sl@0: if (client && (!ipcTrap || ipcTrap->ExcLocation(aThread, aContext) == TIpcExcTrap::EExcRemote)) sl@0: { sl@0: // Kill client thread... sl@0: NKern::UnlockSystem(); sl@0: if(K::IllegalFunctionForRealtimeThread(client,"Access to Paged Memory (by other thread)")) sl@0: { sl@0: // Treat memory access as bad... sl@0: r = KErrAbort; sl@0: } sl@0: // else thread is in 'warning only' state so allow paging sl@0: } sl@0: else sl@0: { sl@0: // Kill current thread... sl@0: NKern::UnlockSystem(); sl@0: if(K::IllegalFunctionForRealtimeThread(NULL,"Access to Paged Memory")) sl@0: { sl@0: // If current thread is in critical section, then the above kill will be deferred sl@0: // and we will continue executing. We will handle this by returning an error sl@0: // which means that the thread will take an exception (which hopefully is XTRAPed!) sl@0: r = KErrAbort; sl@0: } sl@0: // else thread is in 'warning only' state so allow paging sl@0: } sl@0: sl@0: NKern::LockSystem(); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TInt DemandPaging::ResizeLiveList(TUint aMinimumPageCount,TUint aMaximumPageCount) sl@0: { sl@0: if(!aMaximumPageCount) sl@0: { sl@0: aMinimumPageCount = iInitMinimumPageCount; sl@0: aMaximumPageCount = iInitMaximumPageCount; sl@0: } sl@0: sl@0: // Min must not be greater than max... sl@0: if(aMinimumPageCount>aMaximumPageCount) sl@0: return KErrArgument; sl@0: sl@0: NKern::ThreadEnterCS(); sl@0: MmuBase::Wait(); sl@0: sl@0: NKern::LockSystem(); sl@0: sl@0: // Make sure aMinimumPageCount is not less than absolute minimum we can cope with... sl@0: iMinimumPageLimit = ((KMinYoungPages + iNextPagingRequestCount) * (1 + iYoungOldRatio)) / iYoungOldRatio; sl@0: if(iMinimumPageLimit<KAbsoluteMinPageCount) sl@0: iMinimumPageLimit = KAbsoluteMinPageCount; sl@0: if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount) sl@0: aMinimumPageCount = iMinimumPageLimit+iReservePageCount; sl@0: if(aMaximumPageCount<aMinimumPageCount) sl@0: aMaximumPageCount = aMinimumPageCount; sl@0: sl@0: // Increase iMaximumPageCount? sl@0: TInt extra = aMaximumPageCount-iMaximumPageCount; sl@0: if(extra>0) sl@0: iMaximumPageCount += extra; sl@0: sl@0: // Reduce iMinimumPageCount? sl@0: TInt spare = iMinimumPageCount-aMinimumPageCount; sl@0: if(spare>0) sl@0: { sl@0: iMinimumPageCount -= spare; sl@0: iNumberOfFreePages += spare; sl@0: } sl@0: sl@0: // Increase iMinimumPageCount? sl@0: TInt r=KErrNone; sl@0: while(aMinimumPageCount>iMinimumPageCount) sl@0: { sl@0: if(iNumberOfFreePages==0) // Need more pages?
sl@0: { sl@0: // get a page from the system sl@0: NKern::UnlockSystem(); sl@0: SPageInfo* pageInfo = GetPageFromSystem(); sl@0: NKern::LockSystem(); sl@0: if(!pageInfo) sl@0: { sl@0: r=KErrNoMemory; sl@0: break; sl@0: } sl@0: AddAsFreePage(pageInfo); sl@0: } sl@0: ++iMinimumPageCount; sl@0: --iNumberOfFreePages; sl@0: NKern::FlashSystem(); sl@0: } sl@0: sl@0: // Reduce iMaximumPageCount? sl@0: while(iMaximumPageCount>aMaximumPageCount) sl@0: { sl@0: if (iMinimumPageCount+iNumberOfFreePages==iMaximumPageCount) // Need to free pages? sl@0: { sl@0: ReturnToSystem(GetOldestPage()); sl@0: } sl@0: --iMaximumPageCount; sl@0: NKern::FlashSystem(); sl@0: } sl@0: sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,ThePager->iMinimumPageCount << KPageShift); sl@0: #endif sl@0: sl@0: __NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount); sl@0: sl@0: NKern::UnlockSystem(); sl@0: sl@0: MmuBase::Signal(); sl@0: NKern::ThreadLeaveCS(); sl@0: sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2) sl@0: { sl@0: DemandPaging* pager = DemandPaging::ThePager; sl@0: switch(aFunction) sl@0: { sl@0: case EVMHalFlushCache: sl@0: if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalFlushCache)"))) sl@0: K::UnlockedPlatformSecurityPanic(); sl@0: pager->FlushAll(); sl@0: return KErrNone; sl@0: sl@0: case EVMHalSetCacheSize: sl@0: { sl@0: if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetCacheSize)"))) sl@0: K::UnlockedPlatformSecurityPanic(); sl@0: TUint min = (TUint)a1>>KPageShift; sl@0: if((TUint)a1&KPageMask) sl@0: ++min; sl@0: TUint max = (TUint)a2>>KPageShift; sl@0: if((TUint)a2&KPageMask) sl@0: ++max; sl@0: return pager->ResizeLiveList(min,max); sl@0: } sl@0: sl@0: case EVMHalGetCacheSize: sl@0: { sl@0: SVMCacheInfo info; sl@0: NKern::LockSystem(); // lock system to ensure consistent set of values are read... 
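sl@0: // All sizes reported below are in bytes (page counts shifted left by KPageShift).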
sl@0: info.iMinSize = pager->iMinimumPageCount<<KPageShift; sl@0: info.iMaxSize = pager->iMaximumPageCount<<KPageShift; sl@0: info.iCurrentSize = (pager->iMinimumPageCount+pager->iNumberOfFreePages)<<KPageShift; sl@0: info.iMaxFreeSize = pager->iNumberOfFreePages<<KPageShift; sl@0: NKern::UnlockSystem(); sl@0: kumemput32(a1,&info,sizeof(info)); sl@0: } sl@0: return KErrNone; sl@0: sl@0: case EVMHalGetEventInfo: sl@0: { sl@0: SVMEventInfo info; sl@0: NKern::LockSystem(); // lock system to ensure consistent set of values are read... sl@0: info = pager->iEventInfo; sl@0: NKern::UnlockSystem(); sl@0: Kern::InfoCopy(*(TDes8*)a1,(TUint8*)&info,sizeof(info)); sl@0: } sl@0: return KErrNone; sl@0: sl@0: case EVMHalResetEventInfo: sl@0: NKern::LockSystem(); sl@0: memclr(&pager->iEventInfo, sizeof(pager->iEventInfo)); sl@0: NKern::UnlockSystem(); sl@0: return KErrNone; sl@0: sl@0: #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__ sl@0: case EVMHalGetOriginalRomPages: sl@0: *(TPhysAddr**)a1 = pager->iOriginalRomPages; sl@0: *(TInt*)a2 = pager->iOriginalRomPageCount; sl@0: return KErrNone; sl@0: #endif sl@0: sl@0: case EVMPageState: sl@0: return pager->PageState((TLinAddr)a1); sl@0: sl@0: #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__ sl@0: case EVMHalGetConcurrencyInfo: sl@0: { sl@0: NKern::LockSystem(); sl@0: SPagingConcurrencyInfo info = { pager->iMaxWaitingCount, pager->iMaxPagingCount }; sl@0: NKern::UnlockSystem(); sl@0: kumemput32(a1,&info,sizeof(info)); sl@0: } sl@0: return KErrNone; sl@0: sl@0: case EVMHalResetConcurrencyInfo: sl@0: NKern::LockSystem(); sl@0: pager->iMaxWaitingCount = 0; sl@0: pager->iMaxPagingCount = 0; sl@0: NKern::UnlockSystem(); sl@0: return KErrNone; sl@0: #endif sl@0: sl@0: #ifdef __DEMAND_PAGING_BENCHMARKS__ sl@0: case EVMHalGetPagingBenchmark: sl@0: { sl@0: TUint index = (TInt) a1; sl@0: if (index >= EMaxPagingBm) sl@0: return KErrNotFound; sl@0: NKern::LockSystem(); sl@0: SPagingBenchmarkInfo info = pager->iBenchmarkInfo[index]; sl@0: NKern::UnlockSystem(); sl@0: kumemput32(a2,&info,sizeof(info)); sl@0: } sl@0: return KErrNone; sl@0: sl@0: case EVMHalResetPagingBenchmark: sl@0: { sl@0: TUint index = (TInt) a1; sl@0: if (index >= EMaxPagingBm) sl@0: return KErrNotFound; sl@0: NKern::LockSystem(); sl@0: pager->ResetBenchmarkData((TPagingBenchmark)index); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: return KErrNone; sl@0: #endif sl@0: sl@0: default: sl@0: return KErrNotSupported; sl@0: } sl@0: } sl@0: sl@0: void DemandPaging::Panic(TFault aFault) sl@0: { sl@0: Kern::Fault("DEMAND-PAGING",aFault); sl@0: } sl@0: sl@0: sl@0: DMutex* DemandPaging::CheckMutexOrder() sl@0: { sl@0: #ifdef _DEBUG sl@0: SDblQue& ml = TheCurrentThread->iMutexList; sl@0: if(ml.IsEmpty()) sl@0: return NULL; sl@0: DMutex* mm = _LOFF(ml.First(), DMutex, iOrderLink); sl@0: if (KMutexOrdPageIn >= mm->iOrder) sl@0: return mm; sl@0: #endif sl@0: return NULL; sl@0: } sl@0: sl@0: sl@0: TBool DemandPaging::ReservePage() sl@0: { sl@0: __ASSERT_SYSTEM_LOCK; sl@0: __ASSERT_CRITICAL; sl@0: sl@0: NKern::UnlockSystem(); sl@0: MmuBase::Wait(); sl@0: NKern::LockSystem(); sl@0: sl@0: __NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount); sl@0: while (iMinimumPageCount == iMinimumPageLimit + iReservePageCount && sl@0: iNumberOfFreePages == 0) sl@0: { sl@0: NKern::UnlockSystem(); sl@0: SPageInfo* pageInfo = GetPageFromSystem(); sl@0: if(!pageInfo) sl@0: { sl@0: MmuBase::Signal(); sl@0: NKern::LockSystem(); sl@0: return EFalse; sl@0: } sl@0: NKern::LockSystem(); sl@0: AddAsFreePage(pageInfo); sl@0: } sl@0: if (iMinimumPageCount == iMinimumPageLimit + iReservePageCount) sl@0: { sl@0: ++iMinimumPageCount; sl@0: --iNumberOfFreePages; sl@0: if (iMinimumPageCount > iMaximumPageCount) sl@0: iMaximumPageCount = iMinimumPageCount; sl@0: } sl@0: ++iReservePageCount; sl@0: __NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount); sl@0: __NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
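sl@0: // The reservation is now accounted for; release the RAM allocator mutex (briefly dropping the system lock) before returning.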
sl@0: sl@0: NKern::UnlockSystem(); sl@0: MmuBase::Signal(); sl@0: NKern::LockSystem(); sl@0: return ETrue; sl@0: } sl@0: sl@0: sl@0: TInt DemandPaging::LockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion(%08x,%x)",aStart,aSize)); sl@0: NKern::ThreadEnterCS(); sl@0: sl@0: // calculate the number of pages required to lock aSize bytes sl@0: TUint32 mask=KPageMask; sl@0: TUint32 offset=aStart&mask; sl@0: TInt numPages = (aSize+offset+mask)>>KPageShift; sl@0: sl@0: // Lock pages... sl@0: TInt r=KErrNone; sl@0: TLinAddr page = aStart; sl@0: sl@0: NKern::LockSystem(); sl@0: while(--numPages>=0) sl@0: { sl@0: if (!ReservePage()) sl@0: break; sl@0: TPhysAddr phys; sl@0: r = LockPage(page,aProcess,phys); sl@0: NKern::FlashSystem(); sl@0: if(r!=KErrNone) sl@0: break; sl@0: page += KPageSize; sl@0: } sl@0: sl@0: NKern::UnlockSystem(); sl@0: sl@0: // If error, unlock whatever we managed to lock... sl@0: if(r!=KErrNone) sl@0: { sl@0: while((page-=KPageSize)>=aStart) sl@0: { sl@0: NKern::LockSystem(); sl@0: UnlockPage(aStart,aProcess,KPhysAddrInvalid); sl@0: --iReservePageCount; sl@0: NKern::UnlockSystem(); sl@0: } sl@0: } sl@0: sl@0: NKern::ThreadLeaveCS(); sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion returns %d",r)); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TInt DemandPaging::UnlockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockRegion(%08x,%x)",aStart,aSize)); sl@0: TUint32 mask=KPageMask; sl@0: TUint32 offset=aStart&mask; sl@0: TInt numPages = (aSize+offset+mask)>>KPageShift; sl@0: NKern::LockSystem(); sl@0: __NK_ASSERT_DEBUG(iReservePageCount >= (TUint)numPages); sl@0: while(--numPages>=0) sl@0: { sl@0: UnlockPage(aStart,aProcess,KPhysAddrInvalid); sl@0: --iReservePageCount; sl@0: NKern::FlashSystem(); sl@0: aStart += KPageSize; sl@0: } sl@0: NKern::UnlockSystem(); sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: void DemandPaging::FlushAll() sl@0: { sl@0: NKern::ThreadEnterCS(); sl@0: MmuBase::Wait(); sl@0: // look at all RAM pages in the system, and unmap all those used for paging sl@0: const TUint32* piMap = (TUint32*)KPageInfoMap; sl@0: const TUint32* piMapEnd = piMap+(KNumPageInfoPages>>5); sl@0: SPageInfo* pi = (SPageInfo*)KPageInfoLinearBase; sl@0: NKern::LockSystem(); sl@0: do sl@0: { sl@0: SPageInfo* piNext = pi+(KPageInfosPerPage<<5); sl@0: for(TUint32 piFlags=*piMap++; piFlags; piFlags>>=1) sl@0: { sl@0: if(!(piFlags&1)) sl@0: { sl@0: pi += KPageInfosPerPage; sl@0: continue; sl@0: } sl@0: SPageInfo* piEnd = pi+KPageInfosPerPage; sl@0: do sl@0: { sl@0: SPageInfo::TState state = pi->State(); sl@0: if(state==SPageInfo::EStatePagedYoung || state==SPageInfo::EStatePagedOld) sl@0: { sl@0: RemovePage(pi); sl@0: SetFree(pi); sl@0: AddAsFreePage(pi); sl@0: NKern::FlashSystem(); sl@0: } sl@0: ++pi; sl@0: const TUint KFlashCount = 64; // flash every 64 page infos (must be a power-of-2) sl@0: __ASSERT_COMPILE((TUint)KPageInfosPerPage >= KFlashCount); sl@0: if(((TUint)pi&((KFlashCount-1)<<KPageInfoShift))==0) sl@0: NKern::FlashSystem(); sl@0: } sl@0: while(pi<piEnd); sl@0: } sl@0: pi = piNext; sl@0: } sl@0: while(piMap<piMapEnd); sl@0: NKern::UnlockSystem(); sl@0: MmuBase::Signal(); sl@0: NKern::ThreadLeaveCS(); sl@0: } sl@0: sl@0: sl@0: TInt DemandPaging::LockPage(TLinAddr aPage, DProcess* aProcess, TPhysAddr& aPhysAddr) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockPage() %08x",aPage)); sl@0: __ASSERT_SYSTEM_LOCK; sl@0: __ASSERT_CRITICAL; sl@0: sl@0: // make sure the page is present, then get info about the page to be locked sl@0: TInt r = EnsurePagePresent(aPage,aProcess); sl@0: if(r!=KErrNone) sl@0: return KErrArgument; // page doesn't exist sl@0: TPhysAddr phys = LinearToPhysical(aPage,aProcess); sl@0: retry: sl@0: __NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid); sl@0: sl@0: SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys); sl@0: if(!pageInfo) sl@0: return KErrNotFound; sl@0: sl@0: SPageInfo::TType type = pageInfo->Type(); sl@0: if(type==SPageInfo::EShadow) sl@0: { sl@0: // get the page which is being shadowed and lock that sl@0: phys = (TPhysAddr)pageInfo->Owner(); sl@0: goto retry; sl@0: } sl@0: sl@0: switch(pageInfo->State()) sl@0: { sl@0: case SPageInfo::EStatePagedLocked: sl@0: // already locked, so just increment lock count...
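sl@0: // Nested LockPage() calls on the same page are reference counted; the page stays off the live list until UnlockPage() drops the count to zero.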
sl@0: ++pageInfo->PagedLock(); sl@0: break; sl@0: sl@0: case SPageInfo::EStatePagedYoung: sl@0: { sl@0: if(type!=SPageInfo::EPagedROM && type !=SPageInfo::EPagedCode) sl@0: { sl@0: // not implemented yet sl@0: __NK_ASSERT_ALWAYS(0); sl@0: } sl@0: sl@0: // remove page to be locked from live list... sl@0: RemovePage(pageInfo); sl@0: sl@0: // change to locked state... sl@0: pageInfo->SetState(SPageInfo::EStatePagedLocked); sl@0: pageInfo->PagedLock() = 1; // Start with lock count of one sl@0: sl@0: // open reference on memory... sl@0: if(type==SPageInfo::EPagedCode) sl@0: { sl@0: DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner(); sl@0: if(codeSegMemory->Open()!=KErrNone) sl@0: { sl@0: __NK_ASSERT_DEBUG(0); sl@0: } sl@0: } sl@0: } sl@0: sl@0: break; sl@0: sl@0: case SPageInfo::EStatePagedOld: sl@0: // can't happen because we forced the page to be accessible earlier sl@0: __NK_ASSERT_ALWAYS(0); sl@0: return KErrCorrupt; sl@0: sl@0: default: sl@0: return KErrNotFound; sl@0: } sl@0: sl@0: aPhysAddr = phys; sl@0: sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,phys,pageInfo->PagedLock()); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: TInt DemandPaging::UnlockPage(TLinAddr aPage, DProcess* aProcess, TPhysAddr aPhysAddr) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockPage() %08x",aPage)); sl@0: __ASSERT_SYSTEM_LOCK; sl@0: __ASSERT_CRITICAL; sl@0: sl@0: // Get info about page to be unlocked sl@0: TPhysAddr phys = LinearToPhysical(aPage,aProcess); sl@0: if(phys==KPhysAddrInvalid) sl@0: { sl@0: phys = aPhysAddr; sl@0: if(phys==KPhysAddrInvalid) sl@0: return KErrNotFound; sl@0: } sl@0: retry: sl@0: SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys); sl@0: if(!pageInfo) sl@0: return KErrNotFound; sl@0: sl@0: SPageInfo::TType type = pageInfo->Type(); sl@0: if(type==SPageInfo::EShadow) sl@0: { sl@0: // Get the page which is being shadowed and unlock that sl@0: phys = (TPhysAddr)pageInfo->Owner(); sl@0: goto retry; sl@0: } sl@0: sl@0: __NK_ASSERT_DEBUG(phys==aPhysAddr || aPhysAddr==KPhysAddrInvalid); sl@0: sl@0: // Unlock it... sl@0: switch(pageInfo->State()) sl@0: { sl@0: case SPageInfo::EStatePagedLocked: sl@0: #ifdef BTRACE_PAGING sl@0: BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,phys,pageInfo->PagedLock()); sl@0: #endif sl@0: if(!(--pageInfo->PagedLock())) sl@0: { sl@0: // get pointer to memory... sl@0: DMemModelCodeSegMemory* codeSegMemory = 0; sl@0: if(type==SPageInfo::EPagedCode) sl@0: codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner(); sl@0: sl@0: // put page back on live list... sl@0: AddAsYoungest(pageInfo); sl@0: BalanceAges(); sl@0: sl@0: // close reference on memory... 
sl@0: if(codeSegMemory) sl@0: { sl@0: NKern::UnlockSystem(); sl@0: codeSegMemory->Close(); sl@0: NKern::LockSystem(); sl@0: } sl@0: } sl@0: break; sl@0: sl@0: default: sl@0: return KErrNotFound; sl@0: } sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: sl@0: TInt DemandPaging::ReserveAlloc(TInt aSize, DDemandPagingLock& aLock) sl@0: { sl@0: __NK_ASSERT_DEBUG(aLock.iPages == NULL); sl@0: sl@0: // calculate the number of pages required to lock aSize bytes sl@0: TInt numPages = ((aSize-1+KPageMask)>>KPageShift)+1; sl@0: sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: ReserveAlloc() pages %d",numPages)); sl@0: sl@0: NKern::ThreadEnterCS(); sl@0: sl@0: aLock.iPages = (TPhysAddr*)Kern::Alloc(numPages*sizeof(TPhysAddr)); sl@0: if(!aLock.iPages) sl@0: { sl@0: NKern::ThreadLeaveCS(); sl@0: return KErrNoMemory; sl@0: } sl@0: sl@0: MmuBase::Wait(); sl@0: NKern::LockSystem(); sl@0: sl@0: // reserve pages, adding more if necessary sl@0: while (aLock.iReservedPageCount < numPages) sl@0: { sl@0: if (!ReservePage()) sl@0: break; sl@0: ++aLock.iReservedPageCount; sl@0: } sl@0: sl@0: NKern::UnlockSystem(); sl@0: MmuBase::Signal(); sl@0: sl@0: TBool enoughPages = aLock.iReservedPageCount == numPages; sl@0: if(!enoughPages) sl@0: ReserveFree(aLock); sl@0: sl@0: NKern::ThreadLeaveCS(); sl@0: return enoughPages ? KErrNone : KErrNoMemory; sl@0: } sl@0: sl@0: sl@0: sl@0: void DemandPaging::ReserveFree(DDemandPagingLock& aLock) sl@0: { sl@0: NKern::ThreadEnterCS(); sl@0: sl@0: // make sure pages aren't still locked sl@0: ReserveUnlock(aLock); sl@0: sl@0: NKern::LockSystem(); sl@0: __NK_ASSERT_DEBUG(iReservePageCount >= (TUint)aLock.iReservedPageCount); sl@0: iReservePageCount -= aLock.iReservedPageCount; sl@0: aLock.iReservedPageCount = 0; sl@0: NKern::UnlockSystem(); sl@0: sl@0: // free page array... sl@0: Kern::Free(aLock.iPages); sl@0: aLock.iPages = 0; sl@0: sl@0: NKern::ThreadLeaveCS(); sl@0: } sl@0: sl@0: sl@0: sl@0: TBool DemandPaging::ReserveLock(DThread* aThread, TLinAddr aStart,TInt aSize, DDemandPagingLock& aLock) sl@0: { sl@0: if(aLock.iLockedPageCount) sl@0: Panic(ELockTwice); sl@0: sl@0: // calculate the number of pages that need to be locked... sl@0: TUint32 mask=KPageMask; sl@0: TUint32 offset=aStart&mask; sl@0: TInt numPages = (aSize+offset+mask)>>KPageShift; sl@0: if(numPages>aLock.iReservedPageCount) sl@0: Panic(ELockTooBig); sl@0: sl@0: NKern::LockSystem(); sl@0: sl@0: // lock the pages sl@0: TBool locked = EFalse; // becomes true if any pages were locked sl@0: DProcess* process = aThread->iOwningProcess; sl@0: TLinAddr page=aStart; sl@0: TInt count=numPages; sl@0: TPhysAddr* physPages = aLock.iPages; sl@0: while(--count>=0) sl@0: { sl@0: if(LockPage(page,process,*physPages)==KErrNone) sl@0: locked = ETrue; sl@0: NKern::FlashSystem(); sl@0: page += KPageSize; sl@0: ++physPages; sl@0: } sl@0: sl@0: // if any pages were locked, save the lock info... 
sl@0: if(locked) sl@0: { sl@0: if(aLock.iLockedPageCount) sl@0: Panic(ELockTwice); sl@0: aLock.iLockedStart = aStart; sl@0: aLock.iLockedPageCount = numPages; sl@0: aLock.iProcess = process; sl@0: aLock.iProcess->Open(); sl@0: } sl@0: sl@0: NKern::UnlockSystem(); sl@0: return locked; sl@0: } sl@0: sl@0: sl@0: sl@0: void DemandPaging::ReserveUnlock(DDemandPagingLock& aLock) sl@0: { sl@0: NKern::ThreadEnterCS(); sl@0: sl@0: DProcess* process = NULL; sl@0: NKern::LockSystem(); sl@0: TInt numPages = aLock.iLockedPageCount; sl@0: TLinAddr page = aLock.iLockedStart; sl@0: TPhysAddr* physPages = aLock.iPages; sl@0: while(--numPages>=0) sl@0: { sl@0: UnlockPage(page, aLock.iProcess,*physPages); sl@0: NKern::FlashSystem(); sl@0: page += KPageSize; sl@0: ++physPages; sl@0: } sl@0: process = aLock.iProcess; sl@0: aLock.iProcess = NULL; sl@0: aLock.iLockedPageCount = 0; sl@0: NKern::UnlockSystem(); sl@0: if (process) sl@0: process->Close(NULL); sl@0: sl@0: NKern::ThreadLeaveCS(); sl@0: } sl@0: sl@0: /** sl@0: Check whether the specified page can be discarded by the RAM cache. sl@0: sl@0: @param aPageInfo The page info of the page being queried. sl@0: @return ETrue when the page can be discarded, EFalse otherwise. sl@0: @pre System lock held. sl@0: @post System lock held. sl@0: */ sl@0: TBool DemandPaging::IsPageDiscardable(SPageInfo& aPageInfo) sl@0: { sl@0: // on live list? sl@0: SPageInfo::TState state = aPageInfo.State(); sl@0: return (state == SPageInfo::EStatePagedYoung || state == SPageInfo::EStatePagedOld); sl@0: } sl@0: sl@0: sl@0: /** sl@0: Discard the specified page. sl@0: Should only be called on a page if a previous call to IsPageDiscardable() sl@0: returned ETrue and the system lock hasn't been released between the calls. sl@0: sl@0: @param aPageInfo The page info of the page to be discarded sl@0: @param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into. sl@0: @param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached sl@0: in preference ordering. EFalse otherwise. sl@0: @return ETrue if the page could be discarded, EFalse otherwise. sl@0: sl@0: @pre System lock held. sl@0: @post System lock held. sl@0: */ sl@0: TBool DemandPaging::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest) sl@0: { sl@0: __ASSERT_SYSTEM_LOCK; sl@0: // Ensure that we don't reduce the cache beyond its minimum. sl@0: if (iNumberOfFreePages == 0) sl@0: { sl@0: NKern::UnlockSystem(); sl@0: SPageInfo* newPage = GetPageFromSystem(aBlockedZoneId, aBlockRest); sl@0: NKern::LockSystem(); sl@0: if (newPage == NULL) sl@0: {// couldn't allocate a new page sl@0: return EFalse; sl@0: } sl@0: if (IsPageDiscardable(aPageInfo)) sl@0: {// page can still be discarded so use new page sl@0: // and discard old one sl@0: AddAsFreePage(newPage); sl@0: RemovePage(&aPageInfo); sl@0: SetFree(&aPageInfo); sl@0: ReturnToSystem(&aPageInfo); sl@0: BalanceAges(); sl@0: return ETrue; sl@0: } sl@0: else sl@0: {// page no longer discardable so the new page is not needed sl@0: ReturnToSystem(newPage); sl@0: return EFalse; sl@0: } sl@0: } sl@0: sl@0: // Discard the page sl@0: RemovePage(&aPageInfo); sl@0: SetFree(&aPageInfo); sl@0: ReturnToSystem(&aPageInfo); sl@0: BalanceAges(); sl@0: sl@0: return ETrue; sl@0: } sl@0: sl@0: sl@0: /** sl@0: First stage in discarding a list of pages. sl@0: sl@0: Must ensure that the pages will still be discardable even if system lock is released. sl@0: To be used in conjunction with RamCacheBase::DoDiscardPages1().
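sl@0: Removing the pages from the live list here means the pager cannot select them again before DoDiscardPages1() completes the discard.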
sl@0: sl@0: @param aPageList A NULL terminated list of the pages to be discarded sl@0: @return KErrNone on success. sl@0: sl@0: @pre System lock held sl@0: @post System lock held sl@0: */ sl@0: TInt DemandPaging::DoDiscardPages0(SPageInfo** aPageList) sl@0: { sl@0: __ASSERT_SYSTEM_LOCK; sl@0: sl@0: SPageInfo* pageInfo; sl@0: while((pageInfo = *aPageList++) != 0) sl@0: { sl@0: RemovePage(pageInfo); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Final stage in discarding a list of pages. sl@0: Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0(). sl@0: sl@0: @param aPageList A NULL terminated list of the pages to be discarded sl@0: @return KErrNone on success. sl@0: sl@0: @pre System lock held sl@0: @post System lock held sl@0: */ sl@0: TInt DemandPaging::DoDiscardPages1(SPageInfo** aPageList) sl@0: { sl@0: __ASSERT_SYSTEM_LOCK; sl@0: sl@0: SPageInfo* pageInfo; sl@0: while((pageInfo = *aPageList++)!=0) sl@0: { sl@0: SetFree(pageInfo); sl@0: ReturnToSystem(pageInfo); sl@0: BalanceAges(); sl@0: } sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: TBool DemandPaging::MayBePaged(TLinAddr aStartAddr, TUint aLength) sl@0: { sl@0: TLinAddr endAddr = aStartAddr + aLength; sl@0: TBool rangeTouchesPagedRom = sl@0: TUint(aStartAddr - iRomPagedLinearBase) < iRomSize || sl@0: TUint(endAddr - iRomPagedLinearBase) < iRomSize; sl@0: TBool rangeTouchesCodeArea = sl@0: TUint(aStartAddr - iCodeLinearBase) < iCodeSize || sl@0: TUint(endAddr - iCodeLinearBase) < iCodeSize; sl@0: return rangeTouchesPagedRom || rangeTouchesCodeArea; sl@0: } sl@0: sl@0: sl@0: #ifdef __DEMAND_PAGING_BENCHMARKS__ sl@0: sl@0: void DemandPaging::ResetBenchmarkData(TPagingBenchmark aBm) sl@0: { sl@0: SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; sl@0: info.iCount = 0; sl@0: info.iTotalTime = 0; sl@0: info.iMaxTime = 0; sl@0: info.iMinTime = KMaxTInt; sl@0: } sl@0: sl@0: void DemandPaging::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime) sl@0: { sl@0: SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm]; sl@0: ++info.iCount; sl@0: #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP) sl@0: TInt64 elapsed = aEndTime - aStartTime; sl@0: #else sl@0: TInt64 elapsed = aStartTime - aEndTime; sl@0: #endif sl@0: info.iTotalTime += elapsed; sl@0: if (elapsed > info.iMaxTime) sl@0: info.iMaxTime = elapsed; sl@0: if (elapsed < info.iMinTime) sl@0: info.iMinTime = elapsed; sl@0: } sl@0: sl@0: #endif sl@0: sl@0: sl@0: // sl@0: // DDemandPagingLock sl@0: // sl@0: sl@0: EXPORT_C DDemandPagingLock::DDemandPagingLock() sl@0: : iThePager(DemandPaging::ThePager), iReservedPageCount(0), iLockedPageCount(0), iPages(0) sl@0: { sl@0: } sl@0: sl@0: sl@0: EXPORT_C TInt DDemandPagingLock::Alloc(TInt aSize) sl@0: { sl@0: if (iThePager) sl@0: return iThePager->ReserveAlloc(aSize,*this); sl@0: else sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: EXPORT_C void DDemandPagingLock::DoUnlock() sl@0: { sl@0: if (iThePager) sl@0: iThePager->ReserveUnlock(*this); sl@0: } sl@0: sl@0: sl@0: EXPORT_C void DDemandPagingLock::Free() sl@0: { sl@0: if (iThePager) sl@0: iThePager->ReserveFree(*this); sl@0: } sl@0: sl@0: sl@0: EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice) sl@0: { sl@0: if (DemandPaging::ThePager) sl@0: return DemandPaging::ThePager->InstallPagingDevice(aDevice); sl@0: else sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: sl@0: #else // !__DEMAND_PAGING__ sl@0: sl@0: EXPORT_C DDemandPagingLock::DDemandPagingLock() sl@0: : iLockedPageCount(0) sl@0: { sl@0: }
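sl@0: // Without demand paging there is nothing to reserve or lock, so these methods are no-op stubs.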
sl@0: sl@0: EXPORT_C TInt DDemandPagingLock::Alloc(TInt /*aSize*/) sl@0: { sl@0: return KErrNone; sl@0: } sl@0: sl@0: EXPORT_C TBool DDemandPagingLock::Lock(DThread* /*aThread*/, TLinAddr /*aStart*/, TInt /*aSize*/) sl@0: { sl@0: return EFalse; sl@0: } sl@0: sl@0: EXPORT_C void DDemandPagingLock::DoUnlock() sl@0: { sl@0: } sl@0: sl@0: EXPORT_C void DDemandPagingLock::Free() sl@0: { sl@0: } sl@0: sl@0: EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice) sl@0: { sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: #endif // __DEMAND_PAGING__ sl@0: sl@0: sl@0: DMmuCodeSegMemory::DMmuCodeSegMemory(DEpocCodeSeg* aCodeSeg) sl@0: : DEpocCodeSegMemory(aCodeSeg), iCodeAllocBase(KMinTInt) sl@0: { sl@0: } sl@0: sl@0: //#define __DUMP_BLOCKMAP_INFO sl@0: DMmuCodeSegMemory::~DMmuCodeSegMemory() sl@0: { sl@0: #ifdef __DEMAND_PAGING__ sl@0: Kern::Free(iCodeRelocTable); sl@0: Kern::Free(iCodePageOffsets); sl@0: Kern::Free(iDataSectionMemory); sl@0: #endif sl@0: } sl@0: sl@0: #ifdef __DEMAND_PAGING__ sl@0: sl@0: /** sl@0: Read and process the block map and related data. sl@0: */ sl@0: TInt DMmuCodeSegMemory::ReadBlockMap(const TCodeSegCreateInfo& aInfo) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading block map for %C", iCodeSeg)); sl@0: sl@0: if (aInfo.iCodeBlockMapEntriesSize <= 0) sl@0: return KErrArgument; // no block map provided sl@0: sl@0: // Get compression data sl@0: switch (aInfo.iCompressionType) sl@0: { sl@0: case KFormatNotCompressed: sl@0: iCompressionType = SRomPageInfo::ENoCompression; sl@0: break; sl@0: sl@0: case KUidCompressionBytePair: sl@0: { sl@0: iCompressionType = SRomPageInfo::EBytePair; sl@0: if (!aInfo.iCodePageOffsets) sl@0: return KErrArgument; sl@0: TInt size = sizeof(TInt32) * (iPageCount + 1); sl@0: iCodePageOffsets = (TInt32*)Kern::Alloc(size); sl@0: if (!iCodePageOffsets) sl@0: return KErrNoMemory; sl@0: kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size); sl@0: sl@0: #ifdef __DUMP_BLOCKMAP_INFO sl@0: Kern::Printf("CodePageOffsets:"); sl@0: for (TInt i = 0 ; i < iPageCount + 1 ; ++i) sl@0: Kern::Printf(" %08x", iCodePageOffsets[i]); sl@0: #endif sl@0: sl@0: TInt last = 0; sl@0: for (TInt j = 0 ; j < iPageCount + 1 ; ++j) sl@0: { sl@0: if (iCodePageOffsets[j] < last || sl@0: iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile)) sl@0: { sl@0: __NK_ASSERT_DEBUG(0); sl@0: return KErrCorrupt; sl@0: } sl@0: last = iCodePageOffsets[j]; sl@0: } sl@0: } sl@0: break; sl@0: sl@0: default: sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: // Copy block map data itself... 
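sl@0: // The block map records where each part of the code segment lives on its local drive; it is copied from user side, then validated and converted for the paging device by iBlockMap.Initialise() below.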
sl@0: sl@0: #ifdef __DUMP_BLOCKMAP_INFO sl@0: Kern::Printf("Original block map"); sl@0: Kern::Printf(" block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity); sl@0: Kern::Printf(" block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset); sl@0: Kern::Printf(" start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress); sl@0: Kern::Printf(" local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber); sl@0: Kern::Printf(" entry size: %d", aInfo.iCodeBlockMapEntriesSize); sl@0: #endif sl@0: sl@0: // Find relevant paging device sl@0: iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber; sl@0: if (TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number")); sl@0: return KErrArgument; sl@0: } sl@0: DemandPaging* pager = DemandPaging::ThePager; sl@0: sl@0: if (!pager->CodePagingDevice(iCodeLocalDrive).iInstalled) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive")); sl@0: return KErrNotSupported; sl@0: } sl@0: DPagingDevice* device = pager->CodePagingDevice(iCodeLocalDrive).iDevice; sl@0: sl@0: // Set code start offset sl@0: iCodeStartInFile = aInfo.iCodeStartInFile; sl@0: if (iCodeStartInFile < 0) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset")); sl@0: return KErrArgument; sl@0: } sl@0: sl@0: // Allocate buffer for block map and copy from user-side sl@0: TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize); sl@0: if (!buffer) sl@0: return KErrNoMemory; sl@0: kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize); sl@0: sl@0: #ifdef __DUMP_BLOCKMAP_INFO sl@0: Kern::Printf(" entries:"); sl@0: for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k) sl@0: Kern::Printf(" %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock); sl@0: #endif sl@0: sl@0: // Initialise block map sl@0: TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon, sl@0: buffer, sl@0: aInfo.iCodeBlockMapEntriesSize, sl@0: device->iReadUnitShift, sl@0: iCodeStartInFile + aInfo.iCodeLengthInFile); sl@0: if (r != KErrNone) sl@0: { sl@0: Kern::Free(buffer); sl@0: return r; sl@0: } sl@0: sl@0: #if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG) sl@0: iBlockMap.Dump(); sl@0: #endif sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: /** sl@0: Read code relocation table and import fixup table from user side. sl@0: */ sl@0: TInt DMmuCodeSegMemory::ReadFixupTables(const TCodeSegCreateInfo& aInfo) sl@0: { sl@0: __KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading fixup tables for %C", iCodeSeg)); sl@0: sl@0: iCodeRelocTableSize = aInfo.iCodeRelocTableSize; sl@0: iImportFixupTableSize = aInfo.iImportFixupTableSize; sl@0: iCodeDelta = aInfo.iCodeDelta; sl@0: iDataDelta = aInfo.iDataDelta; sl@0: sl@0: // round sizes to four-byte boundaries... sl@0: TInt relocSize = (iCodeRelocTableSize + 3) & ~3; sl@0: TInt fixupSize = (iImportFixupTableSize + 3) & ~3; sl@0: sl@0: // copy relocs and fixups...
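sl@0: // Both tables share a single kernel-side allocation; iImportFixupTable points into it immediately after the (rounded) relocation data.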
sl@0: iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize); sl@0: if (!iCodeRelocTable) sl@0: return KErrNoMemory; sl@0: iImportFixupTable = iCodeRelocTable + relocSize; sl@0: kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize); sl@0: kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize); sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: #endif sl@0: sl@0: sl@0: TInt DMmuCodeSegMemory::Create(TCodeSegCreateInfo& aInfo) sl@0: { sl@0: TInt r = KErrNone; sl@0: if (!aInfo.iUseCodePaging) sl@0: iPageCount=(iRamInfo.iCodeSize+iRamInfo.iDataSize+KPageMask)>>KPageShift; sl@0: else sl@0: { sl@0: #ifdef __DEMAND_PAGING__ sl@0: iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize); sl@0: if (!iDataSectionMemory) sl@0: return KErrNoMemory; sl@0: sl@0: iPageCount=(iRamInfo.iCodeSize+KPageMask)>>KPageShift; sl@0: iDataPageCount=(iRamInfo.iDataSize+KPageMask)>>KPageShift; sl@0: sl@0: r = ReadBlockMap(aInfo); sl@0: if (r != KErrNone) sl@0: return r; sl@0: sl@0: iIsDemandPaged = ETrue; sl@0: iCodeSeg->iAttr |= ECodeSegAttCodePaged; sl@0: #endif sl@0: } sl@0: sl@0: iCodeSeg->iSize = (iPageCount+iDataPageCount)<<KPageShift; sl@0: return r; sl@0: } sl@0: sl@0: sl@0: void DMmuCodeSegMemory::ApplyCodeFixups(TUint32* aBuffer, TLinAddr aDestAddress) sl@0: { sl@0: __NK_ASSERT_DEBUG(iRamInfo.iCodeRunAddr<=aDestAddress && aDestAddress<iRamInfo.iCodeRunAddr+iRamInfo.iCodeSize); sl@0: sl@0: TUint offset = aDestAddress - iRamInfo.iCodeRunAddr; sl@0: sl@0: // Index tables are only valid for pages containing code sl@0: if (offset >= (TUint)iRamInfo.iCodeSize) sl@0: return; sl@0: sl@0: UNLOCK_USER_MEMORY(); sl@0: sl@0: TInt page = offset >> KPageShift; sl@0: sl@0: // Relocate code sl@0: sl@0: if (iCodeRelocTableSize > 0) sl@0: { sl@0: TUint32* codeRelocTable32 = (TUint32*)iCodeRelocTable; sl@0: TUint startOffset = codeRelocTable32[page]; sl@0: TUint endOffset = codeRelocTable32[page + 1]; sl@0: sl@0: __KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset)); sl@0: __ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iCodeRelocTableSize, sl@0: K::Fault(K::ECodeSegBadFixupTables)); sl@0: sl@0: TUint8* codeRelocTable8 = (TUint8*)codeRelocTable32; sl@0: const TUint16* ptr = (const TUint16*)(codeRelocTable8 + startOffset); sl@0: const TUint16* end = (const TUint16*)(codeRelocTable8 + endOffset); sl@0: sl@0: const TUint32 codeDelta = iCodeDelta; sl@0: const TUint32 dataDelta = iDataDelta; sl@0: sl@0: while (ptr < end) sl@0: { sl@0: TUint16 entry = *ptr++; sl@0: sl@0: // address of word to fix up is sum of page start and 12-bit offset sl@0: TUint32* addr = (TUint32*)((TUint8*)aBuffer + (entry & 0x0fff)); sl@0: sl@0: TUint32 word = *addr; sl@0: #ifdef _DEBUG sl@0: TInt type = entry & 0xf000; sl@0: __NK_ASSERT_DEBUG(type == KTextRelocType || type == KDataRelocType); sl@0: #endif sl@0: if (entry < KDataRelocType /* => type == KTextRelocType */) sl@0: word += codeDelta; sl@0: else sl@0: word += dataDelta; sl@0: *addr = word; sl@0: } sl@0: } sl@0: sl@0: // Fixup imports sl@0: sl@0: if (iImportFixupTableSize > 0) sl@0: { sl@0: TUint32* importFixupTable32 = (TUint32*)iImportFixupTable; sl@0: TUint startOffset = importFixupTable32[page]; sl@0: TUint endOffset = importFixupTable32[page + 1]; sl@0: sl@0: __KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset)); sl@0: __ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iImportFixupTableSize, sl@0: K::Fault(K::ECodeSegBadFixupTables)); sl@0: sl@0: TUint8* importFixupTable8 = (TUint8*)importFixupTable32; sl@0: const TUint16* ptr = (const TUint16*)(importFixupTable8 + startOffset); sl@0: const TUint16* end = (const TUint16*)(importFixupTable8 + endOffset); sl@0: sl@0: while (ptr < end) sl@0: { sl@0: TUint16 offset = *ptr++; sl@0: sl@0: // get word to write into that address sl@0: // (don't read as a single TUint32 because may not be
word-aligned) sl@0: TUint32 wordLow = *ptr++; sl@0: TUint32 wordHigh = *ptr++; sl@0: TUint32 word = (wordHigh << 16) | wordLow; sl@0: sl@0: __KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word)); sl@0: sl@0: // write it sl@0: *(TUint32*)((TUint8*)aBuffer+offset) = word; sl@0: } sl@0: } sl@0: sl@0: LOCK_USER_MEMORY(); sl@0: } sl@0: sl@0: sl@0: TInt DMmuCodeSegMemory::ApplyCodeFixupsOnLoad(TUint32* aBuffer, TLinAddr aDestAddress) sl@0: { sl@0: #ifdef __DEMAND_PAGING__ sl@0: TInt r=DemandPaging::ThePager->LockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess()); sl@0: if(r!=KErrNone) sl@0: return r; sl@0: #endif sl@0: ApplyCodeFixups(aBuffer,aDestAddress); sl@0: UNLOCK_USER_MEMORY(); sl@0: CacheMaintenance::CodeChanged((TLinAddr)aBuffer, KPageSize); sl@0: LOCK_USER_MEMORY(); sl@0: #ifdef __DEMAND_PAGING__ sl@0: DemandPaging::ThePager->UnlockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess()); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: #ifdef __DEMAND_PAGING__ sl@0: sl@0: TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject) sl@0: { sl@0: aPinObject = (TVirtualPinObject*) new DDemandPagingLock; sl@0: return aPinObject != NULL ? KErrNone : KErrNoMemory; sl@0: } sl@0: sl@0: TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread) sl@0: { sl@0: if (!DemandPaging::ThePager) sl@0: return KErrNone; sl@0: sl@0: if (!DemandPaging::ThePager->MayBePaged(aStart, aSize)) sl@0: return KErrNone; sl@0: sl@0: DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject; sl@0: TInt r = lock->Alloc(aSize); sl@0: if (r != KErrNone) sl@0: return r; sl@0: lock->Lock(aThread, aStart, aSize); sl@0: return KErrNone; sl@0: } sl@0: sl@0: TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize) sl@0: { sl@0: aPinObject = 0; sl@0: sl@0: if (!DemandPaging::ThePager) sl@0: return KErrNone; sl@0: if (!DemandPaging::ThePager->MayBePaged(aStart, aSize)) sl@0: return KErrNone; sl@0: sl@0: TInt r = CreateVirtualPinObject(aPinObject); sl@0: if (r != KErrNone) sl@0: return r; sl@0: sl@0: DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject; sl@0: r = lock->Alloc(aSize); sl@0: if (r != KErrNone) sl@0: return r; sl@0: lock->Lock(TheCurrentThread, aStart, aSize); sl@0: return KErrNone; sl@0: } sl@0: sl@0: void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject) sl@0: { sl@0: DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject; sl@0: if (lock) sl@0: lock->Free(); sl@0: } sl@0: sl@0: void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject) sl@0: { sl@0: DDemandPagingLock* lock = (DDemandPagingLock*)__e32_atomic_swp_ord_ptr(&aPinObject, 0); sl@0: if (lock) sl@0: lock->AsyncDelete(); sl@0: } sl@0: sl@0: #else sl@0: sl@0: class TVirtualPinObject sl@0: { sl@0: }; sl@0: sl@0: TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject) sl@0: { sl@0: aPinObject = new TVirtualPinObject; sl@0: return aPinObject != NULL ?
KErrNone : KErrNoMemory; sl@0: } sl@0: sl@0: TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr, TUint, DThread*) sl@0: { sl@0: __ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad)); sl@0: (void)aPinObject; sl@0: return KErrNone; sl@0: } sl@0: sl@0: TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr, TUint) sl@0: { sl@0: aPinObject = 0; sl@0: return KErrNone; sl@0: } sl@0: sl@0: void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject) sl@0: { sl@0: __ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad)); sl@0: (void)aPinObject; sl@0: } sl@0: sl@0: void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject) sl@0: { sl@0: TVirtualPinObject* object = (TVirtualPinObject*)__e32_atomic_swp_ord_ptr(&aPinObject, 0); sl@0: if (object) sl@0: Kern::AsyncFree(object); sl@0: } sl@0: sl@0: #endif sl@0: sl@0: TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject) sl@0: { sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: TInt M::PinPhysicalMemory(TPhysicalPinObject*, TLinAddr, TUint, TBool, TUint32&, TUint32*, TUint32&, TUint&, DThread*) sl@0: { sl@0: K::Fault(K::EPhysicalPinObjectBad); sl@0: return KErrNone; sl@0: } sl@0: sl@0: void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject) sl@0: { sl@0: K::Fault(K::EPhysicalPinObjectBad); sl@0: } sl@0: sl@0: void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject) sl@0: { sl@0: K::Fault(K::EPhysicalPinObjectBad); sl@0: } sl@0: sl@0: sl@0: // sl@0: // Kernel map and pin (Not supported on the moving or multiple memory models). sl@0: // sl@0: sl@0: TInt M::CreateKernelMapObject(TKernelMapObject*&, TUint) sl@0: { sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: sl@0: TInt M::MapAndPinMemory(TKernelMapObject*, DThread*, TLinAddr, TUint, TUint, TLinAddr&, TPhysAddr*) sl@0: { sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: sl@0: void M::UnmapAndUnpinMemory(TKernelMapObject*) sl@0: { sl@0: } sl@0: sl@0: sl@0: void M::DestroyKernelMapObject(TKernelMapObject*&) sl@0: { sl@0: } sl@0: sl@0: sl@0: // Misc DPagingDevice methods sl@0: sl@0: EXPORT_C void DPagingDevice::NotifyIdle() sl@0: { sl@0: // Not used on this memory model sl@0: } sl@0: sl@0: EXPORT_C void DPagingDevice::NotifyBusy() sl@0: { sl@0: // Not used on this memory model sl@0: } sl@0: sl@0: EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* , TUint , TUint , TUint , TUint32 ) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite"); sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* , TUint , TUint , TUint , TUint32 ) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead"); sl@0: return KErrNotSupported; sl@0: } sl@0: EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* , TUint , TUint , TUint , TUint32 ) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead"); sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: // sl@0: // Page moving methods sl@0: // sl@0: sl@0: /* sl@0: * Move a page from aOld to aNew safely, updating any references to the page sl@0: * stored elsewhere (such as page table entries). The destination page must sl@0: * already be allocated. If the move is successful, the source page will be sl@0: * freed and returned to the allocator. sl@0: * sl@0: * @pre RAM alloc mutex must be held. sl@0: * @pre Calling thread must be in a critical section. sl@0: * @pre Interrupts must be enabled. 
sl@0: * @pre Kernel must be unlocked. sl@0: * @pre No fast mutex can be held. sl@0: * @pre Call in a thread context. sl@0: */ sl@0: TInt MmuBase::MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Defrag::DoMovePage"); sl@0: __ASSERT_WITH_MESSAGE_MUTEX(MmuBase::RamAllocatorMutex, "Ram allocator mutex must be held", "Defrag::DoMovePage"); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() old=%08x",aOld)); sl@0: TInt r = KErrNotSupported; sl@0: #if defined(__CPU_X86) && defined(__MEMMODEL_MULTIPLE__) sl@0: return r; sl@0: #endif sl@0: aNew = KPhysAddrInvalid; sl@0: NKern::LockSystem(); sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aOld); sl@0: if (!pi) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page has no PageInfo")); sl@0: r = KErrArgument; sl@0: goto fail; sl@0: } sl@0: if (pi->LockCount()) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page is locked")); sl@0: goto fail; sl@0: } sl@0: sl@0: switch(pi->Type()) sl@0: { sl@0: case SPageInfo::EUnused: sl@0: // Nothing to do - we allow this, though, in case the caller wasn't sl@0: // actually checking the free bitmap. sl@0: r = KErrNotFound; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage(): page unused")); sl@0: break; sl@0: sl@0: case SPageInfo::EChunk: sl@0: { sl@0: // It's a chunk - we need to investigate what it's used for. sl@0: DChunk* chunk = (DChunk*)pi->Owner(); sl@0: TInt offset = pi->Offset()<<KPageShift; sl@0: sl@0: switch(chunk->iChunkType) sl@0: { sl@0: case EKernelData: sl@0: case EKernelMessage: sl@0: // The kernel data/bss/heap chunk pages are not moved as DMA may be accessing them. sl@0: __KTRACE_OPT(KMMU, Kern::Printf("MmuBase::MovePage() fails: kernel data")); sl@0: goto fail; sl@0: sl@0: case EKernelStack: sl@0: // The kernel thread stack chunk. sl@0: r = MoveKernelStackPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest); sl@0: __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: k stack r%d",r)); sl@0: __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0); sl@0: goto released; sl@0: sl@0: case EKernelCode: sl@0: case EDll: sl@0: // The kernel code chunk, or a global user code chunk. sl@0: r = MoveCodeChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest); sl@0: __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: code chk r%d",r)); sl@0: __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0); sl@0: goto released; sl@0: sl@0: case ERamDrive: sl@0: case EUserData: sl@0: case EDllData: sl@0: case EUserSelfModCode: sl@0: // A data chunk of some description. sl@0: r = MoveDataChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest); sl@0: __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: data chk r%d",r)); sl@0: __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0); sl@0: goto released; sl@0: sl@0: case ESharedKernelSingle: sl@0: case ESharedKernelMultiple: sl@0: case ESharedIo: sl@0: case ESharedKernelMirror: sl@0: // These chunk types cannot be moved sl@0: r = KErrNotSupported; sl@0: __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: shared r%d",r)); sl@0: break; sl@0: sl@0: case EUserCode: sl@0: default: sl@0: // Unknown page type, or EUserCode.
sl@0: // EUserCode is not used in moving model, and on multiple model sl@0: // it never owns any pages so shouldn't be found via SPageInfo sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Defrag::DoMovePage fails: unknown chunk type %d",chunk->iChunkType)); sl@0: Panic(EDefragUnknownChunkType); sl@0: } sl@0: } sl@0: break; sl@0: sl@0: case SPageInfo::ECodeSegMemory: sl@0: // It's a code segment memory section (multiple model only) sl@0: r = MoveCodeSegMemoryPage((DMemModelCodeSegMemory*)pi->Owner(), pi->Offset()<<KPageShift, aOld, aNew, aBlockZoneId, aBlockRest); sl@0: __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: codeseg r%d",r)); sl@0: __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0); sl@0: goto released; sl@0: sl@0: default: sl@0: // Unknown page type sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Defrag::DoMovePage fails: unknown page type %d",pi->Type())); sl@0: Panic(EDefragUnknownPageType); sl@0: } sl@0: sl@0: fail: sl@0: NKern::UnlockSystem(); sl@0: released: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() returns %d",r)); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TInt MmuBase::DiscardPage(TPhysAddr aAddr, TUint aBlockZoneId, TBool aBlockRest) sl@0: { sl@0: TInt r = KErrInUse; sl@0: NKern::LockSystem(); sl@0: SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr); sl@0: if (pageInfo != NULL) sl@0: {// Allocatable page at this address so is it a discardable one? sl@0: if (iRamCache->IsPageDiscardable(*pageInfo)) sl@0: { sl@0: // Discard this page and return it to the ram allocator sl@0: if (!iRamCache->DoDiscardPage(*pageInfo, aBlockZoneId, aBlockRest)) sl@0: {// Couldn't discard the page. sl@0: if (aBlockRest) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("ClearDiscardableFromZone: page discard fail addr %x", aAddr)); sl@0: NKern::UnlockSystem(); sl@0: return KErrNoMemory; sl@0: } sl@0: } sl@0: else sl@0: {// Page discarded successfully. sl@0: r = KErrNone; sl@0: } sl@0: } sl@0: } sl@0: NKern::UnlockSystem(); sl@0: return r; sl@0: } sl@0: sl@0: TUint MmuBase::NumberOfFreeDpPages() sl@0: { sl@0: TUint free = 0; sl@0: if(iRamCache) sl@0: { sl@0: free = iRamCache->NumberOfFreePages(); sl@0: } sl@0: return free; sl@0: } sl@0: sl@0: sl@0: EXPORT_C TInt Epoc::MovePhysicalPage(TPhysAddr aOld, TPhysAddr& aNew, TRamDefragPageToMove aPageToMove) sl@0: { sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::MovePhysicalPage"); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() old=%08x pageToMove=%d",aOld,aPageToMove)); sl@0: sl@0: switch(aPageToMove) sl@0: { sl@0: case ERamDefragPage_Physical: sl@0: break; sl@0: default: sl@0: return KErrNotSupported; sl@0: } sl@0: sl@0: MmuBase::Wait(); sl@0: TInt r=M::MovePage(aOld,aNew,KRamZoneInvalidId,EFalse); sl@0: if (r!=KErrNone) sl@0: aNew = KPhysAddrInvalid; sl@0: MmuBase::Signal(); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() returns %d",r)); sl@0: return r; sl@0: } sl@0: sl@0: sl@0: TInt M::RamDefragFault(TAny* aExceptionInfo) sl@0: { sl@0: // If the mmu has been initialised then let it try processing the fault. sl@0: if(MmuBase::TheMmu) sl@0: return MmuBase::TheMmu->RamDefragFault(aExceptionInfo); sl@0: return KErrAbort; sl@0: } sl@0: sl@0: sl@0: void M::RamZoneClaimed(SZone* aZone) sl@0: { sl@0: // Lock each page. OK to traverse SPageInfo array as we know no unknown sl@0: // pages are in the zone. sl@0: SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aZone->iPhysBase); sl@0: SPageInfo* pageInfoEnd = pageInfo + aZone->iPhysPages; sl@0: for (; pageInfo < pageInfoEnd; ++pageInfo) sl@0: { sl@0: NKern::LockSystem(); sl@0: __NK_ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EUnused); sl@0: pageInfo->Lock(); sl@0: NKern::UnlockSystem(); sl@0: } sl@0: // For the sake of platform security we have to clear the memory. E.g. the driver sl@0: // could assign it to a chunk visible to user side. Set LSB so ClearPages sl@0: // knows this is a contiguous memory region.
sl@0: Mmu::Get().ClearPages(aZone->iPhysPages, (TPhysAddr*)(aZone->iPhysBase|1)); sl@0: }