Update contrib.
1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\memmodel\epoc\mmubase\mmubase.cpp
18 #include <memmodel/epoc/mmubase/mmubase.h>
19 #include <mmubase.inl>
21 #include <demand_paging.h>
22 #include "cache_maintenance.h"
23 #include "highrestimer.h"
28 __ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift));
30 _LIT(KLitRamAlloc,"RamAlloc");
31 _LIT(KLitHwChunk,"HwChunk");
34 DMutex* MmuBase::HwChunkMutex;
35 DMutex* MmuBase::RamAllocatorMutex;
36 #ifdef BTRACE_KERNEL_MEMORY
37 TInt Epoc::DriverAllocdPhysRam = 0;
38 TInt Epoc::KernelMiscPages = 0;
41 /******************************************************************************
42 * Code common to all MMU memory models
43 ******************************************************************************/
45 const TInt KFreePagesStepSize=16;
47 void MmuBase::Panic(TPanic aPanic)
49 Kern::Fault("MMUBASE",aPanic);
52 void SPageInfo::Lock()
54 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Lock");
57 MmuBase::Panic(MmuBase::EPageLockedTooManyTimes);
60 TInt SPageInfo::Unlock()
62 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Unlock");
64 MmuBase::Panic(MmuBase::EPageUnlockedTooManyTimes);
69 void SPageInfo::Set(TType aType, TAny* aOwner, TUint32 aOffset)
71 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Set");
72 (TUint16&)iType = aType; // also sets iState to EStateNormal
79 void SPageInfo::Change(TType aType,TState aState)
81 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Change");
87 void SPageInfo::SetState(TState aState)
89 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::SetState");
94 void SPageInfo::SetModifier(TAny* aModifier)
96 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::SetModifier");
97 iModifier = aModifier;
100 TInt SPageInfo::CheckModified(TAny* aModifier)
102 CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::CheckModified");
103 return iModifier!=aModifier;
106 void SPageInfo::SetZone(TUint8 aZoneIndex)
108 __ASSERT_ALWAYS(K::Initialising,Kern::Fault("SPageInfo::SetZone",0));
116 : iRamCache(NULL), iDefrag(NULL)
120 TUint32 MmuBase::RoundToPageSize(TUint32 aSize)
122 return (aSize+KPageMask)&~KPageMask;
125 TUint32 MmuBase::RoundToChunkSize(TUint32 aSize)
127 TUint32 mask=TheMmu->iChunkMask;
128 return (aSize+mask)&~mask;
131 TInt MmuBase::RoundUpRangeToPageSize(TUint32& aBase, TUint32& aSize)
133 TUint32 mask=KPageMask;
134 TUint32 shift=KPageShift;
135 TUint32 offset=aBase&mask;
137 aSize=(aSize+offset+mask)&~mask;
138 return TInt(aSize>>shift);
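// Illustrative note (not in the original source): a worked example of the rounding
// above, assuming 4KB pages so KPageMask==0xFFF and KPageShift==12.
// With aBase=0x80001234 and aSize=0x1800:
//   offset = 0x234, aBase is rounded down to the page boundary 0x80001000,
//   aSize  = (0x1800+0x234+0xFFF)&~0xFFF = 0x2000,
// and RoundUpRangeToPageSize() returns 2, the number of pages spanned by the range.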
143 Kern::MutexWait(*RamAllocatorMutex);
144 if (RamAllocatorMutex->iHoldCount==1)
147 m.iInitialFreeMemory=Kern::FreeRamInBytes();
148 m.iAllocFailed=EFalse;
152 void MmuBase::Signal()
154 if (RamAllocatorMutex->iHoldCount>1)
156 Kern::MutexSignal(*RamAllocatorMutex);
160 TInt initial=m.iInitialFreeMemory;
161 TBool failed=m.iAllocFailed;
162 TInt final=Kern::FreeRamInBytes();
163 Kern::MutexSignal(*RamAllocatorMutex);
164 K::CheckFreeMemoryLevel(initial,final,failed);
167 void MmuBase::WaitHwChunk()
169 Kern::MutexWait(*HwChunkMutex);
172 void MmuBase::SignalHwChunk()
174 Kern::MutexSignal(*HwChunkMutex);
178 void MmuBase::MapRamPage(TLinAddr aAddr, TPhysAddr aPage, TPte aPtePerm)
180 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapRamPage %08x@%08x perm %08x", aPage, aAddr, aPtePerm));
181 TInt ptid=PageTableId(aAddr);
183 MapRamPages(ptid,SPageInfo::EInvalid,0,aAddr,&aPage,1,aPtePerm);
184 NKern::UnlockSystem();
188 // Unmap and free pages from a global area
190 void MmuBase::UnmapAndFree(TLinAddr aAddr, TInt aNumPages)
192 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::UnmapAndFree(%08x,%d)",aAddr,aNumPages));
195 TInt pt_np=(iChunkSize-(aAddr&iChunkMask))>>iPageShift;
196 TInt np=Min(aNumPages,pt_np);
198 TInt id=PageTableId(aAddr);
203 TInt np2=Min(np,KFreePagesStepSize);
204 TPhysAddr phys[KFreePagesStepSize];
208 UnmapPages(id,aAddr,np2,phys,true,nptes,nfree,NULL);
209 NKern::UnlockSystem();
212 if (iDecommitThreshold)
213 CacheMaintenanceOnDecommit(phys, nfree);
214 iRamPageAllocator->FreeRamPages(phys,nfree,EPageFixed);
217 aAddr+=(np2<<iPageShift);
222 aAddr+=(np<<iPageShift);
227 void MmuBase::FreePages(TPhysAddr* aPageList, TInt aCount, TZonePageType aPageType)
229 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePages(%08x,%d)",aPageList,aCount));
232 TBool sync_decommit = (TUint(aCount)<iDecommitThreshold);
233 TPhysAddr* ppa=aPageList;
234 TPhysAddr* ppaE=ppa+aCount;
239 SPageInfo* pi=SPageInfo::SafeFromPhysAddr(pa);
244 ppa[-1]=KPhysAddrInvalid; // don't free page if it's locked down
245 else if (sync_decommit)
247 NKern::UnlockSystem();
248 CacheMaintenanceOnDecommit(pa);
253 NKern::FlashSystem();
255 NKern::UnlockSystem();
256 if (iDecommitThreshold && !sync_decommit)
257 CacheMaintenance::SyncPhysicalCache_All();
258 iRamPageAllocator->FreeRamPages(aPageList,aCount, aPageType);
261 TInt MmuBase::InitPageTableInfo(TInt aId)
263 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::InitPageTableInfo(%x)",aId));
264 TInt ptb=aId>>iPtBlockShift;
265 if (++iPtBlockCount[ptb]==1)
267 // expand page table info array
269 if (AllocRamPages(&pagePhys,1, EPageFixed)!=KErrNone)
271 __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
272 iPtBlockCount[ptb]=0;
276 #ifdef BTRACE_KERNEL_MEMORY
277 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
278 ++Epoc::KernelMiscPages;
280 TLinAddr pil=PtInfoBlockLinAddr(ptb);
282 SPageInfo::FromPhysAddr(pagePhys)->SetPtInfo(ptb);
283 NKern::UnlockSystem();
284 MapRamPage(pil, pagePhys, iPtInfoPtePerm);
285 memclr((TAny*)pil, iPageSize);
290 TInt MmuBase::DoAllocPageTable(TPhysAddr& aPhysAddr)
292 // Allocate a new page table but don't map it.
293 // Return page table id and page number/phys address of new page if any.
296 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoAllocPageTable()"));
298 if(K::CheckForSimulatedAllocFail())
301 TInt id=iPageTableAllocator?iPageTableAllocator->Alloc():-1;
304 // need to allocate a new page
305 if (AllocRamPages(&aPhysAddr,1, EPageFixed)!=KErrNone)
307 __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
312 // allocate an ID for the new page
313 id=iPageTableLinearAllocator->Alloc();
316 id<<=iPtClusterShift;
317 __KTRACE_OPT(KMMU,Kern::Printf("Allocated ID %04x",id));
319 if (id<0 || InitPageTableInfo(id)!=KErrNone)
321 __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page table info"));
322 iPageTableLinearAllocator->Free(id>>iPtClusterShift);
323 if (iDecommitThreshold)
324 CacheMaintenanceOnDecommit(aPhysAddr);
326 iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed);
331 // Set up page info for new page
333 SPageInfo::FromPhysAddr(aPhysAddr)->SetPageTable(id>>iPtClusterShift);
334 NKern::UnlockSystem();
335 #ifdef BTRACE_KERNEL_MEMORY
336 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
337 ++Epoc::KernelMiscPages;
339 // mark all subpages other than first as free for use as page tables
340 if (iPtClusterSize>1)
341 iPageTableAllocator->Free(id+1,iPtClusterSize-1);
344 aPhysAddr=KPhysAddrInvalid;
346 __KTRACE_OPT(KMMU,Kern::Printf("DoAllocPageTable returns %d (%08x)",id,aPhysAddr));
347 PtInfo(id).SetUnused();
351 TInt MmuBase::MapPageTable(TInt aId, TPhysAddr aPhysAddr, TBool aAllowExpand)
353 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapPageTable(%d,%08x)",aId,aPhysAddr));
354 TLinAddr ptLin=PageTableLinAddr(aId);
355 TInt ptg=aId>>iPtGroupShift;
356 if (++iPtGroupCount[ptg]==1)
358 // need to allocate a new page table
359 __ASSERT_ALWAYS(aAllowExpand, Panic(EMapPageTableBadExpand));
361 TInt xptid=DoAllocPageTable(xptPhys);
364 __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate extra page table"));
365 iPtGroupCount[ptg]=0;
368 if (xptPhys==KPhysAddrInvalid)
369 xptPhys=aPhysAddr + ((xptid-aId)<<iPageTableShift);
370 BootstrapPageTable(xptid, xptPhys, aId, aPhysAddr); // initialise XPT and map it
373 MapRamPage(ptLin, aPhysAddr, iPtPtePerm);
377 TInt MmuBase::AllocPageTable()
379 // Allocate a new page table, mapped at the correct linear address.
380 // Clear all entries to Not Present. Return page table id.
383 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::AllocPageTable()"));
384 __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
387 TInt id=DoAllocPageTable(ptPhys);
390 if (ptPhys!=KPhysAddrInvalid)
392 TInt r=MapPageTable(id,ptPhys);
396 SPageInfo* pi=SPageInfo::FromPhysAddr(ptPhys);
399 NKern::UnlockSystem();
400 if (iDecommitThreshold)
401 CacheMaintenanceOnDecommit(ptPhys);
403 iRamPageAllocator->FreeRamPage(ptPhys, EPageFixed);
408 __KTRACE_OPT(KMMU,Kern::Printf("AllocPageTable returns %d",id));
412 TBool MmuBase::DoFreePageTable(TInt aId)
414 // Free an empty page table. We assume that all pages mapped by the page table have
415 // already been unmapped and freed.
418 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoFreePageTable(%d)",aId));
419 SPageTableInfo& s=PtInfo(aId);
420 __NK_ASSERT_DEBUG(!s.iCount); // shouldn't have any pages mapped
423 TInt id=aId &~ iPtClusterMask;
424 if (iPageTableAllocator)
426 iPageTableAllocator->Free(aId);
427 if (iPageTableAllocator->NotFree(id,iPtClusterSize))
429 // some subpages still in use
432 __KTRACE_OPT(KMMU,Kern::Printf("Freeing whole page, id=%d",id));
433 // whole page is now free
434 // remove it from the page table allocator
435 iPageTableAllocator->Alloc(id,iPtClusterSize);
438 TInt ptb=aId>>iPtBlockShift;
439 if (--iPtBlockCount[ptb]==0)
441 // shrink page table info array
442 TLinAddr pil=PtInfoBlockLinAddr(ptb);
443 UnmapAndFree(pil,1); // remove PTE, null page info, free page
444 #ifdef BTRACE_KERNEL_MEMORY
445 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
446 --Epoc::KernelMiscPages;
450 // free the page table linear address
451 iPageTableLinearAllocator->Free(id>>iPtClusterShift);
455 void MmuBase::FreePageTable(TInt aId)
457 // Free an empty page table. We assume that all pages mapped by the page table have
458 // already been unmapped and freed.
461 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePageTable(%d)",aId));
462 if (DoFreePageTable(aId))
465 TInt id=aId &~ iPtClusterMask;
467 // calculate linear address of page
468 TLinAddr ptLin=PageTableLinAddr(id);
469 __KTRACE_OPT(KMMU,Kern::Printf("Page lin %08x",ptLin));
471 // unmap and free the page
472 UnmapAndFree(ptLin,1);
473 #ifdef BTRACE_KERNEL_MEMORY
474 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
475 --Epoc::KernelMiscPages;
478 TInt ptg=aId>>iPtGroupShift;
479 --iPtGroupCount[ptg];
480 // don't shrink the page table mapping for now
483 TInt MmuBase::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
485 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() size=%x align=%d",aSize,aAlign));
486 TInt r=AllocContiguousRam(aSize, aPhysAddr, EPageFixed, aAlign);
492 TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
493 SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
498 __NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused);
500 NKern::UnlockSystem();
505 /** Attempt to allocate a contiguous block of RAM from the specified zone.
507 @param aZoneIdList An array of the IDs of the RAM zones to allocate from.
508 @param aZoneIdCount The number of RAM zone IDs listed in aZoneIdList.
509 @param aSize The number of contiguous bytes to allocate
510 @param aPhysAddr The physical address of the start of the contiguous block of
512 @param aAlign Required alignment
513 @return KErrNone on success, KErrArgument if zone doesn't exist or aSize is larger than the
514 size of the RAM zone or KErrNoMemory when the RAM zone is too full.
516 TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
518 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() size=0x%x align=%d", aSize, aAlign));
519 TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, EPageFixed, aAlign);
525 TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
526 SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
531 __NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused);
533 NKern::UnlockSystem();
539 /** Attempt to allocate discontiguous RAM pages.
541 @param aNumPages The number of pages to allocate.
542 @param aPageList Pointer to an array where each element will be the physical
543 address of each page allocated.
544 @return KErrNone on success, KErrNoMemory otherwise
546 TInt MmuBase::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
548 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() numpages=%x", aNumPages));
549 TInt r = AllocRamPages(aPageList, aNumPages, EPageFixed);
555 TPhysAddr* pageEnd = aPageList + aNumPages;
556 for (TPhysAddr* page = aPageList; page < pageEnd; page++)
558 SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
560 __NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused);
562 NKern::UnlockSystem();
568 /** Attempt to allocate discontiguous RAM pages from the specified RAM zones.
570 @param aZoneIdList An array of the IDs of the RAM zones to allocate from.
571 @param aZoneIdCount The number of RAM zone IDs listed in aZoneIdList.
572 @param aNumPages The number of pages to allocate.
573 @param aPageList Pointer to an array where each element will be the physical
574 address of each page allocated.
575 @return KErrNone on success, KErrArgument if zone doesn't exist or aNumPages is
576 larger than the total number of pages in the RAM zone or KErrNoMemory when the RAM
579 TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
581 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() numpages 0x%x zones 0x%x", aNumPages, aZoneIdCount));
582 TInt r = ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed);
589 TPhysAddr* pageEnd = aPageList + aNumPages;
590 for (TPhysAddr* page = aPageList; page < pageEnd; page++)
592 SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
594 __NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused);
596 NKern::UnlockSystem();
602 TInt MmuBase::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
604 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%x)",aPhysAddr,aSize));
606 TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
607 SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
612 __ASSERT_ALWAYS(pI->Type()==SPageInfo::EUnused && pI->Unlock()==0, Panic(EBadFreePhysicalRam));
613 NKern::UnlockSystem();
615 TInt r=iRamPageAllocator->FreePhysicalRam(aPhysAddr, aSize);
619 /** Free discontiguous RAM pages that were previously allocated using the discontiguous
620 overload of MmuBase::AllocPhysicalRam() or MmuBase::ZoneAllocPhysicalRam().
622 Specifying one of the following may cause the system to panic:
623 a) an invalid physical RAM address.
624 b) valid physical RAM addresses where some had not been previously allocated.
625 c) an address not aligned to a page boundary.
627 @param aNumPages Number of pages to free
628 @param aPageList Array of the physical address of each page to free
630 @return KErrNone if the operation was successful.
633 TInt MmuBase::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
635 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%08x)", aNumPages, aPageList));
637 TPhysAddr* pageEnd = aPageList + aNumPages;
640 for (TPhysAddr* page = aPageList; page < pageEnd && r == KErrNone; page++)
642 SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
644 __ASSERT_ALWAYS(pageInfo->Type()==SPageInfo::EUnused && pageInfo->Unlock()==0, Panic(EBadFreePhysicalRam));
645 NKern::UnlockSystem();
648 r = iRamPageAllocator->FreePhysicalRam(*page, KPageSize);
654 TInt MmuBase::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
656 __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(%08x,%x)",aPhysAddr,aSize));
657 TUint32 pa=aPhysAddr;
659 TInt n=RoundUpRangeToPageSize(pa,size);
660 TInt r=iRamPageAllocator->ClaimPhysicalRam(pa, size);
663 SPageInfo* pI=SPageInfo::FromPhysAddr(pa);
668 __NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused && pI->LockCount()==0);
670 NKern::UnlockSystem();
677 Allocate a set of discontiguous RAM pages from the specified zone.
679 @param aZoneIdList The array of IDs of the RAM zones to allocate from.
680 @param aZoneIdCount The number of RAM zone IDs in aZoneIdList.
681 @param aPageList Preallocated array of TPhysAddr elements that will receive the
682 physical address of each page allocated.
683 @param aNumPages The number of pages to allocate.
684 @param aPageType The type of the pages being allocated.
686 @return KErrNone on success, KErrArgument if a zone of aZoneIdList doesn't exist,
687 KErrNoMemory if there aren't enough free pages in the zone
689 TInt MmuBase::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType)
692 if(K::CheckForSimulatedAllocFail())
695 __NK_ASSERT_DEBUG(aPageType == EPageFixed);
697 return iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, aPageType);
701 TInt MmuBase::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType, TUint aBlockedZoneId, TBool aBlockRest)
704 if(K::CheckForSimulatedAllocFail())
707 TInt missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest);
709 // If missing some pages, ask the RAM cache to donate some of its pages.
710 // Don't ask it for discardable pages as those are reserved for its own use.
711 if(missing && aPageType != EPageDiscard && iRamCache->GetFreePages(missing))
712 missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest);
713 return missing ? KErrNoMemory : KErrNone;
717 TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
720 if(K::CheckForSimulatedAllocFail())
723 __NK_ASSERT_DEBUG(aPageType == EPageFixed);
724 TUint contigPages = (aSize + KPageSize - 1) >> KPageShift;
725 TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
726 if (r == KErrNoMemory && contigPages > KMaxFreeableContiguousPages)
727 {// Allocation failed but, as this is a large allocation, flush the RAM cache
728 // and reattempt the allocation, since a large allocation would not have discarded pages itself.
729 iRamCache->FlushAll();
730 r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
737 Allocate contiguous RAM from the specified RAM zones.
738 @param aZoneIdList An array of IDs of the RAM zones to allocate from
739 @param aZoneIdCount The number of IDs listed in aZoneIdList
740 @param aSize The number of bytes to allocate
741 @param aPhysAddr Will receive the physical base address of the allocated RAM
742 @param aPageType The type of the pages being allocated
743 @param aAlign The log base 2 alignment required
745 TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign)
748 if(K::CheckForSimulatedAllocFail())
751 return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aPageType, aAlign);
754 SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
756 TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift);
757 TUint flags = ((TUint8*)KPageInfoMap)[index>>3];
758 TUint mask = 1<<(index&7);
760 return 0; // no SPageInfo for aAddress
761 SPageInfo* info = FromPhysAddr(aAddress);
762 if(info->Type()==SPageInfo::EInvalid)
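// Illustrative note (not in the original source): with 4KB pages (KPageShift==12)
// and a 16-byte SPageInfo (KPageInfoShift==4, see the __ASSERT_COMPILE near the top
// of this file), one page holds 256 SPageInfo structures, i.e. it describes 1MB of
// physical RAM. SafeFromPhysAddr() therefore uses aAddress>>20 as an index into the
// KPageInfoMap bitmap (one bit per 1MB granule) to check that an SPageInfo page
// actually exists before dereferencing it. The constants here are assumptions for
// illustration and depend on the memory model configuration.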
767 /** HAL Function wrapper for the RAM allocator.
770 TInt RamHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
772 DRamAllocator *pRamAlloc = MmuBase::TheMmu->iRamPageAllocator;
775 return pRamAlloc->HalFunction(aFunction, a1, a2);
776 return KErrNotSupported;
780 /******************************************************************************
782 ******************************************************************************/
784 void MmuBase::Init1()
786 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init1"));
787 iInitialFreeMemory=0;
791 void MmuBase::Init2()
793 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init2"));
794 TInt total_ram=TheSuperPage().iTotalRamSize;
795 TInt total_ram_pages=total_ram>>iPageShift;
796 iNumPages = total_ram_pages;
797 const SRamInfo& info=*(const SRamInfo*)TheSuperPage().iRamBootData;
798 iRamPageAllocator=DRamAllocator::New(info, RamZoneConfig, RamZoneCallback);
800 TInt max_pt=total_ram>>iPageTableShift;
801 if (max_pt<iMaxPageTables)
802 iMaxPageTables=max_pt;
803 iMaxPageTables &= ~iPtClusterMask;
804 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iMaxPageTables=%d",iMaxPageTables));
805 TInt max_ptpg=iMaxPageTables>>iPtClusterShift;
806 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptpg=%d",max_ptpg));
807 iPageTableLinearAllocator=TBitMapAllocator::New(max_ptpg,ETrue);
808 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableLinearAllocator=%08x",iPageTableLinearAllocator));
809 __ASSERT_ALWAYS(iPageTableLinearAllocator,Panic(EPtLinAllocCreateFailed));
810 if (iPtClusterShift) // if more than one page table per page
812 iPageTableAllocator=TBitMapAllocator::New(iMaxPageTables,EFalse);
813 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableAllocator=%08x",iPageTableAllocator));
814 __ASSERT_ALWAYS(iPageTableAllocator,Panic(EPtAllocCreateFailed));
816 TInt max_ptb=(iMaxPageTables+iPtBlockMask)>>iPtBlockShift;
817 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptb=%d",max_ptb));
818 iPtBlockCount=(TInt*)Kern::AllocZ(max_ptb*sizeof(TInt));
819 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtBlockCount=%08x",iPtBlockCount));
820 __ASSERT_ALWAYS(iPtBlockCount,Panic(EPtBlockCountCreateFailed));
821 TInt max_ptg=(iMaxPageTables+iPtGroupMask)>>iPtGroupShift;
822 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ptg_shift=%d, max_ptg=%d",iPtGroupShift,max_ptg));
823 iPtGroupCount=(TInt*)Kern::AllocZ(max_ptg*sizeof(TInt));
824 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtGroupCount=%08x",iPtGroupCount));
825 __ASSERT_ALWAYS(iPtGroupCount,Panic(EPtGroupCountCreateFailed));
828 // Clear the initial (and only so far) page table info page so all unused
829 // page tables will be marked as unused.
830 memclr((TAny*)KPageTableInfoBase, KPageSize);
832 // look for page tables - assume first page table (id=0) maps page tables
833 TPte* pPte=(TPte*)iPageTableLinBase;
835 for (i=0; i<iChunkSize/iPageSize; ++i)
838 if (!PteIsPresent(pte)) // after boot, page tables are contiguous
840 iPageTableLinearAllocator->Alloc(i,1);
841 TPhysAddr ptpgPhys=PtePhysAddr(pte, i);
842 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys);
843 __ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot));
846 TInt id=i<<iPtClusterShift;
847 TInt ptb=id>>iPtBlockShift;
848 ++iPtBlockCount[ptb];
849 TInt ptg=id>>iPtGroupShift;
850 ++iPtGroupCount[ptg];
853 // look for mapped pages
854 TInt npdes=1<<(32-iChunkShift);
856 for (i=0; i<npdes; ++i)
858 TLinAddr cAddr=TLinAddr(i<<iChunkShift);
859 if (cAddr>=PP::RamDriveStartAddress && TUint32(cAddr-PP::RamDriveStartAddress)<TUint32(PP::RamDriveRange))
860 continue; // leave RAM drive for now
861 TInt ptid=PageTableId(cAddr);
862 TPhysAddr pdePhys = PdePhysAddr(cAddr); // check for whole PDE mapping
867 __KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> page table %d", cAddr, ptid));
868 pPte=(TPte*)PageTableLinAddr(ptid);
871 if (pdePhys != KPhysAddrInvalid)
873 __KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", cAddr, pdePhys));
876 if (ptid>=0 || pdePhys != KPhysAddrInvalid)
880 for (j=0; j<iChunkSize/iPageSize; ++j)
882 TBool present = ETrue; // all pages present if whole PDE mapping
887 present = PteIsPresent(pte);
892 TPhysAddr pa = pPte ? PtePhysAddr(pte, j) : (pdePhys + (j<<iPageShift));
893 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
894 __KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x PA=%08x",
895 cAddr+(j<<iPageShift), pa));
896 if (pi) // ignore non-RAM mappings
897 {//these pages will never be freed and can't be moved
898 TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed);
899 // allow KErrAlreadyExists since it's possible that a page is doubly mapped
900 __ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
901 SetupInitialPageInfo(pi,cAddr,j);
902 #ifdef BTRACE_KERNEL_MEMORY
904 ++Epoc::KernelMiscPages;
909 __KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x #PTEs=%d",cAddr,np));
911 SetupInitialPageTableInfo(ptid,cAddr,np);
915 TInt oddpt=npt & iPtClusterMask;
917 oddpt=iPtClusterSize-oddpt;
918 __KTRACE_OPT(KBOOT,Kern::Printf("Total page tables %d, left over subpages %d",npt,oddpt));
920 iPageTableAllocator->Free(npt,oddpt);
924 // Save current free RAM size - there can never be more free RAM than this
925 TInt max_free = Kern::FreeRamInBytes();
926 K::MaxFreeRam = max_free;
927 if (max_free < PP::RamDriveMaxSize)
928 PP::RamDriveMaxSize = max_free;
931 ClearRamDrive(PP::RamDriveStartAddress);
935 TInt r=K::MutexCreate((DMutex*&)RamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc);
937 Panic(ERamAllocMutexCreateFailed);
938 r=K::MutexCreate((DMutex*&)HwChunkMutex, KLitHwChunk, NULL, EFalse, KMutexOrdHwChunk);
940 Panic(EHwChunkMutexCreateFailed);
942 #ifdef __DEMAND_PAGING__
943 if (DemandPaging::RomPagingRequested() || DemandPaging::CodePagingRequested())
944 iRamCache = DemandPaging::New();
946 iRamCache = new RamCache;
948 iRamCache = new RamCache;
951 Panic(ERamCacheAllocFailed);
953 RamCacheBase::TheRamCache = iRamCache;
955 // Get the allocator to signal to the variant which RAM zones are in use so far
956 iRamPageAllocator->InitialCallback();
959 void MmuBase::Init3()
961 __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init3"));
963 // Initialise demand paging
964 #ifdef __DEMAND_PAGING__
965 M::DemandPagingInit();
968 // Register a HAL Function for the Ram allocator.
969 TInt r = Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0);
970 __NK_ASSERT_ALWAYS(r==KErrNone);
973 // Perform the initialisation for page moving and the RAM defrag object.
976 // allocate a page to use as an alt stack
979 r = AllocPhysicalRam(KPageSize, stackpage);
982 Panic(EDefragStackAllocFailed);
984 // map it at a predetermined address
985 TInt ptid = PageTableId(KDefragAltStackAddr);
986 TPte perm = PtePermissions(EKernelStack);
988 MapRamPages(ptid, SPageInfo::EFixed, NULL, KDefragAltStackAddr, &stackpage, 1, perm);
989 NKern::UnlockSystem();
990 iAltStackBase = KDefragAltStackAddr + KPageSize;
992 __KTRACE_OPT(KMMU,Kern::Printf("Allocated defrag alt stack page at %08x, mapped to %08x, base is now %08x", stackpage, KDefragAltStackAddr, iAltStackBase));
994 // Create the actual defrag object and initialise it.
995 iDefrag = new Defrag;
997 Panic(EDefragAllocFailed);
998 iDefrag->Init3(iRamPageAllocator);
1001 void MmuBase::CreateKernelSection(TLinAddr aEnd, TInt aHwChunkAlign)
1003 TLinAddr base=(TLinAddr)TheRomHeader().iKernelLimit;
1004 iKernelSection=TLinearSection::New(base, aEnd);
1005 __ASSERT_ALWAYS(iKernelSection!=NULL, Panic(ECreateKernelSectionFailed));
1006 iHwChunkAllocator=THwChunkAddressAllocator::New(aHwChunkAlign, iKernelSection);
1007 __ASSERT_ALWAYS(iHwChunkAllocator!=NULL, Panic(ECreateHwChunkAllocFailed));
1010 // Recover RAM drive contents after a reset
1011 TInt MmuBase::RecoverRamDrive()
1013 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::RecoverRamDrive()"));
1015 TLinAddr chunk = PP::RamDriveStartAddress;
1016 TLinAddr end = chunk + (TLinAddr)PP::RamDriveRange;
1018 TInt limit = RoundToPageSize(TheSuperPage().iRamDriveSize);
1019 for( ; chunk<end; chunk+=iChunkSize)
1021 if (size==limit) // have reached end of ram drive
1023 TPhysAddr ptphys = 0;
1024 TInt ptid = BootPageTableId(chunk, ptphys); // ret KErrNotFound if PDE not present, KErrUnknown if present but as yet unknown page table
1025 __KTRACE_OPT(KMMU,Kern::Printf("Addr %08x: PTID=%d PTPHYS=%08x", chunk, ptid, ptphys));
1026 if (ptid==KErrNotFound)
1027 break; // no page table so stop here and clear to end of range
1028 TPhysAddr ptpgphys = ptphys & ~iPageMask;
1029 TInt r = iRamPageAllocator->MarkPageAllocated(ptpgphys, EPageMovable);
1030 __KTRACE_OPT(KMMU,Kern::Printf("MPA: r=%d",r));
1031 if (r==KErrArgument)
1032 break; // page table address was invalid - stop here and clear to end of range
1035 // this page was currently unallocated
1037 break; // ID has been allocated - bad news - bail here
1038 ptid = iPageTableLinearAllocator->Alloc();
1039 __ASSERT_ALWAYS(ptid>=0, Panic(ERecoverRamDriveAllocPTIDFailed));
1040 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgphys);
1041 __ASSERT_ALWAYS(pi, Panic(ERecoverRamDriveBadPageTable));
1042 pi->SetPageTable(ptid); // id = cluster number here
1043 ptid <<= iPtClusterShift;
1044 MapPageTable(ptid, ptpgphys, EFalse);
1045 if (iPageTableAllocator)
1046 iPageTableAllocator->Free(ptid, iPtClusterSize);
1047 ptid |= ((ptphys>>iPageTableShift)&iPtClusterMask);
1048 ptlin = PageTableLinAddr(ptid);
1049 __KTRACE_OPT(KMMU,Kern::Printf("Page table ID %d lin %08x", ptid, ptlin));
1050 if (iPageTableAllocator)
1051 iPageTableAllocator->Alloc(ptid, 1);
1055 // this page was already allocated
1057 break; // ID not allocated - bad news - bail here
1058 ptlin = PageTableLinAddr(ptid);
1059 __KTRACE_OPT(KMMU,Kern::Printf("Page table lin %08x", ptlin));
1060 if (iPageTableAllocator)
1061 iPageTableAllocator->Alloc(ptid, 1);
1064 TBool chunk_inc = 0;
1065 TPte* page_table = (TPte*)ptlin;
1066 for (pte_index=0; pte_index<(iChunkSize>>iPageSize); ++pte_index)
1068 if (size==limit) // have reached end of ram drive
1070 TPte pte = page_table[pte_index];
1071 if (PteIsPresent(pte))
1073 TPhysAddr pa=PtePhysAddr(pte, pte_index);
1074 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
1077 TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageMovable);
1078 __ASSERT_ALWAYS(r==KErrNone, Panic(ERecoverRamDriveBadPage));
1080 chunk_inc = iChunkSize;
1083 if (pte_index < (iChunkSize>>iPageSize) )
1085 // if we recovered pages in this page table, leave it in place
1088 // clear from here on
1089 ClearPageTable(ptid, pte_index);
1094 ClearRamDrive(chunk);
1095 __KTRACE_OPT(KMMU,Kern::Printf("Recovered RAM drive size %08x",size));
1096 if (size<TheSuperPage().iRamDriveSize)
1098 __KTRACE_OPT(KMMU,Kern::Printf("Truncating RAM drive from %08x to %08x", TheSuperPage().iRamDriveSize, size));
1099 TheSuperPage().iRamDriveSize=size;
1104 TInt MmuBase::AllocShadowPage(TLinAddr aRomAddr)
1106 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase:AllocShadowPage(%08x)", aRomAddr));
1107 aRomAddr &= ~iPageMask;
1108 TPhysAddr orig_phys = KPhysAddrInvalid;
1109 if (aRomAddr>=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize))
1110 orig_phys = LinearToPhysical(aRomAddr);
1111 __KTRACE_OPT(KMMU,Kern::Printf("OrigPhys = %08x",orig_phys));
1112 if (orig_phys == KPhysAddrInvalid)
1114 __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
1115 return KErrArgument;
1117 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(orig_phys);
1118 if (pi && pi->Type()==SPageInfo::EShadow)
1120 __KTRACE_OPT(KMMU,Kern::Printf("ROM address already shadowed"));
1121 return KErrAlreadyExists;
1123 TInt ptid = PageTableId(aRomAddr);
1124 __KTRACE_OPT(KMMU, Kern::Printf("Shadow PTID %d", ptid));
1128 newptid = AllocPageTable();
1129 __KTRACE_OPT(KMMU, Kern::Printf("New shadow PTID %d", newptid));
1131 return KErrNoMemory;
1133 PtInfo(ptid).SetShadow( (aRomAddr-iRomLinearBase)>>iChunkShift );
1134 InitShadowPageTable(ptid, aRomAddr, orig_phys);
1136 TPhysAddr shadow_phys;
1138 if (AllocRamPages(&shadow_phys, 1, EPageFixed) != KErrNone)
1140 __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
1144 FreePageTable(newptid);
1146 return KErrNoMemory;
1148 #ifdef BTRACE_KERNEL_MEMORY
1149 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
1150 ++Epoc::KernelMiscPages;
1152 InitShadowPage(shadow_phys, aRomAddr); // copy original ROM contents
1153 NKern::LockSystem();
1154 Pagify(ptid, aRomAddr);
1155 MapRamPages(ptid, SPageInfo::EShadow, (TAny*)orig_phys, (aRomAddr-iRomLinearBase), &shadow_phys, 1, iShadowPtePerm);
1156 NKern::UnlockSystem();
1159 NKern::LockSystem();
1160 AssignShadowPageTable(newptid, aRomAddr);
1161 NKern::UnlockSystem();
1163 FlushShadow(aRomAddr);
1164 __KTRACE_OPT(KMMU,Kern::Printf("AllocShadowPage successful"));
1168 TInt MmuBase::FreeShadowPage(TLinAddr aRomAddr)
1170 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreeShadowPage(%08x)", aRomAddr));
1171 aRomAddr &= ~iPageMask;
1172 TPhysAddr shadow_phys = KPhysAddrInvalid;
1173 if (aRomAddr>=iRomLinearBase || aRomAddr<=(iRomLinearEnd-iPageSize))
1174 shadow_phys = LinearToPhysical(aRomAddr);
1175 __KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys));
1176 if (shadow_phys == KPhysAddrInvalid)
1178 __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
1179 return KErrArgument;
1181 TInt ptid = PageTableId(aRomAddr);
1182 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys);
1183 if (ptid<0 || !pi || pi->Type()!=SPageInfo::EShadow)
1185 __KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address"));
1188 TPhysAddr orig_phys = (TPhysAddr)pi->Owner();
1189 DoUnmapShadowPage(ptid, aRomAddr, orig_phys);
1190 SPageTableInfo& pti = PtInfo(ptid);
1191 if (pti.Attribs()==SPageTableInfo::EShadow && --pti.iCount==0)
1193 TInt r = UnassignShadowPageTable(aRomAddr, orig_phys);
1195 FreePageTable(ptid);
1197 pti.SetGlobal(aRomAddr>>iChunkShift);
1200 FreePages(&shadow_phys, 1, EPageFixed);
1201 __KTRACE_OPT(KMMU,Kern::Printf("FreeShadowPage successful"));
1202 #ifdef BTRACE_KERNEL_MEMORY
1203 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
1204 --Epoc::KernelMiscPages;
1209 TInt MmuBase::FreezeShadowPage(TLinAddr aRomAddr)
1211 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreezeShadowPage(%08x)", aRomAddr));
1212 aRomAddr &= ~iPageMask;
1213 TPhysAddr shadow_phys = KPhysAddrInvalid;
1214 if (aRomAddr>=iRomLinearBase || aRomAddr<=(iRomLinearEnd-iPageSize))
1215 shadow_phys = LinearToPhysical(aRomAddr);
1216 __KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys));
1217 if (shadow_phys == KPhysAddrInvalid)
1219 __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
1220 return KErrArgument;
1222 TInt ptid = PageTableId(aRomAddr);
1223 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys);
1224 if (ptid<0 || pi==0)
1226 __KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address"));
1229 DoFreezeShadowPage(ptid, aRomAddr);
1230 __KTRACE_OPT(KMMU,Kern::Printf("FreezeShadowPage successful"));
1234 TInt MmuBase::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
1236 memcpy ((TAny*)aDest, (const TAny*)aSrc, aLength);
1240 void M::BTracePrime(TUint aCategory)
1244 #ifdef BTRACE_KERNEL_MEMORY
1245 // Must check for -1 as that is the default value of aCategory for
1246 // BTrace::Prime() which is intended to prime all categories that are
1247 // currently enabled via a single invocation of BTrace::Prime().
1248 if(aCategory==BTrace::EKernelMemory || (TInt)aCategory == -1)
1250 NKern::ThreadEnterCS();
1252 BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryInitialFree,TheSuperPage().iTotalRamSize);
1253 BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryCurrentFree,Kern::FreeRamInBytes());
1254 BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, Epoc::KernelMiscPages<<KPageShift);
1255 #ifdef __DEMAND_PAGING__
1256 if (DemandPaging::ThePager)
1257 BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,DemandPaging::ThePager->iMinimumPageCount << KPageShift);
1259 BTrace8(BTrace::EKernelMemory,BTrace::EKernelMemoryDrvPhysAlloc, Epoc::DriverAllocdPhysRam, -1);
1261 NKern::ThreadLeaveCS();
1265 #ifdef BTRACE_RAM_ALLOCATOR
1266 // Must check for -1 as that is the default value of aCategory for
1267 // BTrace::Prime() which is intended to prime all categories that are
1268 // currently enabled via a single invocation of BTrace::Prime().
1269 if(aCategory==BTrace::ERamAllocator || (TInt)aCategory == -1)
1271 NKern::ThreadEnterCS();
1273 Mmu::Get().iRamPageAllocator->SendInitialBtraceLogs();
1275 NKern::ThreadLeaveCS();
1281 /******************************************************************************
1282 * Code common to all virtual memory models
1283 ******************************************************************************/
1285 void RHeapK::Mutate(TInt aOffset, TInt aMaxLength)
1287 // Used by the kernel to mutate a fixed heap into a chunk heap.
1290 iMinLength += aOffset;
1291 iMaxLength = aMaxLength + aOffset;
1293 iChunkHandle = (TInt)K::HeapInfo.iChunk;
1294 iPageSize = M::PageSizeInBytes();
1295 iGrowBy = iPageSize;
1299 TInt M::PageSizeInBytes()
1304 TInt MmuBase::FreeRamInBytes()
1306 TInt free = iRamPageAllocator->FreeRamInBytes();
1308 free += iRamCache->NumberOfFreePages()<<iPageShift;
1312 /** Returns the amount of free RAM currently available.
1314 @return The number of bytes of free RAM currently available.
1317 EXPORT_C TInt Kern::FreeRamInBytes()
1319 return MmuBase::TheMmu->FreeRamInBytes();
1323 /** Rounds up the argument to the size of a MMU page.
1325 To find out the size of a MMU page:
1327 size = Kern::RoundToPageSize(1);
1330 @param aSize Value to round up
1333 EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize)
1335 return MmuBase::RoundToPageSize(aSize);
1339 /** Rounds up the argument to the amount of memory mapped by a MMU page
1342 Chunks occupy one or more consecutive page directory entries (PDE) and
1343 therefore the amount of linear and physical memory allocated to a chunk is
1344 always a multiple of the amount of memory mapped by a page directory entry.
1346 EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize)
1348 return MmuBase::RoundToChunkSize(aSize);
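// Illustrative note (not in the original source): on a typical ARM configuration
// with 4KB pages and 1MB chunks, Kern::RoundToPageSize(1) returns 0x1000 and
// Kern::RoundToChunkSize(0x120000) returns 0x200000. The exact values depend on
// the memory model in use and are assumptions here.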
1353 Allows the variant to specify the details of the RAM zones. This should be invoked
1354 by the variant in its implementation of the pure virtual function Asic::Init1().
1356 There are some limitations to how the RAM zones can be specified:
1357 - Each RAM zone's address space must be distinct and not overlap with any
1358 other RAM zone's address space
1359 - Each RAM zone's address space must have a size that is a multiple of the
1360 ASIC's MMU small page size and be aligned to the ASIC's MMU small page size,
1361 usually 4KB on ARM MMUs.
1362 - When taken together all of the RAM zones must cover the whole of the physical RAM
1363 address space as specified by the bootstrap in the SuperPage members iTotalRamSize
1365 - There can be no more than KMaxRamZones RAM zones specified by the base port
1367 Note the verification of the RAM zone data is not performed here but by the ram
1368 allocator later in the boot up sequence. This is because it is only possible to
1369 verify the zone data once the physical RAM configuration has been read from
1370 the super page. Any verification errors result in a "RAM-ALLOC" panic
1371 faulting the kernel during initialisation.
1373 @param aZones Pointer to an array of SRamZone structs containing the details for all
1374 the zones. The end of the array is specified by an element with an iSize of zero. The array must
1375 remain in memory at least until the kernel has successfully booted.
1377 @param aCallback Pointer to a call back function that the kernel may invoke to request
1378 one of the operations specified by TRamZoneOp.
1380 @return KErrNone if successful, otherwise one of the system wide error codes
1384 @see TRamZoneCallback
1386 EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
1388 // Ensure this is only called once and only while we are initialising the kernel
1389 if (!K::Initialising || MmuBase::RamZoneConfig != NULL)
1390 {// fault kernel, won't return
1391 K::Fault(K::EBadSetRamZoneConfig);
1396 return KErrArgument;
1398 MmuBase::RamZoneConfig=aZones;
1399 MmuBase::RamZoneCallback=aCallback;
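// Illustrative sketch (not part of the original file): how a variant might describe
// its RAM zones from its Asic::Init1() implementation. The zone IDs, addresses and
// sizes below, and the use of a NULL callback, are assumptions for illustration only.
static SRamZone TheExampleRamZones[3];

void ExampleVariantInit1()
	{
	// Two 32MB zones covering 64MB of RAM at 0x80000000, then the terminating element.
	TheExampleRamZones[0].iBase = 0x80000000u; TheExampleRamZones[0].iSize = 0x02000000u;
	TheExampleRamZones[0].iId = 1;             TheExampleRamZones[0].iFlags = 0;
	TheExampleRamZones[1].iBase = 0x82000000u; TheExampleRamZones[1].iSize = 0x02000000u;
	TheExampleRamZones[1].iId = 2;             TheExampleRamZones[1].iFlags = KRamZoneFlagNoAlloc;
	TheExampleRamZones[2].iSize = 0;           // iSize==0 terminates the array
	// Callback omitted (NULL) for brevity; a real variant would normally supply a
	// TRamZoneCallback so the kernel can request RAM zone power operations.
	TInt r = Epoc::SetRamZoneConfig(TheExampleRamZones, NULL);
	__ASSERT_ALWAYS(r==KErrNone, Kern::Fault("EXAMPLE",0));
	}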
1405 Modify the specified RAM zone's flags.
1407 This allows the BSP or device driver to configure which type of pages, if any,
1408 can be allocated into a RAM zone by the system.
1410 Note: updating a RAM zone's flags can result in
1411 1 - memory allocations failing despite there being enough free RAM in the system.
1412 2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone()
1413 or TRamDefragRequest::DefragRam() never succeeding.
1415 The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc
1416 are intended to be used with this method.
1418 @param aId The ID of the RAM zone to modify.
1419 @param aClearMask The bit mask to clear, each flag of which must already be set on the RAM zone.
1420 @param aSetMask The bit mask to set.
1422 @return KErrNone on success, KErrArgument if the RAM zone of aId not found or if
1423 aSetMask contains invalid flag bits.
1425 @see TRamDefragRequest::EmptyRamZone()
1426 @see TRamDefragRequest::ClaimRamZone()
1427 @see TRamDefragRequest::DefragRam()
1429 @see KRamZoneFlagDiscardOnly
1430 @see KRamZoneFlagMovAndDisOnly
1431 @see KRamZoneFlagNoAlloc
1433 EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
1435 MmuBase& m = *MmuBase::TheMmu;
1438 TInt ret = m.ModifyRamZoneFlags(aId, aClearMask, aSetMask);
1444 TInt MmuBase::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
1446 return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask);
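// Illustrative sketch (not part of the original file): stopping any further
// allocation into a RAM zone, e.g. before attempting to empty it and power it down,
// then re-enabling allocation later. The zone ID (2) is hypothetical.
void ExampleCloseAndReopenZone()
	{
	TInt r = Epoc::ModifyRamZoneFlags(2, 0, KRamZoneFlagNoAlloc);	// set no-alloc
	// ... empty or claim the zone, power it down and back up again ...
	if (r == KErrNone)
		r = Epoc::ModifyRamZoneFlags(2, KRamZoneFlagNoAlloc, 0);	// clear it again
	}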
1451 Gets the current count of a particular RAM zone's pages by type.
1453 @param aId The ID of the RAM zone to enquire about
1454 @param aPageData If successful, on return this contains the page count
1456 @return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or
1457 one of the system wide error codes
1459 @pre Calling thread must be in a critical section.
1460 @pre Interrupts must be enabled.
1461 @pre Kernel must be unlocked.
1462 @pre No fast mutex can be held.
1463 @pre Call in a thread context.
1465 @see SRamZonePageCount
1467 EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
1469 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount");
1471 MmuBase& m = *MmuBase::TheMmu;
1472 MmuBase::Wait(); // Gets RAM alloc mutex
1474 TInt r = m.GetRamZonePageCount(aId, aPageData);
1481 TInt MmuBase::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
1483 return iRamPageAllocator->GetZonePageCount(aId, aPageData);
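// Illustrative sketch (not part of the original file): querying the page counts of
// RAM zone 1 (hypothetical ID) from a kernel-side thread, observing the preconditions
// listed above.
void ExampleQueryZone()
	{
	SRamZonePageCount pageData;
	NKern::ThreadEnterCS();
	TInt r = Epoc::GetRamZonePageCount(1, pageData);
	NKern::ThreadLeaveCS();
	if (r == KErrNone)
		{
		// pageData now holds the zone's per-type page counts
		// (free/fixed/movable/discardable; member names not shown here).
		}
	}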
1487 Replace a page of the system's execute-in-place (XIP) ROM image with a page of
1488 RAM having the same contents. This RAM can subsequently be written to in order
1489 to apply patches to the XIP ROM or to insert software breakpoints for debugging
1491 Call Epoc::FreeShadowPage() when you wish to revert to the original ROM page.
1493 @param aRomAddr The virtual address of the ROM page to be replaced.
1494 @return KErrNone if the operation completed successfully.
1495 KErrArgument if the specified address is not a valid XIP ROM address.
1496 KErrNoMemory if the operation failed due to insufficient free RAM.
1497 KErrAlreadyExists if the XIP ROM page at the specified address has
1498 already been shadowed by a RAM page.
1500 @pre Calling thread must be in a critical section.
1501 @pre Interrupts must be enabled.
1502 @pre Kernel must be unlocked.
1503 @pre No fast mutex can be held.
1504 @pre Call in a thread context.
1506 EXPORT_C TInt Epoc::AllocShadowPage(TLinAddr aRomAddr)
1508 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocShadowPage");
1511 r=M::LockRegion(aRomAddr,1);
1512 if(r!=KErrNone && r!=KErrNotFound)
1514 MmuBase& m=*MmuBase::TheMmu;
1516 r=m.AllocShadowPage(aRomAddr);
1519 M::UnlockRegion(aRomAddr,1);
1524 Copies data into shadow memory. Source data is presumed to be in Kernel memory.
1526 @param aSrc Data to copy from.
1527 @param aDest Address to copy into.
1528 @param aLength Number of bytes to copy. A maximum of 32 bytes of data can be copied.
1530 @return KErrNone if the operation completed successfully.
1531 KErrArgument if any part of the destination region is not a shadow page or
1532 if aLength is greater than 32 bytes.
1534 @pre Calling thread must be in a critical section.
1535 @pre Interrupts must be enabled.
1536 @pre Kernel must be unlocked.
1537 @pre No fast mutex can be held.
1538 @pre Call in a thread context.
1540 EXPORT_C TInt Epoc::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
1542 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::CopyToShadowMemory");
1545 return KErrArgument;
1546 MmuBase& m=*MmuBase::TheMmu;
1547 // This is a simple copy operation except on platforms with __CPU_MEMORY_TYPE_REMAPPING defined,
1548 // where the shadow page is read-only and has to be remapped before it can be written to.
1549 return m.CopyToShadowMemory(aDest, aSrc, aLength);
1552 Revert an XIP ROM address which has previously been shadowed to the original
1555 @param aRomAddr The virtual address of the ROM page to be reverted.
1556 @return KErrNone if the operation completed successfully.
1557 KErrArgument if the specified address is not a valid XIP ROM address.
1558 KErrGeneral if the specified address has not previously been shadowed
1559 using Epoc::AllocShadowPage().
1561 @pre Calling thread must be in a critical section.
1562 @pre Interrupts must be enabled.
1563 @pre Kernel must be unlocked.
1564 @pre No fast mutex can be held.
1565 @pre Call in a thread context.
1567 EXPORT_C TInt Epoc::FreeShadowPage(TLinAddr aRomAddr)
1569 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreeShadowPage");
1570 MmuBase& m=*MmuBase::TheMmu;
1572 TInt r=m.FreeShadowPage(aRomAddr);
1575 M::UnlockRegion(aRomAddr,1);
1581 Change the permissions on an XIP ROM address which has previously been shadowed
1582 by a RAM page so that the RAM page may no longer be written to.
1584 Note: On the latest platforms (those that use the reduced set of access permissions:
1585 arm11mpcore, arm1176, cortex) the shadow page is implemented with read-only permissions. Therefore, calling
1586 this function is not necessary, as the shadow page is already created 'frozen'.
1588 @param aRomAddr The virtual address of the shadow RAM page to be frozen.
1589 @return KErrNone if the operation completed successfully.
1590 KErrArgument if the specified address is not a valid XIP ROM address.
1591 KErrGeneral if the specified address has not previously been shadowed
1592 using Epoc::AllocShadowPage().
1594 @pre Calling thread must be in a critical section.
1595 @pre Interrupts must be enabled.
1596 @pre Kernel must be unlocked.
1597 @pre No fast mutex can be held.
1598 @pre Call in a thread context.
1600 EXPORT_C TInt Epoc::FreezeShadowPage(TLinAddr aRomAddr)
1602 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreezeShadowPage");
1603 MmuBase& m=*MmuBase::TheMmu;
1605 TInt r=m.FreezeShadowPage(aRomAddr);
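// Illustrative sketch (not part of the original file): patching one 32-bit word of
// the XIP ROM via the shadow page API and reverting on failure. aRomAddr is assumed
// to be a valid, word-aligned XIP ROM address that does not straddle a page boundary;
// error handling is minimal.
TInt ExamplePatchRomWord(TLinAddr aRomAddr, TUint32 aNewValue)
	{
	NKern::ThreadEnterCS();
	TInt r = Epoc::AllocShadowPage(aRomAddr & ~KPageMask);
	if (r == KErrNone)
		{
		r = Epoc::CopyToShadowMemory(aRomAddr, (TLinAddr)&aNewValue, sizeof(TUint32));
		if (r == KErrNone)
			r = Epoc::FreezeShadowPage(aRomAddr & ~KPageMask);	// make the shadow read-only
		else
			Epoc::FreeShadowPage(aRomAddr & ~KPageMask);		// revert on failure
		}
	NKern::ThreadLeaveCS();
	return r;
	}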
1612 Allocate a block of physically contiguous RAM with a physical address aligned
1613 to a specified power of 2 boundary.
1614 When the RAM is no longer required it should be freed using
1615 Epoc::FreePhysicalRam()
1617 @param aSize The size in bytes of the required block. The specified size
1618 is rounded up to the page size, since only whole pages of
1619 physical RAM can be allocated.
1620 @param aPhysAddr Receives the physical address of the base of the block on
1621 successful allocation.
1622 @param aAlign Specifies the number of least significant bits of the
1623 physical address which are required to be zero. If a value
1624 less than log2(page size) is specified, page alignment is
1625 assumed. Pass 0 for aAlign if there are no special alignment
1626 constraints (other than page alignment).
1627 @return KErrNone if the allocation was successful.
1628 KErrNoMemory if a sufficiently large physically contiguous block of free
1629 RAM with the specified alignment could not be found.
1630 @pre Calling thread must be in a critical section.
1631 @pre Interrupts must be enabled.
1632 @pre Kernel must be unlocked.
1633 @pre No fast mutex can be held.
1634 @pre Call in a thread context.
1635 @pre Can be used in a device driver.
1637 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
1639 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam");
1640 MmuBase& m=*MmuBase::TheMmu;
1642 TInt r=m.AllocPhysicalRam(aSize,aPhysAddr,aAlign);
1645 // For the sake of platform security we have to clear the memory. E.g. the driver
1646 // could assign it to a chunk visible to user side.
1647 m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1));
1648 #ifdef BTRACE_KERNEL_MEMORY
1649 TUint size = Kern::RoundToPageSize(aSize);
1650 BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr);
1651 Epoc::DriverAllocdPhysRam += size;
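// Illustrative sketch (not part of the original file): a device driver allocating a
// 64KB physically contiguous, 64KB-aligned DMA buffer and freeing it when finished.
// The size and alignment are example values.
TInt ExampleAllocDmaBuffer(TPhysAddr& aPhys)
	{
	NKern::ThreadEnterCS();
	TInt r = Epoc::AllocPhysicalRam(0x10000, aPhys, 16);	// 2^16 = 64KB alignment
	if (r == KErrNone)
		{
		// ... map aPhys with a hardware chunk, perform the DMA transfer ...
		r = Epoc::FreePhysicalRam(aPhys, 0x10000);
		}
	NKern::ThreadLeaveCS();
	return r;
	}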
1659 Allocate a block of physically contiguous RAM with a physical address aligned
1660 to a specified power of 2 boundary from the specified zone.
1661 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1663 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
1664 to allocate regardless of whether the other flags are set for the specified RAM zones
1667 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1669 @param aZoneId The ID of the zone to attempt to allocate from.
1670 @param aSize The size in bytes of the required block. The specified size
1671 is rounded up to the page size, since only whole pages of
1672 physical RAM can be allocated.
1673 @param aPhysAddr Receives the physical address of the base of the block on
1674 successful allocation.
1675 @param aAlign Specifies the number of least significant bits of the
1676 physical address which are required to be zero. If a value
1677 less than log2(page size) is specified, page alignment is
1678 assumed. Pass 0 for aAlign if there are no special alignment
1679 constraints (other than page alignment).
1680 @return KErrNone if the allocation was successful.
1681 KErrNoMemory if a sufficiently large physically contiguous block of free
1682 RAM with the specified alignment could not be found within the specified
1684 KErrArgument if a RAM zone of the specified ID can't be found or if the
1685 RAM zone has a total number of physical pages which is less than those
1686 requested for the allocation.
1688 @pre Calling thread must be in a critical section.
1689 @pre Interrupts must be enabled.
1690 @pre Kernel must be unlocked.
1691 @pre No fast mutex can be held.
1692 @pre Call in a thread context.
1693 @pre Can be used in a device driver.
1695 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
1697 return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign);
1702 Allocate a block of physically contiguous RAM with a physical address aligned
1703 to a specified power of 2 boundary from the specified RAM zones.
1704 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1706 RAM will be allocated into the RAM zones in the order they are specified in the
1707 aZoneIdList parameter. If contiguous allocations are intended to span RAM zones
1708 when required, then aZoneIdList should list the RAM zones in ascending
1709 physical address order.
1711 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
1712 to allocate regardless of whether the other flags are set for the specified RAM zones
1715 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1717 @param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to
1718 attempt to allocate from.
1719 @param aZoneIdCount The number of RAM zone IDs contained in aZoneIdList.
1720 @param aSize The size in bytes of the required block. The specified size
1721 is rounded up to the page size, since only whole pages of
1722 physical RAM can be allocated.
1723 @param aPhysAddr Receives the physical address of the base of the block on
1724 successful allocation.
1725 @param aAlign Specifies the number of least significant bits of the
1726 physical address which are required to be zero. If a value
1727 less than log2(page size) is specified, page alignment is
1728 assumed. Pass 0 for aAlign if there are no special alignment
1729 constraints (other than page alignment).
1730 @return KErrNone if the allocation was successful.
1731 KErrNoMemory if a sufficiently large physically contiguous block of free
1732 RAM with the specified alignment could not be found within the specified
1734 KErrArgument if a RAM zone of a specified ID can't be found or if the
1735 RAM zones have a total number of physical pages which is less than those
1736 requested for the allocation.
1738 @pre Calling thread must be in a critical section.
1739 @pre Interrupts must be enabled.
1740 @pre Kernel must be unlocked.
1741 @pre No fast mutex can be held.
1742 @pre Call in a thread context.
1743 @pre Can be used in a device driver.
1745 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
1747 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam");
1748 MmuBase& m=*MmuBase::TheMmu;
1750 TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
1753 // For the sake of platform security we have to clear the memory. E.g. the driver
1754 // could assign it to a chunk visible to user side.
1755 m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1));
1756 #ifdef BTRACE_KERNEL_MEMORY
1757 TUint size = Kern::RoundToPageSize(aSize);
1758 BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr);
1759 Epoc::DriverAllocdPhysRam += size;
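// Illustrative sketch (not part of the original file): allocating a 16KB contiguous,
// page-aligned block restricted to two specific RAM zones, tried in the order given.
// The zone IDs (3 and 4) are hypothetical.
TInt ExampleZoneAlloc(TPhysAddr& aPhys)
	{
	TUint zones[2] = { 3, 4 };
	NKern::ThreadEnterCS();
	TInt r = Epoc::ZoneAllocPhysicalRam(zones, 2, 4*KPageSize, aPhys, 0);
	NKern::ThreadLeaveCS();
	return r;
	}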
1768 Attempt to allocate discontiguous RAM pages.
1770 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1772 @param aNumPages The number of discontiguous pages required to be allocated
1773 @param aPageList This should be a pointer to a previously allocated array of
1774 aNumPages TPhysAddr elements. On a successful allocation it
1775 will receive the physical addresses of each page allocated.
1777 @return KErrNone if the allocation was successful.
1778 KErrNoMemory if the requested number of pages can't be allocated
1780 @pre Calling thread must be in a critical section.
1781 @pre Interrupts must be enabled.
1782 @pre Kernel must be unlocked.
1783 @pre No fast mutex can be held.
1784 @pre Call in a thread context.
1785 @pre Can be used in a device driver.
1787 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
1789 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam");
1790 MmuBase& m = *MmuBase::TheMmu;
1792 TInt r = m.AllocPhysicalRam(aNumPages, aPageList);
1795 // For the sake of platform security we have to clear the memory. E.g. the driver
1796 // could assign it to a chunk visible to user side.
1797 m.ClearPages(aNumPages, aPageList);
1799 #ifdef BTRACE_KERNEL_MEMORY
1800 if (BTrace::CheckFilter(BTrace::EKernelMemory))
1801 {// Only loop round each page if EKernelMemory tracing is enabled
1802 TPhysAddr* pAddr = aPageList;
1803 TPhysAddr* pAddrEnd = aPageList + aNumPages;
1804 while (pAddr < pAddrEnd)
1806 BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
1807 Epoc::DriverAllocdPhysRam += KPageSize;
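// Illustrative sketch (not part of the original file): allocating eight discontiguous
// physical pages (e.g. for a scatter-gather DMA transfer) and releasing them with the
// matching page-list overload of Epoc::FreePhysicalRam, assumed to be available.
TInt ExampleAllocPageList()
	{
	TPhysAddr pages[8];
	NKern::ThreadEnterCS();
	TInt r = Epoc::AllocPhysicalRam(8, pages);
	if (r == KErrNone)
		{
		// ... program each entry of pages[] into the DMA controller ...
		r = Epoc::FreePhysicalRam(8, pages);
		}
	NKern::ThreadLeaveCS();
	return r;
	}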
1818 Attempt to allocate discontiguous RAM pages from the specified zone.
1820 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
1821 to allocate regardless of whether the other flags are set for the specified RAM zones
1824 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1826 @param aZoneId The ID of the zone to attempt to allocate from.
1827 @param aNumPages The number of discontiguous pages required to be allocated
1828 from the specified zone.
1829 @param aPageList This should be a pointer to a previously allocated array of
1830 aNumPages TPhysAddr elements. On a successful
1831 allocation it will receive the physical addresses of each
1833 @return KErrNone if the allocation was successful.
1834 KErrNoMemory if the requested number of pages can't be allocated from the
1836 KErrArgument if a RAM zone of the specified ID can't be found or if the
1837 RAM zone has a total number of physical pages which is less than those
1838 requested for the allocation.
1840 @pre Calling thread must be in a critical section.
1841 @pre Interrupts must be enabled.
1842 @pre Kernel must be unlocked.
1843 @pre No fast mutex can be held.
1844 @pre Call in a thread context.
1845 @pre Can be used in a device driver.
1847 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList)
1849 return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList);
1854 Attempt to allocate discontiguous RAM pages from the specified RAM zones.
1855 The RAM pages will be allocated into the RAM zones in the order that they are specified
1856 in the aZoneIdList parameter; the RAM zone preferences will be ignored.
1858 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
1859 to allocate regardless of whether the other flags are set for the specified RAM zones
1862 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1864 @param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to
1865 attempt to allocate from.
1866 @param aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList.
1867 @param aNumPages The number of discontiguous pages required to be allocated
1868 from the specified zones.
1869 @param aPageList This should be a pointer to a previously allocated array of
1870 aNumPages TPhysAddr elements. On a successful
1871 allocation it will receive the physical addresses of each
1873 @return KErrNone if the allocation was successful.
1874 KErrNoMemory if the requested number of pages can't be allocated from the
1876 KErrArgument if a RAM zone of a specified ID can't be found or if the
1877 RAM zones have a total number of physical pages which is less than those
1878 requested for the allocation.
1880 @pre Calling thread must be in a critical section.
1881 @pre Interrupts must be enabled.
1882 @pre Kernel must be unlocked.
1883 @pre No fast mutex can be held.
1884 @pre Call in a thread context.
1885 @pre Can be used in a device driver.
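An illustrative sketch of a call sequence (the zone IDs are hypothetical values
that would normally come from the variant's RAM zone configuration):

@code
	TUint zones[2] = { 1, 2 };                          // hypothetical RAM zone IDs
	TPhysAddr pages[8];
	TInt r = Epoc::ZoneAllocPhysicalRam(zones, 2, 8, pages);
	if (r == KErrNone)
		{
		// pages[] now holds the physical address of each allocated page
		Epoc::FreePhysicalRam(8, pages);
		}
@endcode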
1887 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
1889 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam");
1890 MmuBase& m = *MmuBase::TheMmu;
1892 TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList);
1895 // For the sake of platform security we have to clear the memory. E.g. the driver
1896 // could assign it to a chunk visible to user side.
1897 m.ClearPages(aNumPages, aPageList);
1899 #ifdef BTRACE_KERNEL_MEMORY
1900 if (BTrace::CheckFilter(BTrace::EKernelMemory))
1901 {// Only loop round each page if EKernelMemory tracing is enabled
1902 TPhysAddr* pAddr = aPageList;
1903 TPhysAddr* pAddrEnd = aPageList + aNumPages;
1904 while (pAddr < pAddrEnd)
1906 BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
1907 Epoc::DriverAllocdPhysRam += KPageSize;
1917 Free a previously-allocated block of physically contiguous RAM.
1919 Specifying one of the following may cause the system to panic:
1920 a) an invalid physical RAM address.
1921 b) valid physical RAM addresses where some had not been previously allocated.
1922 c) an address not aligned to a page boundary.
1924 @param aPhysAddr The physical address of the base of the block to be freed.
1925 This must be the address returned by a previous call to
1926 Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(),
1927 Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone().
1928 @param aSize The size in bytes of the required block. The specified size
1929 is rounded up to the page size, since only whole pages of
1930 physical RAM can be allocated.
1931 @return KErrNone if the operation was successful.
1935 @pre Calling thread must be in a critical section.
1936 @pre Interrupts must be enabled.
1937 @pre Kernel must be unlocked.
1938 @pre No fast mutex can be held.
1939 @pre Call in a thread context.
1940 @pre Can be used in a device driver.
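A minimal sketch of a matched allocate/free pair (the 8KB size is illustrative only):

@code
	TPhysAddr physAddr;
	TInt r = Epoc::AllocPhysicalRam(0x2000, physAddr);  // two contiguous pages
	if (r == KErrNone)
		{
		// ... use the block ...
		r = Epoc::FreePhysicalRam(physAddr, 0x2000);    // same base and size
		}
@endcode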
1942 EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
1944 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
1945 MmuBase& m=*MmuBase::TheMmu;
1947 TInt r=m.FreePhysicalRam(aPhysAddr,aSize);
1948 #ifdef BTRACE_KERNEL_MEMORY
1951 TUint size = Kern::RoundToPageSize(aSize);
1952 BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, size, aPhysAddr);
1953 Epoc::DriverAllocdPhysRam -= size;
1962 Free a number of physical RAM pages that were previously allocated using
1963 Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam().
1965 Specifying one of the following may cause the system to panic:
1966 a) an invalid physical RAM address.
1967 b) valid physical RAM addresses where some had not been previously allocated.
1968 c) an address not aligned to a page boundary.
1970 @param aNumPages The number of pages to be freed.
1971 @param aPageList An array of aNumPages TPhysAddr elements, where each element
1972 contains the physical address of a page to be freed.
1973 This must be the same set of addresses as those returned by a
1974 previous call to Epoc::AllocPhysicalRam() or
1975 Epoc::ZoneAllocPhysicalRam().
1976 @return KErrNone if the operation was successful.
1978 @pre Calling thread must be in a critical section.
1979 @pre Interrupts must be enabled.
1980 @pre Kernel must be unlocked.
1981 @pre No fast mutex can be held.
1982 @pre Call in a thread context.
1983 @pre Can be used in a device driver.
1986 EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
1988 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
1989 MmuBase& m=*MmuBase::TheMmu;
1991 TInt r=m.FreePhysicalRam(aNumPages, aPageList);
1992 #ifdef BTRACE_KERNEL_MEMORY
1993 if (r == KErrNone && BTrace::CheckFilter(BTrace::EKernelMemory))
1994 {// Only loop round each page if EKernelMemory tracing is enabled
1995 TPhysAddr* pAddr = aPageList;
1996 TPhysAddr* pAddrEnd = aPageList + aNumPages;
1997 while (pAddr < pAddrEnd)
1999 BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, KPageSize, *pAddr++);
2000 Epoc::DriverAllocdPhysRam -= KPageSize;
2010 Allocate a specific block of physically contiguous RAM, specified by physical
2011 base address and size.
2012 If and when the RAM is no longer required it should be freed using
2013 Epoc::FreePhysicalRam()
2015 @param aPhysAddr The physical address of the base of the required block.
2016 @param aSize The size in bytes of the required block. The specified size
2017 is rounded up to the page size, since only whole pages of
2018 physical RAM can be allocated.
2019 @return KErrNone if the operation was successful.
2020 KErrArgument if the range of physical addresses specified included some
2021 which are not valid physical RAM addresses.
2022 KErrInUse if the range of physical addresses specified are all valid
2023 physical RAM addresses but some of them have already been
2024 allocated for other purposes.
2025 @pre Calling thread must be in a critical section.
2026 @pre Interrupts must be enabled.
2027 @pre Kernel must be unlocked.
2028 @pre No fast mutex can be held.
2029 @pre Call in a thread context.
2030 @pre Can be used in a device driver.
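An illustrative sketch (the physical address and size stand in for a region known to
the board port, e.g. one reserved by the bootstrap; they are not real values):

@code
	const TPhysAddr KMyReservedBase = 0x30000000;       // hypothetical base address
	const TInt KMyReservedSize = 0x10000;               // hypothetical size
	TInt r = Epoc::ClaimPhysicalRam(KMyReservedBase, KMyReservedSize);
	if (r == KErrNone)
		{
		// the region is now owned by the caller; release it later with
		// Epoc::FreePhysicalRam(KMyReservedBase, KMyReservedSize)
		}
@endcode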
2032 EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
2034 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam");
2035 MmuBase& m=*MmuBase::TheMmu;
2037 TInt r=m.ClaimPhysicalRam(aPhysAddr,aSize);
2038 #ifdef BTRACE_KERNEL_MEMORY
2041 TUint32 pa=aPhysAddr;
2043 m.RoundUpRangeToPageSize(pa,size);
2044 BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, pa);
2045 Epoc::DriverAllocdPhysRam += size;
2054 Translate a virtual address to the corresponding physical address.
2056 @param aLinAddr The virtual address to be translated.
2057 @return The physical address corresponding to the given virtual address, or
2058 KPhysAddrInvalid if the specified virtual address is unmapped.
2059 @pre Interrupts must be enabled.
2060 @pre Kernel must be unlocked.
2061 @pre Call in a thread context.
2062 @pre Can be used in a device driver.
2063 @pre Hold system lock if there is any possibility that the virtual address is
2064 unmapped, may become unmapped, or may be remapped during the operation.
2065 This will potentially be the case unless the virtual address refers to a
2066 hardware chunk or shared chunk under the control of the driver calling this
2069 EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr)
2071 // This precondition is violated by various parts of the system under some conditions,
2072 // e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by
2073 // a higher-level RTOS for which these conditions are meaningless. Thus, it's been
2074 // disabled for now.
2075 // CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical");
2076 MmuBase& m=*MmuBase::TheMmu;
2077 TPhysAddr pa=m.LinearToPhysical(aLinAddr);
2082 EXPORT_C TInt TInternalRamDrive::MaxSize()
2084 return TheSuperPage().iRamDriveSize+Kern::FreeRamInBytes();
2088 /******************************************************************************
2090 ******************************************************************************/
2091 TLinearSection* TLinearSection::New(TLinAddr aBase, TLinAddr aEnd)
2093 __KTRACE_OPT(KMMU,Kern::Printf("TLinearSection::New(%08x,%08x)", aBase, aEnd));
2094 MmuBase& m=*MmuBase::TheMmu;
2095 TUint npdes=(aEnd-aBase)>>m.iChunkShift;
2096 TInt nmapw=(npdes+31)>>5;
2097 TInt memsz=sizeof(TLinearSection)+(nmapw-1)*sizeof(TUint32);
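	// nmapw is the number of 32-bit words needed for a bitmap with one bit per PDE;
	// TBitMapAllocator already embeds the first word, hence the (nmapw-1) extra words here.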
2098 TLinearSection* p=(TLinearSection*)Kern::Alloc(memsz);
2101 new(&p->iAllocator) TBitMapAllocator(npdes, ETrue);
2105 __KTRACE_OPT(KMMU,Kern::Printf("TLinearSection at %08x", p));
2109 /******************************************************************************
2110 * Address allocator for HW chunks
2111 ******************************************************************************/
2112 THwChunkPageTable::THwChunkPageTable(TInt aIndex, TInt aSize, TPde aPdePerm)
2113 : THwChunkRegion(aIndex, 0, aPdePerm),
2114 iAllocator(aSize, ETrue)
2118 THwChunkPageTable* THwChunkPageTable::New(TInt aIndex, TPde aPdePerm)
2120 __KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable::New(%03x,%08x)",aIndex,aPdePerm));
2121 MmuBase& m=*MmuBase::TheMmu;
2122 TInt pdepages=m.iChunkSize>>m.iPageShift;
2123 TInt nmapw=(pdepages+31)>>5;
2124 TInt memsz=sizeof(THwChunkPageTable)+(nmapw-1)*sizeof(TUint32);
2125 THwChunkPageTable* p=(THwChunkPageTable*)Kern::Alloc(memsz);
2127 new (p) THwChunkPageTable(aIndex, pdepages, aPdePerm);
2128 __KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable at %08x",p));
2132 THwChunkAddressAllocator::THwChunkAddressAllocator()
2136 THwChunkAddressAllocator* THwChunkAddressAllocator::New(TInt aAlign, TLinearSection* aSection)
2138 __KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator::New(%d,%08x)",aAlign,aSection));
2139 THwChunkAddressAllocator* p=new THwChunkAddressAllocator;
2143 p->iSection=aSection;
2145 __KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator at %08x",p));
2149 THwChunkRegion* THwChunkAddressAllocator::NewRegion(TInt aIndex, TInt aSize, TPde aPdePerm)
2151 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion(index=%x, size=%x, pde=%08x)",aIndex,aSize,aPdePerm));
2152 THwChunkRegion* p=new THwChunkRegion(aIndex, aSize, aPdePerm);
2155 TInt r=InsertInOrder(p, Order);
2156 __KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r));
2160 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion ret %08x)",p));
2164 THwChunkPageTable* THwChunkAddressAllocator::NewPageTable(TInt aIndex, TPde aPdePerm, TInt aInitB, TInt aInitC)
2166 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable(index=%x, pde=%08x, iB=%d, iC=%d)",aIndex,aPdePerm,aInitB,aInitC));
2167 THwChunkPageTable* p=THwChunkPageTable::New(aIndex, aPdePerm);
2170 TInt r=InsertInOrder(p, Order);
2171 __KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r));
2175 p->iAllocator.Alloc(aInitB, aInitC);
2177 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable ret %08x)",p));
2181 TLinAddr THwChunkAddressAllocator::SearchExisting(TInt aNumPages, TInt aPageAlign, TInt aPageOffset, TPde aPdePerm)
2183 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx np=%03x align=%d offset=%03x pdeperm=%08x",
2184 aNumPages, aPageAlign, aPageOffset, aPdePerm));
2187 return 0; // don't try to access [0] if array empty!
2188 THwChunkPageTable** pp=(THwChunkPageTable**)&(*this)[0];
2189 THwChunkPageTable** ppE=pp+c;
2192 THwChunkPageTable* p=*pp++;
2193 if (p->iRegionSize!=0 || p->iPdePerm!=aPdePerm)
2194 continue; // if not page table or PDE permissions wrong, we can't use it
2195 TInt r=p->iAllocator.AllocAligned(aNumPages, aPageAlign, -aPageOffset, EFalse);
2196 __KTRACE_OPT(KMMU, Kern::Printf("r=%d", r));
2198 continue; // not enough space in this page table
2200 // got enough space in existing page table, so use it
2201 p->iAllocator.Alloc(r, aNumPages);
2202 MmuBase& m=*MmuBase::TheMmu;
2203 TLinAddr a = iSection->iBase + (TLinAddr(p->iIndex)<<m.iChunkShift) + (r<<m.iPageShift);
2204 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx OK, returning %08x", a));
2207 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx not found"));
2211 TLinAddr THwChunkAddressAllocator::Alloc(TInt aSize, TInt aAlign, TInt aOffset, TPde aPdePerm)
2213 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc size=%08x align=%d offset=%08x pdeperm=%08x",
2214 aSize, aAlign, aOffset, aPdePerm));
2215 MmuBase& m=*MmuBase::TheMmu;
2216 TInt npages=(aSize+m.iPageMask)>>m.iPageShift;
2217 TInt align=Max(aAlign,iAlign);
2218 if (align>m.iChunkShift)
2220 TInt aligns=1<<align;
2221 TInt alignm=aligns-1;
2222 TInt offset=(aOffset&alignm)>>m.iPageShift;
2223 TInt pdepages=m.iChunkSize>>m.iPageShift;
2224 TInt pdepageshift=m.iChunkShift-m.iPageShift;
2225 MmuBase::WaitHwChunk();
2226 if (npages<pdepages)
2228 // for small regions, first try to share an existing page table
2229 TLinAddr a=SearchExisting(npages, align-m.iPageShift, offset, aPdePerm);
2232 MmuBase::SignalHwChunk();
2237 // large region or no free space in existing page tables - allocate whole PDEs
2238 TInt npdes=(npages+offset+pdepages-1)>>pdepageshift;
2239 __KTRACE_OPT(KMMU, Kern::Printf("Allocate %d PDEs", npdes));
2241 TInt ix=iSection->iAllocator.AllocConsecutive(npdes, EFalse);
2243 iSection->iAllocator.Alloc(ix, npdes);
2247 a = iSection->iBase + (TLinAddr(ix)<<m.iChunkShift) + (TLinAddr(offset)<<m.iPageShift);
2249 // Create bitmaps for each page table and placeholders for section blocks.
2250 // We only create a bitmap for the first and last PDE and then only if they are not
2251 // fully occupied by this request
2252 THwChunkPageTable* first=NULL;
2253 THwChunkRegion* middle=NULL;
2256 if (a && (offset || npages<pdepages))
2258 // first PDE is bitmap
2259 TInt first_count = Min(remain, pdepages-offset);
2260 first=NewPageTable(nix, aPdePerm, offset, first_count);
2262 remain -= first_count;
2266 if (a && remain>=pdepages)
2268 // next need whole-PDE-block placeholder
2269 TInt whole_pdes=remain>>pdepageshift;
2270 middle=NewRegion(nix, whole_pdes, aPdePerm);
2272 remain-=(whole_pdes<<pdepageshift);
2278 // need final bitmap section
2279 if (!NewPageTable(nix, aPdePerm, 0, remain))
2284 // alloc failed somewhere - free anything we did create
2292 iSection->iAllocator.Free(ix, npdes);
2296 MmuBase::SignalHwChunk();
2297 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc returns %08x", a));
2301 void THwChunkAddressAllocator::Discard(THwChunkRegion* aRegion)
2303 // remove a region from the array and destroy it
2304 TInt r=FindInOrder(aRegion, Order);
2307 Kern::Free(aRegion);
2310 TInt THwChunkAddressAllocator::Order(const THwChunkRegion& a1, const THwChunkRegion& a2)
2312 // order two regions by address
2313 return a1.iIndex-a2.iIndex;
2316 THwChunkRegion* THwChunkAddressAllocator::Free(TLinAddr aAddr, TInt aSize)
2318 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free addr=%08x size=%08x", aAddr, aSize));
2319 __ASSERT_ALWAYS(aAddr>=iSection->iBase && (aAddr+aSize)<=iSection->iEnd,
2320 MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid));
2321 THwChunkRegion* list=NULL;
2322 MmuBase& m=*MmuBase::TheMmu;
2323 TInt ix=(aAddr - iSection->iBase)>>m.iChunkShift;
2324 TInt remain=(aSize+m.iPageMask)>>m.iPageShift;
2325 TInt pdepageshift=m.iChunkShift-m.iPageShift;
2326 TInt offset=(aAddr&m.iChunkMask)>>m.iPageShift;
2327 THwChunkRegion find(ix, 0, 0);
2328 MmuBase::WaitHwChunk();
2329 TInt r=FindInOrder(&find, Order);
2330 __ASSERT_ALWAYS(r>=0, MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid));
2333 THwChunkPageTable* p=(THwChunkPageTable*)(*this)[r];
2334 __ASSERT_ALWAYS(p->iIndex==ix, MmuBase::Panic(MmuBase::EFreeHwChunkIndexInvalid));
2337 // multiple-whole-PDE region
2338 TInt rsz=p->iRegionSize;
2339 remain-=(rsz<<pdepageshift);
2340 Remove(r); // r now indexes following array entry
2346 TInt n=Min(remain, (1<<pdepageshift)-offset);
2347 p->iAllocator.Free(offset, n);
2350 if (p->iAllocator.iAvail < p->iAllocator.iSize)
2352 // bitmap still in use
2354 ++r; // r indexes following array entry
2357 Remove(r); // r now indexes following array entry
2361 list=p; // chain free region descriptors together
2363 MmuBase::SignalHwChunk();
2364 __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free returns %08x", list));
2368 /********************************************
2369 * Hardware chunk abstraction
2370 ********************************************/
2371 THwChunkAddressAllocator* MmuBase::MappingRegion(TUint)
2373 return iHwChunkAllocator;
2376 TInt MmuBase::AllocateAllPageTables(TLinAddr aLinAddr, TInt aSize, TPde aPdePerm, TInt aMapShift, SPageTableInfo::TAttribs aAttrib)
2378 __KTRACE_OPT(KMMU,Kern::Printf("AllocateAllPageTables lin=%08x, size=%x, pde=%08x, mapshift=%d attribs=%d",
2379 aLinAddr, aSize, aPdePerm, aMapShift, aAttrib));
2380 TInt offset=aLinAddr&iChunkMask;
2382 TLinAddr a=aLinAddr&~iChunkMask;
2384 for (; remain>0; a+=iChunkSize)
2386 // don't need page table if a whole PDE mapping is permitted here
2387 if (aMapShift<iChunkShift || offset || remain<iChunkSize)
2389 // need to check for a page table at a
2390 TInt id=PageTableId(a);
2393 // no page table - must allocate one
2394 id = AllocPageTable();
2397 // got page table, assign it
2398 // AssignPageTable(TInt aId, TInt aUsage, TAny* aObject, TLinAddr aAddr, TPde aPdePerm)
2399 AssignPageTable(id, aAttrib, NULL, a, aPdePerm);
2403 remain -= (iChunkSize-offset);
2407 return KErrNone; // completed OK
2409 // ran out of memory somewhere - free page tables which were allocated
2410 for (; newpts; --newpts)
2413 TInt id=UnassignPageTable(a);
2416 return KErrNoMemory;
2421 Create a hardware chunk object mapping a specified block of physical addresses
2422 with specified access permissions and cache policy.
2424 When the mapping is no longer required, close the chunk using chunk->Close(0);
2425 Note that closing a chunk does not free any RAM pages which were mapped by the
2426 chunk - these must be freed separately using Epoc::FreePhysicalRam().
2428 @param aChunk Upon successful completion this parameter receives a pointer to
2429 the newly created chunk. Upon unsuccessful completion it is
2430 written with a NULL pointer. The virtual address of the mapping
2431 can subsequently be discovered using the LinearAddress()
2432 function on the chunk.
2433 @param aAddr The base address of the physical region to be mapped. This will
2434 be rounded down to a multiple of the hardware page size before
2436 @param aSize The size of the physical address region to be mapped. This will
2437 be rounded up to a multiple of the hardware page size before
2438 being used; the rounding is such that the entire range from
2439 aAddr to aAddr+aSize-1 inclusive is mapped. For example if
2440 aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an
2441 8KB range of physical addresses from 0xB0001000 to 0xB0002FFF
2442 inclusive will be mapped.
2443 @param aMapAttr Mapping attributes required for the mapping. This is formed
2444 by ORing together values from the TMappingAttributes enumeration
2445 to specify the access permissions and caching policy.
2447 @pre Calling thread must be in a critical section.
2448 @pre Interrupts must be enabled.
2449 @pre Kernel must be unlocked.
2450 @pre No fast mutex can be held.
2451 @pre Call in a thread context.
2452 @pre Can be used in a device driver.
2453 @see TMappingAttributes
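A hedged usage sketch (the physical address, size and attribute combination are
assumptions chosen for illustration, not values taken from any particular port):

@code
	DPlatChunkHw* chunk = NULL;
	TUint attr = EMapAttrSupRw | EMapAttrFullyBlocking;  // uncached, supervisor read/write
	TInt r = DPlatChunkHw::New(chunk, 0x40000000, 0x1000, attr);
	if (r == KErrNone)
		{
		TLinAddr lin = chunk->LinearAddress();
		// ... access the mapped hardware registers through lin ...
		chunk->Close(0);      // unmaps only; any RAM pages must be freed separately
		}
@endcode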
2455 EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
2457 if (aAddr == KPhysAddrInvalid)
2458 return KErrNotSupported;
2459 return DoNew(aChunk, aAddr, aSize, aMapAttr);
2462 TInt DPlatChunkHw::DoNew(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
2464 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
2465 __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr));
2467 return KErrArgument;
2468 MmuBase& m=*MmuBase::TheMmu;
2470 TPhysAddr pa=aAddr!=KPhysAddrInvalid ? aAddr&~m.iPageMask : 0;
2471 TInt size=((aAddr+aSize+m.iPageMask)&~m.iPageMask)-pa;
2472 __KTRACE_OPT(KMMU,Kern::Printf("Rounded %08x+%x", pa, size));
2473 DMemModelChunkHw* pC=new DMemModelChunkHw;
2475 return KErrNoMemory;
2476 __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunkHw created at %08x",pC));
2477 pC->iPhysAddr=aAddr;
2479 TUint mapattr=aMapAttr;
2482 TInt r=m.PdePtePermissions(mapattr, pdePerm, ptePerm);
2485 pC->iAllocator=m.MappingRegion(mapattr);
2486 pC->iAttribs=mapattr; // save actual mapping attributes
2487 r=pC->AllocateLinearAddress(pdePerm);
2492 r=m.AllocateAllPageTables(pC->iLinAddr, size, pdePerm, map_shift, SPageTableInfo::EGlobal);
2493 if (r==KErrNone && aAddr!=KPhysAddrInvalid)
2494 m.Map(pC->iLinAddr, pa, size, pdePerm, ptePerm, map_shift);
2505 TInt DMemModelChunkHw::AllocateLinearAddress(TPde aPdePerm)
2507 __KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::AllocateLinearAddress(%08x)", aPdePerm));
2508 __KTRACE_OPT(KMMU, Kern::Printf("iAllocator=%08x iPhysAddr=%08x iSize=%08x", iAllocator, iPhysAddr, iSize));
2509 MmuBase& m=*MmuBase::TheMmu;
2510 TInt map_shift = (iPhysAddr<0xffffffffu) ? 30 : m.iPageShift;
2511 for (; map_shift>=m.iPageShift; --map_shift)
2513 TUint32 map_size = 1<<map_shift;
2514 TUint32 map_mask = map_size-1;
2515 if (!(m.iMapSizes & map_size))
2516 continue; // map_size is not supported on this hardware
2517 TPhysAddr base = (iPhysAddr+map_mask) &~ map_mask; // base rounded up
2518 TPhysAddr end = (iPhysAddr+iSize)&~map_mask; // end rounded down
2519 if ((base-end)<0x80000000u && map_shift>m.iPageShift)
2520 continue; // region not big enough to use this mapping size
2521 __KTRACE_OPT(KMMU, Kern::Printf("Try map size %08x", map_size));
2522 iLinAddr=iAllocator->Alloc(iSize, map_shift, iPhysAddr, aPdePerm);
2526 TInt r=iLinAddr ? map_shift : KErrNoMemory;
2527 __KTRACE_OPT(KMMU, Kern::Printf("iLinAddr=%08x, returning %d", iLinAddr, r));
2531 void DMemModelChunkHw::DeallocateLinearAddress()
2533 __KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::DeallocateLinearAddress %O", this));
2534 MmuBase& m=*MmuBase::TheMmu;
2535 MmuBase::WaitHwChunk();
2536 THwChunkRegion* rgn=iAllocator->Free(iLinAddr, iSize);
2538 MmuBase::SignalHwChunk();
2539 TLinAddr base = iAllocator->iSection->iBase;
2540 TBitMapAllocator& section_allocator = iAllocator->iSection->iAllocator;
2544 if (rgn->iRegionSize)
2546 // free address range
2547 __KTRACE_OPT(KMMU, Kern::Printf("Freeing range %03x+%03x", rgn->iIndex, rgn->iRegionSize));
2548 section_allocator.Free(rgn->iIndex, rgn->iRegionSize);
2550 // Though this is a large region, it may still be made up of page tables (not sections).
2551 // Check each chunk and remove its page table if necessary
2553 TLinAddr a = base + (TLinAddr(rgn->iIndex)<<m.iChunkShift);
2554 for (; i<rgn->iRegionSize ; i++,a+=m.iChunkSize)
2556 TInt id = m.UnassignPageTable(a);
2558 m.FreePageTable(id);
2563 // free address and page table if it exists
2564 __KTRACE_OPT(KMMU, Kern::Printf("Freeing index %03x", rgn->iIndex));
2565 section_allocator.Free(rgn->iIndex);
2566 TLinAddr a = base + (TLinAddr(rgn->iIndex)<<m.iChunkShift);
2567 TInt id = m.UnassignPageTable(a);
2569 m.FreePageTable(id);
2572 THwChunkRegion* free=rgn;
2584 RamCacheBase* RamCacheBase::TheRamCache = NULL;
2587 RamCacheBase::RamCacheBase()
2592 void RamCacheBase::Init2()
2594 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">RamCacheBase::Init2"));
2595 iMmu = MmuBase::TheMmu;
2596 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<RamCacheBase::Init2"));
2600 void RamCacheBase::ReturnToSystem(SPageInfo* aPageInfo)
2602 __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
2603 __ASSERT_SYSTEM_LOCK;
2604 aPageInfo->SetUnused();
2605 --iNumberOfFreePages;
2606 __NK_ASSERT_DEBUG(iNumberOfFreePages>=0);
2607 // Release system lock before using the RAM allocator.
2608 NKern::UnlockSystem();
2609 iMmu->iRamPageAllocator->FreeRamPage(aPageInfo->PhysAddr(), EPageDiscard);
2610 NKern::LockSystem();
2614 SPageInfo* RamCacheBase::GetPageFromSystem(TUint aBlockedZoneId, TBool aBlockRest)
2616 __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
2617 SPageInfo* pageInfo;
2619 TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard, aBlockedZoneId, aBlockRest);
2622 NKern::LockSystem();
2623 pageInfo = SPageInfo::FromPhysAddr(pagePhys);
2624 pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead);
2625 ++iNumberOfFreePages;
2626 NKern::UnlockSystem();
2639 void RamCache::Init2()
2641 __KTRACE_OPT(KBOOT,Kern::Printf(">RamCache::Init2"));
2642 RamCacheBase::Init2();
2643 __KTRACE_OPT(KBOOT,Kern::Printf("<RamCache::Init2"));
2647 TInt RamCache::Init3()
2652 void RamCache::RemovePage(SPageInfo& aPageInfo)
2654 __NK_ASSERT_DEBUG(aPageInfo.Type() == SPageInfo::EPagedCache);
2655 __NK_ASSERT_DEBUG(aPageInfo.State() == SPageInfo::EStatePagedYoung);
2656 aPageInfo.iLink.Deque();
2657 aPageInfo.SetState(SPageInfo::EStatePagedDead);
2660 TBool RamCache::GetFreePages(TInt aNumPages)
2662 __KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages));
2663 NKern::LockSystem();
2665 while(aNumPages>0 && NumberOfFreePages()>=aNumPages)
2667 // steal a page from cache list and return it to the free pool...
2668 SPageInfo* pageInfo = SPageInfo::FromLink(iPageList.First()->Deque());
2669 pageInfo->SetState(SPageInfo::EStatePagedDead);
2671 ReturnToSystem(pageInfo);
2675 NKern::UnlockSystem();
2676 __KTRACE_OPT(KPAGING,Kern::Printf("DP: <GetFreePages %d",!aNumPages));
2681 void RamCache::DonateRamCachePage(SPageInfo* aPageInfo)
2683 SPageInfo::TType type = aPageInfo->Type();
2684 if(type==SPageInfo::EChunk)
2686 //Must not donate locked page. An example is DMA transferred memory.
2687 __NK_ASSERT_DEBUG(0 == aPageInfo->LockCount());
2689 aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung);
2690 iPageList.Add(&aPageInfo->iLink);
2691 ++iNumberOfFreePages;
2692 // Update ram allocator counts as this page has changed its type
2693 DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
2694 iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard);
2696 #ifdef BTRACE_PAGING
2697 BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkDonatePage, chunk, aPageInfo->Offset());
2701 // allow already donated pages...
2702 __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
2706 TBool RamCache::ReclaimRamCachePage(SPageInfo* aPageInfo)
2708 SPageInfo::TType type = aPageInfo->Type();
2709 // Kern::Printf("DemandPaging::ReclaimRamCachePage %x %d free=%d",aPageInfo,type,iNumberOfFreePages);
2711 if(type==SPageInfo::EChunk)
2712 return ETrue; // page already reclaimed
2714 __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
2715 __NK_ASSERT_DEBUG(aPageInfo->State()==SPageInfo::EStatePagedYoung);
2716 // Update ram allocator counts as this page has changed its type
2717 DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
2718 iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
2719 aPageInfo->iLink.Deque();
2720 --iNumberOfFreePages;
2721 aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal);
2723 #ifdef BTRACE_PAGING
2724 BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkReclaimPage, chunk, aPageInfo->Offset());
2731 Discard the specified page.
2732 Should only be called on a page if a previous call to IsPageDiscardable()
2733 returned ETrue and the system lock hasn't been released between the calls.
2735 @param aPageInfo The page info of the page to be discarded
2736 @param aBlockedZoneId Not used by this overload.
2737 @param aBlockRest Not used by this overload.
2738 @return ETrue if the page was successfully discarded
2740 @pre System lock held.
2741 @post System lock held.
2743 TBool RamCache::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest)
2745 __NK_ASSERT_DEBUG(iNumberOfFreePages > 0);
2746 RemovePage(aPageInfo);
2747 SetFree(&aPageInfo);
2748 ReturnToSystem(&aPageInfo);
2754 First stage in discarding a list of pages.
2756 Must ensure that the pages will still be discardable even if system lock is released.
2757 To be used in conjunction with RamCacheBase::DoDiscardPages1().
2759 @param aPageList A NULL terminated list of the pages to be discarded
2760 @return KErrNone on success.
2762 @pre System lock held
2763 @post System lock held
2765 TInt RamCache::DoDiscardPages0(SPageInfo** aPageList)
2767 __ASSERT_SYSTEM_LOCK;
2769 SPageInfo* pageInfo;
2770 while((pageInfo = *aPageList++) != 0)
2772 RemovePage(*pageInfo);
2779 Final stage in discarding a list of pages.
2780 Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0().
2781 This overload doesn't actually need to do anything.
2783 @param aPageList A NULL terminated list of the pages to be discarded
2784 @return KErrNone on success.
2786 @pre System lock held
2787 @post System lock held
2789 TInt RamCache::DoDiscardPages1(SPageInfo** aPageList)
2791 __ASSERT_SYSTEM_LOCK;
2792 SPageInfo* pageInfo;
2793 while((pageInfo = *aPageList++) != 0)
2796 ReturnToSystem(pageInfo);
2803 Check whether the specified page can be discarded by the RAM cache.
2805 @param aPageInfo The page info of the page being queried.
2806 @return ETrue when the page can be discarded, EFalse otherwise.
2807 @pre System lock held.
2808 @post System lock held.
2810 TBool RamCache::IsPageDiscardable(SPageInfo& aPageInfo)
2812 SPageInfo::TType type = aPageInfo.Type();
2813 SPageInfo::TState state = aPageInfo.State();
2814 return (type == SPageInfo::EPagedCache && state == SPageInfo::EStatePagedYoung);
2819 @return ETrue when the unmapped page should be freed, EFalse otherwise
2821 TBool RamCache::PageUnmapped(SPageInfo* aPageInfo)
2823 SPageInfo::TType type = aPageInfo->Type();
2824 // Kern::Printf("DemandPaging::PageUnmapped %x %d",aPageInfo,type);
2825 if(type!=SPageInfo::EPagedCache)
2827 SPageInfo::TState state = aPageInfo->State();
2828 if(state==SPageInfo::EStatePagedYoung)
2830 // This page will be freed by DChunk::DoDecommit just as it was originally
2831 // allocated, so update the page counts in the ram allocator
2832 DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
2833 iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
2834 aPageInfo->iLink.Deque();
2835 --iNumberOfFreePages;
2841 void RamCache::Panic(TFault aFault)
2843 Kern::Fault("RamCache",aFault);
2847 Flush all cache pages.
2849 @pre RAM allocator mutex held
2850 @post RAM allocator mutex held
2852 void RamCache::FlushAll()
2854 __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
2856 // Should always succeed
2857 __NK_ASSERT_DEBUG(GetFreePages(iNumberOfFreePages));
2859 GetFreePages(iNumberOfFreePages);
2868 #ifdef __DEMAND_PAGING__
2870 DemandPaging* DemandPaging::ThePager = 0;
2871 TBool DemandPaging::PseudoRandInitialised = EFalse;
2872 volatile TUint32 DemandPaging::PseudoRandSeed = 0;
2875 void M::DemandPagingInit()
2877 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">M::DemandPagingInit"));
2878 TInt r = RamCacheBase::TheRamCache->Init3();
2880 DemandPaging::Panic(DemandPaging::EInitialiseFailed);
2882 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<M::DemandPagingInit"));
2886 TInt M::DemandPagingFault(TAny* aExceptionInfo)
2888 DemandPaging* pager = DemandPaging::ThePager;
2890 return pager->Fault(aExceptionInfo);
2895 extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength)
2897 if(M::CheckPagingSafe(EFalse, aStartAddres, aLength))
2899 Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR);
2900 __NK_ASSERT_ALWAYS(0);
2903 extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength)
2905 if(M::CheckPagingSafe(ETrue, aStartAddres, aLength))
2907 __KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR));
2912 TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength)
2914 DemandPaging* pager = DemandPaging::ThePager;
2915 if(!pager || K::Initialising)
2918 NThread* nt = NCurrentThread();
2920 return ETrue; // We've not booted properly yet!
2922 if (!pager->NeedsMutexOrderCheck(aStartAddr, aLength))
2925 TBool dataPagingEnabled = EFalse; // data paging not supported on moving or multiple models
2927 DThread* thread = _LOFF(nt,DThread,iNThread);
2928 NFastMutex* fm = NKern::HeldFastMutex();
2931 if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock)
2935 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held"));
2940 __KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held"));
2941 return !dataPagingEnabled;
2946 DMutex* m = pager->CheckMutexOrder();
2951 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m));
2956 __KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O",m));
2957 return !dataPagingEnabled;
2965 TInt M::LockRegion(TLinAddr aStart,TInt aSize)
2967 DemandPaging* pager = DemandPaging::ThePager;
2969 return pager->LockRegion(aStart,aSize,NULL);
2974 TInt M::UnlockRegion(TLinAddr aStart,TInt aSize)
2976 DemandPaging* pager = DemandPaging::ThePager;
2978 return pager->UnlockRegion(aStart,aSize,NULL);
2982 #else // !__DEMAND_PAGING__
2984 TInt M::LockRegion(TLinAddr /*aStart*/,TInt /*aSize*/)
2990 TInt M::UnlockRegion(TLinAddr /*aStart*/,TInt /*aSize*/)
2995 #endif // __DEMAND_PAGING__
3004 #ifdef __DEMAND_PAGING__
3007 const TUint16 KDefaultYoungOldRatio = 3;
3008 const TUint KDefaultMinPages = 256;
3009 const TUint KDefaultMaxPages = KMaxTUint >> KPageShift;
3011 /* Need at least 4 mapped pages to guarantee being able to execute all ARM instructions.
3012 (Worst case is a THUMB2 STM instruction with both the instruction and its data straddling page
3015 const TUint KMinYoungPages = 4;
3016 const TUint KMinOldPages = 1;
3018 /* A minimum young/old ratio of 1 means that we need at least twice KMinYoungPages pages...
3020 const TUint KAbsoluteMinPageCount = 2*KMinYoungPages;
3022 __ASSERT_COMPILE(KMinOldPages<=KAbsoluteMinPageCount/2);
3024 class DMissingPagingDevice : public DPagingDevice
3026 TInt Read(TThreadMessage* /*aReq*/,TLinAddr /*aBuffer*/,TUint /*aOffset*/,TUint /*aSize*/,TInt /*aDrvNumber*/)
3027 { DemandPaging::Panic(DemandPaging::EDeviceMissing); return 0; }
3031 TBool DemandPaging::RomPagingRequested()
3033 return TheRomHeader().iPageableRomSize != 0;
3037 TBool DemandPaging::CodePagingRequested()
3039 return (TheSuperPage().KernelConfigFlags() & EKernelConfigCodePagingPolicyDefaultPaged) != EKernelConfigCodePagingPolicyNoPaging;
3043 DemandPaging::DemandPaging()
3048 void DemandPaging::Init2()
3050 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init2"));
3052 RamCacheBase::Init2();
3054 // initialise live list...
3055 SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig;
3057 iMinimumPageCount = KDefaultMinPages;
3058 if(config.iMinPages)
3059 iMinimumPageCount = config.iMinPages;
3060 if(iMinimumPageCount<KAbsoluteMinPageCount)
3061 iMinimumPageCount = KAbsoluteMinPageCount;
3062 iInitMinimumPageCount = iMinimumPageCount;
3064 iMaximumPageCount = KDefaultMaxPages;
3065 if(config.iMaxPages)
3066 iMaximumPageCount = config.iMaxPages;
3067 iInitMaximumPageCount = iMaximumPageCount;
3069 iYoungOldRatio = KDefaultYoungOldRatio;
3070 if(config.iYoungOldRatio)
3071 iYoungOldRatio = config.iYoungOldRatio;
3072 TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
3073 if(iYoungOldRatio>ratioLimit)
3074 iYoungOldRatio = ratioLimit;
3076 iMinimumPageLimit = (KMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio;
3077 if(iMinimumPageLimit<KAbsoluteMinPageCount)
3078 iMinimumPageLimit = KAbsoluteMinPageCount;
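	// Illustrative arithmetic: with the default young/old ratio of 3 the formula above
	// gives (4 * (1 + 3)) / 3 = 5, which is then raised here to KAbsoluteMinPageCount (8).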
3080 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InitialiseLiveList min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));
3082 if(iMaximumPageCount<iMinimumPageCount)
3083 Panic(EInitialiseBadArgs);
3086 // This routine doesn't acquire any mutexes because it should be called before the system
3087 // is fully up and running, i.e. before another thread can preempt this one.
3090 // Calculate page counts
3091 iOldCount = iMinimumPageCount/(1+iYoungOldRatio);
3092 if(iOldCount<KMinOldPages)
3093 Panic(EInitialiseBadArgs);
3094 iYoungCount = iMinimumPageCount-iOldCount;
3095 if(iYoungCount<KMinYoungPages)
3096 Panic(EInitialiseBadArgs); // Need at least 4 pages mapped to execute an ARM LDM instruction in THUMB2 mode
3097 iNumberOfFreePages = 0;
3099 // Allocate RAM pages and put them all on the old list
3102 for(TUint i=0; i<iMinimumPageCount; i++)
3104 // Allocate a single page
3106 TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard);
3108 Panic(EInitialiseFailed);
3109 AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
3112 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<DemandPaging::Init2"));
3116 TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2);
3118 TInt DemandPaging::Init3()
3120 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init3"));
3123 // construct iBufferChunk
3124 iDeviceBufferSize = 2*KPageSize;
3125 TChunkCreateInfo info;
3126 info.iType = TChunkCreateInfo::ESharedKernelMultiple;
3127 info.iMaxSize = iDeviceBufferSize*KMaxPagingDevices;
3128 info.iMapAttr = EMapAttrCachedMax;
3129 info.iOwnsMemory = ETrue;
3131 r = Kern::ChunkCreate(info,iDeviceBuffersChunk,iDeviceBuffers,mapAttr);
3135 // Install 'null' paging devices which panic if used...
3136 DMissingPagingDevice* missingPagingDevice = new DMissingPagingDevice;
3137 for(TInt i=0; i<KMaxPagingDevices; i++)
3139 iPagingDevices[i].iInstalled = EFalse;
3140 iPagingDevices[i].iDevice = missingPagingDevice;
3143 // Initialise ROM info...
3144 const TRomHeader& romHeader = TheRomHeader();
3145 iRomLinearBase = (TLinAddr)&romHeader;
3146 iRomSize = iMmu->RoundToPageSize(romHeader.iUncompressedSize);
3147 if(romHeader.iRomPageIndex)
3148 iRomPageIndex = (SRomPageInfo*)((TInt)&romHeader+romHeader.iRomPageIndex);
3150 TLinAddr pagedStart = romHeader.iPageableRomSize ? (TLinAddr)&romHeader+romHeader.iPageableRomStart : 0;
3153 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("ROM=%x+%x PagedStart=%x",iRomLinearBase,iRomSize,pagedStart));
3154 __NK_ASSERT_ALWAYS(TUint(pagedStart-iRomLinearBase)<TUint(iRomSize));
3155 iRomPagedLinearBase = pagedStart;
3156 iRomPagedSize = iRomLinearBase+iRomSize-pagedStart;
3157 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::Init3, ROM Paged start(0x%x), size(0x%x)",iRomPagedLinearBase,iRomPagedSize));
3159 #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
3160 // Get physical addresses of ROM pages
3161 iOriginalRomPageCount = iMmu->RoundToPageSize(iRomSize)>>KPageShift;
3162 iOriginalRomPages = new TPhysAddr[iOriginalRomPageCount];
3163 __NK_ASSERT_ALWAYS(iOriginalRomPages);
3164 TPhysAddr romPhysAddress;
3165 iMmu->LinearToPhysical(iRomLinearBase,iRomSize,romPhysAddress,iOriginalRomPages);
3169 r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
3170 __NK_ASSERT_ALWAYS(r==KErrNone);
3172 #ifdef __DEMAND_PAGING_BENCHMARKS__
3173 for (TInt i = 0 ; i < EMaxPagingBm ; ++i)
3174 ResetBenchmarkData((TPagingBenchmark)i);
3177 // Initialisation now complete
3183 DemandPaging::~DemandPaging()
3185 #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
3186 delete[] iOriginalRomPages;
3188 for (TUint i = 0 ; i < iPagingRequestCount ; ++i)
3189 delete iPagingRequests[i];
3193 TInt DemandPaging::InstallPagingDevice(DPagingDevice* aDevice)
3195 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InstallPagingDevice name='%s' type=%d",aDevice->iName,aDevice->iType));
3197 if(aDevice->iReadUnitShift>KPageShift)
3198 Panic(EInvalidPagingDevice);
3202 TBool createRequestObjects = EFalse;
3204 if ((aDevice->iType & DPagingDevice::ERom) && RomPagingRequested())
3206 r = DoInstallPagingDevice(aDevice, 0);
3209 K::MemModelAttributes|=EMemModelAttrRomPaging;
3210 createRequestObjects = ETrue;
3213 if ((aDevice->iType & DPagingDevice::ECode) && CodePagingRequested())
3215 for (i = 0 ; i < KMaxLocalDrives ; ++i)
3217 if (aDevice->iDrivesSupported & (1<<i))
3219 r = DoInstallPagingDevice(aDevice, i + 1);
3224 K::MemModelAttributes|=EMemModelAttrCodePaging;
3225 createRequestObjects = ETrue;
3228 if (createRequestObjects)
3230 for (i = 0 ; i < KPagingRequestsPerDevice ; ++i)
3232 r = CreateRequestObject();
3239 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<DemandPaging::InstallPagingDevice returns %d",r));
3243 TInt DemandPaging::DoInstallPagingDevice(DPagingDevice* aDevice, TInt aId)
3245 NKern::LockSystem();
3246 SPagingDevice* device = &iPagingDevices[aId];
3247 if(device->iInstalled)
3249 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("**** Attempt to install more than one ROM paging device !!!!!!!! ****"));
3250 //Panic(EDeviceAlreadyExists);
3251 NKern::UnlockSystem();
3255 aDevice->iDeviceId = aId;
3256 device->iDevice = aDevice;
3257 device->iInstalled = ETrue;
3258 NKern::UnlockSystem();
3260 __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::InstallPagingDevice id=%d, device=%08x",aId,device));
3265 DemandPaging::DPagingRequest::~DPagingRequest()
3268 iMutex->Close(NULL);
3271 TInt DemandPaging::CreateRequestObject()
3273 _LIT(KLitPagingRequest,"PagingRequest-");
3276 TInt id = (TInt)__e32_atomic_add_ord32(&iNextPagingRequestCount, 1);
3277 TLinAddr offset = id * iDeviceBufferSize;
3278 TUint32 physAddr = 0;
3279 TInt r = Kern::ChunkCommitContiguous(iDeviceBuffersChunk,offset,iDeviceBufferSize, physAddr);
3283 DPagingRequest* req = new DPagingRequest();
3285 return KErrNoMemory;
3287 req->iBuffer = iDeviceBuffers + offset;
3288 AllocLoadAddress(*req, id);
3290 TBuf<16> mutexName(KLitPagingRequest);
3291 mutexName.AppendNum(id);
3292 r = K::MutexCreate(req->iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
3296 // Ensure there are enough young pages to cope with new request object
3297 r = ResizeLiveList(iMinimumPageCount, iMaximumPageCount);
3301 NKern::LockSystem();
3302 index = iPagingRequestCount++;
3303 __NK_ASSERT_ALWAYS(index < KMaxPagingRequests);
3304 iPagingRequests[index] = req;
3305 iFreeRequestPool.AddHead(req);
3306 NKern::UnlockSystem();
3315 DemandPaging::DPagingRequest* DemandPaging::AcquireRequestObject()
3317 __ASSERT_SYSTEM_LOCK;
3318 __NK_ASSERT_DEBUG(iPagingRequestCount > 0);
3320 DPagingRequest* req = NULL;
3322 // System lock used to serialise access to our data structures as we have to hold it anyway when
3323 // we wait on the mutex
3325 req = (DPagingRequest*)iFreeRequestPool.GetFirst();
3327 __NK_ASSERT_DEBUG(req->iUsageCount == 0);
3330 // Pick a random request object to wait on
3331 TUint index = (FastPseudoRand() * TUint64(iPagingRequestCount)) >> 32;
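		// FastPseudoRand() is treated as a 32-bit fixed-point fraction: multiplying by the
		// request count and shifting right by 32 scales it into [0, iPagingRequestCount)
		// without needing a division.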
3332 __NK_ASSERT_DEBUG(index < iPagingRequestCount);
3333 req = iPagingRequests[index];
3334 __NK_ASSERT_DEBUG(req->iUsageCount > 0);
3337 #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
3339 if (iWaitingCount > iMaxWaitingCount)
3340 iMaxWaitingCount = iWaitingCount;
3344 TInt r = req->iMutex->Wait();
3345 __NK_ASSERT_ALWAYS(r == KErrNone);
3347 #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
3350 if (iPagingCount > iMaxPagingCount)
3351 iMaxPagingCount = iPagingCount;
3357 void DemandPaging::ReleaseRequestObject(DPagingRequest* aReq)
3359 __ASSERT_SYSTEM_LOCK;
3361 #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
3365 // If there are no threads waiting on the mutex then return it to the free pool
3366 __NK_ASSERT_DEBUG(aReq->iUsageCount > 0);
3367 if (--aReq->iUsageCount == 0)
3368 iFreeRequestPool.AddHead(aReq);
3370 aReq->iMutex->Signal();
3371 NKern::LockSystem();
3374 TInt DemandPaging::ReadRomPage(const DPagingRequest* aReq, TLinAddr aRomAddress)
3376 START_PAGING_BENCHMARK;
3378 TInt pageSize = KPageSize;
3379 TInt dataOffset = aRomAddress-iRomLinearBase;
3380 TInt pageNumber = dataOffset>>KPageShift;
3381 TInt readUnitShift = RomPagingDevice().iDevice->iReadUnitShift;
3385 // ROM not broken into pages, so just read it in directly
3386 START_PAGING_BENCHMARK;
3387 r = RomPagingDevice().iDevice->Read(const_cast<TThreadMessage*>(&aReq->iMessage),aReq->iLoadAddr,dataOffset>>readUnitShift,pageSize>>readUnitShift,-1/*token for ROM paging*/);
3388 END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
3392 // Work out where data for page is located
3393 SRomPageInfo* romPageInfo = iRomPageIndex+pageNumber;
3394 dataOffset = romPageInfo->iDataStart;
3395 TInt dataSize = romPageInfo->iDataSize;
3398 // empty page, fill it with 0xff...
3399 memset((void*)aReq->iLoadAddr,-1,pageSize);
3404 __NK_ASSERT_ALWAYS(romPageInfo->iPagingAttributes&SRomPageInfo::EPageable);
3406 // Read data for page...
3407 TThreadMessage* msg= const_cast<TThreadMessage*>(&aReq->iMessage);
3408 TLinAddr buffer = aReq->iBuffer;
3409 TUint readStart = dataOffset>>readUnitShift;
3410 TUint readSize = ((dataOffset+dataSize-1)>>readUnitShift)-readStart+1;
3411 __NK_ASSERT_DEBUG((readSize<<readUnitShift)<=iDeviceBufferSize);
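	// Illustrative arithmetic (assumed values): with 512-byte read units (readUnitShift == 9),
	// dataOffset == 0x1300 and dataSize == 0x500 give readStart == 9 and readSize == 3, i.e.
	// the read is widened to the whole media units covering the compressed page data.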
3412 START_PAGING_BENCHMARK;
3413 r = RomPagingDevice().iDevice->Read(msg,buffer,readStart,readSize,-1/*token for ROM paging*/);
3414 END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
3417 // Decompress data...
3418 TLinAddr data = buffer+dataOffset-(readStart<<readUnitShift);
3419 r = Decompress(romPageInfo->iCompressionType,aReq->iLoadAddr,data,dataSize);
3422 __NK_ASSERT_ALWAYS(r==pageSize);
3429 END_PAGING_BENCHMARK(this, EPagingBmReadRomPage);
3433 TInt ReadFunc(TAny* aArg1, TAny* aArg2, TLinAddr aBuffer, TInt aBlockNumber, TInt aBlockCount)
3435 START_PAGING_BENCHMARK;
3436 TInt drive = (TInt)aArg1;
3437 TThreadMessage* msg= (TThreadMessage*)aArg2;
3438 DemandPaging::SPagingDevice& device = DemandPaging::ThePager->CodePagingDevice(drive);
3439 TInt r = device.iDevice->Read(msg, aBuffer, aBlockNumber, aBlockCount, drive);
3440 END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
3444 TInt DemandPaging::ReadCodePage(const DPagingRequest* aReq, DMmuCodeSegMemory* aCodeSegMemory, TLinAddr aCodeAddress)
3446 __KTRACE_OPT(KPAGING,Kern::Printf("ReadCodePage buffer = %08x, csm == %08x, addr == %08x", aReq->iLoadAddr, aCodeSegMemory, aCodeAddress));
3448 START_PAGING_BENCHMARK;
3450 // Get the paging device for this drive
3451 SPagingDevice& device = CodePagingDevice(aCodeSegMemory->iCodeLocalDrive);
3453 // Work out which bit of the file to read
3454 SRamCodeInfo& ri = aCodeSegMemory->iRamInfo;
3455 TInt codeOffset = aCodeAddress - ri.iCodeRunAddr;
3456 TInt pageNumber = codeOffset >> KPageShift;
3457 TBool compressed = aCodeSegMemory->iCompressionType != SRomPageInfo::ENoCompression;
3458 TInt dataOffset, dataSize;
3461 dataOffset = aCodeSegMemory->iCodePageOffsets[pageNumber];
3462 dataSize = aCodeSegMemory->iCodePageOffsets[pageNumber + 1] - dataOffset;
3463 __KTRACE_OPT(KPAGING,Kern::Printf(" compressed, file offset == %x, size == %d", dataOffset, dataSize));
3467 dataOffset = codeOffset + aCodeSegMemory->iCodeStartInFile;
3468 dataSize = Min(KPageSize, aCodeSegMemory->iBlockMap.DataLength() - dataOffset);
3469 __NK_ASSERT_DEBUG(dataSize >= 0);
3470 __KTRACE_OPT(KPAGING,Kern::Printf(" uncompressed, file offset == %x, size == %d", dataOffset, dataSize));
3473 TInt bufferStart = aCodeSegMemory->iBlockMap.Read(aReq->iBuffer,
3476 device.iDevice->iReadUnitShift,
3478 (TAny*)aCodeSegMemory->iCodeLocalDrive,
3479 (TAny*)&aReq->iMessage);
3485 r = bufferStart; // return error
3486 __NK_ASSERT_DEBUG(0);
3490 TLinAddr data = aReq->iBuffer + bufferStart;
3493 TInt r = Decompress(aCodeSegMemory->iCompressionType, aReq->iLoadAddr, data, dataSize);
3496 dataSize = Min(KPageSize, ri.iCodeSize - codeOffset);
3499 __NK_ASSERT_DEBUG(0);
3507 __NK_ASSERT_DEBUG(0);
3512 #ifdef BTRACE_PAGING_VERBOSE
3513 BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,SRomPageInfo::ENoCompression);
3515 memcpy((TAny*)aReq->iLoadAddr, (TAny*)data, dataSize);
3516 #ifdef BTRACE_PAGING_VERBOSE
3517 BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd);
3523 if (dataSize < KPageSize)
3524 memset((TAny*)(aReq->iLoadAddr + dataSize), 0x03, KPageSize - dataSize); // standard memset order: destination, fill value, length
3526 END_PAGING_BENCHMARK(this, EPagingBmReadCodePage);
3532 #include "decompress.h"
3535 TInt DemandPaging::Decompress(TInt aCompressionType,TLinAddr aDst,TLinAddr aSrc,TUint aSrcSize)
3537 #ifdef BTRACE_PAGING_VERBOSE
3538 BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,aCompressionType);
3541 switch(aCompressionType)
3543 case SRomPageInfo::ENoCompression:
3544 memcpy((void*)aDst,(void*)aSrc,aSrcSize);
3548 case SRomPageInfo::EBytePair:
3550 START_PAGING_BENCHMARK;
3552 r=BytePairDecompress((TUint8*)aDst,KPageSize,(TUint8*)aSrc,aSrcSize,srcNext);
3554 __NK_ASSERT_ALWAYS((TLinAddr)srcNext == aSrc + aSrcSize);
3555 END_PAGING_BENCHMARK(this, EPagingBmDecompress);
3560 r = KErrNotSupported;
3563 #ifdef BTRACE_PAGING_VERBOSE
3564 BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd);
3570 void DemandPaging::BalanceAges()
3572 if(iOldCount*iYoungOldRatio>=iYoungCount)
3573 return; // We have enough old pages
3575 // make one young page into an old page...
3577 __NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
3578 __NK_ASSERT_DEBUG(iYoungCount);
3579 SDblQueLink* link = iYoungList.Last()->Deque();
3582 SPageInfo* pageInfo = SPageInfo::FromLink(link);
3583 pageInfo->SetState(SPageInfo::EStatePagedOld);
3585 iOldList.AddHead(link);
3590 #ifdef BTRACE_PAGING_VERBOSE
3591 BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,pageInfo->PhysAddr());
3596 void DemandPaging::AddAsYoungest(SPageInfo* aPageInfo)
3599 SPageInfo::TType type = aPageInfo->Type();
3600 __NK_ASSERT_DEBUG(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode || type==SPageInfo::EPagedData || type==SPageInfo::EPagedCache);
3602 aPageInfo->SetState(SPageInfo::EStatePagedYoung);
3603 iYoungList.AddHead(&aPageInfo->iLink);
3608 void DemandPaging::AddAsFreePage(SPageInfo* aPageInfo)
3610 #ifdef BTRACE_PAGING
3611 TPhysAddr phys = aPageInfo->PhysAddr();
3612 BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,phys);
3614 aPageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedOld);
3615 iOldList.Add(&aPageInfo->iLink);
3620 void DemandPaging::RemovePage(SPageInfo* aPageInfo)
3622 switch(aPageInfo->State())
3624 case SPageInfo::EStatePagedYoung:
3625 __NK_ASSERT_DEBUG(iYoungCount);
3626 aPageInfo->iLink.Deque();
3630 case SPageInfo::EStatePagedOld:
3631 __NK_ASSERT_DEBUG(iOldCount);
3632 aPageInfo->iLink.Deque();
3636 case SPageInfo::EStatePagedLocked:
3640 __NK_ASSERT_DEBUG(0);
3642 aPageInfo->SetState(SPageInfo::EStatePagedDead);
3646 SPageInfo* DemandPaging::GetOldestPage()
3648 // remove oldest from list...
3652 __NK_ASSERT_DEBUG(!iOldList.IsEmpty());
3653 link = iOldList.Last()->Deque();
3658 __NK_ASSERT_DEBUG(iYoungCount);
3659 __NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
3660 link = iYoungList.Last()->Deque();
3663 SPageInfo* pageInfo = SPageInfo::FromLink(link);
3664 pageInfo->SetState(SPageInfo::EStatePagedDead);
3666 // put page in a free state...
3668 pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead);
3670 // keep live list balanced...
3677 TBool DemandPaging::GetFreePages(TInt aNumPages)
3679 __KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages));
3680 NKern::LockSystem();
3682 while(aNumPages>0 && NumberOfFreePages()>=aNumPages)
3684 // steal a page from live page list and return it to the free pool...
3685 ReturnToSystem(GetOldestPage());
3689 NKern::UnlockSystem();
3690 __KTRACE_OPT(KPAGING,Kern::Printf("DP: <GetFreePages %d",!aNumPages));
3695 void DemandPaging::DonateRamCachePage(SPageInfo* aPageInfo)
3697 __NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
3698 SPageInfo::TType type = aPageInfo->Type();
3699 if(type==SPageInfo::EChunk)
3701 //Must not donate locked page. An example is DMA transferred memory.
3702 __NK_ASSERT_DEBUG(0 == aPageInfo->LockCount());
3704 aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung);
3706 // Update ram allocator counts as this page has changed its type
3707 DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
3708 iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard);
3710 AddAsYoungest(aPageInfo);
3711 ++iNumberOfFreePages;
3712 if (iMinimumPageCount + iNumberOfFreePages > iMaximumPageCount)
3713 ReturnToSystem(GetOldestPage());
3717 // allow already donated pages...
3718 __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
3722 TBool DemandPaging::ReclaimRamCachePage(SPageInfo* aPageInfo)
3724 SPageInfo::TType type = aPageInfo->Type();
3725 if(type==SPageInfo::EChunk)
3726 return ETrue; // page already reclaimed
3728 __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
3730 if(!iNumberOfFreePages)
3732 --iNumberOfFreePages;
3734 RemovePage(aPageInfo);
3735 aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal);
3737 // Update ram allocator counts as this page has changed its type
3738 DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
3739 iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
3744 SPageInfo* DemandPaging::AllocateNewPage()
3746 __ASSERT_SYSTEM_LOCK
3747 SPageInfo* pageInfo;
3749 NKern::UnlockSystem();
3751 NKern::LockSystem();
3753 // Try getting a free page from our active page list
3756 pageInfo = SPageInfo::FromLink(iOldList.Last());
3757 if(pageInfo->Type()==SPageInfo::EPagedFree)
3759 pageInfo = GetOldestPage();
3764 // Try getting a free page from the system pool
3765 if(iMinimumPageCount+iNumberOfFreePages<iMaximumPageCount)
3767 NKern::UnlockSystem();
3768 pageInfo = GetPageFromSystem();
3769 NKern::LockSystem();
3774 // As a last resort, steal one from our list of active pages
3775 pageInfo = GetOldestPage();
3778 NKern::UnlockSystem();
3780 NKern::LockSystem();
3785 void DemandPaging::Rejuvenate(SPageInfo* aPageInfo)
3787 SPageInfo::TState state = aPageInfo->State();
3788 if(state==SPageInfo::EStatePagedOld)
3790 // move page from old list to head of young list...
3791 __NK_ASSERT_DEBUG(iOldCount);
3792 aPageInfo->iLink.Deque();
3794 AddAsYoungest(aPageInfo);
3797 else if(state==SPageInfo::EStatePagedYoung)
3799 // page was already young, move it to the start of the list (make it the youngest)
3800 aPageInfo->iLink.Deque();
3801 iYoungList.AddHead(&aPageInfo->iLink);
3805 // leave locked pages alone
3806 __NK_ASSERT_DEBUG(state==SPageInfo::EStatePagedLocked);
3811 TInt DemandPaging::CheckRealtimeThreadFault(DThread* aThread, TAny* aContext)
3814 DThread* client = aThread->iIpcClient;
3816 // If iIpcClient is set then we are accessing the address space of a remote thread. If we are
3817 // in an IPC trap, this will contain information about the local and remote addresses being accessed.
3818 // If this is not set then we assume that any fault must be the fault of a bad remote address.
3819 TIpcExcTrap* ipcTrap = (TIpcExcTrap*)aThread->iExcTrap;
3820 if (ipcTrap && !ipcTrap->IsTIpcExcTrap())
3822 if (client && (!ipcTrap || ipcTrap->ExcLocation(aThread, aContext) == TIpcExcTrap::EExcRemote))
3824 // Kill client thread...
3825 NKern::UnlockSystem();
3826 if(K::IllegalFunctionForRealtimeThread(client,"Access to Paged Memory (by other thread)"))
3828 // Treat memory access as bad...
3831 // else thread is in 'warning only' state so allow paging
3835 // Kill current thread...
3836 NKern::UnlockSystem();
3837 if(K::IllegalFunctionForRealtimeThread(NULL,"Access to Paged Memory"))
3839 // If current thread is in critical section, then the above kill will be deferred
3840 // and we will continue executing. We will handle this by returning an error
3841 // which means that the thread will take an exception (which hopefully is XTRAPed!)
3844 // else thread is in 'warning only' state so allow paging
3847 NKern::LockSystem();
3852 TInt DemandPaging::ResizeLiveList(TUint aMinimumPageCount,TUint aMaximumPageCount)
3854 if(!aMaximumPageCount)
3856 aMinimumPageCount = iInitMinimumPageCount;
3857 aMaximumPageCount = iInitMaximumPageCount;
3860 // Min must not be greater than max...
3861 if(aMinimumPageCount>aMaximumPageCount)
3862 return KErrArgument;
3864 NKern::ThreadEnterCS();
3867 NKern::LockSystem();
3869 // Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
3870 iMinimumPageLimit = ((KMinYoungPages + iNextPagingRequestCount) * (1 + iYoungOldRatio)) / iYoungOldRatio;
3871 if(iMinimumPageLimit<KAbsoluteMinPageCount)
3872 iMinimumPageLimit = KAbsoluteMinPageCount;
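// (Derivation of the limit above: with a young:old ratio of R the live list keeps roughly R
// young pages per old page, so guaranteeing Y = KMinYoungPages + iNextPagingRequestCount
// young pages requires Y + Y/R = Y*(1+R)/R pages in total; KAbsoluteMinPageCount is then
// applied as a floor before the reserve pages are added below.)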
3873 if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount)
3874 aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
3875 if(aMaximumPageCount<aMinimumPageCount)
3876 aMaximumPageCount=aMinimumPageCount;
3878 // Increase iMaximumPageCount?
3879 TInt extra = aMaximumPageCount-iMaximumPageCount;
3881 iMaximumPageCount += extra;
3883 // Reduce iMinimumPageCount?
3884 TInt spare = iMinimumPageCount-aMinimumPageCount;
3887 iMinimumPageCount -= spare;
3888 iNumberOfFreePages += spare;
3891 // Increase iMinimumPageCount?
3893 while(aMinimumPageCount>iMinimumPageCount)
3895 if(iNumberOfFreePages==0) // Need more pages?
3897 // get a page from the system
3898 NKern::UnlockSystem();
3899 SPageInfo* pageInfo = GetPageFromSystem();
3900 NKern::LockSystem();
3906 AddAsFreePage(pageInfo);
3908 ++iMinimumPageCount;
3909 --iNumberOfFreePages;
3910 NKern::FlashSystem();
3913 // Reduce iMaximumPageCount?
3914 while(iMaximumPageCount>aMaximumPageCount)
3916 if (iMinimumPageCount+iNumberOfFreePages==iMaximumPageCount) // Need to free pages?
3918 ReturnToSystem(GetOldestPage());
3920 --iMaximumPageCount;
3921 NKern::FlashSystem();
3924 #ifdef BTRACE_KERNEL_MEMORY
3925 BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,ThePager->iMinimumPageCount << KPageShift);
3928 __NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
3930 NKern::UnlockSystem();
3933 NKern::ThreadLeaveCS();
3939 TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
3941 DemandPaging* pager = DemandPaging::ThePager;
3944 case EVMHalFlushCache:
3945 if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalFlushCache)")))
3946 K::UnlockedPlatformSecurityPanic();
3950 case EVMHalSetCacheSize:
3952 if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetCacheSize)")))
3953 K::UnlockedPlatformSecurityPanic();
3954 TUint min = (TUint)a1>>KPageShift;
3955 if((TUint)a1&KPageMask)
3957 TUint max = (TUint)a2>>KPageShift;
3958 if((TUint)a2&KPageMask)
3960 return pager->ResizeLiveList(min,max);
3963 case EVMHalGetCacheSize:
3966 NKern::LockSystem(); // lock system to ensure consistent set of values are read...
3967 info.iMinSize = pager->iMinimumPageCount<<KPageShift;
3968 info.iMaxSize = pager->iMaximumPageCount<<KPageShift;
3969 info.iCurrentSize = (pager->iMinimumPageCount+pager->iNumberOfFreePages)<<KPageShift;
3970 info.iMaxFreeSize = pager->iNumberOfFreePages<<KPageShift;
3971 NKern::UnlockSystem();
3972 kumemput32(a1,&info,sizeof(info));
3976 case EVMHalGetEventInfo:
3979 NKern::LockSystem(); // lock system to ensure consistent set of values are read...
3980 info = pager->iEventInfo;
3981 NKern::UnlockSystem();
3982 Kern::InfoCopy(*(TDes8*)a1,(TUint8*)&info,sizeof(info));
3986 case EVMHalResetEventInfo:
3987 NKern::LockSystem();
3988 memclr(&pager->iEventInfo, sizeof(pager->iEventInfo));
3989 NKern::UnlockSystem();
3992 #ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
3993 case EVMHalGetOriginalRomPages:
3994 *(TPhysAddr**)a1 = pager->iOriginalRomPages;
3995 *(TInt*)a2 = pager->iOriginalRomPageCount;
4000 return pager->PageState((TLinAddr)a1);
4002 #ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
4003 case EVMHalGetConcurrencyInfo:
4005 NKern::LockSystem();
4006 SPagingConcurrencyInfo info = { pager->iMaxWaitingCount, pager->iMaxPagingCount };
4007 NKern::UnlockSystem();
4008 kumemput32(a1,&info,sizeof(info));
4012 case EVMHalResetConcurrencyInfo:
4013 NKern::LockSystem();
4014 pager->iMaxWaitingCount = 0;
4015 pager->iMaxPagingCount = 0;
4016 NKern::UnlockSystem();
4020 #ifdef __DEMAND_PAGING_BENCHMARKS__
4021 case EVMHalGetPagingBenchmark:
4023 TUint index = (TInt) a1;
4024 if (index >= EMaxPagingBm)
4025 return KErrNotFound;
4026 NKern::LockSystem();
4027 SPagingBenchmarkInfo info = pager->iBenchmarkInfo[index];
4028 NKern::UnlockSystem();
4029 kumemput32(a2,&info,sizeof(info));
4033 case EVMHalResetPagingBenchmark:
4035 TUint index = (TInt) a1;
4036 if (index >= EMaxPagingBm)
4037 return KErrNotFound;
4038 NKern::LockSystem();
4039 pager->ResetBenchmarkData((TPagingBenchmark)index);
4040 NKern::UnlockSystem();
4046 return KErrNotSupported;
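// Illustrative user-side sketch of driving the VM HAL functions handled above. This is an
// assumption-laden example, not part of this file: it presumes the usual euser/HAL
// declarations (UserSvr::HalFunction, EHalGroupVM, SVMCacheInfo) are visible to the caller,
// and that the caller holds WriteDeviceData capability for the set operation. Sizes are given
// in bytes and converted to pages by the handler above.
#if 0
#include <e32svr.h>
#include <u32hal.h>

static TInt SetPagingCacheSize(TUint aMinBytes, TUint aMaxBytes)
	{
	// EVMHalSetCacheSize takes the minimum and maximum live list sizes in bytes
	return UserSvr::HalFunction(EHalGroupVM, EVMHalSetCacheSize, (TAny*)aMinBytes, (TAny*)aMaxBytes);
	}

static TInt GetPagingCacheInfo(SVMCacheInfo& aInfo)
	{
	// EVMHalGetCacheSize fills in the current min/max/current/free sizes
	return UserSvr::HalFunction(EHalGroupVM, EVMHalGetCacheSize, &aInfo, 0);
	}
#endif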
4050 void DemandPaging::Panic(TFault aFault)
4052 Kern::Fault("DEMAND-PAGING",aFault);
4056 DMutex* DemandPaging::CheckMutexOrder()
4059 SDblQue& ml = TheCurrentThread->iMutexList;
4062 DMutex* mm = _LOFF(ml.First(), DMutex, iOrderLink);
4063 if (KMutexOrdPageIn >= mm->iOrder)
4070 TBool DemandPaging::ReservePage()
4072 __ASSERT_SYSTEM_LOCK;
4075 NKern::UnlockSystem();
4077 NKern::LockSystem();
4079 __NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount);
4080 while (iMinimumPageCount == iMinimumPageLimit + iReservePageCount &&
4081 iNumberOfFreePages == 0)
4083 NKern::UnlockSystem();
4084 SPageInfo* pageInfo = GetPageFromSystem();
4088 NKern::LockSystem();
4091 NKern::LockSystem();
4092 AddAsFreePage(pageInfo);
4094 if (iMinimumPageCount == iMinimumPageLimit + iReservePageCount)
4096 ++iMinimumPageCount;
4097 --iNumberOfFreePages;
4098 if (iMinimumPageCount > iMaximumPageCount)
4099 iMaximumPageCount = iMinimumPageCount;
4101 ++iReservePageCount;
4102 __NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount);
4103 __NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
4105 NKern::UnlockSystem();
4107 NKern::LockSystem();
4112 TInt DemandPaging::LockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess)
4114 __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion(%08x,%x)",aStart,aSize));
4115 NKern::ThreadEnterCS();
4117 // calculate the number of pages required to lock aSize bytes
4118 TUint32 mask=KPageMask;
4119 TUint32 offset=aStart&mask;
4120 TInt numPages = (aSize+offset+mask)>>KPageShift;
4124 TLinAddr page = aStart;
4126 NKern::LockSystem();
4127 while(--numPages>=0)
4132 r = LockPage(page,aProcess,phys);
4133 NKern::FlashSystem();
4139 NKern::UnlockSystem();
4141 // If error, unlock whatever we managed to lock...
4144 while((page-=KPageSize)>=aStart)
4146 NKern::LockSystem();
4147 UnlockPage(aStart,aProcess,KPhysAddrInvalid);
4148 --iReservePageCount;
4149 NKern::UnlockSystem();
4153 NKern::ThreadLeaveCS();
4154 __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion returns %d",r));
4159 TInt DemandPaging::UnlockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess)
4161 __KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockRegion(%08x,%x)",aStart,aSize));
4162 TUint32 mask=KPageMask;
4163 TUint32 offset=aStart&mask;
4164 TInt numPages = (aSize+offset+mask)>>KPageShift;
4165 NKern::LockSystem();
4166 __NK_ASSERT_DEBUG(iReservePageCount >= (TUint)numPages);
4167 while(--numPages>=0)
4169 UnlockPage(aStart,aProcess,KPhysAddrInvalid);
4170 --iReservePageCount;
4171 NKern::FlashSystem();
4172 aStart += KPageSize;
4174 NKern::UnlockSystem();
4179 void DemandPaging::FlushAll()
4181 NKern::ThreadEnterCS();
4183 // look at all RAM pages in the system, and unmap all those used for paging
4184 const TUint32* piMap = (TUint32*)KPageInfoMap;
4185 const TUint32* piMapEnd = piMap+(KNumPageInfoPages>>5);
4186 SPageInfo* pi = (SPageInfo*)KPageInfoLinearBase;
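// (KPageInfoMap is treated as a presence bitmap in the loop below: each bit covers one page's
// worth of SPageInfo entries, and groups whose bit is clear are skipped wholesale by advancing
// pi by KPageInfosPerPage without examining them.)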
4187 NKern::LockSystem();
4190 SPageInfo* piNext = pi+(KPageInfosPerPage<<5);
4191 for(TUint32 piFlags=*piMap++; piFlags; piFlags>>=1)
4195 pi += KPageInfosPerPage;
4198 SPageInfo* piEnd = pi+KPageInfosPerPage;
4201 SPageInfo::TState state = pi->State();
4202 if(state==SPageInfo::EStatePagedYoung || state==SPageInfo::EStatePagedOld)
4207 NKern::FlashSystem();
4210 const TUint KFlashCount = 64; // flash every 64 page infos (must be a power-of-2)
4211 __ASSERT_COMPILE((TUint)KPageInfosPerPage >= KFlashCount);
4212 if(((TUint)pi&((KFlashCount-1)<<KPageInfoShift))==0)
4213 NKern::FlashSystem();
4219 while(piMap<piMapEnd);
4220 NKern::UnlockSystem();
4222 // reduce live page list to a minimum
4223 while(GetFreePages(1)) {};
4226 NKern::ThreadLeaveCS();
4230 TInt DemandPaging::LockPage(TLinAddr aPage, DProcess *aProcess, TPhysAddr& aPhysAddr)
4232 __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockPage() %08x",aPage));
4233 __ASSERT_SYSTEM_LOCK
4235 aPhysAddr = KPhysAddrInvalid;
4237 TInt r = EnsurePagePresent(aPage,aProcess);
4239 return KErrArgument; // page doesn't exist
4241 // get info about page to be locked...
4242 TPhysAddr phys = LinearToPhysical(aPage,aProcess);
4244 __NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
4246 SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys);
4248 return KErrNotFound;
4251 SPageInfo::TType type = pageInfo->Type();
4252 if(type==SPageInfo::EShadow)
4254 // get the page which is being shadowed and lock that
4255 phys = (TPhysAddr)pageInfo->Owner();
4259 switch(pageInfo->State())
4261 case SPageInfo::EStatePagedLocked:
4262 // already locked, so just increment lock count...
4263 ++pageInfo->PagedLock();
4266 case SPageInfo::EStatePagedYoung:
4268 if(type!=SPageInfo::EPagedROM && type !=SPageInfo::EPagedCode)
4270 // not implemented yet
4271 __NK_ASSERT_ALWAYS(0);
4274 // remove page to be locked from live list...
4275 RemovePage(pageInfo);
4277 // change to locked state...
4278 pageInfo->SetState(SPageInfo::EStatePagedLocked);
4279 pageInfo->PagedLock() = 1; // Start with lock count of one
4281 // open reference on memory...
4282 if(type==SPageInfo::EPagedCode)
4284 DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner();
4285 if(codeSegMemory->Open()!=KErrNone)
4287 __NK_ASSERT_DEBUG(0);
4294 case SPageInfo::EStatePagedOld:
4295 // can't happen because we forced the page to be accessible earlier
4296 __NK_ASSERT_ALWAYS(0);
4300 return KErrNotFound;
4305 #ifdef BTRACE_PAGING
4306 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,phys,pageInfo->PagedLock());
4312 TInt DemandPaging::UnlockPage(TLinAddr aPage, DProcess* aProcess, TPhysAddr aPhysAddr)
4314 __KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockPage() %08x",aPage));
4315 __ASSERT_SYSTEM_LOCK;
4318 // Get info about page to be unlocked
4319 TPhysAddr phys = LinearToPhysical(aPage,aProcess);
4320 if(phys==KPhysAddrInvalid)
4323 if(phys==KPhysAddrInvalid)
4324 return KErrNotFound;
4327 SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys);
4329 return KErrNotFound;
4331 SPageInfo::TType type = pageInfo->Type();
4332 if(type==SPageInfo::EShadow)
4334 // Get the page which is being shadowed and unlock that
4335 phys = (TPhysAddr)pageInfo->Owner();
4339 __NK_ASSERT_DEBUG(phys==aPhysAddr || aPhysAddr==KPhysAddrInvalid);
4342 switch(pageInfo->State())
4344 case SPageInfo::EStatePagedLocked:
4345 #ifdef BTRACE_PAGING
4346 BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,phys,pageInfo->PagedLock());
4348 if(!(--pageInfo->PagedLock()))
4350 // get pointer to memory...
4351 DMemModelCodeSegMemory* codeSegMemory = 0;
4352 if(type==SPageInfo::EPagedCode)
4353 codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner();
4355 // put page back on live list...
4356 AddAsYoungest(pageInfo);
4359 // close reference on memory...
4362 NKern::UnlockSystem();
4363 codeSegMemory->Close();
4364 NKern::LockSystem();
4370 return KErrNotFound;
4378 TInt DemandPaging::ReserveAlloc(TInt aSize, DDemandPagingLock& aLock)
4380 __NK_ASSERT_DEBUG(aLock.iPages == NULL);
4382 // calculate the number of pages required to lock aSize bytes
4383 TInt numPages = ((aSize-1+KPageMask)>>KPageShift)+1;
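// (The extra page covers the worst-case alignment: an aSize-byte buffer that starts just
// before a page boundary spans one more page than aSize>>KPageShift would suggest.)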
4385 __KTRACE_OPT(KPAGING,Kern::Printf("DP: ReserveAlloc() pages %d",numPages));
4387 NKern::ThreadEnterCS();
4389 aLock.iPages = (TPhysAddr*)Kern::Alloc(numPages*sizeof(TPhysAddr));
4392 NKern::ThreadLeaveCS();
4393 return KErrNoMemory;
4397 NKern::LockSystem();
4399 // reserve pages, adding more if necessary
4400 while (aLock.iReservedPageCount < numPages)
4404 ++aLock.iReservedPageCount;
4407 NKern::UnlockSystem();
4410 TBool enoughPages = aLock.iReservedPageCount == numPages;
4414 NKern::ThreadLeaveCS();
4415 return enoughPages ? KErrNone : KErrNoMemory;
4420 void DemandPaging::ReserveFree(DDemandPagingLock& aLock)
4422 NKern::ThreadEnterCS();
4424 // make sure pages aren't still locked
4425 ReserveUnlock(aLock);
4427 NKern::LockSystem();
4428 __NK_ASSERT_DEBUG(iReservePageCount >= (TUint)aLock.iReservedPageCount);
4429 iReservePageCount -= aLock.iReservedPageCount;
4430 aLock.iReservedPageCount = 0;
4431 NKern::UnlockSystem();
4433 // free page array...
4434 Kern::Free(aLock.iPages);
4437 NKern::ThreadLeaveCS();
4442 TBool DemandPaging::ReserveLock(DThread* aThread, TLinAddr aStart,TInt aSize, DDemandPagingLock& aLock)
4444 if(aLock.iLockedPageCount)
4447 // calculate the number of pages that need to be locked...
4448 TUint32 mask=KPageMask;
4449 TUint32 offset=aStart&mask;
4450 TInt numPages = (aSize+offset+mask)>>KPageShift;
4451 if(numPages>aLock.iReservedPageCount)
4454 NKern::LockSystem();
4457 TBool locked = EFalse; // becomes true if any pages were locked
4458 DProcess* process = aThread->iOwningProcess;
4459 TLinAddr page=aStart;
4460 TInt count=numPages;
4461 TPhysAddr* physPages = aLock.iPages;
4464 if(LockPage(page,process,*physPages)==KErrNone)
4466 NKern::FlashSystem();
4471 // if any pages were locked, save the lock info...
4474 if(aLock.iLockedPageCount)
4476 aLock.iLockedStart = aStart;
4477 aLock.iLockedPageCount = numPages;
4478 aLock.iProcess = process;
4479 aLock.iProcess->Open();
4482 NKern::UnlockSystem();
4488 void DemandPaging::ReserveUnlock(DDemandPagingLock& aLock)
4490 NKern::ThreadEnterCS();
4492 DProcess* process = NULL;
4493 NKern::LockSystem();
4494 TInt numPages = aLock.iLockedPageCount;
4495 TLinAddr page = aLock.iLockedStart;
4496 TPhysAddr* physPages = aLock.iPages;
4497 while(--numPages>=0)
4499 UnlockPage(page, aLock.iProcess,*physPages);
4500 NKern::FlashSystem();
4504 process = aLock.iProcess;
4505 aLock.iProcess = NULL;
4506 aLock.iLockedPageCount = 0;
4507 NKern::UnlockSystem();
4509 process->Close(NULL);
4511 NKern::ThreadLeaveCS();
4515 Check whether the specified page can be discarded by the RAM cache.
4517 @param aPageInfo The page info of the page being queried.
4518 @return ETrue when the page can be discarded, EFalse otherwise.
4519 @pre System lock held.
4520 @post System lock held.
4522 TBool DemandPaging::IsPageDiscardable(SPageInfo& aPageInfo)
4525 SPageInfo::TState state = aPageInfo.State();
4526 return (state == SPageInfo::EStatePagedYoung || state == SPageInfo::EStatePagedOld);
4531 Discard the specified page.
4532 Should only be called on a page if a previous call to IsPageDiscardable()
4533 returned ETrue and the system lock hasn't been released between the calls.
4535 @param aPageInfo The page info of the page to be discarded
4536 @param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into.
4537 @param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached
4538 in preference ordering. EFalse otherwise.
4539 @return ETrue if the page could be discarded, EFalse otherwise.
4541 @pre System lock held.
4542 @post System lock held.
4544 TBool DemandPaging::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest)
4546 __ASSERT_SYSTEM_LOCK;
4547 // Ensure that we don't reduce the cache beyond its minimum.
4548 if (iNumberOfFreePages == 0)
4550 NKern::UnlockSystem();
4551 SPageInfo* newPage = GetPageFromSystem(aBlockedZoneId, aBlockRest);
4552 NKern::LockSystem();
4553 if (newPage == NULL)
4554 {// couldn't allocate a new page
4557 if (IsPageDiscardable(aPageInfo))
4558 {// page can still be discarded so use new page
4559 // and discard old one
4560 AddAsFreePage(newPage);
4561 RemovePage(&aPageInfo);
4562 SetFree(&aPageInfo);
4563 ReturnToSystem(&aPageInfo);
4568 {// page no longer discardable so no longer require new page
4569 ReturnToSystem(newPage);
4575 RemovePage(&aPageInfo);
4576 SetFree(&aPageInfo);
4577 ReturnToSystem(&aPageInfo);
4585 First stage in discarding a list of pages.
4587 Must ensure that the pages will still be discardable even if the system lock is released.
4588 To be used in conjunction with RamCacheBase::DoDiscardPages1().
4590 @param aPageList A NULL terminated list of the pages to be discarded
4591 @return KErrNone on success.
4593 @pre System lock held
4594 @post System lock held
4596 TInt DemandPaging::DoDiscardPages0(SPageInfo** aPageList)
4598 __ASSERT_SYSTEM_LOCK;
4600 SPageInfo* pageInfo;
4601 while((pageInfo = *aPageList++) != 0)
4603 RemovePage(pageInfo);
4610 Final stage in discarding a list of pages.
4611 Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0().
4613 @param aPageList A NULL terminated list of the pages to be discarded
4614 @return KErrNone on success.
4616 @pre System lock held
4617 @post System lock held
4619 TInt DemandPaging::DoDiscardPages1(SPageInfo** aPageList)
4621 __ASSERT_SYSTEM_LOCK;
4623 SPageInfo* pageInfo;
4624 while((pageInfo = *aPageList++)!=0)
4627 ReturnToSystem(pageInfo);
4634 TBool DemandPaging::MayBePaged(TLinAddr aStartAddr, TUint aLength)
4636 TLinAddr endAddr = aStartAddr + aLength;
4637 TBool rangeTouchesPagedRom =
4638 TUint(aStartAddr - iRomPagedLinearBase) < iRomSize ||
4639 TUint(endAddr - iRomPagedLinearBase) < iRomSize;
4640 TBool rangeTouchesCodeArea =
4641 TUint(aStartAddr - iCodeLinearBase) < iCodeSize ||
4642 TUint(endAddr - iCodeLinearBase) < iCodeSize;
4643 return rangeTouchesPagedRom || rangeTouchesCodeArea;
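// The comparisons above use the unsigned-subtraction range idiom: TUint(a - base) < size is a
// single test for base <= a < base+size, because addresses below base wrap to large unsigned
// values. A minimal standalone sketch of the same idiom (hypothetical helper, not used here):
#if 0
static inline TBool AddrInRange(TLinAddr aAddr, TLinAddr aBase, TUint aSize)
	{
	return TUint(aAddr - aBase) < aSize;	// wraps past aSize when aAddr < aBase
	}
#endif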
4647 #ifdef __DEMAND_PAGING_BENCHMARKS__
4649 void DemandPaging::ResetBenchmarkData(TPagingBenchmark aBm)
4651 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
4653 info.iTotalTime = 0;
4655 info.iMinTime = KMaxTInt;
4658 void DemandPaging::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
4660 SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
4662 #if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
4663 TInt64 elapsed = aEndTime - aStartTime;
4665 TInt64 elapsed = aStartTime - aEndTime;
4667 info.iTotalTime += elapsed;
4668 if (elapsed > info.iMaxTime)
4669 info.iMaxTime = elapsed;
4670 if (elapsed < info.iMinTime)
4671 info.iMinTime = elapsed;
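// (The #if above selects the subtraction order so that 'elapsed' is always positive in timer
// ticks: end minus start when the high resolution timer counts up or is absent, start minus
// end when it counts down.)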
4678 // DDemandPagingLock
4681 EXPORT_C DDemandPagingLock::DDemandPagingLock()
4682 : iThePager(DemandPaging::ThePager), iReservedPageCount(0), iLockedPageCount(0), iPages(0)
4687 EXPORT_C TInt DDemandPagingLock::Alloc(TInt aSize)
4690 return iThePager->ReserveAlloc(aSize,*this);
4696 EXPORT_C void DDemandPagingLock::DoUnlock()
4699 iThePager->ReserveUnlock(*this);
4703 EXPORT_C void DDemandPagingLock::Free()
4706 iThePager->ReserveFree(*this);
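// Typical use of the DDemandPagingLock API above follows the pattern in M::PinVirtualMemory
// later in this file; a minimal sketch under that assumption (hypothetical helper name,
// error handling elided):
#if 0
TInt PinClientBuffer(DThread* aClient, TLinAddr aStart, TInt aSize, DDemandPagingLock& aLock)
	{
	TInt r = aLock.Alloc(aSize);			// reserve enough pages for the worst case
	if (r == KErrNone)
		aLock.Lock(aClient, aStart, aSize);	// fault in and lock the address range
	return r;
	// ...later, DoUnlock() releases the pages and Free() drops the reservation.
	}
#endif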
4710 EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
4712 if (DemandPaging::ThePager)
4713 return DemandPaging::ThePager->InstallPagingDevice(aDevice);
4715 return KErrNotSupported;
4719 #else // !__DEMAND_PAGING__
4721 EXPORT_C DDemandPagingLock::DDemandPagingLock()
4722 : iLockedPageCount(0)
4726 EXPORT_C TInt DDemandPagingLock::Alloc(TInt /*aSize*/)
4731 EXPORT_C TBool DDemandPagingLock::Lock(DThread* /*aThread*/, TLinAddr /*aStart*/, TInt /*aSize*/)
4736 EXPORT_C void DDemandPagingLock::DoUnlock()
4740 EXPORT_C void DDemandPagingLock::Free()
4744 EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
4746 return KErrNotSupported;
4749 #endif // __DEMAND_PAGING__
4752 DMmuCodeSegMemory::DMmuCodeSegMemory(DEpocCodeSeg* aCodeSeg)
4753 : DEpocCodeSegMemory(aCodeSeg), iCodeAllocBase(KMinTInt)
4757 //#define __DUMP_BLOCKMAP_INFO
4758 DMmuCodeSegMemory::~DMmuCodeSegMemory()
4760 #ifdef __DEMAND_PAGING__
4761 Kern::Free(iCodeRelocTable);
4762 Kern::Free(iCodePageOffsets);
4763 Kern::Free(iDataSectionMemory);
4767 #ifdef __DEMAND_PAGING__
4770 Read and process the block map and related data.
4772 TInt DMmuCodeSegMemory::ReadBlockMap(const TCodeSegCreateInfo& aInfo)
4774 __KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading block map for %C", iCodeSeg));
4776 if (aInfo.iCodeBlockMapEntriesSize <= 0)
4777 return KErrArgument; // no block map provided
4779 // Get compression data
4780 switch (aInfo.iCompressionType)
4782 case KFormatNotCompressed:
4783 iCompressionType = SRomPageInfo::ENoCompression;
4786 case KUidCompressionBytePair:
4788 iCompressionType = SRomPageInfo::EBytePair;
4789 if (!aInfo.iCodePageOffsets)
4790 return KErrArgument;
4791 TInt size = sizeof(TInt32) * (iPageCount + 1);
4792 iCodePageOffsets = (TInt32*)Kern::Alloc(size);
4793 if (!iCodePageOffsets)
4794 return KErrNoMemory;
4795 kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size);
4797 #ifdef __DUMP_BLOCKMAP_INFO
4798 Kern::Printf("CodePageOffsets:");
4799 for (TInt i = 0 ; i < iPageCount + 1 ; ++i)
4800 Kern::Printf(" %08x", iCodePageOffsets[i]);
4804 for (TInt j = 0 ; j < iPageCount + 1 ; ++j)
4806 if (iCodePageOffsets[j] < last ||
4807 iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile))
4809 __NK_ASSERT_DEBUG(0);
4812 last = iCodePageOffsets[j];
4818 return KErrNotSupported;
4821 // Copy block map data itself...
4823 #ifdef __DUMP_BLOCKMAP_INFO
4824 Kern::Printf("Original block map");
4825 Kern::Printf(" block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity);
4826 Kern::Printf(" block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset);
4827 Kern::Printf(" start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress);
4828 Kern::Printf(" local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber);
4829 Kern::Printf(" entry size: %d", aInfo.iCodeBlockMapEntriesSize);
4832 // Find relevant paging device
4833 iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber;
4834 if (TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives)
4836 __KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number"));
4837 return KErrArgument;
4839 DemandPaging* pager = DemandPaging::ThePager;
4841 if (!pager->CodePagingDevice(iCodeLocalDrive).iInstalled)
4843 __KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive"));
4844 return KErrNotSupported;
4846 DPagingDevice* device = pager->CodePagingDevice(iCodeLocalDrive).iDevice;
4848 // Set code start offset
4849 iCodeStartInFile = aInfo.iCodeStartInFile;
4850 if (iCodeStartInFile < 0)
4852 __KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset"));
4853 return KErrArgument;
4856 // Allocate buffer for block map and copy from user-side
4857 TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize);
4859 return KErrNoMemory;
4860 kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize);
4862 #ifdef __DUMP_BLOCKMAP_INFO
4863 Kern::Printf(" entries:");
4864 for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k)
4865 Kern::Printf(" %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock);
4868 // Initialise block map
4869 TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon,
4871 aInfo.iCodeBlockMapEntriesSize,
4872 device->iReadUnitShift,
4873 iCodeStartInFile + aInfo.iCodeLengthInFile);
4880 #if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG)
4888 Read code relocation table and import fixup table from user side.
4890 TInt DMmuCodeSegMemory::ReadFixupTables(const TCodeSegCreateInfo& aInfo)
4892 __KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading fixup tables for %C", iCodeSeg));
4894 iCodeRelocTableSize = aInfo.iCodeRelocTableSize;
4895 iImportFixupTableSize = aInfo.iImportFixupTableSize;
4896 iCodeDelta = aInfo.iCodeDelta;
4897 iDataDelta = aInfo.iDataDelta;
4899 // round sizes to four-byte boundaries...
4900 TInt relocSize = (iCodeRelocTableSize + 3) & ~3;
4901 TInt fixupSize = (iImportFixupTableSize + 3) & ~3;
4903 // copy relocs and fixups...
4904 iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize);
4905 if (!iCodeRelocTable)
4906 return KErrNoMemory;
4907 iImportFixupTable = iCodeRelocTable + relocSize;
4908 kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize);
4909 kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize);
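// (Both tables share the single Kern::Alloc cell above: the code relocation table occupies
// the first relocSize bytes and iImportFixupTable points just past it, so freeing
// iCodeRelocTable in the destructor releases the import fixup data as well.)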
4917 TInt DMmuCodeSegMemory::Create(TCodeSegCreateInfo& aInfo)
4920 if (!aInfo.iUseCodePaging)
4921 iPageCount=(iRamInfo.iCodeSize+iRamInfo.iDataSize+KPageMask)>>KPageShift;
4924 #ifdef __DEMAND_PAGING__
4925 iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize);
4926 if (!iDataSectionMemory)
4927 return KErrNoMemory;
4929 iPageCount=(iRamInfo.iCodeSize+KPageMask)>>KPageShift;
4930 iDataPageCount=(iRamInfo.iDataSize+KPageMask)>>KPageShift;
4932 r = ReadBlockMap(aInfo);
4936 iIsDemandPaged = ETrue;
4937 iCodeSeg->iAttr |= ECodeSegAttCodePaged;
4941 iCodeSeg->iSize = (iPageCount+iDataPageCount)<<KPageShift;
4946 TInt DMmuCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
4948 #ifdef __DEMAND_PAGING__
4951 TInt r = ReadFixupTables(aInfo);
4955 TAny* dataSection = iDataSectionMemory;
4958 UNLOCK_USER_MEMORY();
4959 memcpy(dataSection,(TAny*)iRamInfo.iDataLoadAddr,iRamInfo.iDataSize);
4961 iRamInfo.iDataLoadAddr = (TLinAddr)dataSection;
4968 void DMmuCodeSegMemory::ApplyCodeFixups(TUint32* aBuffer, TLinAddr aDestAddress)
4970 __NK_ASSERT_DEBUG(iRamInfo.iCodeRunAddr==iRamInfo.iCodeLoadAddr); // code doesn't work if this isn't true
4972 START_PAGING_BENCHMARK;
4974 TUint offset = aDestAddress - iRamInfo.iCodeRunAddr;
4975 __ASSERT_ALWAYS(offset < (TUint)(iRamInfo.iCodeSize + iRamInfo.iDataSize), K::Fault(K::ECodeSegBadFixupAddress));
4977 // Index tables are only valid for pages containing code
4978 if (offset >= (TUint)iRamInfo.iCodeSize)
4981 UNLOCK_USER_MEMORY();
4983 TInt page = offset >> KPageShift;
4987 if (iCodeRelocTableSize > 0)
4989 TUint32* codeRelocTable32 = (TUint32*)iCodeRelocTable;
4990 TUint startOffset = codeRelocTable32[page];
4991 TUint endOffset = codeRelocTable32[page + 1];
4993 __KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset));
4994 __ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iCodeRelocTableSize,
4995 K::Fault(K::ECodeSegBadFixupTables));
4997 TUint8* codeRelocTable8 = (TUint8*)codeRelocTable32;
4998 const TUint16* ptr = (const TUint16*)(codeRelocTable8 + startOffset);
4999 const TUint16* end = (const TUint16*)(codeRelocTable8 + endOffset);
5001 const TUint32 codeDelta = iCodeDelta;
5002 const TUint32 dataDelta = iDataDelta;
5006 TUint16 entry = *ptr++;
5008 // address of word to fix up is sum of page start and 12-bit offset
5009 TUint32* addr = (TUint32*)((TUint8*)aBuffer + (entry & 0x0fff));
5011 TUint32 word = *addr;
5013 TInt type = entry & 0xf000;
5014 __NK_ASSERT_DEBUG(type == KTextRelocType || type == KDataRelocType);
5016 if (entry < KDataRelocType /* => type == KTextRelocType */)
5026 if (iImportFixupTableSize > 0)
5028 TUint32* importFixupTable32 = (TUint32*)iImportFixupTable;
5029 TUint startOffset = importFixupTable32[page];
5030 TUint endOffset = importFixupTable32[page + 1];
5032 __KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset));
5033 __ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iImportFixupTableSize,
5034 K::Fault(K::ECodeSegBadFixupTables));
5036 TUint8* importFixupTable8 = (TUint8*)importFixupTable32;
5037 const TUint16* ptr = (const TUint16*)(importFixupTable8 + startOffset);
5038 const TUint16* end = (const TUint16*)(importFixupTable8 + endOffset);
5042 TUint16 offset = *ptr++;
5044 // get word to write into that address
5045 // (don't read as a single TUint32 because it may not be word-aligned)
5046 TUint32 wordLow = *ptr++;
5047 TUint32 wordHigh = *ptr++;
5048 TUint32 word = (wordHigh << 16) | wordLow;
5050 __KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word));
5051 *(TUint32*)((TLinAddr)aBuffer+offset) = word;
5057 END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmFixupCodePage);
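// A sketch of the relocation entry layout consumed above, following the masks used in the
// code (hypothetical decode helper, not part of this file): the top four bits of each 16-bit
// entry select which delta to apply and the low twelve bits give the offset of the word
// within the page.
#if 0
static inline void DecodeRelocEntry(TUint16 aEntry, TUint& aType, TUint& aOffsetInPage)
	{
	aType = aEntry & 0xf000;		// KTextRelocType (apply iCodeDelta) or KDataRelocType (apply iDataDelta)
	aOffsetInPage = aEntry & 0x0fff;	// byte offset of the word to fix up within the page
	}
#endif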
5061 TInt DMmuCodeSegMemory::ApplyCodeFixupsOnLoad(TUint32* aBuffer, TLinAddr aDestAddress)
5063 #ifdef __DEMAND_PAGING__
5064 TInt r=DemandPaging::ThePager->LockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess());
5068 ApplyCodeFixups(aBuffer,aDestAddress);
5069 UNLOCK_USER_MEMORY();
5070 CacheMaintenance::CodeChanged((TLinAddr)aBuffer, KPageSize);
5072 #ifdef __DEMAND_PAGING__
5073 DemandPaging::ThePager->UnlockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess());
5079 #ifdef __DEMAND_PAGING__
5081 TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
5083 aPinObject = (TVirtualPinObject*) new DDemandPagingLock;
5084 return aPinObject != NULL ? KErrNone : KErrNoMemory;
5087 TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
5089 if (!DemandPaging::ThePager)
5092 if (!DemandPaging::ThePager->MayBePaged(aStart, aSize))
5095 DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
5096 TInt r = lock->Alloc(aSize);
5099 lock->Lock(aThread, aStart, aSize);
5103 TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
5107 if (!DemandPaging::ThePager)
5109 if (!DemandPaging::ThePager->MayBePaged(aStart, aSize))
5112 TInt r = CreateVirtualPinObject(aPinObject);
5116 DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
5117 r = lock->Alloc(aSize);
5120 lock->Lock(TheCurrentThread, aStart, aSize);
5124 void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
5126 DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
5131 void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
5133 DDemandPagingLock* lock = (DDemandPagingLock*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
5135 lock->AsyncDelete();
5140 class TVirtualPinObject
5144 TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
5146 aPinObject = new TVirtualPinObject;
5147 return aPinObject != NULL ? KErrNone : KErrNoMemory;
5150 TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr, TUint, DThread*)
5152 __ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
5157 TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr, TUint)
5163 void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
5165 __ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
5169 void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
5171 TVirtualPinObject* object = (TVirtualPinObject*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
5173 Kern::AsyncFree(object);
5178 TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
5180 return KErrNotSupported;
5183 TInt M::PinPhysicalMemory(TPhysicalPinObject*, TLinAddr, TUint, TBool, TUint32&, TUint32*, TUint32&, TUint&, DThread*)
5185 K::Fault(K::EPhysicalPinObjectBad);
5189 void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
5191 K::Fault(K::EPhysicalPinObjectBad);
5194 void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
5196 K::Fault(K::EPhysicalPinObjectBad);
5201 // Kernel map and pin (Not supported on the moving or multiple memory models).
5204 TInt M::CreateKernelMapObject(TKernelMapObject*&, TUint)
5206 return KErrNotSupported;
5210 TInt M::MapAndPinMemory(TKernelMapObject*, DThread*, TLinAddr, TUint, TUint, TLinAddr&, TPhysAddr*)
5212 return KErrNotSupported;
5216 void M::UnmapAndUnpinMemory(TKernelMapObject*)
5221 void M::DestroyKernelMapObject(TKernelMapObject*&)
5226 // Misc DPagingDevice methods
5228 EXPORT_C void DPagingDevice::NotifyIdle()
5230 // Not used on this memory model
5233 EXPORT_C void DPagingDevice::NotifyBusy()
5235 // Not used on this memory model
5238 EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* , TUint , TUint , TUint , TUint32 )
5240 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
5241 return KErrNotSupported;
5244 EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* , TUint , TUint , TUint , TUint32 )
5246 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
5247 return KErrNotSupported;
5249 EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* , TUint , TUint , TUint , TUint32 )
5251 CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
5252 return KErrNotSupported;
5256 // Page moving methods
5260 * Move a page from aOld to aNew safely, updating any references to the page
5261 * stored elsewhere (such as page table entries). The destination page must
5262 * already be allocated. If the move is successful, the source page will be
5263 * freed and returned to the allocator.
5265 * @pre RAM alloc mutex must be held.
5266 * @pre Calling thread must be in a critical section.
5267 * @pre Interrupts must be enabled.
5268 * @pre Kernel must be unlocked.
5269 * @pre No fast mutex can be held.
5270 * @pre Call in a thread context.
5272 TInt MmuBase::MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
5274 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "MmuBase::MovePage");
5275 __ASSERT_WITH_MESSAGE_MUTEX(MmuBase::RamAllocatorMutex, "Ram allocator mutex must be held", "MmuBase::MovePage");
5276 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() old=%08x",aOld));
5277 TInt r = KErrNotSupported;
5278 #if defined(__CPU_X86) && defined(__MEMMODEL_MULTIPLE__)
5281 aNew = KPhysAddrInvalid;
5282 NKern::LockSystem();
5283 SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aOld);
5286 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page has no PageInfo"));
5290 if (pi->LockCount())
5292 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page is locked"));
5298 case SPageInfo::EUnused:
5299 // Nothing to do - we allow this, though, in case the caller wasn't
5300 // actually checking the free bitmap.
5302 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage(): page unused"));
5305 case SPageInfo::EChunk:
5307 // It's a chunk - we need to investigate what it's used for.
5308 DChunk* chunk = (DChunk*)pi->Owner();
5309 TInt offset = pi->Offset()<<KPageShift;
5311 switch(chunk->iChunkType)
5314 case EKernelMessage:
5315 // The kernel data/bss/heap chunk pages are not moved as DMA may be accessing them.
5316 __KTRACE_OPT(KMMU, Kern::Printf("MmuBase::MovePage() fails: kernel data"));
5320 // The kernel thread stack chunk.
5321 r = MoveKernelStackPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
5322 __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: k stack r%d",r));
5323 __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
5328 // The kernel code chunk, or a global user code chunk.
5329 r = MoveCodeChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
5330 __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: code chk r%d",r));
5331 __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
5337 case EUserSelfModCode:
5338 // A data chunk of some description.
5339 r = MoveDataChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
5340 __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: data chk r%d",r));
5341 __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
5344 case ESharedKernelSingle:
5345 case ESharedKernelMultiple:
5347 case ESharedKernelMirror:
5348 // These chunk types cannot be moved
5349 r = KErrNotSupported;
5350 __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: shared r%d",r));
5355 // Unknown page type, or EUserCode.
5356 // EUserCode is not used in moving model, and on multiple model
5357 // it never owns any pages so shouldn't be found via SPageInfo
5358 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: unknown chunk type %d",chunk->iChunkType));
5359 Panic(EDefragUnknownChunkType);
5364 case SPageInfo::ECodeSegMemory:
5365 // It's a code segment memory section (multiple model only)
5366 r = MoveCodeSegMemoryPage((DMemModelCodeSegMemory*)pi->Owner(), pi->Offset()<<KPageShift, aOld, aNew, aBlockZoneId, aBlockRest);
5367 __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: codeseg r%d",r));
5368 __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
5371 case SPageInfo::EPagedROM:
5372 case SPageInfo::EPagedCode:
5373 case SPageInfo::EPagedData:
5374 case SPageInfo::EPagedCache:
5375 case SPageInfo::EPagedFree:
5376 {// DP or RamCache page so attempt to discard it. Added for testing purposes only
5377 // In normal use ClearDiscardableFromZone will have already removed RAM cache pages
5379 MmuBase& mmu = *MmuBase::TheMmu;
5380 RamCacheBase& ramCache = *(mmu.iRamCache);
5381 if (ramCache.IsPageDiscardable(*pi))
5383 if (ramCache.DoDiscardPage(*pi, KRamZoneInvalidId, EFalse))
5384 {// Successfully discarded the page.
5388 __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: paged r%d",r));
5389 goto fail; // Goto fail to release the system lock.
5393 case SPageInfo::EPageTable:
5394 case SPageInfo::EPageDir:
5395 case SPageInfo::EPtInfo:
5396 case SPageInfo::EInvalid:
5397 case SPageInfo::EFixed:
5398 case SPageInfo::EShadow:
5399 // These page types cannot be moved (or don't need to be moved)
5400 r = KErrNotSupported;
5401 __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: PT etc r%d",r));
5405 // Unknown page type
5406 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: unknown page type %d",pi->Type()));
5407 Panic(EDefragUnknownPageType);
5411 NKern::UnlockSystem();
5413 __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() returns %d",r));
5418 TInt MmuBase::DiscardPage(TPhysAddr aAddr, TUint aBlockZoneId, TBool aBlockRest)
5421 NKern::LockSystem();
5422 SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
5423 if (pageInfo != NULL)
5424 {// Allocatable page at this address so is it a discardable one?
5425 if (iRamCache->IsPageDiscardable(*pageInfo))
5427 // Discard this page and return it to the ram allocator
5428 if (!iRamCache->DoDiscardPage(*pageInfo, aBlockZoneId, aBlockRest))
5429 {// Couldn't discard the page.
5432 __KTRACE_OPT(KMMU, Kern::Printf("MmuBase::DiscardPage: page discard fail addr %x", aAddr));
5433 NKern::UnlockSystem();
5434 return KErrNoMemory;
5438 {// Page discarded successfully.
5443 NKern::UnlockSystem();
5447 TUint MmuBase::NumberOfFreeDpPages()
5452 free = iRamCache->NumberOfFreePages();
5458 EXPORT_C TInt Epoc::MovePhysicalPage(TPhysAddr aOld, TPhysAddr& aNew, TRamDefragPageToMove aPageToMove)
5460 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::MovePhysicalPage");
5461 __KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() old=%08x pageToMove=%d",aOld,aPageToMove));
5465 case ERamDefragPage_Physical:
5468 return KErrNotSupported;
5472 TInt r=M::MovePage(aOld,aNew,KRamZoneInvalidId,EFalse);
5474 aNew = KPhysAddrInvalid;
5476 __KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() returns %d",r));
5481 TInt M::RamDefragFault(TAny* aExceptionInfo)
5483 // If the mmu has been initialised then let it try processing the fault.
5485 return MmuBase::TheMmu->RamDefragFault(aExceptionInfo);
5490 void M::RamZoneClaimed(SZone* aZone)
5492 // Lock each page. OK to traverse SPageInfo array as we know no unknown
5493 // pages are in the zone.
5494 SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aZone->iPhysBase);
5495 SPageInfo* pageInfoEnd = pageInfo + aZone->iPhysPages;
5496 for (; pageInfo < pageInfoEnd; ++pageInfo)
5498 NKern::LockSystem();
5499 __NK_ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EUnused);
5501 NKern::UnlockSystem();
5503 // For the sake of platform security we have to clear the memory. E.g. the driver
5504 // could assign it to a chunk visible to user side. Set LSB so ClearPages
5505 // knows this is a contiguous memory region.
5506 Mmu::Get().ClearPages(aZone->iPhysPages, (TPhysAddr*)(aZone->iPhysBase|1));