First public contribution.
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\include\memmodel\epoc\mmubase\mmubase.h
// WARNING: This file contains some APIs which are internal and are subject
// to change without notice. Such APIs should therefore not be used
// outside the Kernel and Hardware Services package.

#include <plat_priv.h>
#include <memmodel/epoc/mmubase/kblockmap.h>
/******************************************************************************
 * Definitions common to all MMU memory models
 ******************************************************************************/
    EInvalid=0,         // No physical RAM exists for this page
    EFixed=1,           // RAM fixed at boot time
    EUnused=2,          // Page is unused
    EChunk=3,           // iOwner=DChunk*          iOffset=index into chunk
    ECodeSegMemory=4,   // iOwner=DCodeSegMemory*  iOffset=index into CodeSeg memory (Multiple Memory Model only)
//  EHwChunk=5,         // Not used
    EPageTable=6,       // iOwner=0                iOffset=index into KPageTableBase
    EPageDir=7,         // iOwner=ASID             iOffset=index into Page Directory
    EPtInfo=8,          // iOwner=0                iOffset=index into KPageTableInfoBase
    EShadow=9,          // iOwner=phys ROM page    iOffset=index into ROM
    EPagedROM=10,       // iOwner=0                iOffset=index into ROM
    EPagedCode=11,      // iOwner=DCodeSegMemory*  iOffset=index into code chunk (not offset into CodeSeg!)
    EPagedData=12,      // NOT YET SUPPORTED
    EPagedCache=13,     // iOwner=DChunk*          iOffset=index into chunk
    EPagedFree=14,      // In demand paging 'live list' but not used for any purpose
    EStateNormal = 0,       // no special state
    EStatePagedYoung = 1,   // demand paged and is on the young list
    EStatePagedOld = 2,     // demand paged and is on the old list
    EStatePagedDead = 3,    // demand paged and is currently being modified
    EStatePagedLocked = 4   // demand paged but is temporarily not being demand paged
    return (TState)iState;

inline TUint32 Offset()
inline TInt LockCount()

/** Return the index of the zone the page is in. */

void Set(TType aType, TAny* aOwner, TUint32 aOffset);
void Change(TType aType,TState aState);
void SetState(TState aState);
void SetModifier(TAny* aModifier);
TInt CheckModified(TAny* aModifier);
void SetZone(TUint8 aZoneIndex);
inline void Set(TType aType, TAny* aOwner, TUint32 aOffset)
    (TUint16&)iType = aType;    // also sets iState to EStateNormal
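    // NOTE: writing the type through a TUint16 reference relies on iType and iState
    // being adjacent single-byte fields and on EStateNormal being 0, so the same
    // store also resets the state (little-endian layout assumed).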
inline void Change(TType aType,TState aState)
inline void SetState(TState aState)
inline void SetModifier(TAny* aModifier)
    iModifier = aModifier;
inline TInt CheckModified(TAny* aModifier)
    return iModifier!=aModifier;
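/*
Illustrative sketch (not code from this header): SetModifier()/CheckModified()
implement an optimistic "was this page touched while I wasn't looking?" check.
A caller stamps the page with a unique local address before releasing the
system lock, then re-checks the stamp once the lock is re-acquired. The names
physAddr, marker and DoSomethingSlow() below are hypothetical.

    SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
    pi->SetModifier(&marker);       // stamp the page while the system lock is held
    NKern::UnlockSystem();
    DoSomethingSlow();              // placeholder for work done without the lock
    NKern::LockSystem();
    if (pi->CheckModified(&marker))
        {
        // another thread changed the page's type/state in the meantime;
        // the operation must be retried or abandoned
        }
*/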
inline void SetZone(TUint8 aZoneIndex)
inline void SetFixed()
inline void SetUnused()
    __NK_ASSERT_DEBUG(0 == LockCount());
    (TUint16&)iType = EUnused;      // also sets iState to zero
    // do not modify iOffset in this function because cache cleaning operations
    // rely on using this value
inline void SetChunk(TAny* aChunk, TUint32 aOffset)
    Set(EChunk,aChunk,aOffset);
inline void SetCodeSegMemory(TAny* aCodeSegMemory,TUint32 aOffset)
    Set(ECodeSegMemory,aCodeSegMemory,aOffset);
// inline void SetHwChunk(TAny* aChunk, TUint32 aOffset)
//     Set(EHwChunk,aChunk,aOffset);
inline void SetPageTable(TUint32 aId)
    Set(EPageTable,0,aId);
inline void SetPageDir(TUint32 aOsAsid, TInt aOffset)
    Set(EPageDir,(TAny*)aOsAsid,aOffset);
inline void SetPtInfo(TUint32 aOffset)
    Set(EPtInfo,0,aOffset);
inline void SetShadow(TPhysAddr aOrigPhys, TUint32 aOffset)
    Set(EShadow,(TAny*)aOrigPhys,aOffset);
inline void SetPagedROM(TUint32 aOffset)
    Set(EPagedROM,0,aOffset);
inline void SetPagedCode(TAny* aCodeSegMemory, TUint32 aOffset)
    Set(EPagedCode,aCodeSegMemory,aOffset);

inline static SPageInfo* FromLink(SDblQueLink* aLink)
    { return (SPageInfo*)((TInt)aLink-_FOFF(SPageInfo,iLink)); }

inline TUint& PagedLock()
    { return (TUint&)iLink.iPrev; }
/**
Return the SPageInfo for a given page of physical RAM.
*/
inline static SPageInfo* FromPhysAddr(TPhysAddr aAddress);

/**
Return the SPageInfo for a given page of physical RAM.
If the address has no associated SPageInfo, a null pointer is returned.
*/
static SPageInfo* SafeFromPhysAddr(TPhysAddr aAddress);

/**
Return the physical address of the RAM page with which this SPageInfo object is associated.
*/
inline TPhysAddr PhysAddr();
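/*
Illustrative sketch (not code from this header): translating a physical address
into its SPageInfo, and recovering an SPageInfo from an embedded queue link when
walking a paging list. physAddr and link are hypothetical; KPageMask is the
usual global "page size - 1" constant.

    SPageInfo* pi = SPageInfo::SafeFromPhysAddr(physAddr);
    if (!pi)
        return KErrArgument;                        // not RAM managed by the kernel
    __NK_ASSERT_DEBUG(pi->PhysAddr() == (physAddr & ~KPageMask));

    // recover the owning SPageInfo when iterating a list of iLink entries:
    SPageInfo* fromList = SPageInfo::FromLink(link);
*/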
TUint8 iType;           // enum TType
TUint8 iState;          // enum TState
TUint8 iZone;           // The index of the zone the page is in, for use by DRamAllocator
TAny* iOwner;           // owning object
TUint32 iOffset;        // page offset within owning object
TAny* iModifier;        // pointer to object currently manipulating page
TUint32 iLockCount;     // non-zero if page acquired by code outside of the kernel

SDblQueLink iLink;      // used for placing page into linked lists
/******************************************************************************
 * Per-page-table usage information (SPageTableInfo):
 *
 *   usage         pointer                offset
 *   -----         -------                ------
 *   chunk         chunk ptr (26 bits)    offset (12 bits)
 *   HW chunk      chunk ptr (26 bits)    offset (12 bits)
 *   global        -                      offset (12 bits)
 *   shadow page   -                      offset (12 bits)
 ******************************************************************************/
struct SPageTableInfo
    enum {EAttShift=6, EAttMask=0x3f};

    inline TInt Attribs()
        {return iAttPtr&EAttMask;}
    inline TUint32 Offset()
        {return iOffset;}
    inline TUint32 Ptr()        // accessor name assumed; returns the value packed above EAttShift
        {return iAttPtr>>EAttShift;}
    inline void SetUnused()
        {iCount=0; iOffset=0; iAttPtr=0;}
    inline void SetChunk(TUint32 aChunk, TUint32 aOffset)
        {iOffset=aOffset; iAttPtr=(aChunk<<EAttShift)|EChunk;}
//  inline void SetHwChunk(TUint32 aChunk, TUint32 aOffset)
//      {iOffset=aOffset; iAttPtr=(aChunk<<EAttShift)|EHwChunk;}
    inline void SetGlobal(TUint32 aOffset)
        {iOffset=aOffset; iAttPtr=EGlobal;}
    inline void SetShadow(TUint32 aOffset)
        {iCount=0; iOffset=aOffset; iAttPtr=EShadow;}
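/*
Worked example of the packing used above (illustrative; 0x00C80123 is just an
arbitrary 26-bit value). With EAttShift==6 and EAttMask==0x3f the low 6 bits of
iAttPtr hold the attribute and the remaining bits hold the value passed to
SetChunk(), while the offset lives in iOffset:

    SetChunk(0x00C80123, 0x5) leaves
        iOffset == 0x5
        iAttPtr == (0x00C80123 << 6) | EChunk == 0x320048C0 | EChunk
    so afterwards
        Attribs() == EChunk                   // iAttPtr & 0x3f
        iAttPtr >> EAttShift == 0x00C80123    // the stored pointer/value
*/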
/******************************************************************************
 * Allocators:
 *   PageTableAllocator        free page tables within allocated pages
 *   PageTableLinearAllocator  free linear addresses for page tables
 *   ASIDAllocator             free process slots
 *
 * Page directory linear address = PageDirBase + (ASID<<PageDirSizeShift)
 * Page table linear address     = PageTableBase + (PTID<<PageTableSizeShift)
 *
 * Page table cluster = no. of page tables in one page
 * Page table block   = no. of SPageTableInfo structures in one page
 * Page table group   = no. of page tables mapped by a single page table
 *
 * Local  = specific to one process
 * Shared = a subset of processes, but not necessarily all
 * Global = all processes
 ******************************************************************************/
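/*
Worked example of the address arithmetic above (illustrative only; the sizes are
assumptions for a typical ARM configuration with 4KB pages, 1KB second-level
page tables and a 16KB page directory, not values fixed by this header):

    page directory linear address for ASID 5, PageDirSizeShift==14:
        PageDirBase + (5 << 14) = PageDirBase + 0x14000

    page table linear address for PTID 7, PageTableSizeShift==10:
        PageTableBase + (7 << 10) = PageTableBase + 0x1C00

    with those sizes one 4KB page holds 4 page tables, so the page table
    cluster size is 4 (cluster shift 2).
*/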
/********************************************
 * Address range allocator
 ********************************************/
static TLinearSection* New(TLinAddr aBase, TLinAddr aEnd);
TBitMapAllocator iAllocator;    // bitmap of used PDE positions

/******************************************************************************
 * Base class for MMU memory-model implementations
 ******************************************************************************/
const TPhysAddr KRomPhysAddrInvalid=0xFFFFFFFFu;

const TUint16 KPageTableNotPresentId=0xFFFF;

const TInt KUnmapPagesTLBFlushDeferred=0x80000000;

const TInt KUnmapPagesCountMask=0xffff;
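/*
Illustrative decode sketch (an assumption based on the constant names, not
documented here): a result word is taken to pack a page count in its low 16
bits and to use the top bit to flag a deferred TLB flush. r is hypothetical.

    TInt pagesUnmapped = r & KUnmapPagesCountMask;
    TBool tlbFlushDeferred = (r & KUnmapPagesTLBFlushDeferred) != 0;
*/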
const TInt KMaxPages = 32;

typedef TUint32 TPde;
typedef TUint32 TPte;

class THwChunkAddressAllocator;
class DMemModelCodeSegMemory;
class DMemModelChunk;
    EAsyncFreePageStillInUse=0,
    EPtLinAllocCreateFailed=1,
    EPtAllocCreateFailed=2,
    EPageInfoCreateFailed=3,
    EAsyncFreeListCreateFailed=4,
    EPtBlockCountCreateFailed=5,
    EPtGroupCountCreateFailed=6,
    EInvalidPageTableAtBoot=7,
    ERamAllocMutexCreateFailed=8,
    EHwChunkMutexCreateFailed=9,
    ECreateKernelSectionFailed=10,
    ECreateHwChunkAllocFailed=11,
    EFreeHwChunkAddrInvalid=12,
    EFreeHwChunkIndexInvalid=13,
    EBadMappedPageAfterBoot=14,
    ERecoverRamDriveAllocPTIDFailed=15,
    EMapPageTableBadExpand=16,
    ERecoverRamDriveBadPageTable=17,
    ERecoverRamDriveBadPage=18,
    EBadFreePhysicalRam=19,
    EPageLockedTooManyTimes=20,
    EPageUnlockedTooManyTimes=21,
    EPageInfoSetWhenNotUnused=22,
    ERamCacheAllocFailed=23,
    EDefragAllocFailed=24,
    EDefragUnknownPageType=25,
    EDefragUnknownPageTableType=27,
    EDefragUnknownChunkType=28,
    EDefragStackAllocFailed=29,
    EDefragKernelChunkNoPageTable=30,
    EDefragProcessWrongPageDir=31,
TInt AllocPageTable();
TInt DoAllocPageTable(TPhysAddr& aPhysAddr);
TInt InitPageTableInfo(TInt aId);
TInt MapPageTable(TInt aId, TPhysAddr aPhysAddr, TBool aAllowExpand=ETrue);
void FreePageTable(TInt aId);
TBool DoFreePageTable(TInt aId);
TInt AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign=0);
TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign=0);
TInt AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList);
TInt ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList);
TInt FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize);
TInt FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList);
TInt ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize);
TInt GetPageTableId(TPhysAddr aPtPhys);
void MapRamPage(TLinAddr aAddr, TPhysAddr aPage, TPte aPtePerm);
void UnmapAndFree(TLinAddr aAddr, TInt aNumPages);
void FreePages(TPhysAddr* aPageList, TInt aCount, TZonePageType aPageType);
void CreateKernelSection(TLinAddr aEnd, TInt aHwChunkAlign);
TInt AllocateAllPageTables(TLinAddr aLinAddr, TInt aSize, TPde aPdePerm, TInt aMapShift, SPageTableInfo::TAttribs aAttrib);
TInt AllocShadowPage(TLinAddr aRomAddr);
TInt FreeShadowPage(TLinAddr aRomAddr);
TInt FreezeShadowPage(TLinAddr aRomAddr);
TInt FreeRamInBytes();
TInt GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData);
TInt ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask);

// RAM allocator and defrag interfaces.
void RamAllocUnlock();
TUint NumberOfFreeDpPages();
TInt MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest);
TInt DiscardPage(TPhysAddr aAddr, TUint aBlockZoneId, TBool aBlockRest);
virtual void Init1();
virtual void Init2();
virtual void Init3();
virtual THwChunkAddressAllocator* MappingRegion(TUint aMapAttr);
virtual TInt RecoverRamDrive();
virtual TInt CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength);

// cpu dependent page moving method - cutils.cia
TInt MoveKernelStackPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest);

virtual void DoInit2()=0;
virtual TBool PteIsPresent(TPte aPte)=0;
virtual TPhysAddr PtePhysAddr(TPte aPte, TInt aPteIndex)=0;
virtual TPhysAddr PdePhysAddr(TLinAddr aAddr)=0;
virtual void SetupInitialPageInfo(SPageInfo* aPageInfo, TLinAddr aChunkAddr, TInt aPdeIndex)=0;
virtual void SetupInitialPageTableInfo(TInt aId, TLinAddr aChunkAddr, TInt aNumPtes)=0;
virtual void AssignPageTable(TInt aId, TInt aUsage, TAny* aObject, TLinAddr aAddr, TPde aPdePerm)=0;
virtual TInt UnassignPageTable(TLinAddr aAddr)=0;
virtual void BootstrapPageTable(TInt aXptId, TPhysAddr aXptPhys, TInt aId, TPhysAddr aPhysAddr)=0;
virtual void FixupXPageTable(TInt aId, TLinAddr aTempMap, TPhysAddr aOld, TPhysAddr aNew)=0;
virtual TInt PageTableId(TLinAddr aAddr)=0;
virtual TInt BootPageTableId(TLinAddr aAddr, TPhysAddr& aPtPhys)=0;
virtual void ClearPageTable(TInt aId, TInt aFirstIndex=0)=0;
virtual TPhysAddr LinearToPhysical(TLinAddr aAddr)=0;
virtual TInt LinearToPhysical(TLinAddr aAddr, TInt aSize, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList=NULL)=0;
virtual void MapRamPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, const TPhysAddr* aPageList, TInt aNumPages, TPte aPtePerm)=0;
virtual void MapPhysicalPages(TInt aId, SPageInfo::TType aType, TAny* aPtr, TUint32 aOffset, TPhysAddr aPhysAddr, TInt aNumPages, TPte aPtePerm)=0;
virtual void RemapPage(TInt aId, TUint32 aAddr, TPhysAddr aOldAddr, TPhysAddr aNewAddr, TPte aPtePerm, DProcess* aProcess)=0;
virtual TInt UnmapPages(TInt aId, TUint32 aAddr, TInt aNumPages, TPhysAddr* aPageList, TBool aSetPagesFree, TInt& aNumPtes, TInt& aNumFree, DProcess* aProcess)=0;
virtual void ClearRamDrive(TLinAddr aStart)=0;
virtual TInt PdePtePermissions(TUint& aMapAttr, TPde& aPde, TPte& aPte)=0;
virtual void Map(TLinAddr aLinAddr, TPhysAddr aPhysAddr, TInt aSize, TPde aPdePerm, TPte aPtePerm, TInt aMapShift)=0;
virtual void Unmap(TLinAddr aLinAddr, TInt aSize)=0;
virtual void InitShadowPageTable(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)=0;
virtual void InitShadowPage(TPhysAddr aShadowPhys, TLinAddr aRomAddr)=0;
virtual void DoUnmapShadowPage(TInt aId, TLinAddr aRomAddr, TPhysAddr aOrigPhys)=0;
virtual TInt UnassignShadowPageTable(TLinAddr aRomAddr, TPhysAddr aOrigPhys)=0;
virtual void DoFreezeShadowPage(TInt aId, TLinAddr aRomAddr)=0;
virtual void FlushShadow(TLinAddr aRomAddr)=0;
virtual void AssignShadowPageTable(TInt aId, TLinAddr aRomAddr)=0;
virtual void ClearPages(TInt aNumPages, TPhysAddr* aPageList, TUint8 aClearByte = KChunkClearByteDefault)=0;
virtual void Pagify(TInt aId, TLinAddr aLinAddr)=0;
virtual void CacheMaintenanceOnDecommit(const TPhysAddr* aPhysAdr, TInt aPageCount)=0;
virtual void CacheMaintenanceOnDecommit(const TPhysAddr aPhysAdr)=0;
virtual void CacheMaintenanceOnPreserve(const TPhysAddr* aPhysAdr, TInt aPageCount, TUint aMapAttr)=0;
virtual void CacheMaintenanceOnPreserve(const TPhysAddr aPhysAdr, TUint aMapAttr)=0;
virtual void CacheMaintenanceOnPreserve(TPhysAddr aPhysAddr, TInt aSize, TLinAddr aLinAddr, TUint iMapAttr)=0;
// memory model dependent page moving methods - mdefrag.cpp
virtual TInt MoveCodeSegMemoryPage(DMemModelCodeSegMemory* aCodeSegMemory, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)=0;
virtual TInt MoveCodeChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)=0;
virtual TInt MoveDataChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)=0;

// cpu and memory model dependent page moving methods - xmmu.cpp
virtual TInt RamDefragFault(TAny* aExceptionInfo)=0;
virtual void DisablePageModification(DMemModelChunk* aChunk, TInt aOffset)=0;
virtual TPte PtePermissions(TChunkType aChunkType)=0;

static TUint32 RoundToPageSize(TUint32 aSize);
static TUint32 RoundToChunkSize(TUint32 aSize);
static TInt RoundUpRangeToPageSize(TUint32& aBase, TUint32& aSize);
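/*
Minimal sketch of the rounding arithmetic conventionally behind the helpers
above (an assumption, not the actual implementation; aMask is the "size - 1"
mask, cf. iPageMask/iChunkMask below):

    inline TUint32 RoundUp(TUint32 aSize, TUint32 aMask)
        { return (aSize + aMask) & ~aMask; }    // e.g. 0x1001 -> 0x2000 for 4KB pages

RoundUpRangeToPageSize() presumably also rounds the base down to a page boundary
and grows the size so the adjusted range still covers the original one.
*/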
static void Signal();
static void WaitHwChunk();
static void SignalHwChunk();
static void Panic(TPanic aPanic);

inline TLinAddr PageTableLinAddr(TInt aId)
    {return iPageTableLinBase+(aId<<iPageTableShift);}
inline SPageTableInfo& PtInfo(TInt aId)
    {return iPtInfo[aId];}
inline TLinAddr PtInfoBlockLinAddr(TInt aBlock)
    {return (TLinAddr)iPtInfo+(aBlock<<iPageShift);}

/**
Get the page table info block number from a page table ID.
@param aId The ID of the page table.
@return The page table info block number.
*/
inline TInt PtInfoBlock(TInt aId)
    {return aId >> iPtBlockShift;}

/**
@return The PTE permissions used to map the page table info pages.
*/
inline TPte PtInfoPtePerm()
    {return iPtInfoPtePerm;}
TInt AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType, TUint aBlockedZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
TInt ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType);
TInt AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId=KRamZoneInvalidId, TBool aBlockRest=EFalse);
TInt ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign);
TInt iPageSize;             // page size in bytes
TInt iPageMask;             // page size - 1
TInt iPageShift;            // log2(page size)
TInt iChunkSize;            // size of the address range mapped by a single PDE ("chunk"), in bytes
TInt iChunkMask;            // chunk size - 1
TInt iChunkShift;           // log2(chunk size)
TInt iPageTableSize;        // 2nd level page table size in bytes
TInt iPageTableMask;        // 2nd level page table size - 1
TInt iPageTableShift;       // log2(2nd level page table size)
TInt iPtClusterSize;        // number of page tables per page
TInt iPtClusterMask;        // number of page tables per page - 1
TInt iPtClusterShift;       // log2(number of page tables per page)
TInt iPtBlockSize;          // number of SPageTableInfo per page
TInt iPtBlockMask;          // number of SPageTableInfo per page - 1
TInt iPtBlockShift;         // log2(number of SPageTableInfo per page)
TInt iPtGroupSize;          // number of page tables mapped by a page table
TInt iPtGroupMask;          // number of page tables mapped by a page table - 1
TInt iPtGroupShift;         // log2(number of page tables mapped by a page table)
TInt iMaxPageTables;        // maximum number of page tables (<65536)
TInt* iPtBlockCount;        // number of page table pages in each block
TInt* iPtGroupCount;        // number of page table pages in each group
TInt iNumPages;             // number of pages being managed
SPageTableInfo* iPtInfo;    // per-page-table information array
TLinAddr iPageTableLinBase; // base address of page tables
DRamAllocator* iRamPageAllocator;
TBitMapAllocator* iPageTableAllocator;          // NULL if page table size = page size
TBitMapAllocator* iPageTableLinearAllocator;
TInt iInitialFreeMemory;
TPte* iTempPte;             // PTE used for temporary mappings
TLinAddr iTempAddr;         // address corresponding to iTempPte
TLinearSection* iKernelSection;                 // bitmap used to allocate kernel section addresses
THwChunkAddressAllocator* iHwChunkAllocator;    // address allocator for HW chunks in kernel section
TUint32 iMapSizes;          // bit mask of supported mapping sizes
TUint iDecommitThreshold;   // threshold for selective/global cache flush on decommit for VIPT caches
TLinAddr iRomLinearBase;
TLinAddr iRomLinearEnd;
// Page moving and defrag fault handling members.
TLinAddr iAltStackBase;
TLinAddr iDisabledAddr;
TInt iDisabledAddrAsid;
TPte iDisabledOldVal;

RamCacheBase* iRamCache;

static DMutex* HwChunkMutex;        // mutex protecting HW chunk address allocators
static DMutex* RamAllocatorMutex;   // the main mutex protecting alloc/dealloc and most map/unmap
static MmuBase* TheMmu;             // pointer to the single instance of this class
static const SRamZone* RamZoneConfig;       /**< Pointer to the variant-specified array containing details of the RAM banks and their allocation preferences */
static TRamZoneCallback RamZoneCallback;    /**< Pointer to the callback function to be invoked when the RAM power state changes */

friend class Monitor;
/******************************************************************************
 * Address allocator for HW chunks
 ******************************************************************************/
inline THwChunkRegion(TInt aIndex, TInt aSize, TPde aPdePerm)
    : iIndex((TUint16)aIndex), iRegionSize((TUint16)aSize), iPdePerm(aPdePerm)

TUint16 iIndex;             // index of base of this region in linear section
TUint16 iRegionSize;        // number of PDEs covered; 0 means page table
TPde iPdePerm;              // PDE permissions for this region
THwChunkRegion* iNext;      // used during deallocation
class THwChunkPageTable : public THwChunkRegion
THwChunkPageTable(TInt aIndex, TInt aSize, TPde aPdePerm);
static THwChunkPageTable* New(TInt aIndex, TPde aPdePerm);
TBitMapAllocator iAllocator;    // bitmap of used page positions

class THwChunkAddressAllocator : public RPointerArray<THwChunkRegion>
static THwChunkAddressAllocator* New(TInt aAlign, TLinearSection* aSection);
TLinAddr Alloc(TInt aSize, TInt aAlign, TInt aOffset, TPde aPdePerm);
THwChunkRegion* Free(TLinAddr aAddr, TInt aSize);
THwChunkAddressAllocator();
TLinAddr SearchExisting(TInt aNumPages, TInt aPageAlign, TInt aPageOffset, TPde aPdePerm);
void Discard(THwChunkRegion* aRegion);
static TInt Order(const THwChunkRegion& a1, const THwChunkRegion& a2);
THwChunkRegion* NewRegion(TInt aIndex, TInt aSize, TPde aPdePerm);
THwChunkPageTable* NewPageTable(TInt aIndex, TPde aPdePerm, TInt aInitB, TInt aInitC);
TInt iAlign;                // alignment required for allocated addresses
TLinearSection* iSection;   // linear section in which allocation occurs
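/*
Illustrative usage sketch (an assumption, not code from this header; a null
return from Alloc() is assumed to indicate failure, and allocator, size, align
and pdePerm are hypothetical):

    TLinAddr addr = allocator->Alloc(size, align, 0, pdePerm);
    if (!addr)
        return KErrNoMemory;
    // ... map the hardware memory at addr ...
    THwChunkRegion* regions = allocator->Free(addr, size);
    // ... unmap, then discard the region descriptors returned by Free() ...
*/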
class DMemModelChunkHw : public DPlatChunkHw
virtual TInt Close(TAny* aPtr);
TInt AllocateLinearAddress(TPde aPdePerm);
void DeallocateLinearAddress();
THwChunkAddressAllocator* iAllocator;

/******************************************************************************
 * MMU-specific code segment data
 ******************************************************************************/
class DMmuCodeSegMemory : public DEpocCodeSegMemory
DMmuCodeSegMemory(DEpocCodeSeg* aCodeSeg);
~DMmuCodeSegMemory();
virtual TInt Create(TCodeSegCreateInfo& aInfo);
virtual TInt Loaded(TCodeSegCreateInfo& aInfo);
/**
Apply code relocations and import fixups to one page of code.
@param aBuffer      The buffer containing the code.
@param aCodeAddress The address the page will be mapped at.
*/
void ApplyCodeFixups(TUint32* aBuffer, TLinAddr aCodeAddress);

/**
Apply code relocations and import fixups to one page of code.
Called by DMemModelCodeSegMemory::Loaded to fix up pages which are already paged in.
@param aBuffer      The buffer containing the code.
@param aCodeAddress The address the page will be mapped at.
*/
TInt ApplyCodeFixupsOnLoad(TUint32* aBuffer, TLinAddr aCodeAddress);

TInt ReadBlockMap(const TCodeSegCreateInfo& aInfo);
TInt ReadFixupTables(const TCodeSegCreateInfo& aInfo);
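/*
Illustrative outline (a sketch of how these members fit together when a
demand-paged code page is brought in; not code from this header, and the flow
is paraphrased):

    1. ReadBlockMap()/ReadFixupTables() are called while the code segment is
       being created/loaded, so iBlockMap, iCodeRelocTable and iImportFixupTable
       describe where the code lives and how to relocate it.
    2. When a paging fault hits a code address:
         - the (possibly compressed) page is read from iCodeLocalDrive using
           iBlockMap and iCodePageOffsets,
         - it is decompressed into a buffer according to iCompressionType,
         - ApplyCodeFixups(buffer, codeAddress) applies relocations and import
           fixups, and the page is then mapped at codeAddress.
*/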
TBool iIsDemandPaged;

TInt iPageCount;            // Number of pages used for code
TInt iDataPageCount;        // Number of extra pages used to store data section
TUint8* iCodeRelocTable;    // Code relocation information
TInt iCodeRelocTableSize;   // Size of code relocation table in bytes
TUint8* iImportFixupTable;  // Import fixup information
TInt iImportFixupTableSize; // Size of import fixup table in bytes
TUint32 iCodeDelta;         // Code relocation delta
TUint32 iDataDelta;         // Data relocation delta

TInt iCompressionType;      // Compression scheme in use
TInt32* iCodePageOffsets;   // Array of compressed page offsets within the file
TInt iCodeLocalDrive;       // Local drive number
TBlockMap iBlockMap;        // Kernel-side representation of block map
TInt iCodeStartInFile;      // Offset of (possibly compressed) code from start of file

TAny* iDataSectionMemory;   // pointer to saved copy of data section (when demand paging)