// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\mchunk.cpp
//
#include "cache_maintenance.h"
#include <mmubase.inl>

DMemModelChunk::DMemModelChunk()

void DMemModelChunk::Destruct()
__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
TInt nPdes=iMaxSize>>m.iChunkShift;
if (nPdes<=32 || iPdeBitMap!=NULL)
if ((iAttributes & EDisconnected) && iPageBitMap!=NULL)
else if (iAttributes & EDoubleEnded)
AdjustDoubleEnded(0,0);
if ((iAttributes&EFixedAddress) && iHomeRegionBase>=m.iKernelSection->iBase)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::~DMemModelChunk remove region"));
if (TLinAddr(iBase)==iHomeBase)
DeallocateHomeAddress(); // unlink from home section queue
if ((iMaxSize>>m.iChunkShift) > 32)
TAny* pM = __e32_atomic_swp_ord_ptr(&iPdeBitMap, 0);
TBitMapAllocator* pM = (TBitMapAllocator*)__e32_atomic_swp_ord_ptr(&iPageBitMap, 0);
pM = (TBitMapAllocator*)__e32_atomic_swp_ord_ptr(&iPermanentPageBitMap, 0);
TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
__KTRACE_OPT(KMEMTRACE, {Mmu::Wait(); Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);Mmu::Signal();});
BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);

TInt DMemModelChunk::Close(TAny* aPtr)
DMemModelProcess* pP=(DMemModelProcess*)aPtr;
pP->RemoveChunk(this);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this));
__NK_ASSERT_DEBUG(r > 0); // Should never be negative.
return EObjectDeleted;

TUint8* DMemModelChunk::Base(DProcess* aProcess)

TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo)
__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));
if (aInfo.iMaxSize<=0)
TInt nPdes=(aInfo.iMaxSize+m.iChunkMask)>>m.iChunkShift;
iMaxSize=nPdes<<m.iChunkShift;
iMapAttr = aInfo.iMapAttr;
TInt words=(nPdes+31)>>5;
iPdeBitMap=(TUint32*)Kern::Alloc(words*sizeof(TUint32));
memclr(iPdeBitMap, words*sizeof(TUint32));
TInt maxpages=iMaxSize>>m.iPageShift;
if (iAttributes & EDisconnected)
TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple)
TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
iPermanentPageBitMap = pM;
__KTRACE_OPT(KMEMTRACE, {Mmu::Wait();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);Mmu::Signal();});
BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
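
// --- Illustrative note (editorial addition, not part of the original source) ---
// DoCreate() sizes the heap-allocated PDE bitmap at one bit per page directory
// entry (PDE) spanned by the chunk, packed into 32-bit words. A sketch of the
// arithmetic, assuming the usual ARM geometry for this memory model
// (1MB per PDE, i.e. iChunkShift==20):
#if 0
TInt nPdes = (aInfo.iMaxSize + m.iChunkMask) >> m.iChunkShift; // e.g. 0x02500000 (37MB) -> 37 PDEs
TInt words = (nPdes + 31) >> 5;                                // 37 bits -> 2 TUint32 words (8 bytes)
#endif
// Chunks spanning no more than 32 PDEs appear to be tracked with a single 32-bit
// mask instead (note the "Pdes=%08x" trace output and the nPdes<=32 test in
// Destruct() above), so the heap allocation is only needed for larger chunks.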

void DMemModelChunk::ClaimInitialPages()
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ClaimInitialPages()",this));
TUint32 ccp=K::CompressKHeapPtr(this);
TInt ptid=m.GetPageTableId(TLinAddr(iBase)+offset);
__ASSERT_ALWAYS(ptid>=0,MM::Panic(MM::EClaimInitialPagesBadPageTable));
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID=%d",offset,ptid));
SPageTableInfo& ptinfo = m.PtInfo(ptid);
ptinfo.SetChunk(ccp,offset>>m.iChunkShift);
TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);
TInt flashCount = MM::MaxPagesInOneGo;
for (i=0; i<m.iChunkSize>>m.iPageShift; ++i, offset+=m.iPageSize)
flashCount = MM::MaxPagesInOneGo;
NKern::FlashSystem();
if (m.PteIsPresent(pte))
TPhysAddr phys=m.PtePhysAddr(pte, i);
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x phys %08x",offset,phys));
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys);
pi->SetChunk(this,offset>>m.iPageShift);
#ifdef BTRACE_KERNEL_MEMORY
--Epoc::KernelMiscPages; // page now owned by chunk, and is not 'miscellaneous'
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID %d NP %d", offset, ptid, np));
NKern::UnlockSystem();
__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));

void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08X size %08X",this,aAddr,aInitialSize));
iHomeRegionBase=aAddr;
iBase=(TUint8*)aAddr;
iHomeRegionSize=iMaxSize;
iAttributes|=EFixedAddress;
iSize=Mmu::RoundToPageSize(aInitialSize);

TInt DMemModelChunk::Reserve(TInt aInitialSize)
// Reserve home section address space for a chunk
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O Reserve() size %08x",this,aInitialSize));
if (!K::Initialising)
iHomeRegionBase=AllocateHomeAddress(iMaxSize);
if (!K::Initialising)
iHomeBase=iHomeRegionBase;
iBase=(TUint8*)iHomeRegionBase;
if (iHomeRegionBase==0)
iSize=Mmu::RoundToPageSize(aInitialSize);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O address %08x",this,iHomeRegionBase));

TInt DMemModelChunk::Adjust(TInt aNewSize)
// Adjust a standard chunk.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
if (iAttributes & (EDoubleEnded|EDisconnected))
if (aNewSize<0 || aNewSize>iMaxSize)
TInt newSize=Mmu::RoundToPageSize(aNewSize);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing"));
r=DoCommit(iSize,newSize-iSize);
else if (newSize<iSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking"));
DoDecommit(newSize,iSize-newSize);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x base %08x home %08x",this,iSize,iBase,iHomeRegionBase));
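
// --- Illustrative note (editorial addition, not part of the original source) ---
// Adjust() works on the page-rounded size, newSize = Mmu::RoundToPageSize(aNewSize).
// With 4KB pages (the usual configuration), aNewSize = 0x2800 rounds up to
// newSize = 0x3000, so growing from iSize = 0x1000 commits [0x1000, 0x3000)
// and shrinking from iSize = 0x5000 decommits [0x3000, 0x5000).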

TInt DMemModelChunk::ExpandHomeRegion(TInt aOffset, TInt aSize)
// Ensure that the chunk's home region is big enough to accommodate extra RAM being committed
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ExpandHomeRegion(%x,%x)",this,aOffset,aSize));
TBool lowerLimitOk=(aOffset>=iHomeRegionOffset && aOffset<=iHomeRegionOffset+iHomeRegionSize);
TBool upperLimitOk=(aOffset+aSize>=iHomeRegionOffset && aOffset+aSize<=iHomeRegionOffset+iHomeRegionSize);
if (lowerLimitOk && upperLimitOk)
return KErrNone; // no change required
newLowerLimit=Min(iHomeRegionOffset,aOffset);
newUpperLimit=Max(iHomeRegionOffset+iHomeRegionSize,aOffset+aSize);
newLowerLimit=aOffset;
newUpperLimit=aOffset+aSize;
newLowerLimit &= ~m.iChunkMask;
newUpperLimit = (newUpperLimit+m.iChunkMask)&~m.iChunkMask;
TInt newHomeRegionSize=newUpperLimit-newLowerLimit;
__KTRACE_OPT(KMMU,Kern::Printf("newLowerLimit=%x, newUpperLimit=%x",newLowerLimit,newUpperLimit));
if (newHomeRegionSize>iMaxSize)
TLinAddr newHomeRegionBase;
if (iHomeRegionSize==0)
newHomeRegionBase=AllocateHomeAddress(newHomeRegionSize);
newHomeRegionBase=ReallocateHomeAddress(newHomeRegionSize);
__KTRACE_OPT(KMMU,Kern::Printf("newHomeRegionBase=%08x",newHomeRegionBase));
if (newHomeRegionBase==0)
TInt deltaOffset=iHomeRegionOffset-newLowerLimit;
TLinAddr newHomeBase=newHomeRegionBase-newLowerLimit;
TLinAddr translatedHomeBase=newHomeRegionBase+deltaOffset;
// lock the kernel while we change the chunk's home region
// Note: The new home region always contains the original home region, so
// if we reach here, it must be strictly larger.
if (iNumPdes && iHomeRegionBase!=translatedHomeBase)
TLinAddr oldBase=TLinAddr(iBase);
if (oldBase==iHomeBase)
// chunk is currently at home, so must move it
// Note: this operation must cope with overlapping initial and final regions
m.GenericFlush(Mmu::EFlushDMove); // preemption could occur here...
if (TLinAddr(iBase)==iHomeBase) // ...so need to check chunk is still at home address
m.MoveChunk(iHomeRegionBase,translatedHomeBase,iNumPdes);
iBase=(TUint8*)newHomeBase;
MoveCurrentPdes(iHomeRegionBase,translatedHomeBase);
MoveHomePdes(iHomeRegionBase,translatedHomeBase);
MoveHomePdes(iHomeRegionBase,translatedHomeBase);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::ExpandHomeRegion moved home base from %08x to %08x",
iHomeRegionBase,newHomeRegionBase));
iBase=(TUint8*)newHomeBase;
iHomeRegionBase=newHomeRegionBase;
iHomeRegionOffset=newLowerLimit;
iHomeBase=newHomeBase;
__KTRACE_OPT(KMMU,Kern::Printf("Final iHomeRegionBase=%08x, iHomeRegionOffset=%08x",iHomeRegionBase,iHomeRegionOffset));
__KTRACE_OPT(KMMU,Kern::Printf("Final iHomeRegionSize=%08x, iBase=%08x, iHomeBase=%08x",iHomeRegionSize,iBase,iHomeBase));
__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
NKern::UnlockSystem();
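
// --- Illustrative note (editorial addition, not part of the original source) ---
// The new home region limits are rounded outwards to whole MMU chunks so the
// region can be described by whole PDEs. Assuming 1MB chunks (iChunkMask == 0xFFFFF),
// a request of aOffset = 0x180000, aSize = 0x240000 on an empty chunk gives:
//   newLowerLimit = 0x180000 & ~0xFFFFF             = 0x100000
//   newUpperLimit = (0x3C0000 + 0xFFFFF) & ~0xFFFFF = 0x400000
// i.e. a 3MB home region covering offsets 0x100000..0x3FFFFF.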

TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
if(!iPermanentPageBitMap)
return KErrAccessDenied;
if(TUint(aOffset)>=TUint(iMaxSize))
if(TUint(aOffset+aSize)>TUint(iMaxSize))
TInt pageShift = Mmu::Get().iPageShift;
TInt start = aOffset>>pageShift;
TInt size = ((aOffset+aSize-1)>>pageShift)-start+1;
if(iPermanentPageBitMap->NotAllocated(start,size))
aKernelAddress = (TLinAddr)iBase+aOffset;
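
// --- Illustrative note (editorial addition, not part of the original source) ---
// The permanent-mapping check covers every page the byte range touches. With 4KB
// pages (pageShift == 12), aOffset = 0x1800 and aSize = 0x2000 give:
//   start = 0x1800 >> 12                          = 1
//   size  = ((0x1800 + 0x2000 - 1) >> 12) - 1 + 1 = 3
// so pages 1..3 must all be marked allocated in iPermanentPageBitMap.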

TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
TInt r=Address(aOffset,aSize,aKernelAddress);
return Mmu::Get().LinearToPhysical(aKernelAddress,aSize,aPhysicalAddress,aPhysicalPageList);

void DMemModelChunk::Substitute(TInt aOffset, TPhysAddr aOldAddr, TPhysAddr aNewAddr)
// Substitute the page mapping at aOffset with aNewAddr.
// Called with the system lock held.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Substitute %x %08x %08x",aOffset,aOldAddr,aNewAddr));
TLinAddr addr=(TLinAddr)iBase+aOffset;
TInt ptid=m.GetPageTableId(addr);
MM::Panic(MM::EChunkRemapNoPageTable);
m.RemapPage(ptid, addr, aOldAddr, aNewAddr, iPtePermissions, iOwningProcess);
if(iChunkType==EKernelCode || iChunkType==EDll || iChunkType==EUserSelfModCode)
m.SyncCodeMappings();

/**
Get the movability type of the chunk's pages
@return How movable the chunk's pages are
*/
TZonePageType DMemModelChunk::GetPageType()
// Shared chunks have their physical addresses available
if (iChunkType == ESharedKernelSingle ||
iChunkType == ESharedKernelMultiple ||
iChunkType == ESharedIo ||
iChunkType == ESharedKernelMirror ||
iChunkType == EKernelMessage ||
iChunkType == EKernelData) // Don't move kernel heap pages as DMA may be accessing them.
// All other types of chunk are movable

TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
// Commit more RAM to a chunk at a specified offset
// enter and leave with system unlocked
// must hold RamAlloc mutex before calling this function
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
TInt endOffset=offset+aSize;
DRamAllocator& a = *m.iRamPageAllocator;
TPhysAddr pageList[KMaxPages];
TPhysAddr* pPageList=0;
TPhysAddr nextPage=0;
TUint32 ccp=K::CompressKHeapPtr(this);
SPageInfo::TType type = SPageInfo::EChunk;
if (iHomeRegionSize==0 || (iAttributes&EFixedAddress)==0)
r=ExpandHomeRegion(aOffset,aSize);
// Set flag to indicate if RAM should be cleared before being committed.
// Note: EDll and EUserCode are covered in the code segment, so as not to clear
// the region overwritten by the loader.
TBool clearRam = iChunkType==EUserData
|| iChunkType==EDllData
|| iChunkType==EUserSelfModCode
|| iChunkType==ESharedKernelSingle
|| iChunkType==ESharedKernelMultiple
|| iChunkType==ESharedIo
|| iChunkType==ERamDrive;
TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
TBool physicalCommit = aCommitType&DChunk::ECommitPhysicalMask;
return KErrNotSupported;
return KErrNotSupported;
type = SPageInfo::EInvalid; // to indicate page info not to be updated
case DChunk::ECommitDiscontiguous:
case DChunk::ECommitContiguous:
// Allocate a block of contiguous RAM from the free pool
TInt numPages=(endOffset-offset)>>m.iPageShift;
r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
m.ClearPages(numPages, (TPhysAddr*)(nextPage|1), iClearByte); // clear RAM if required
*aExtraArg = nextPage; // store physical address of RAM as return argument
case DChunk::ECommitDiscontiguousPhysical:
pPageList = aExtraArg; // use pages given to us
// Check that the addresses of the pages are multiples of the page size...
TInt numPages=(endOffset-offset)>>m.iPageShift;
TUint32* ptr = aExtraArg;
TUint32* endPtr = aExtraArg+numPages;
return KErrNone; // Zero size commit is OK
TPhysAddr pageBits = 0;
if(pageBits&(m.iPageSize-1))
return KErrArgument; // all addresses must be multiple of page size
case DChunk::ECommitContiguousPhysical:
nextPage = (TPhysAddr)aExtraArg; // we have been given the physical address to use
if(nextPage&(m.iPageSize-1))
return KErrArgument; // address must be multiple of page size
case DChunk::ECommitVirtual:
return KErrNotSupported;
// Commit memory a bit at a time (so the system lock only needs to be held for a limited time; see the note after this function)
while(offset<endOffset)
TInt np=(endOffset-offset)>>m.iPageShift; // pages remaining to satisfy request
TInt npEnd=(m.iChunkSize-(offset&m.iChunkMask))>>m.iPageShift; // number of pages to end of page table
np=npEnd; // limit to single page table
if (np>MM::MaxPagesInOneGo)
np=MM::MaxPagesInOneGo; // limit
NKern::LockSystem(); // lock the system while we look at the page directory
TLinAddr addr=(TLinAddr)iBase+offset; // current address
TInt ptid=m.GetPageTableId(addr); // get page table ID if a page table is already assigned here
NKern::UnlockSystem(); // we can now unlock the system
// need to allocate a new page table
newPtId=m.AllocPageTable();
// out of memory, so break out and revert
if(aCommitType==DChunk::ECommitDiscontiguous)
pPageList = pageList;
r=m.AllocRamPages(pPageList,np, GetPageType()); // try to allocate pages
break; // if we fail, break out and revert
m.ClearPages(np, pPageList, iClearByte); // clear RAM if required
// lock the system while we change the MMU mappings
TInt commitSize = np<<m.iPageShift;
iSize += commitSize; // update committed size
if (aCommitType==DChunk::ECommitVirtual)
m.MapVirtual(ptid, np);
m.MapRamPages(ptid, type, this, offset, pPageList, np, iPtePermissions);
m.MapPhysicalPages(ptid, type, this, offset, nextPage, np, iPtePermissions);
nextPage += commitSize;
NKern::UnlockSystem();
// We have allocated a new page table, now we must assign it and update PDE info
SPageTableInfo& pti=m.PtInfo(ptid);
pti.SetChunk(ccp, offset>>m.iChunkShift);
TLinAddr addr=(TLinAddr)iBase+offset; // current address
m.DoAssignPageTable(ptid, addr, iPdePermissions[iChunkState]);
AddPde(offset); // update PDE info
__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
NKern::UnlockSystem();
__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,offset,commitSize);
offset += commitSize; // update offset
if(iPermanentPageBitMap)
iPermanentPageBitMap->Alloc(aOffset>>m.iPageShift,aSize>>m.iPageShift);
// we ran out of memory somewhere
// first check if we have an unassigned page table
m.FreePageTable(newPtId); // free the unassigned page table
// now free any memory we succeeded in allocating and return the chunk to its initial state
DChunk::TDecommitType decommitType = aCommitType==DChunk::ECommitVirtual ?
DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
DoDecommit(aOffset,offset-aOffset,decommitType);
if(aCommitType==DChunk::ECommitContiguous)
// Free the pages we allocated but didn't get around to committing
TPhysAddr last = nextPage + ((endOffset-offset)>>m.iPageShift<<m.iPageShift);
a.FreeRamPage(nextPage, GetPageType());
nextPage += m.iPageSize;
*aExtraArg = KPhysAddrInvalid; // return invalid physical address
m.iAllocFailed=ETrue;
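
// --- Illustrative note (editorial addition, not part of the original source) ---
// The commit loop above processes at most one page table's worth of pages per
// iteration, and never more than MM::MaxPagesInOneGo, so the system lock is only
// held for short, bounded periods. Assuming 4KB pages and 1MB per page table
// (iChunkSize == 0x100000), an offset of 0xFC000 into the current MMU chunk gives:
//   npEnd = (0x100000 - 0xFC000) >> 12 = 4
// so only 4 pages are mapped in that pass even if many more remain to satisfy
// the request; the next pass starts on a fresh page table.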

void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
// Decommit RAM from a chunk at a specified offset
// enter and leave with kernel unlocked
// must hold RamAlloc mutex before calling this function
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize));
if (iHomeRegionBase==0)
TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
// Physical memory not owned by the chunk also has to be evicted from cache(s).
// We cannot just purge, as it can still be in use by the driver. Therefore, we'll flush it.
// Purging physical memory from cache(s) that is owned by the chunk is done below.
CacheMaintenance::MemoryToPreserveAndReuse((TLinAddr)(iBase+aOffset), aSize, iMapAttr);
TInt endOffset=offset+aSize;
DRamAllocator& a = *m.iRamPageAllocator;
TPhysAddr pageList[KMaxPages];
#ifdef __CPU_WRITE_BACK_CACHE
TInt size_reduction = Min(aSize,iSize);
TBool selectiveFlush=((TUint)size_reduction<=(CacheMaintenance::SyncAllPerformanceThresholdPages()<<KPageShift));
while(offset<endOffset)
TInt np=(endOffset-offset)>>m.iPageShift; // number of pages remaining to decommit
TInt pdeEnd=(offset+m.iChunkSize)&~m.iChunkMask;
TInt npEnd=(pdeEnd-offset)>>m.iPageShift; // number of pages to end of page table
np=npEnd; // limit to single page table
if (np>MM::MaxPagesInOneGo)
np=MM::MaxPagesInOneGo; // limit
NKern::LockSystem(); // lock the system while we look at the page directory
TUint8* base=iBase; // save base address
TLinAddr addr=(TLinAddr)base+offset; // current address
TInt ptid=m.GetPageTableId(addr); // get page table ID if a page table is already assigned here
// Unmap the pages, clear the PTEs and place the physical addresses of the now-free RAM pages in
// pageList. Return nPtes=number of pages placed in list, remain=number of PTEs remaining in page table
// This also invalidates any TLB entries for the unmapped pages.
// NB for WriteBack cache, we must also invalidate any cached entries for these pages - this might be done
// by invalidating entry-by-entry or by a complete cache flush at the end.
// NB For split TLB, ITLB may not be invalidated. In that case it will be invalidated by
// Mmu::SyncCodeMappings() at the end of the function.
if (aDecommitType == EDecommitVirtual)
remain=m.UnmapVirtual(ptid,addr,np,pageList,ownsMemory,nPtes,nFree,iOwningProcess);
remain=m.UnmapPages(ptid,addr,np,pageList,ownsMemory,nPtes,nFree,iOwningProcess);
TInt decommitSize=nPtes<<m.iPageShift;
iSize-=decommitSize; // reduce the committed size
// if page table is now completely empty, unassign it and update chunk PDE info
remain &= KUnmapPagesCountMask;
m.DoUnassignPageTable(addr);
NKern::UnlockSystem();
m.FreePageTable(ptid);
__KTRACE_OPT(KMMU,Kern::Printf("nPdes=%d, Pdes=%08x, HomePdes=%08x",iNumPdes,iPdes,iHomePdes));
#ifdef __CPU_WRITE_BACK_CACHE
while(n && iBase==base) // reschedule may move base, but then cache will have been flushed so we can stop purging L1
CacheMaintenance::PageToReuseVirtualCache(addr);
NKern::FlashSystem();
Mmu::Get().CacheMaintenanceOnDecommit(pageList, nFree); //On ARMv5, this deals with L2 cache only
NKern::UnlockSystem(); // we can now unlock the system
__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryDeallocated:BTrace::EChunkMemoryRemoved,this,offset,nFree<<m.iPageShift);
// We can now return the decommitted pages to the free page list
a.FreeRamPages(pageList,nFree, GetPageType());
offset+=(np<<m.iPageShift);
NKern::UnlockSystem();
__KTRACE_OPT(KMMU,Kern::Printf("No page table at %08x",addr));
if ((iAttributes&EDisconnected)==0)
MM::Panic(MM::EChunkDecommitNoPageTable);
offset=pdeEnd; // disconnected chunk - step on to next PDE
if (iSize==0 && (iAttributes&EFixedAddress)==0)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit remove region"));
if (TLinAddr(iBase)==iHomeBase)
DeallocateHomeAddress();
NKern::UnlockSystem();
#ifdef __CPU_WRITE_BACK_CACHE
m.GenericFlush((TUint)Mmu::EFlushDDecommit); //Flush virtual DCache
CacheMaintenance::SyncPhysicalCache_All();
NKern::UnlockSystem();
if (iAttributes & ECode)
m.SyncCodeMappings(); // flush ITLB if necessary
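
// --- Illustrative note (editorial addition, not part of the original source) ---
// On write-back caches DoDecommit() picks one of two maintenance strategies up front:
// if the size being decommitted is no more than
// CacheMaintenance::SyncAllPerformanceThresholdPages() worth of pages, each freed
// page is purged individually inside the loop (selectiveFlush); otherwise a single
// full D-cache flush at the end (Mmu::EFlushDDecommit plus SyncPhysicalCache_All())
// is assumed to be cheaper. For example, if the threshold were 32 pages with 4KB
// pages, decommits up to 128KB would take the per-page path and larger ones the
// whole-cache path.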

TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
// Adjust a double-ended chunk.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
aBottom &= ~m.iPageMask;
aTop=(aTop+m.iPageMask)&~m.iPageMask;
TInt newSize=aTop-aBottom;
if (newSize>iMaxSize)
TInt initBottom=iStartPos;
TInt initTop=iStartPos+iSize;
TInt nBottom=Max(aBottom,iStartPos); // intersection bottom
TInt nTop=Min(aTop,iStartPos+iSize); // intersection top
__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
if (initBottom<nBottom)
DoDecommit(initBottom,nBottom-initBottom);
DoDecommit(nTop,initTop-nTop); // this changes iSize
r=DoCommit(aBottom,nBottom-aBottom);
r=DoCommit(nTop,aTop-nTop);
DoDecommit(aBottom,nBottom-aBottom);
r=DoCommit(nTop,aTop-nTop);
__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
DoDecommit(initBottom,iSize);
r=DoCommit(iStartPos,newSize);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x base %08x home %08x",this,iStartPos,iSize,iBase,iHomeRegionBase));
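
// --- Illustrative note (editorial addition, not part of the original source) ---
// AdjustDoubleEnded() intersects the currently committed region
// [iStartPos, iStartPos+iSize) with the requested region [aBottom, aTop) and only
// commits/decommits the differences. For example (4KB pages), with the current
// region [0x2000, 0x8000) and a request of [0x4000, 0xA000):
//   nBottom = Max(0x4000, 0x2000) = 0x4000
//   nTop    = Min(0xA000, 0x8000) = 0x8000
// so [0x2000, 0x4000) is decommitted, [0x8000, 0xA000) is committed, and the
// overlapping [0x4000, 0x8000) is left untouched.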

TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
// Commit to a disconnected chunk.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
if (aOffset<0 || aSize<0)
aSize+=(aOffset & m.iPageMask);
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
if ((aOffset+aSize)>iMaxSize)
TInt i=aOffset>>m.iPageShift;
TInt n=aSize>>m.iPageShift;
if (iPageBitMap->NotFree(i,n))
r=DoCommit(aOffset,aSize,aCommitType,aExtraArg);
iPageBitMap->Alloc(i,n);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
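
// --- Illustrative note (editorial addition, not part of the original source) ---
// Commit(), Decommit(), Unlock() and Lock() all round the caller's byte range out
// to whole pages with the same idiom, sketched here as a hypothetical helper
// (not used by the kernel):
#if 0
inline void RoundToPages(TInt& aOffset, TInt& aSize, TInt aPageMask)
	{
	aSize += (aOffset & aPageMask);            // grow the size by the sub-page start offset
	aOffset &= ~aPageMask;                     // round the offset down to a page boundary
	aSize = (aSize + aPageMask) & ~aPageMask;  // round the size up to whole pages
	}
// e.g. aOffset=0x1234, aSize=0x800, aPageMask=0xFFF  ->  aOffset=0x1000, aSize=0x1000
#endif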

TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
// Allocate offset and commit to a disconnected chunk.
TInt r = DoAllocate(aSize, aGuard, aAlign, ETrue);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);

TInt DMemModelChunk::FindFree(TInt aSize, TInt aGuard, TInt aAlign)
// Find free offset but don't commit any memory.
return DoAllocate(aSize, aGuard, aAlign, EFalse);

TInt DMemModelChunk::DoAllocate(TInt aSize, TInt aGuard, TInt aAlign, TBool aCommit)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoAllocate %x %x %d",aSize,aGuard,aAlign));
// Only allow this to be called on disconnected chunks, and not on disconnected
// cache chunks: when guard pages exist the bit map can't be used to determine
// the size of disconnected cache chunks, as Decommit() requires.
if ((iAttributes & (EDoubleEnded|EDisconnected|ECache))!=EDisconnected)
if (aSize<=0 || aGuard<0)
aAlign=Max(aAlign-m.iPageShift,0);
aSize=(aSize+m.iPageMask)&~m.iPageMask;
aGuard=(aGuard+m.iPageMask)&~m.iPageMask;
if ((aSize+aGuard)>iMaxSize)
TInt n=(aSize+aGuard)>>m.iPageShift;
TInt i=iPageBitMap->AllocAligned(n,aAlign,0,EFalse); // allocate the offset
r=KErrNoMemory; // run out of reserved space for this chunk
TInt offset=i<<m.iPageShift;
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
r=DoCommit(offset+aGuard,aSize,ECommitDiscontiguous);
iPageBitMap->Alloc(i,n);
r=offset; // if operation successful, return allocated offset
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoAllocate returns %x",r));
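
// --- Illustrative note (editorial addition, not part of the original source) ---
// DoAllocate() reserves aGuard bytes of uncommitted guard area below the committed
// region and converts the requested alignment from a byte to a page alignment.
// With 4KB pages, aSize = 0x3000, aGuard = 0x1000, aAlign = 16 (64KB) gives:
//   aAlign = Max(16 - 12, 0) = 4         (align to 2^4 = 16 pages)
//   n      = (0x3000 + 0x1000) >> 12 = 4 (pages reserved in the bit map)
// DoCommit() is then called at offset+aGuard, so the first reserved page stays
// uncommitted and acts as the guard; the value returned is the offset of the
// whole reservation, guard included.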

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize)
// Decommit from a disconnected chunk.
return Decommit(aOffset, aSize, EDecommitNormal);

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
// Decommit from a disconnected chunk.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize));
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
if (aOffset<0 || aSize<0)
aSize+=(aOffset & m.iPageMask);
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
if ((aOffset+aSize)>iMaxSize)
// clip the range to the home region
TInt end = aOffset+aSize;
if (aOffset<iHomeRegionOffset)
aOffset=iHomeRegionOffset;
if (end>iHomeRegionOffset+iHomeRegionSize)
end=iHomeRegionOffset+iHomeRegionSize;
__KTRACE_OPT(KMMU,Kern::Printf("Rounded and Clipped range %x+%x",aOffset,aSize));
TInt i=aOffset>>m.iPageShift;
TInt n=aSize>>m.iPageShift;
__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
TUint oldAvail = iPageBitMap->iAvail;
TUint oldSize = iSize;
// Free those positions which are still committed and also any guard pages,
// i.e. pages that are reserved in this chunk but which are not committed.
iPageBitMap->SelectiveFree(i,n);
DoDecommit(aOffset,aSize,aDecommitType);
if (iAttributes & ECache)
{// If this is the file server cache chunk then adjust the size based
// on the bit map size because:-
// - Unlocked and reclaimed pages will be unmapped without updating
// iSize or the bit map.
// - DoDecommit() only decommits the mapped pages.
// For all other chunks what is mapped is what is committed to the
// chunk so iSize is accurate.
TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
iSize = oldSize - (actualFreedPages << KPageShift);
__DEBUG_EVENT(EEventUpdateChunk, this);
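
// --- Illustrative note (editorial addition, not part of the original source) ---
// For the file server cache chunk, iSize is recomputed from the bit map rather than
// from what DoDecommit() actually unmapped, because unlocked pages may already have
// been reclaimed (unmapped) without the bit map or iSize being updated. For example,
// if SelectiveFree(i,n) released 8 page positions (iAvail grew by 8) but only 5 of
// them were still mapped, iSize is still reduced by 8 << KPageShift, keeping it
// consistent with the pages the chunk has given up.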

TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize));
if (!(iAttributes&ECache))
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
// Mark this as the file server cache chunk. This is safe as it is only the
// file server that can invoke this function.
iAttributes |= ECache;
if (aOffset<0 || aSize<0)
Mmu& m = Mmu::Get();
aSize+=(aOffset & m.iPageMask);
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
if ((aOffset+aSize)>iMaxSize)
return KErrArgument;
TInt i=aOffset>>m.iPageShift;
TInt n=aSize>>m.iPageShift;
if (iPageBitMap->NotAllocated(i,n))
#ifdef BTRACE_CHUNKS
TUint oldFree = m.FreeRamInBytes();
r=Mmu::Get().UnlockRamCachePages(iBase,i,n);
#ifdef BTRACE_CHUNKS
TUint unlocked = m.FreeRamInBytes()-oldFree; // size of memory unlocked
BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryDeallocated,this,aOffset,unlocked);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);

TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize));
if (!(iAttributes&ECache))
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
if (aOffset<0 || aSize<0)
return KErrArgument;
Mmu& m = Mmu::Get();
aSize+=(aOffset & m.iPageMask);
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
if ((aOffset+aSize)>iMaxSize)
return KErrArgument;
TInt i=aOffset>>m.iPageShift;
TInt n=aSize>>m.iPageShift;
if (iPageBitMap->NotAllocated(i,n))
#ifdef BTRACE_CHUNKS
TUint oldFree = m.FreeRamInBytes();
r=Mmu::Get().LockRamCachePages(iBase,i,n);
#ifdef BTRACE_CHUNKS
TUint locked = oldFree-m.FreeRamInBytes();
BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,locked);
// decommit memory on error...
__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
TUint oldAvail = iPageBitMap->iAvail;
iPageBitMap->SelectiveFree(i,n); // free those positions which are actually allocated
TUint oldSize = iSize;
DoDecommit(aOffset,aSize);
// Use the bit map to adjust the size of the chunk: unlocked and reclaimed pages
// will have been unmapped but not removed from the bit map, since DoDecommit()
// only decommits the mapped pages.
TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
iSize = oldSize - (actualFreedPages << KPageShift);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);

#ifndef __SCHEDULER_MACHINE_CODED__
// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::ApplyTopLevelPermissions(TChunkState aChunkState)
__KTRACE_OPT(KMMU,Kern::Printf("ApplyTopLevelPermissions ChunkState=%d",aChunkState));
if (!(iAttributes&EFixedAccess))
iChunkState=aChunkState;
Mmu& m = Mmu::Get();
TLinAddr base=(TLinAddr)iBase;
TUint32 mask=m.iChunkMask;
if (iAttributes & EDoubleEnded)
base+=(iStartPos & ~mask);
size=((iStartPos&mask)+size+mask)&~mask;
m.ApplyTopLevelPermissions(base,size,iPdePermissions[aChunkState]);
return (iAttributes&ECode)?Mmu::EFlushDPermChg|Mmu::EFlushIPermChg:Mmu::EFlushDPermChg;
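
// --- Illustrative note (editorial addition, not part of the original source) ---
// For a double-ended chunk only the MMU sections actually spanned by the committed
// region need their permissions changed, so base/size are widened to chunk
// boundaries. Assuming 1MB chunks (mask == 0xFFFFF), iStartPos = 0x180000 and
// size = 0x240000:
//   base += 0x180000 & ~0xFFFFF                        -> base + 0x100000
//   size  = ((0x80000 + 0x240000) + 0xFFFFF) & ~0xFFFFF = 0x300000
// i.e. the three 1MB sections containing [0x180000, 0x3C0000) are updated.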

// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::MoveToRunAddress(TLinAddr aLinearAddr, TChunkState aChunkState)
iChunkState=aChunkState;
TLinAddr base=(TLinAddr)iBase;
TLinAddr dest=aLinearAddr;
if (iAttributes & EDoubleEnded)
Mmu& m = Mmu::Get();
TUint32 mask=m.iChunkMask;
base+=(iStartPos & ~mask);
dest+=(iStartPos & ~mask);
size=((iStartPos&mask)+size+mask)&~mask;
m.MoveChunk(base,size,dest,iPdePermissions[aChunkState]);
MoveCurrentPdes((TLinAddr)iBase,aLinearAddr);
iBase=(TUint8 *)aLinearAddr;
return Mmu::EFlushDMove; // chunk can't contain code

// System locked in this function for a time proportional to chunk size.
// This is unavoidable since the chunk state must always be well defined
// whenever the system is unlocked.
TUint32 DMemModelChunk::MoveToHomeSection()
iChunkState=ENotRunning;
TLinAddr base=TLinAddr(iBase);
TLinAddr home=iHomeRegionBase;
if (iAttributes & EDoubleEnded)
Mmu& m = Mmu::Get();
TUint32 mask=m.iChunkMask;
base+=(iStartPos & ~mask);
home+=(iStartPos & ~mask);
size=((iStartPos&mask)+size+mask)&~mask;
m.MoveChunk(base,size,home,iPdePermissions[0]);
iBase=(TUint8 *)iHomeRegionBase;
return Mmu::EFlushDMove; // chunk can't contain code

TLinAddr DMemModelChunk::AllocateHomeAddress(TInt aSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AllocateHomeAddress size %08x",aSize));
Mmu& m = Mmu::Get();
TLinearSection* s = m.iKernelSection;
if (iAttributes&EFixedAddress)
required=Mmu::RoundToChunkSize(iMaxSize);
required=Mmu::RoundToChunkSize(aSize);
required >>= m.iChunkShift;
TInt r = s->iAllocator.AllocConsecutive(required, EFalse);
s->iAllocator.Alloc(r, required);
TLinAddr addr = s->iBase + (r<<m.iChunkShift);
__KTRACE_OPT(KMMU,Kern::Printf("Address %08x allocated",addr));
iHomeRegionSize = required << m.iChunkShift;
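
// --- Illustrative note (editorial addition, not part of the original source) ---
// The home region is carved out of the kernel section in whole MMU chunks: the
// request is rounded up with Mmu::RoundToChunkSize() and then expressed as a count
// of consecutive allocator entries. Assuming 1MB chunks, aSize = 0x280000 (2.5MB)
// rounds to 0x300000, so three consecutive entries are taken from s->iAllocator and
// iHomeRegionSize becomes 0x300000. Fixed-address chunks always reserve enough for
// iMaxSize, so their home region never needs to grow.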

void DMemModelChunk::DeallocateHomeAddress()
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DeallocateHomeAddress %08x+%x", iHomeRegionBase, iHomeRegionSize));
if (iHomeRegionSize)
Mmu& m = Mmu::Get();
TLinearSection* s = m.iKernelSection;
TInt first = (TInt)((iHomeRegionBase - s->iBase)>>m.iChunkShift);
TInt count = (TInt)(iHomeRegionSize >> m.iChunkShift);
s->iAllocator.Free(first, count);

TLinAddr DMemModelChunk::ReallocateHomeAddress(TInt aNewSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::ReallocateHomeAddress(%08x) for chunk %O",aNewSize,this));
// can never be called for a fixed address chunk
__ASSERT_ALWAYS((iAttributes&(EFixedAddress))==0,MM::Panic(MM::EFixedChunkMoving));
Mmu& m = Mmu::Get();
TLinearSection* s = m.iKernelSection;
TUint required=Mmu::RoundToChunkSize(aNewSize);
TInt next = (TInt)((iHomeRegionBase + iHomeRegionSize - s->iBase)>>m.iChunkShift);
TInt count = (TInt)((required - iHomeRegionSize) >> m.iChunkShift);
if (!s->iAllocator.NotFree(next, count))
// we can expand in place
s->iAllocator.Alloc(next, count);
iHomeRegionSize = required;
return iHomeRegionBase;
TUint oldHomeSize = iHomeRegionSize;
TLinAddr addr = AllocateHomeAddress(required); // try to get a new home address
if (addr && oldHomeSize)
// succeeded - free old region
next = (TInt)((iHomeRegionBase - s->iBase)>>m.iChunkShift);
count = (TInt)(oldHomeSize >> m.iChunkShift);
s->iAllocator.Free(next, count);
// if it fails, keep our current home region

TInt DMemModelChunk::CheckAccess()
DProcess* pP=TheCurrentThread->iOwningProcess;
if (iAttributes&EPrivate)
if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
return KErrAccessDenied;

TInt DMemModelChunkHw::Close(TAny*)
__KTRACE_OPT(KOBJECT,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this));
// Physical memory has to be evicted from cache(s).
// Must be preserved as well, as it can still be in use by the driver.
CacheMaintenance::MemoryToPreserveAndReuse(iLinAddr, iSize, iAttribs);
MmuBase& m=*MmuBase::TheMmu;
m.Unmap(iLinAddr,iSize);
DeallocateLinearAddress();

void DMemModelChunk::BTracePrime(TInt aCategory)
DChunk::BTracePrime(aCategory);
#ifdef BTRACE_CHUNKS
if (aCategory == BTrace::EChunks || aCategory == -1)
TBool memoryOwned = !(iAttributes&EMemoryNotOwned);
TInt committedBase = -1;
// look at each page table in this chunk...
TUint chunkEndIndex = iMaxSize>>KChunkShift;
NKern::LockSystem();
for(TUint chunkIndex=0; chunkIndex<chunkEndIndex; ++chunkIndex)
TLinAddr addr=(TLinAddr)iBase+chunkIndex*KChunkSize; // current address
TInt ptid = m.GetPageTableId(addr);
if(committedBase!=-1)
NKern::FlashSystem();
TUint committedEnd = chunkIndex*KChunkSize;
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);
// look at each page in page table...
for(TUint pageIndex=0; pageIndex<KChunkSize/KPageSize; ++pageIndex)
TBool committed = false;
TPhysAddr phys = m.PtePhysAddr(pPte[pageIndex], pageIndex);
if(phys!=KPhysAddrInvalid)
// we have a page...
// make sure we own the page...
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys);
if(pi && pi->Type()==SPageInfo::EChunk && pi->Owner()==this)
if(committedBase==-1)
committedBase = chunkIndex*KChunkSize+pageIndex*KPageSize; // start of new region
if(committedBase!=-1)
// generate trace for region...
NKern::FlashSystem();
TUint committedEnd = chunkIndex*KChunkSize+pageIndex*KPageSize;
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
if((pageIndex&15)==0)
NKern::FlashSystem();
NKern::UnlockSystem();
if(committedBase!=-1)
TUint committedEnd = chunkEndIndex*KChunkSize;
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);