Update contrib.
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
// Initial Contributors:
// Nokia Corporation - initial contribution.
// e32\memmodel\epoc\multiple\mchunk.cpp
#include "cache_maintenance.h"
#include <mmubase.inl>
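// Note (added for clarity): throughout this file a chunk's address space is
// managed in page-directory-entry sized steps. iPageTables[] holds one page
// table ID per such step (indexed by offset>>iChunkShift), with 0xffff meaning
// "no page table assigned". For example, assuming a 1MB chunk granularity and
// a 4KB page size, an offset of 0x340000 maps to iPageTables[3] and to page
// 0x40 within that page table. The exact shifts come from the Mmu object
// (m.iChunkShift, m.iPageShift) and are architecture dependent.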
DMemModelChunk::DMemModelChunk()
	{
	}
TLinearSection* DMemModelChunk::LinearSection()
	{
	Mmu& m=Mmu::Get();
	TInt ar=(iAttributes&EAddressRangeMask);
	switch (ar)
		{
		case EAddressLocal: return ((DMemModelProcess*)iOwningProcess)->iLocalSection;
		case EAddressFixed: return NULL;
		case EAddressShared: return m.iSharedSection;
		case EAddressUserGlobal: return m.iUserGlobalSection;
		case EAddressKernel: return m.iKernelSection;
		}
	MM::Panic(MM::EChunkBadAddressRange);
	return NULL;
	}
void DMemModelChunk::Destruct()
__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
#define SET_R_IF_DEBUG(x) r = (x)
#define SET_R_IF_DEBUG(x) (void)(x)
if (iAttributes & EDisconnected)
SET_R_IF_DEBUG(Decommit(0,iMaxSize));
else if (iAttributes & EDoubleEnded)
SET_R_IF_DEBUG(AdjustDoubleEnded(0,0));
SET_R_IF_DEBUG(Adjust(0));
__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
// check all page tables have been freed...
TInt nPdes=(iMaxSize+m.iChunkMask)>>m.iChunkShift;
for(TInt i=0; i<nPdes; i++)
__NK_ASSERT_DEBUG(iPageTables[i]==0xffff);
TLinearSection* s=LinearSection();
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::~DMemModelChunk remove region"));
s->iAllocator.Free( (TLinAddr(iBase)-s->iBase)>>m.iChunkShift, iMaxSize>>m.iChunkShift);
Kern::Free(iPageTables);
delete iPermanentPageBitMap;
iKernelMirror->Close(NULL);
TDfc* dfc = iDestroyedDfc;
__KTRACE_OPT(KMEMTRACE, {Mmu::Wait(); Kern::Printf("MT:D %d %x %O",NTickCount(),this,this);Mmu::Signal();});
BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
TInt DMemModelChunk::Close(TAny* aPtr)
DMemModelProcess* pP=(DMemModelProcess*)aPtr;
if ((iAttributes&EMapTypeMask)==EMapTypeLocal)
pP=(DMemModelProcess*)iOwningProcess;
pP->RemoveChunk(this);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this));
__NK_ASSERT_DEBUG(r > 0); // Should never be negative.
return EObjectDeleted;
TUint8* DMemModelChunk::Base(DProcess* aProcess)
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo)
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O DoCreate att=%08x",this,iAttributes));
__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));
if (aInfo.iMaxSize<=0)
iKernelMirror->iAttributes |= iAttributes|EMemoryNotOwned;
TInt r=iKernelMirror->DoCreate(aInfo);
TInt nPdes=(aInfo.iMaxSize+m.iChunkMask)>>m.iChunkShift;
iMaxSize=nPdes<<m.iChunkShift;
iMapAttr = aInfo.iMapAttr;
TInt mapType=iAttributes & EMapTypeMask;
if (mapType==EMapTypeShared)
iOsAsids=TBitMapAllocator::New(m.iNumOsAsids,ETrue);
TInt maxpages=iMaxSize>>m.iPageShift;
if (iAttributes & EDisconnected)
TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple)
TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
iPermanentPageBitMap = pM;
iPageTables=(TUint16*)Kern::Alloc(nPdes*sizeof(TUint16));
memset(iPageTables,0xff,nPdes*sizeof(TUint16));
TInt r=AllocateAddress();
__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:C %d %x %O",NTickCount(),this,this));
BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
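// Note (added for clarity): ClaimInitialPages() below takes ownership of pages
// and page tables that the bootstrap has already mapped for this chunk (for
// example the initial kernel heap). It records each page table ID in
// iPageTables[], marks the page table infos and page infos as belonging to
// this chunk, and (when BTRACE_KERNEL_MEMORY is enabled) moves the pages out
// of the Epoc::KernelMiscPages count. This description is inferred from the
// code that follows; the elided lines may contain further detail.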
void DMemModelChunk::ClaimInitialPages()
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ClaimInitialPages()",this));
TUint32 ccp=K::CompressKHeapPtr(this);
TInt ptid=m.PageTableId(TLinAddr(iBase)+offset);
__ASSERT_ALWAYS(ptid>=0,MM::Panic(MM::EClaimInitialPagesBadPageTable));
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID=%d",offset,ptid));
iPageTables[offset>>m.iChunkShift]=ptid;
SPageTableInfo& ptinfo = m.PtInfo(ptid);
ptinfo.SetChunk(ccp,offset>>m.iChunkShift);
TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);
TInt flashCount = MM::MaxPagesInOneGo;
for (i=0; i<m.iChunkSize>>m.iPageShift; ++i, offset+=m.iPageSize)
flashCount = MM::MaxPagesInOneGo;
NKern::FlashSystem();
if (m.PteIsPresent(pte))
TPhysAddr phys=m.PtePhysAddr(pte, i);
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x phys %08x",offset,phys));
SPageInfo* info = SPageInfo::SafeFromPhysAddr(phys);
info->SetChunk(this,offset>>m.iPageShift);
#ifdef BTRACE_KERNEL_MEMORY
--Epoc::KernelMiscPages; // page now owned by chunk, and is not 'miscellaneous'
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x PTID %d NP %d", offset, ptid, np));
NKern::UnlockSystem();
void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08x size %08x",this,aAddr,aInitialSize));
iBase=(TUint8*)aAddr;
iSize=Mmu::RoundToPageSize(aInitialSize);
TInt DMemModelChunk::Reserve(TInt aInitialSize)
// Reserve home section address space for a chunk
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O Reserve() size %08x",this,aInitialSize));
iSize=Mmu::RoundToPageSize(aInitialSize);
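// Note (added for clarity): a chunk is resized through one of three models,
// selected by its attributes. A normal chunk grows and shrinks from offset 0
// via Adjust(); a double-ended chunk (EDoubleEnded) keeps a committed region
// [iStartPos,iStartPos+iSize) adjusted via AdjustDoubleEnded(); a disconnected
// chunk (EDisconnected) commits and decommits arbitrary page ranges via
// Commit(), Allocate() and Decommit(), with iPageBitMap tracking which pages
// are reserved. The attribute checks at the top of each of these functions
// enforce that only the matching model is used.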
TInt DMemModelChunk::Adjust(TInt aNewSize)
// Adjust a standard chunk.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
if (iAttributes & (EDoubleEnded|EDisconnected))
if (aNewSize<0 || aNewSize>iMaxSize)
TInt newSize=Mmu::RoundToPageSize(aNewSize);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing"));
r=DoCommit(iSize,newSize-iSize);
else if (newSize<iSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking"));
DoDecommit(newSize,iSize-newSize);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x base %08x",this,iSize,iBase));
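// Note (added for clarity): Address() and PhysicalAddress() below are only
// meaningful for shared kernel chunks (those with iPermanentPageBitMap, i.e.
// ESharedKernelSingle/ESharedKernelMultiple), where every committed page is
// also mapped in the kernel mirror chunk. A rough usage sketch from a device
// driver, assuming a previously committed offset/size pair, would be:
//
//   TLinAddr kaddr;
//   TInt r = chunk->Address(offset, size, kaddr);   // kernel-side address
//   if (r == KErrNone)
//       memcpy((TAny*)kaddr, src, size);            // valid while the chunk exists
//
// In practice drivers usually reach this via the Kern::ChunkAddress()/
// Kern::ChunkPhysicalAddress() wrappers (not shown in this file) rather than
// calling it directly.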
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
if(!iPermanentPageBitMap)
return KErrAccessDenied;
if(TUint(aOffset)>=TUint(iMaxSize))
if(TUint(aOffset+aSize)>TUint(iMaxSize))
TInt pageShift = Mmu::Get().iPageShift;
TInt start = aOffset>>pageShift;
TInt size = ((aOffset+aSize-1)>>pageShift)-start+1;
if(iPermanentPageBitMap->NotAllocated(start,size))
aKernelAddress = (TLinAddr)iKernelMirror->iBase+aOffset;
TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
TInt r=Address(aOffset,aSize,aKernelAddress);
return Mmu::Get().LinearToPhysical(aKernelAddress,aSize,aPhysicalAddress,aPhysicalPageList);
void DMemModelChunk::Substitute(TInt aOffset, TPhysAddr aOldAddr, TPhysAddr aNewAddr)
// Substitute the page mapping at aOffset with aNewAddr.
// Enter and leave with system locked.
// This is sometimes called with interrupts disabled and should leave them alone.
__ASSERT_ALWAYS(iKernelMirror==NULL,MM::Panic(MM::EChunkRemapUnsupported));
TInt ptid=iPageTables[aOffset>>m.iChunkShift];
MM::Panic(MM::EChunkRemapNoPageTable);
// Permissions for global code will have been overwritten with ApplyPermissions
// so we can't trust iPtePermissions for those chunk types
if(iChunkType==EKernelCode)
perms = m.iKernelCodePtePerm;
else if(iChunkType==EDll)
perms = m.iGlobalCodePtePerm;
perms = iPtePermissions;
m.RemapPage(ptid, (TLinAddr)iBase+aOffset, aOldAddr, aNewAddr, perms, iOwningProcess);
/**
Get the movability type of the chunk's pages
@return How movable the chunk's pages are
*/
TZonePageType DMemModelChunk::GetPageType()
// Shared chunks have their physical addresses available
if (iChunkType == ESharedKernelSingle ||
iChunkType == ESharedKernelMultiple ||
iChunkType == ESharedIo ||
iChunkType == ESharedKernelMirror ||
iChunkType == EKernelMessage ||
iChunkType == EKernelData) // Don't move kernel heap pages as DMA may be accessing them.
// All other types of chunk are movable
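// Note (added for clarity): DoCommit() below handles several commit types.
// ECommitDiscontiguous allocates ordinary RAM pages; ECommitContiguous
// allocates one physically contiguous block and returns its base address in
// *aExtraArg; ECommitDiscontiguousPhysical and ECommitContiguousPhysical map
// physical memory supplied by the caller through aExtraArg (a page list or a
// base address respectively) without taking ownership of it; ECommitVirtual
// only reserves page-table entries. This summary is derived from the switch
// statement in the function body.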
TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
// Commit more RAM to a chunk at a specified offset
// enter and leave with system unlocked
// must hold RamAlloc mutex before calling this function
__ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
TInt endOffset=offset+aSize;
DRamAllocator& a = *m.iRamPageAllocator;
TPhysAddr pageList[KMaxPages];
TPhysAddr* pPageList=0; // In case of discontiguous commit it points to the list of physical pages.
TPhysAddr nextPage=0; // In case of contiguous commit, it points to the physical address to commit
SPageInfo::TType type = SPageInfo::EChunk;
// Set flag to indicate if RAM should be cleared before being committed.
// Note: EDll and EUserCode are covered by the code segment, so they are not
// cleared here; this avoids clearing the region already written by the loader.
TBool clearRam = iChunkType==EUserData
|| iChunkType==EDllData
|| iChunkType==EUserSelfModCode
|| iChunkType==ESharedKernelSingle
|| iChunkType==ESharedKernelMultiple
|| iChunkType==ESharedIo
|| iChunkType==ERamDrive;
TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
TBool physicalCommit = aCommitType&DChunk::ECommitPhysicalMask;
return KErrNotSupported;
if(!physicalCommit && aCommitType != DChunk::ECommitVirtual)
return KErrNotSupported;
type = SPageInfo::EInvalid; // to indicate page info not to be updated
case DChunk::ECommitDiscontiguous:
case DChunk::ECommitContiguous:
// Allocate a block of contiguous RAM from the free pool
TInt numPages=(endOffset-offset)>>m.iPageShift;
r=m.AllocContiguousRam(numPages<<m.iPageShift, nextPage, GetPageType(), 0);
m.ClearPages(numPages, (TPhysAddr*)(nextPage|1), iClearByte); // clear RAM if required
*aExtraArg = nextPage; // store physical address of RAM as return argument
case DChunk::ECommitDiscontiguousPhysical:
pPageList = aExtraArg; // use the pages given to us
// Check that the addresses of the pages are multiples of the page size...
TInt numPages=(endOffset-offset)>>m.iPageShift;
TUint32* ptr = aExtraArg;
TUint32* endPtr = aExtraArg+numPages;
return KErrNone; // Zero size commit is OK
TPhysAddr pageBits = 0;
if(pageBits&(m.iPageSize-1))
return KErrArgument; // all addresses must be multiples of the page size
case DChunk::ECommitContiguousPhysical:
nextPage = (TPhysAddr)aExtraArg; // we have been given the physical address to use
if(nextPage&(m.iPageSize-1))
return KErrArgument; // address must be a multiple of the page size
case DChunk::ECommitVirtual:
return KErrNotSupported;
return KErrNotSupported;
while(offset<endOffset)
TInt np=(endOffset-offset)>>m.iPageShift; // pages remaining to satisfy request
TInt npEnd=(m.iChunkSize-(offset&m.iChunkMask))>>m.iPageShift; // number of pages to end of page table
np=npEnd; // limit to single page table
if (np>MM::MaxPagesInOneGo)
np=MM::MaxPagesInOneGo; // limit
TInt ptid=iPageTables[offset>>m.iChunkShift];
// need to allocate a new page table
newPtId=m.AllocPageTable();
break; // Exit the loop. Below, we free all RAM
// that was allocated in the previous loop passes.
if(aCommitType==DChunk::ECommitDiscontiguous)
pPageList = pageList;
r=m.AllocRamPages(pPageList,np, GetPageType()); // try to allocate pages
if (r!=KErrNone) // If this fails, clean up what was allocated in this loop.
m.FreePageTable(newPtId);
break; // Exit the loop. Below, we free all RAM
// that was allocated in the previous loop passes.
m.ClearPages(np, pPageList, iClearByte); // clear RAM if required
TInt commitSize = np<<m.iPageShift;
// In shared chunks (visible to both the user and kernel side), the kernel side is
// always mapped first; decommitting proceeds in the reverse order.
// Map the same memory into the kernel mirror chunk
r = iKernelMirror->DoCommit(offset,commitSize,ECommitDiscontiguousPhysical,pPageList);
r = iKernelMirror->DoCommit(offset,commitSize,ECommitContiguousPhysical,(TUint32*)nextPage);
__KTRACE_OPT(KMMU,Kern::Printf("iKernelMirror->DoCommit returns %d",r));
if(r!=KErrNone) // If this fails, clean up what was allocated in this loop.
if(aCommitType==DChunk::ECommitDiscontiguous)
m.FreePages(pPageList,np,EPageFixed);
m.FreePageTable(newPtId);
break; // Exit the loop. Below, we free all RAM
// that was allocated in the previous loop passes.
// Commit the memory.
NKern::LockSystem(); // lock the system while we change the MMU mappings
iSize += commitSize; // update committed size
if (aCommitType==DChunk::ECommitVirtual)
m.MapVirtual(ptid, np);
m.MapRamPages(ptid, type, this, offset, pPageList, np, iPtePermissions);
m.MapPhysicalPages(ptid, type, this, offset, nextPage, np, iPtePermissions);
nextPage += commitSize;
NKern::UnlockSystem();
// We have allocated a new page table; now we must assign it
iPageTables[offset>>m.iChunkShift]=ptid;
TLinAddr addr=(TLinAddr)iBase+offset; // current address
m.AssignPageTable(ptid, SPageTableInfo::EChunk, this, addr, iPdePermissions);
__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,offset,commitSize);
offset += commitSize; // update offset
if(iPermanentPageBitMap)
iPermanentPageBitMap->Alloc(aOffset>>m.iPageShift,aSize>>m.iPageShift);
// We ran out of memory somewhere.
// Free any memory we succeeded in allocating in the loop passes before the one that failed.
if (iChunkType != ESharedKernelMirror) // The kernel mirror chunk will be decommitted alongside the main chunk.
DChunk::TDecommitType decommitType = aCommitType==DChunk::ECommitVirtual ?
DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
DoDecommit(aOffset,offset-aOffset,decommitType);
if(aCommitType==DChunk::ECommitContiguous)
// Free the pages we allocated but didn't get around to committing.
// This has to be done page by page; we cannot use FreePhysicalRam here because part
// of the originally allocated contiguous memory has already been freed (in DoDecommit).
TPhysAddr last = nextPage + ((endOffset-offset)>>m.iPageShift<<m.iPageShift);
a.FreeRamPage(nextPage, GetPageType());
nextPage += m.iPageSize;
*aExtraArg = KPhysAddrInvalid; // return an invalid physical address
m.iAllocFailed=ETrue;
void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
// Decommit RAM from a chunk at a specified offset
// enter and leave with system unlocked
// must hold RamAlloc mutex before calling this function
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize));
TBool ownsMemory = !(iAttributes&EMemoryNotOwned);
TInt endOffset=offset+aSize;
DRamAllocator& a = *m.iRamPageAllocator;
TPhysAddr pageList[KMaxPages];
TLinAddr linearPageList[KMaxPages];
const TAny* asids=GLOBAL_MAPPING;
else if (iOwningProcess)
asids=(const TAny*)((DMemModelProcess*)iOwningProcess)->iOsAsid;
TUint size_in_pages = (TUint)(Min(aSize,iSize)>>m.iPageShift);
TBool sync_decommit = (size_in_pages<m.iDecommitThreshold);
while(offset<endOffset)
TInt np=(endOffset-offset)>>m.iPageShift; // number of pages remaining to decommit
TInt pdeEnd=(offset+m.iChunkSize)&~m.iChunkMask;
TInt npEnd=(pdeEnd-offset)>>m.iPageShift; // number of pages to end of page table
np=npEnd; // limit to single page table
if (np>MM::MaxPagesInOneGo)
np=MM::MaxPagesInOneGo; // limit
TLinAddr addr=(TLinAddr)iBase+offset; // current address
TInt ptid=iPageTables[offset>>m.iChunkShift]; // get page table ID if a page table is already assigned here
TUint oldFree = m.FreeRamInBytes();
// Unmap the pages, clear the PTEs and place the physical addresses of the now-free RAM pages in
// pageList. Return nPtes=number of pages placed in list, remain=number of PTEs remaining in page table
// Bit 31 of return value is set if TLB flush may be incomplete
if (aDecommitType == EDecommitVirtual)
remain=m.UnmapVirtual(ptid,addr,np,pageList,ETrue,nPtes,nUnmapped,iOwningProcess);
remain=m.UnmapPages(ptid,addr,np,pageList,ETrue,nPtes,nUnmapped,iOwningProcess);
if (aDecommitType == EDecommitVirtual)
remain=m.UnmapUnownedVirtual(ptid,addr,np,pageList,linearPageList,nPtes,nUnmapped,iOwningProcess);
remain=m.UnmapUnownedPages(ptid,addr,np,pageList,linearPageList,nPtes,nUnmapped,iOwningProcess);
TInt nFree = ownsMemory ? nUnmapped : 0; // The number of pages to free
TInt decommitSize=nPtes<<m.iPageShift;
iSize-=decommitSize; // reduce the committed size
NKern::UnlockSystem();
__KTRACE_OPT(KMEMTRACE,Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this));
TUint reclaimed = (oldFree-m.FreeRamInBytes())>>m.iPageShift; // number of 'unlocked' pages reclaimed from the RAM cache
BTraceContext12(BTrace::EChunks,ownsMemory?BTrace::EChunkMemoryDeallocated:BTrace::EChunkMemoryRemoved,this,offset,(nFree-reclaimed)<<m.iPageShift);
if (sync_decommit && (remain & KUnmapPagesTLBFlushDeferred))
// must ensure the DTLB is flushed before doing the cache purge on decommit
m.GenericFlush(Mmu::EFlushDTLB);
// if the page table is now completely empty, unassign it and update the chunk PDE info
remain &= KUnmapPagesCountMask;
m.DoUnassignPageTable(addr, asids);
m.FreePageTable(ptid);
iPageTables[offset>>m.iChunkShift]=0xffff;
// Physical memory not owned by the chunk must have its contents preserved when it is
// evicted from the cache. If the chunk has a kernel mirror, it is sufficient to do this just once.
for (i=0;i<nUnmapped;i++)
m.CacheMaintenanceOnPreserve(pageList[i], KPageSize, linearPageList[i], iMapAttr);
// We can now return the decommitted pages to the free page list and sort out caching.
if (sync_decommit) // Purge the cache if the size is below the decommit threshold
m.CacheMaintenanceOnDecommit(pageList, nFree);
a.FreeRamPages(pageList,nFree, GetPageType());
offset+=(np<<m.iPageShift);
__KTRACE_OPT(KMMU,Kern::Printf("No page table at %08x",addr));
if ((iAttributes&EDisconnected)==0)
MM::Panic(MM::EChunkDecommitNoPageTable);
offset=pdeEnd; // disconnected chunk - step on to next PDE
if (deferred & KUnmapPagesTLBFlushDeferred)
m.GenericFlush( (iAttributes&ECode) ? Mmu::EFlushDTLB|Mmu::EFlushITLB : Mmu::EFlushDTLB );
if (total_freed && !sync_decommit) // Flush the entire cache if the size exceeds the decommit threshold
CacheMaintenance::SyncPhysicalCache_All(); // On ARMv6, this deals with both the L1 & L2 cache
// The kernel-mapped part of the chunk is removed last. At this point no user-side mapping
// remains, which ensures that evicting the data from the cache will succeed.
iKernelMirror->DoDecommit(aOffset,aSize);
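// Note (added for clarity): AdjustDoubleEnded() below works out the overlap
// between the currently committed region [iStartPos, iStartPos+iSize) and the
// requested region [aBottom, aTop), then decommits the parts that fall outside
// the request and commits the parts that are newly requested. For example,
// assuming 4KB pages, moving a committed region 0x1000..0x5000 to a requested
// region 0x3000..0x7000 decommits 0x1000..0x3000 and commits 0x5000..0x7000,
// leaving the overlap 0x3000..0x5000 untouched.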
TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
// Adjust a double-ended chunk.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
aBottom &= ~m.iPageMask;
aTop=(aTop+m.iPageMask)&~m.iPageMask;
TInt newSize=aTop-aBottom;
if (newSize>iMaxSize)
TInt initBottom=iStartPos;
TInt initTop=iStartPos+iSize;
TInt nBottom=Max(aBottom,iStartPos); // intersection bottom
TInt nTop=Min(aTop,iStartPos+iSize); // intersection top
__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
if (initBottom<nBottom)
DoDecommit(initBottom,nBottom-initBottom);
DoDecommit(nTop,initTop-nTop); // this changes iSize
r=DoCommit(aBottom,nBottom-aBottom);
r=DoCommit(nTop,aTop-nTop);
DoDecommit(aBottom,nBottom-aBottom);
r=DoCommit(nTop,aTop-nTop);
__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
DoDecommit(initBottom,iSize);
r=DoCommit(iStartPos,newSize);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x base %08x",this,iStartPos,iSize,iBase));
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
// Commit to a disconnected chunk.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
if (aOffset<0 || aSize<0)
aSize+=(aOffset & m.iPageMask);
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
if ((aOffset+aSize)>iMaxSize)
TInt i=aOffset>>m.iPageShift;
TInt n=aSize>>m.iPageShift;
if (iPageBitMap->NotFree(i,n))
r=DoCommit(aOffset,aSize,aCommitType,aExtraArg);
iPageBitMap->Alloc(i,n);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
// Allocate offset and commit to a disconnected chunk.
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));
// Only allow this to be called on disconnected chunks, and not on disconnected
// cache chunks: when guard pages exist the bit map can't be used to determine
// the size of a disconnected cache chunk, as Decommit() requires.
if ((iAttributes & (EDoubleEnded|EDisconnected|ECache))!=EDisconnected)
if (aSize<=0 || aGuard<0)
aAlign=Max(aAlign-m.iPageShift,0);
TInt base=TInt(TLinAddr(iBase)>>m.iPageShift);
aSize=(aSize+m.iPageMask)&~m.iPageMask;
aGuard=(aGuard+m.iPageMask)&~m.iPageMask;
if ((aSize+aGuard)>iMaxSize)
TInt n=(aSize+aGuard)>>m.iPageShift;
TInt i=iPageBitMap->AllocAligned(n,aAlign,base,EFalse); // allocate the offset
r=KErrNoMemory; // ran out of reserved space for this chunk
TInt offset=i<<m.iPageShift;
__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
r=DoCommit(offset+aGuard,aSize);
iPageBitMap->Alloc(i,n);
r=offset; // if the operation is successful, return the allocated offset
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
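// Note (added for clarity): Allocate() reserves aGuard+aSize bytes of the
// chunk's address range but only commits memory for the aSize part, so the
// first aGuard bytes act as an uncommitted guard region. The return value is
// the offset of the start of the reservation; the usable, committed memory
// therefore begins at (returned offset)+aGuard. For example, a call such as
// Allocate(0x2000, 0x1000) (hypothetical values) could return offset 0x10000,
// with pages committed at 0x11000..0x13000 and 0x10000..0x11000 left as guard.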
TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize)
// Decommit from a disconnected chunk.
return Decommit(aOffset, aSize, EDecommitNormal);
TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize, TDecommitType aDecommitType)
// Decommit from a disconnected chunk
// @param aDecommitType Indicates whether the area was originally committed with the
// ECommitVirtual type
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize));
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
if (aOffset<0 || aSize<0)
if (aDecommitType == EDecommitVirtual)
return KErrNotSupported;
aSize+=(aOffset & m.iPageMask);
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
if ((aOffset+aSize)>iMaxSize)
// limit the range to the home region range
__KTRACE_OPT(KMMU,Kern::Printf("Rounded and Clipped range %x+%x",aOffset,aSize));
TInt i=aOffset>>m.iPageShift;
TInt n=aSize>>m.iPageShift;
__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
TUint oldAvail = iPageBitMap->iAvail;
TUint oldSize = iSize;
// Free those positions which are still committed and also any guard pages,
// i.e. pages that are reserved in this chunk but which are not committed.
iPageBitMap->SelectiveFree(i,n);
DoDecommit(aOffset,aSize,aDecommitType);
if (iAttributes & ECache)
{// If this is the file server cache chunk then adjust the size based
// on the bit map size because:-
// - Unlocked and reclaimed pages will be unmapped without updating
// iSize or the bit map.
// - DoDecommit() only decommits the mapped pages.
// For all other chunks what is mapped is what is committed to the
// chunk so iSize is accurate.
TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
iSize = oldSize - (actualFreedPages << KPageShift);
__DEBUG_EVENT(EEventUpdateChunk, this);
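// Note (added for clarity): Unlock() and Lock() below support the file
// server's cache chunk. Unlock() makes committed pages available to the RAM
// cache so the system can reclaim them under memory pressure (the first call
// marks the chunk with the ECache attribute); Lock() tries to reclaim the
// pages for normal use again and, if that fails, decommits the affected range.
// This summary is inferred from the calls to UnlockRamCachePages() and
// LockRamCachePages() and from the error path in Lock().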
TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize));
if (!(iAttributes&ECache))
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
// Mark this as the file server cache chunk. This is safe as it is only the
// file server that can invoke this function.
iAttributes |= ECache;
if (aOffset<0 || aSize<0)
aSize+=(aOffset & m.iPageMask);
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
if ((aOffset+aSize)>iMaxSize)
TInt i=aOffset>>m.iPageShift;
TInt n=aSize>>m.iPageShift;
if (iPageBitMap->NotAllocated(i,n))
TUint oldFree = m.FreeRamInBytes();
r=m.UnlockRamCachePages((TLinAddr)(iBase+aOffset),n,iOwningProcess);
TUint unlocked = m.FreeRamInBytes()-oldFree; // size of memory unlocked
BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryDeallocated,this,aOffset,unlocked);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize)
__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize));
if (!(iAttributes&ECache))
if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
if (aOffset<0 || aSize<0)
return KErrArgument;
Mmu& m = Mmu::Get();
aSize+=(aOffset & m.iPageMask);
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
if ((aOffset+aSize)>iMaxSize)
return KErrArgument;
TInt i=aOffset>>m.iPageShift;
TInt n=aSize>>m.iPageShift;
if (iPageBitMap->NotAllocated(i,n))
#ifdef BTRACE_CHUNKS
TUint oldFree = m.FreeRamInBytes();
r=m.LockRamCachePages((TLinAddr)(iBase+aOffset),n,iOwningProcess);
#ifdef BTRACE_CHUNKS
TUint locked = oldFree-m.FreeRamInBytes();
BTraceContext12(BTrace::EChunks,BTrace::EChunkMemoryAllocated,this,aOffset,locked);
// decommit memory on error...
__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));
TUint oldAvail = iPageBitMap->iAvail;
iPageBitMap->SelectiveFree(i,n); // free those positions which are actually allocated
TUint oldSize = iSize;
DoDecommit(aOffset,aSize);
// Use the bit map to adjust the size of the chunk as unlocked and reclaimed pages
// will have been unmapped but not removed from the bit map as DoDecommit() only
// decommits the mapped pages.
TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
iSize = oldSize - (actualFreedPages << KPageShift);
__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
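// Note (added for clarity): AllocateAddress() reserves iMaxSize>>iChunkShift
// consecutive chunk-sized slots in the chunk's linear section (as returned by
// LinearSection()) and derives iBase from the slot index:
//
//   iBase = section base + (slot index << iChunkShift)
//
// Chunks with a fixed, preallocated address have no linear section and return
// immediately.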
TInt DMemModelChunk::AllocateAddress()
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O AllocateAddress()",this));
TLinearSection* s=LinearSection();
return KErrNone; // chunk has fixed preallocated address
TUint32 required=iMaxSize>>m.iChunkShift;
__KTRACE_OPT(KMMU,Kern::Printf("Searching from low to high addresses"));
TInt r=s->iAllocator.AllocConsecutive(required, EFalse);
return KErrNoMemory;
s->iAllocator.Alloc(r, required);
iBase=(TUint8*)(s->iBase + (r<<m.iChunkShift));
__KTRACE_OPT(KMMU,Kern::Printf("Address %08x allocated",iBase));
void DMemModelChunk::ApplyPermissions(TInt aOffset, TInt aSize, TPte aPtePerm)
__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O ApplyPermissions(%x+%x,%08x)",this,aOffset,aSize,aPtePerm));
__ASSERT_ALWAYS(aOffset>=0 && aSize>=0, MM::Panic(MM::EChunkApplyPermissions1));
aOffset &= ~m.iPageMask;
aSize=(aSize+m.iPageMask)&~m.iPageMask;
TInt endOffset=aOffset+aSize;
__ASSERT_ALWAYS(endOffset<=iMaxSize, MM::Panic(MM::EChunkApplyPermissions2));
while(aOffset<endOffset)
TInt ptid=iPageTables[aOffset>>m.iChunkShift];
TInt pdeEnd=(aOffset+m.iChunkSize)&~m.iChunkMask;
TInt np=(endOffset-aOffset)>>m.iPageShift; // number of pages remaining to process
TInt npEnd=(pdeEnd-aOffset)>>m.iPageShift; // number of pages to end of page table
np=npEnd; // limit to single page table
if (np>MM::MaxPagesInOneGo)
np=MM::MaxPagesInOneGo; // limit
m.ApplyPagePermissions(ptid, (aOffset&m.iChunkMask)>>m.iPageShift, np, aPtePerm);
aOffset+=(np<<m.iPageShift);
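// Note (added for clarity): DMemModelChunkHw represents a hardware chunk, a
// kernel-side mapping of a caller-supplied physical region (iPhysAddr mapped
// at iLinAddr), typically created by device drivers. On the final Close() the
// mapping is removed and the linear address range released, but the physical
// memory itself is preserved for its owner, hence the cache maintenance call
// below. This description is inferred from the members used in Close().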
TInt DMemModelChunkHw::Close(TAny*)
__KTRACE_OPT(KOBJECT,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this));
// Save data for cache maintenance before being destroyed by DeallocateLinearAddress
TPhysAddr pa = iPhysAddr;
TLinAddr la = iLinAddr;
TUint attr = iAttribs;
MmuBase& m=*MmuBase::TheMmu;
m.Unmap(iLinAddr,iSize);
DeallocateLinearAddress();
// The physical memory has to be evicted from the cache(s), but its contents must be
// preserved as it can still be in use by the driver.
m.CacheMaintenanceOnPreserve(pa, size, la, attr);
TInt DMemModelChunk::CheckAccess()
DProcess* pP=TheCurrentThread->iOwningProcess;
if (iAttributes&EPrivate)
if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
return KErrAccessDenied;
void DMemModelChunk::BTracePrime(TInt aCategory)
DChunk::BTracePrime(aCategory);
#ifdef BTRACE_CHUNKS
if (aCategory == BTrace::EChunks || aCategory == -1)
TBool memoryOwned = !(iAttributes&EMemoryNotOwned);
MmuBase& m=*MmuBase::TheMmu;
TInt committedBase = -1;
// look at each page table in this chunk...
TUint chunkEndIndex = iMaxSize>>KChunkShift;
for(TUint chunkIndex=0; chunkIndex<chunkEndIndex; ++chunkIndex)
TInt ptid = iPageTables[chunkIndex];
if(committedBase!=-1)
TUint committedEnd = chunkIndex*KChunkSize;
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
TPte* pPte=(TPte*)m.PageTableLinAddr(ptid);
// look at each page in the page table...
NKern::LockSystem();
for(TUint pageIndex=0; pageIndex<KChunkSize/KPageSize; ++pageIndex)
TBool committed = false;
TPhysAddr phys = m.PtePhysAddr(pPte[pageIndex], pageIndex);
if(phys!=KPhysAddrInvalid)
// we have a page...
// make sure we own the page...
SPageInfo* pi = SPageInfo::SafeFromPhysAddr(phys);
if(pi && pi->Type()==SPageInfo::EChunk && pi->Owner()==this)
if(committedBase==-1)
committedBase = chunkIndex*KChunkSize+pageIndex*KPageSize; // start of new region
if(committedBase!=-1)
// generate trace for region...
NKern::FlashSystem();
TUint committedEnd = chunkIndex*KChunkSize+pageIndex*KPageSize;
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);
if((pageIndex&15)==0)
NKern::FlashSystem();
NKern::UnlockSystem();
if(committedBase!=-1)
TUint committedEnd = chunkEndIndex*KChunkSize;
BTrace12(BTrace::EChunks, memoryOwned?BTrace::EChunkMemoryAllocated:BTrace::EChunkMemoryAdded,this,committedBase,committedEnd-committedBase);