Update contrib.
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

// (include list reconstructed; these are the headers this implementation relies on)
#include "memmodel.h"
#include "mmu/mm.h"
DMemModelChunk::DMemModelChunk()
	{
	}

DMemModelChunk::~DMemModelChunk()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));

	MM::MappingDestroy(iKernelMapping);
	MM::MemoryDestroy(iMemoryObject);

	delete iPageBitMap;
	delete iPermanentPageBitMap;

	TDfc* dfc = iDestroyedDfc;
	if (dfc)
		dfc->Enque(); // notify the interested party (exact queueing call elided in this extract)

	__KTRACE_OPT(KMEMTRACE, Kern::Printf("MT:D %d %x %O",NTickCount(),this,this));
#ifdef BTRACE_CHUNKS
	BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
	}

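// Close is called with the process (if any) from whose address space the
// chunk is being detached; dropping the last reference deletes the chunk
// object itself.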
TInt DMemModelChunk::Close(TAny* aPtr)
	{
	if (aPtr)
		{
		DMemModelProcess* pP=(DMemModelProcess*)aPtr;
		__NK_ASSERT_DEBUG(!iOwningProcess || iOwningProcess==pP);
		pP->RemoveChunk(this);
		}
	TInt r=Dec();
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Close %d %O",r,this));
	__NK_ASSERT_DEBUG(r > 0); // Should never be negative.
	if (r==1)
		{
		K::ObjDelete(this);
		return EObjectDeleted;
		}
	return 0;
	}

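// Decide whether this chunk's memory is data paged. Precedence, as
// implemented below: the chunk type and the presence of a data paging
// device gate everything; then the kernel-wide paging policy; then the
// explicit EPaged/EUnpaged create attribute; finally the creating
// process's own paging attribute.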
void DMemModelChunk::SetPaging(TUint aCreateAtt)
	{
	// Only user data chunks should be able to be data paged, i.e. only those
	// that can be created via the RChunk create methods.
	if ((iChunkType != EUserData && iChunkType != EUserSelfModCode) ||
		!(K::MemModelAttributes & EMemModelAttrDataPaging)) // Data paging device installed?
		{
		return;
		}

	// Pageable chunks must own their memory.
	__NK_ASSERT_DEBUG(!(iAttributes & EMemoryNotOwned));

	// Set the data paging attributes
	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
		{
		return;
		}
	if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
		{
		iAttributes |= EDataPaged;
		return;
		}
	TUint pagingAtt = aCreateAtt & TChunkCreate::EPagingMask;
	if (pagingAtt == TChunkCreate::EPaged)
		{
		iAttributes |= EDataPaged;
		return;
		}
	if (pagingAtt == TChunkCreate::EUnpaged)
		{
		return;
		}
	// No data paging attribute specified for this chunk so use the process's
	__NK_ASSERT_DEBUG(pagingAtt == TChunkCreate::EPagingUnspec);
	DProcess* currentProcess = TheCurrentThread->iOwningProcess;
	if (currentProcess->iAttributes & DProcess::EDataPaged)
		{
		iAttributes |= EDataPaged;
		}
	}

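// Second-phase construction. Validates the requested maximum size,
// allocates the page bitmaps used by disconnected and shared-kernel
// chunks, derives the memory object type and creation flags from the
// chunk attributes, creates the memory object, and (for chunks visible to
// the kernel) maps it into the kernel address space.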
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& aInfo)
	{
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	__KTRACE_OPT(KMMU,Kern::Printf("Chunk %O DoCreate att=%08x",this,iAttributes));
	if (aInfo.iMaxSize<=0)
		return KErrArgument;

	iMaxSize = MM::RoundToPageSize(aInfo.iMaxSize);

	TInt maxpages=iMaxSize>>KPageShift;
	if (iAttributes & EDisconnected)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPageBitMap=pM;
		__KTRACE_OPT(KMMU,Kern::Printf("PageBitMap at %08x, MaxPages %d",pM,maxpages));
		}
	if(iChunkType==ESharedKernelSingle || iChunkType==ESharedKernelMultiple)
		{
		TBitMapAllocator* pM=TBitMapAllocator::New(maxpages,ETrue);
		if (!pM)
			return KErrNoMemory;
		iPermanentPageBitMap = pM;
		}

	TMemoryAttributes attr = EMemoryAttributeStandard;
	TBool mapInKernel = false;
	TBool nowipe = false;
	TBool executable = false;
	TBool movable = false;
	TInt r;

	// (elided flag assignments and case labels in this switch are reconstructed)
	switch(iChunkType)
		{
	case EUserSelfModCode:
		executable = true;
		movable = true;
		break;

	case EUserData:
	case ERamDrive:
		movable = true;
		break;

	case EKernelMessage:
	case ESharedKernelSingle:
	case ESharedKernelMultiple:
	case ESharedIo:
		mapInKernel = true;
		r = MM::MemoryAttributes(attr,*(TMappingAttributes2*)&aInfo.iMapAttr);
		if (r!=KErrNone)
			return r;
		break;

	case EKernelData:
		nowipe = true;
		break;

	case EDllData:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EKernelStack:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EDll: // global code
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EKernelCode:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EUserCode: // local code
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case ESharedKernelMirror:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	default:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
		return KErrArgument;
		}

	// calculate memory type...
	TMemoryObjectType memoryType = EMemoryObjectUnpaged;
	if (iAttributes & EMemoryNotOwned)
		{
		if (memoryType != EMemoryObjectUnpaged)
			return KErrArgument;
		memoryType = EMemoryObjectHardware;
		}
	if (iAttributes & EDataPaged)
		{
		if (memoryType != EMemoryObjectUnpaged)
			return KErrArgument;
		memoryType = EMemoryObjectPaged;
		}
	if (iAttributes & ECache)
		{
		if (memoryType != EMemoryObjectUnpaged)
			return KErrArgument;
		memoryType = EMemoryObjectDiscardable;
		}
	if (memoryType == EMemoryObjectUnpaged)
		{
		if (movable)
			memoryType = EMemoryObjectMovable;
		}
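
	// EMemoryNotOwned, EDataPaged and ECache are mutually exclusive here:
	// each check above rejects the combination if another attribute has
	// already selected a memory type. Plain owned, unpaged memory becomes
	// movable only for the user-data chunk types flagged in the switch.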

	// calculate memory flags...
	TMemoryCreateFlags flags = nowipe ? EMemoryCreateNoWipe : EMemoryCreateDefault;
	flags = (TMemoryCreateFlags)(flags|EMemoryCreateUseCustomWipeByte|(iClearByte<<EMemoryCreateWipeByteShift));
	if (executable)
		flags = (TMemoryCreateFlags)(flags|EMemoryCreateAllowExecution);

	r = MM::MemoryNew(iMemoryObject,memoryType,MM::BytesToPages(iMaxSize),flags,attr);
	if (r!=KErrNone)
		return r;

	if (mapInKernel)
		{
		TInt r = MM::MappingNew(iKernelMapping, iMemoryObject, ESupervisorReadWrite, KKernelOsAsid);
		if (r!=KErrNone)
			return r; // Note, iMemoryObject will get cleaned-up when chunk is destroyed
		const TMappingAttributes2& lma = MM::LegacyMappingAttributes(attr,EUserReadWrite);
		*(TMappingAttributes2*)&iMapAttr = lma;
		}

#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if (iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsChunk,iMemoryObject,this);
#endif

	return KErrNone;
	}

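// A minimal sketch of how this path is reached for a shared-kernel chunk
// from a device driver (names per the kernel-side shared chunk API; error
// handling omitted):
//
//	TChunkCreateInfo info;
//	info.iType = TChunkCreateInfo::ESharedKernelMultiple;
//	info.iMaxSize = 0x10000;
//	info.iMapAttr = EMapAttrFullyBlocking;
//	info.iOwnsMemory = ETrue;
//	DChunk* chunk;
//	TLinAddr kernAddr;
//	TUint32 mapAttr;
//	TInt r = Kern::ChunkCreate(info, chunk, kernAddr, mapAttr);	// ends up in DoCreate()
//	if (r == KErrNone)
//		r = Kern::ChunkCommit(chunk, 0, 0x1000);			// ends up in Commit()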
void DMemModelChunk::SetFixedAddress(TLinAddr aAddr, TInt aInitialSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08x size %08x",this,aAddr,aInitialSize));
	iFixedBase = aAddr;
	iSize = MM::RoundToPageSize(aInitialSize);
	if (iSize)
		MM::MemoryClaimInitialPages(iMemoryObject,iFixedBase,iSize,ESupervisorReadWrite);
	}

TInt DMemModelChunk::SetAttributes(SChunkCreateInfo& aInfo)
	{
	// (elided case labels in this switch are reconstructed)
	switch(aInfo.iType)
		{
	case EKernelData:
	case EKernelMessage:
		iAttributes = EPrivate;
		break;
	case ERamDrive:
		iAttributes = EPrivate;
		break;
	case EUserData:
		if (aInfo.iGlobal)
			iAttributes = EPublic;
		else
			iAttributes = EPrivate;
		break;
	case EUserSelfModCode:
		if (aInfo.iGlobal)
			iAttributes = EPublic|ECode;
		else
			iAttributes = EPrivate|ECode;
		break;
	case ESharedKernelSingle:
	case ESharedKernelMultiple:
	case ESharedIo:
		iAttributes = EPublic;
		break;
	case EDllData:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EKernelStack:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EDll: // global code
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EKernelCode:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case EUserCode: // local code
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	case ESharedKernelMirror:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
	default:
		__NK_ASSERT_DEBUG(0); // invalid chunk type
		return KErrArgument;
		}
	return KErrNone;
	}

TInt DMemModelChunk::Adjust(TInt aNewSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
	if (iAttributes & (EDoubleEnded|EDisconnected))
		return KErrGeneral;
	if (aNewSize<0 || aNewSize>iMaxSize)
		return KErrArgument;

	TInt r=KErrNone;
	TInt newSize=MM::RoundToPageSize(aNewSize);
	if (newSize!=iSize)
		{
		MM::MemoryLock(iMemoryObject);
		if (newSize>iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust growing"));
			r=DoCommit(iSize,newSize-iSize);
			}
		else if (newSize<iSize)
			{
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust shrinking"));
			DoDecommit(newSize,iSize-newSize);
			}
		MM::MemoryUnlock(iMemoryObject);
		}

	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x",this,iSize));
	return r;
	}

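// Translate a chunk offset into the kernel-side linear address of a
// shared-kernel chunk. Worked example, assuming 4KB pages: for
// aOffset=0x1234 and aSize=0x2000 the affected pages run from
// start=0x1234>>12=1 through (0x1234+0x2000-1)>>12=3, i.e. size=3 pages,
// all of which must be committed (permanently mapped) for the lookup to
// succeed.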
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
	{
	if(!iPermanentPageBitMap)
		return KErrAccessDenied;
	if(TUint(aOffset)>=TUint(iMaxSize))
		return KErrArgument;
	if(TUint(aOffset+aSize)>TUint(iMaxSize))
		return KErrArgument;
	if(aSize<=0)
		return KErrArgument;
	TInt start = aOffset>>KPageShift;
	TInt size = ((aOffset+aSize-1)>>KPageShift)-start+1;
	if(iPermanentPageBitMap->NotAllocated(start,size))
		return KErrNotFound;
	aKernelAddress = MM::MappingBase(iKernelMapping)+aOffset;
	return KErrNone;
	}

TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
	{
	TInt r = Address(aOffset,aSize,aKernelAddress);
	if (r!=KErrNone)
		return r;

	TInt index = aOffset>>KPageShift;
	TInt count = ((aOffset+aSize-1)>>KPageShift)-index+1;
	r = MM::MemoryPhysAddr(iMemoryObject,index,count,aPhysicalAddress,aPhysicalPageList);
	if (r==KErrNone)
		aPhysicalAddress += aOffset&KPageMask;
	return r;
	}

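// Commit memory to a region of the chunk. The commit type selects how the
// pages are provided: ECommitDiscontiguous allocates fresh RAM page by
// page, ECommitContiguous allocates a physically contiguous run (returning
// its base through aExtraArg), and the two *Physical variants wrap
// caller-supplied physical pages which the chunk does not own.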
TInt DMemModelChunk::DoCommit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCommit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));

	__NK_ASSERT_DEBUG(((aOffset|aSize)&KPageMask)==0);

	TInt r = KErrArgument;
	switch(aCommitType)
		{
	case DChunk::ECommitDiscontiguous:
		r = MM::MemoryAlloc(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize));
		break;

	case DChunk::ECommitDiscontiguousPhysical:
		r = MM::MemoryAddPages(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), (TPhysAddr*)aExtraArg);
		break;

	case DChunk::ECommitContiguous:
		r = MM::MemoryAllocContiguous(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), 0, *(TPhysAddr*)aExtraArg);
		break;

	case DChunk::ECommitContiguousPhysical:
		r = MM::MemoryAddContiguous(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize), (TPhysAddr)aExtraArg);
		break;

	case DChunk::ECommitVirtual:
	default:
		__NK_ASSERT_DEBUG(0); // Invalid commit type
		r = KErrNotSupported;
		break;
		}

	if (r==KErrNone)
		{
		iSize += aSize;
		if(iPermanentPageBitMap)
			iPermanentPageBitMap->Alloc(aOffset>>KPageShift,aSize>>KPageShift);
#ifdef BTRACE_CHUNKS
		TInt subcategory = (aCommitType & DChunk::ECommitPhysicalMask) ? BTrace::EChunkMemoryAdded : BTrace::EChunkMemoryAllocated;
		BTraceContext12(BTrace::EChunks,subcategory,this,aOffset,aSize);
#endif
		}

	return r;
	}

void DMemModelChunk::DoDecommit(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoDecommit %x+%x",aOffset,aSize));

	__NK_ASSERT_DEBUG(((aOffset|aSize)&KPageMask)==0);

	TUint index = MM::BytesToPages(aOffset);
	TUint count = MM::BytesToPages(aSize);
	iSize -= count*KPageSize;
	if(iAttributes&EMemoryNotOwned)
		MM::MemoryRemovePages(iMemoryObject, index, count, 0);
	else
		MM::MemoryFree(iMemoryObject, index, count);

#ifdef BTRACE_CHUNKS
	if (count != 0)
		{
		TInt subcategory = (iAttributes & EMemoryNotOwned) ? BTrace::EChunkMemoryRemoved : BTrace::EChunkMemoryDeallocated;
		BTraceContext12(BTrace::EChunks,subcategory,this,aOffset,count*KPageSize);
		}
#endif
	}

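// Adjust a double-ended chunk so that the committed region becomes
// [aBottom,aTop). Worked example: with a committed region of
// [0x3000,0x7000) and a request for [0x5000,0x9000), the regions
// intersect at [0x5000,0x7000), so [0x3000,0x5000) is decommitted and
// [0x7000,0x9000) committed; iStartPos becomes 0x5000.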
TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
		return KErrArgument;

	aBottom &= ~KPageMask;
	aTop = MM::RoundToPageSize(aTop);
	TInt newSize=aTop-aBottom;
	if (newSize>iMaxSize)
		return KErrArgument;

	MM::MemoryLock(iMemoryObject);
	TInt initBottom=iStartPos;
	TInt initTop=iStartPos+iSize;
	TInt nBottom=Max(aBottom,iStartPos);	// intersection bottom
	TInt nTop=Min(aTop,iStartPos+iSize);	// intersection top
	TInt r=KErrNone;
	if (nBottom<nTop)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions intersect"));
		if (initBottom<nBottom)
			{
			iStartPos=aBottom;
			DoDecommit(initBottom,nBottom-initBottom);
			}
		if (initTop>nTop)
			DoDecommit(nTop,initTop-nTop); // this changes iSize
		if (aBottom<nBottom)
			{
			r=DoCommit(aBottom,nBottom-aBottom);
			if (r==KErrNone)
				{
				if (aTop>nTop)
					r=DoCommit(nTop,aTop-nTop);
				if (r==KErrNone)
					iStartPos=aBottom;
				else
					DoDecommit(aBottom,nBottom-aBottom);
				}
			}
		else if (aTop>nTop)
			r=DoCommit(nTop,aTop-nTop);
		}
	else
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Initial and final regions disjoint"));
		if (iSize)
			DoDecommit(initBottom,iSize);
		iStartPos=aBottom;
		if (newSize)
			r=DoCommit(iStartPos,newSize);
		}
	MM::MemoryUnlock(iMemoryObject);
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x",this,iStartPos,iSize));
	return r;
	}

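// Validate and page-align a region of a disconnected chunk. Returns a
// negative error code on failure, 0 if the rounded region is empty (the
// caller has nothing to do), and a positive value if the operation should
// proceed; callers therefore bail out on r<=0.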
TInt DMemModelChunk::CheckRegion(TInt& aOffset, TInt& aSize)
	{
	if((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if(aOffset<0 || aSize<0)
		return KErrArgument;
	if(aSize==0)
		return 0;

	TUint end = MM::RoundToPageSize(aOffset+aSize);
	if(end>TUint(iMaxSize))
		return KErrArgument;
	aOffset &= ~KPageMask;
	aSize = end-aOffset;
	if(end<=TUint(aOffset))
		return KErrArgument;

	return 1;
	}

TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));

	TInt r = CheckRegion(aOffset,aSize);
	if (r<=0)
		return r;

	MM::MemoryLock(iMemoryObject);
	TInt i=aOffset>>KPageShift;
	TInt n=aSize>>KPageShift;
	if (iPageBitMap->NotFree(i,n))
		r=KErrAlreadyExists;
	else
		{
		r=DoCommit(aOffset,aSize,aCommitType,aExtraArg);
		if (r==KErrNone)
			iPageBitMap->Alloc(i,n);
		}
	MM::MemoryUnlock(iMemoryObject);
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

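// Like Commit(), but the chunk chooses the offset: find a free run of
// pages in the reserved address range, commit it, and return the offset
// on success.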
TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));

	// the flexible memory model doesn't implement aGuard and aAlign...
	__NK_ASSERT_DEBUG(aGuard==0);
	(void)aGuard;
	__NK_ASSERT_DEBUG(aAlign==0);
	(void)aAlign;

	TInt dummyOffset = 0;
	TInt r = CheckRegion(dummyOffset,aSize);
	if (r<=0)
		return r;

	MM::MemoryLock(iMemoryObject);
	TInt n=aSize>>KPageShift;
	TInt i=iPageBitMap->AllocConsecutive(n, EFalse); // allocate the offset
	if (i<0)
		r=KErrNoMemory; // run out of reserved space for this chunk
	else
		{
		TInt offset=i<<KPageShift;
		__KTRACE_OPT(KMMU,Kern::Printf("Offset %x allocated",offset));
		r=DoCommit(offset,aSize);
		if (r==KErrNone)
			{
			iPageBitMap->Alloc(i,n);
			r=offset; // if operation successful, return allocated offset
			}
		}
	MM::MemoryUnlock(iMemoryObject);
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

TInt DMemModelChunk::Decommit(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",aOffset,aSize));
	TInt r = CheckRegion(aOffset,aSize);
	if (r<=0)
		return r;

	MM::MemoryLock(iMemoryObject);

	TInt i=aOffset>>KPageShift;
	TInt n=aSize>>KPageShift;

	__KTRACE_OPT(KMMU,Kern::Printf("Calling SelectiveFree(%d,%d)",i,n));

	TUint oldAvail = iPageBitMap->iAvail;
	iPageBitMap->SelectiveFree(i,n); // free those positions which are actually allocated
	TUint oldSize = iSize;

	DoDecommit(aOffset,aSize);

	// Use the bit map to adjust the size of the chunk as unlocked and reclaimed pages
	// will have been unmapped but not removed from the bit map as DoDecommit() only
	// decommits the mapped pages.
	TUint actualFreedPages = iPageBitMap->iAvail - oldAvail;
	iSize = oldSize - (actualFreedPages << KPageShift);

	MM::MemoryUnlock(iMemoryObject);

	r=KErrNone;
	__COND_DEBUG_EVENT(r==KErrNone, EEventUpdateChunk, this);
	return r;
	}

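// Unlock() and Lock() implement the cache-chunk protocol: Unlock() offers
// committed pages to the paging subsystem for discard, Lock() reclaims
// them. If a reclaim fails the region is decommitted, so a failed Lock()
// never leaves stale data mapped.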
TInt DMemModelChunk::Unlock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",aOffset,aSize));
	if(!(iAttributes&ECache))
		return KErrGeneral;
	TInt r = CheckRegion(aOffset,aSize);
	if (r<=0)
		return r;

	MM::MemoryLock(iMemoryObject);

	TInt i=aOffset>>KPageShift;
	TInt n=aSize>>KPageShift;
	if(iPageBitMap->NotAllocated(i,n))
		r = KErrNotFound;
	else
		r = MM::MemoryAllowDiscard(iMemoryObject,i,n);

	MM::MemoryUnlock(iMemoryObject);

	return r;
	}

TInt DMemModelChunk::Lock(TInt aOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",aOffset,aSize));
	if(!(iAttributes&ECache))
		return KErrGeneral;
	TInt r = CheckRegion(aOffset,aSize);
	if (r<=0)
		return r;

	r = MM::MemoryDisallowDiscard(iMemoryObject, MM::BytesToPages(aOffset), MM::BytesToPages(aSize));
	if (r!=KErrNone)
		Decommit(aOffset,aSize);

	return r;
	}

TInt DMemModelChunk::CheckAccess()
	{
	if(iOwningProcess && iOwningProcess!=TheCurrentThread->iOwningProcess)
		return KErrAccessDenied;
	return KErrNone;
	}

void DMemModelChunk::BTracePrime(TInt aCategory)
	{
	DChunk::BTracePrime(aCategory);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
		{
		if (iMemoryObject)
			{
			MM::MemoryBTracePrime(iMemoryObject);
			BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsChunk,iMemoryObject,this);
			}
		}
#endif
	}

void DMemModelChunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
	{
	// Physical page substitution is not supported on this memory model.
	MM::Panic(MM::EUnsupportedOperation);
	}