Update contrib.
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\direct\mchunk.cpp
//
//

#include <memmodel.h>
DMemModelChunk::~DMemModelChunk()
    {
    __KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
    if (iRegionSize)
        MM::FreeRegion(iRegionBase,iRegionSize);
    __KTRACE_OPT(KMEMTRACE, Kern::Printf("MT:D %d %x %O",NTickCount(),this,this););
#ifdef BTRACE_CHUNKS
    BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
    // Queue any DFC registered for notification of this chunk's destruction
    TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
    if (dfc)
        dfc->Enque();
    }
TUint8* DMemModelChunk::Base(DProcess* aProcess)
    {
    // Direct model: a chunk's base address is the same in every process
    return iBase;
    }
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& anInfo)
    {
    __ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

    if (iAttributes&EMemoryNotOwned)
        return KErrNotSupported;
    if (anInfo.iMaxSize<=0)
        return KErrArgument;
    TInt r=KErrNone;
    iMaxSize=MM::RoundToBlockSize(anInfo.iMaxSize);
    switch (anInfo.iType)
        {
    case EUserSelfModCode:
    case ESharedKernelSingle:
    case ESharedKernelMultiple:
        MM::WaitRamAlloc();
        r=MM::AllocRegion(iRegionBase, iMaxSize);
        if (r==KErrNone)
            iRegionSize=iMaxSize;
        else
            MM::AllocFailed=ETrue;
        MM::SignalRamAlloc();
        iBase=(TUint8*)iRegionBase;
        if (r==KErrNone)
            {
            iMapAttr = EMapAttrCachedMax;
            __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate clear %x+%x",iRegionBase,iRegionSize));
            // Clear the memory to the value determined by the chunk's clear byte
            memset((TAny*)iRegionBase, iClearByte, MM::RoundToBlockSize(iRegionSize));
            }
        break;
    default:
        break;
        }
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate %O ret %d",this,r));
    __KTRACE_OPT(KMMU,Kern::Printf("RegionBase=%08x, RegionSize=%08x",iRegionBase,iRegionSize));
    __KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);MM::SignalRamAlloc();});
#ifdef BTRACE_CHUNKS
    TKName nameBuf; Name(nameBuf);
    BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
    if (iOwningProcess)
        BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
    BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
    return r;
    }
void DMemModelChunk::SetFixedAddress(TLinAddr anAddr, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08X size %08X",this,anAddr,aSize));
    iSize=MM::RoundToBlockSize(aSize);
    if (iSize>iMaxSize)
        iMaxSize=iSize;
    iBase=(TUint8*)anAddr;
    }
TInt DMemModelChunk::Adjust(TInt aNewSize)
//
// Adjust a standard chunk.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
    if (iAttributes & (EDoubleEnded|EDisconnected))
        return KErrGeneral;
    if (aNewSize<0 || aNewSize>iMaxSize)
        return KErrArgument;
    // Direct model: all memory is committed at creation, so there is nothing to do
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x",this,iSize));
    __KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
    return KErrNone;
    }
TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
        return KErrGeneral;
    if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
        return KErrArgument;
    TInt newSize=aTop-aBottom;
    if (newSize>iMaxSize)
        return KErrArgument;
    iStartPos=aBottom;
    iSize=newSize;
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x",this,iStartPos,iSize));
    __KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
    return KErrNone;
    }
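// Usage sketch (editor's illustration, not part of the original source): a
// double-ended chunk keeps one committed window [aBottom,aTop) inside the
// reserved region. From the user side this is typically driven via RChunk:
//    RChunk c;
//    c.CreateDoubleEndedLocal(0x1000, 0x3000, 0x100000); // committed window 0x1000..0x3000
//    c.AdjustDoubleEnded(0x2000, 0x5000);                // slide/grow the window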
TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
    {
    if(TUint(aOffset)>=TUint(iMaxSize))
        return KErrArgument;
    if(TUint(aOffset+aSize)>TUint(iMaxSize))
        return KErrArgument;
    aKernelAddress = (TLinAddr)iBase+aOffset;
    return KErrNone;
    }
TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
    {
    TInt r=Address(aOffset,aSize,aKernelAddress);
    if(r!=KErrNone)
        return r;

    TPhysAddr physStart = Epoc::LinearToPhysical(aKernelAddress);

    // Editor's reconstruction: the original initialiser of pageShift is not
    // shown; deriving it from the MMU page size is one plausible form.
    TInt pageShift = __e32_find_ms1_32(Kern::RoundToPageSize(1));
    TUint32 page = aKernelAddress>>pageShift<<pageShift;
    TUint32 lastPage = (aKernelAddress+aSize-1)>>pageShift<<pageShift;
    TUint32* pageList = aPhysicalPageList;
    TUint32 nextPhys = Epoc::LinearToPhysical(page);
    TUint32 pageSize = 1<<pageShift;
    while(page<=lastPage)
        {
        TPhysAddr phys = Epoc::LinearToPhysical(page);
        if(pageList)
            *pageList++ = phys;
        if(phys!=nextPhys)
            nextPhys = KPhysAddrInvalid;
        else
            nextPhys += pageSize;
        page += pageSize;
        }
    if(nextPhys==KPhysAddrInvalid)
        {
        // Memory is discontiguous...
        aPhysicalAddress = KPhysAddrInvalid;
        return 1;
        }
    // Memory is contiguous...
    aPhysicalAddress = physStart;
    return KErrNone;
    }
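// Usage sketch (editor's illustration, not part of the original source): a
// caller asking for the physical layout of a committed range, e.g. to set up
// a DMA transfer. 'chunk' and 'size' are hypothetical.
//    TLinAddr kern;
//    TUint32 phys;
//    TUint32 pages[8];
//    TInt r = chunk->PhysicalAddress(0, size, kern, phys, pages);
//    if (r==KErrNone)
//        ; // phys is the base of one physically contiguous run
//    else if (r==1)
//        ; // discontiguous: program each entry of pages[] separately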
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
        return KErrArgument;
    if(LOGICAL_XOR((TInt)aCommitType&DChunk::ECommitPhysicalMask, iAttributes&DChunk::EMemoryNotOwned))
        return KErrNotSupported; // Commit type doesn't match 'memory owned' type

    if((TInt)aCommitType&DChunk::ECommitPhysicalMask)
        return KErrNotSupported;
    if(aCommitType==DChunk::ECommitContiguous)
        {
        // We can't commit contiguous memory; we just have to take what's already there.
        // So check whether the memory is contiguous, and if not, return KErrNoMemory -
        // which is what the other memory models do if they can't find enough contiguous RAM.
        TLinAddr kernAddr;
        if(PhysicalAddress(aOffset,aSize,kernAddr,*aExtraArg)!=KErrNone)
            return KErrNoMemory;
        }
    else if(aCommitType!=DChunk::ECommitDiscontiguous)
        return KErrArgument;
    return KErrNone;
    }
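// Editor's note on the LOGICAL_XOR check above, assuming the standard
// DChunk::TCommitType values (ECommitPhysicalMask selects the *Physical
// variants). The check rejects exactly one of the two flags being set:
//    memory-owned chunk  + ECommitDiscontiguous          -> allowed
//    memory-owned chunk  + ECommitDiscontiguousPhysical  -> KErrNotSupported
//    not-owned chunk     + ECommitDiscontiguous          -> KErrNotSupported
// Matching physical commits then still fail the explicit physical-mask test,
// since this model never supports them.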
TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
//
// Allocate offset and commit to a disconnected chunk.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (aSize<=0 || aSize>iMaxSize)
        return KErrArgument;
    TInt r=KErrNotSupported;
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
    return r;
    }
TInt DMemModelChunk::Decommit(TInt anOffset, TInt aSize)
//
// Decommit from a disconnected chunk.
//
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",anOffset,aSize));
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
        return KErrArgument;
    return KErrNone;
    }
void DMemModelChunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
    {
    MM::Panic(MM::EUnsupportedOperation);
    }
TInt DMemModelChunk::Unlock(TInt anOffset, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",anOffset,aSize));
    if (!(iAttributes&ECache))
        return KErrGeneral;
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
        return KErrArgument;
    return KErrNone;
    }
TInt DMemModelChunk::Lock(TInt anOffset, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",anOffset,aSize));
    if (!(iAttributes&ECache))
        return KErrGeneral;
    if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
        return KErrGeneral;
    if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
        return KErrArgument;
    return KErrNone;
    }
TInt DMemModelChunk::CheckAccess()
    {
    DProcess* pP=TheCurrentThread->iOwningProcess;
    if (iAttributes&EPrivate)
        {
        if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
            return KErrAccessDenied;
        }
    return KErrNone;
    }
TUint32 MM::RoundToBlockSize(TUint32 aSize)
    {
    TUint32 m=MM::RamBlockSize-1;
    return (aSize+m)&~m;
    }
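// Worked example (editor's note, assuming a 4KB RAM block size): m==0xFFF, so
// RoundToBlockSize(0x1001) == (0x1001+0xFFF)&~0xFFF == 0x2000, while sizes
// already block-aligned are returned unchanged. This relies on the block size
// being a power of two.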
void MM::FreeRegion(TLinAddr aBase, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("MM::FreeRegion base %08x size %08x",aBase,aSize));
    aSize=MM::RoundToBlockSize(aSize);
    __ASSERT_ALWAYS(aBase>=MM::UserDataSectionBase && aBase+aSize<=MM::UserDataSectionEnd, MM::Panic(MM::EFreeInvalidRegion));
    TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
    TInt nBlocks=aSize>>MM::RamBlockShift;
    MM::RamAllocator->Free(block, nBlocks);
    }
TInt MM::AllocRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion size 0x%x align %d",aSize,aAlign));
    TInt align=Max(aAlign-MM::RamBlockShift, 0);
    TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
    TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
    TInt block=MM::RamAllocator->AllocAligned(nBlocks, align, base, ETrue); // returns first block number or -1
    if (block<0)
        return KErrNoMemory;
    MM::RamAllocator->Alloc(block,nBlocks);
    aBase=MM::UserDataSectionBase+(block<<MM::RamBlockShift);
    __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion address %08x",aBase));
    return KErrNone;
    }
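// Worked example (editor's note, hypothetical values): with RamBlockShift==12
// and UserDataSectionBase==0x40000000, a successful AllocAligned returning
// block 5 yields aBase == 0x40000000 + (5<<12) == 0x40005000. The allocator
// works in block numbers; only this final shift-and-add converts back to a
// linear address.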
TInt MM::ClaimRegion(TLinAddr aBase, TInt aSize)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("MM::ClaimRegion base %08x size %08x",aBase,aSize));
    TUint32 m=MM::RamBlockSize-1;
    aSize=MM::RoundToBlockSize(aSize+(aBase&m));
    aBase&=~m;
    if (aBase<MM::UserDataSectionBase || TUint32(aSize)>MM::UserDataSectionEnd-aBase)
        return KErrArgument;
    TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
    TInt nBlocks=aSize>>MM::RamBlockShift;
    if (MM::RamAllocator->NotFree(block, nBlocks))
        return KErrInUse;
    MM::RamAllocator->Alloc(block, nBlocks);
    return KErrNone;
    }
// Allocate a physically contiguous region
TInt MM::AllocContiguousRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
    {
#ifndef __CPU_HAS_MMU
    return MM::AllocRegion(aBase, aSize, aAlign);
#else
    __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion size 0x%x align %d",aSize,aAlign));
    TBitMapAllocator* sa = MM::SecondaryAllocator;
    if (!sa)
        return MM::AllocRegion(aBase, aSize, aAlign); // only one physical bank
    TBitMapAllocator* ra = MM::RamAllocator;
    TInt align=Max(aAlign-MM::RamBlockShift, 0);
    TUint32 alignmask = (1u<<align)-1;
    TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
    TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
    const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
    const SRamBank* pB = banks;
    TInt bnum=0;
    TInt block=-1;
    for (; pB->iSize; ++pB)
        {
        TInt nb = pB->iSize >> MM::RamBlockShift;
        sa->CopyAlignedRange(ra, bnum, nb);
        TInt basealign = (base + bnum) & alignmask;
        block = sa->AllocAligned(nBlocks, align, basealign, ETrue); // returns first block number or -1
        if (block>=0)
            break;
        bnum += nb;
        }
    if (block<0)
        return KErrNoMemory;
    MM::RamAllocator->Alloc(block + bnum, nBlocks);
    aBase = MM::UserDataSectionBase + ((block + bnum)<<MM::RamBlockShift);
    __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion address %08x",aBase));
    return KErrNone;
#endif
    }
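// How the scan works (editor's note): physical RAM may be split into several
// banks, and a physically contiguous run must not straddle a bank boundary.
// So for each bank in turn, the live allocator state for just that bank is
// copied into the scratch SecondaryAllocator and an aligned run is sought
// within it alone; bnum tracks the block number at which the current bank
// starts, so block+bnum is the run's position in the real allocator.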
TInt MM::BlockNumber(TPhysAddr aAddr)
    {
    __KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x",aAddr));
    const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
    const SRamBank* pB = banks;
    TInt bnum=0;
    for (; pB->iSize; ++pB)
        {
        if (aAddr >= pB->iBase)
            {
            TUint32 offset = aAddr - pB->iBase;
            if (offset < pB->iSize)
                {
                TInt bn = bnum + TInt(offset>>MM::RamBlockShift);
                __KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x->%x",aAddr,bn));
                return bn;
                }
            }
        TInt nb = pB->iSize >> MM::RamBlockShift;
        bnum += nb;
        }
    return KErrNotFound;
    }
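// Worked example (editor's note, hypothetical banks): with RamBlockShift==12,
// bank 0 at 0x80000000 of size 0x100000 and bank 1 at 0x90000000,
// BlockNumber(0x90002000) == 0x100 + 2 == 0x102: all 0x100 blocks of bank 0
// are counted first, then block 2 within bank 1.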
/********************************************
 * Hardware chunk abstraction
 ********************************************/

/**
Creates a hardware chunk object for accessing the given physical address range.

@pre Call in a thread context.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Calling thread must be in a critical section.
*/
EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aAttribs)
    {
    CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
    __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aAttribs));
    if (aSize<=0)
        return KErrArgument;
    DPlatChunkHw* pC=new DPlatChunkHw;
    if (!pC)
        return KErrNoMemory;
    __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw created at %08x",pC));
    pC->iPhysAddr=aAddr;    // direct model: the range is addressed at its physical address
    aChunk=pC;
    return KErrNone;
    }
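// Usage sketch (editor's illustration, not part of the original source): a
// device driver obtaining a physically contiguous buffer and wrapping it in a
// hardware chunk. 'size' is hypothetical; error cleanup is elided.
//    TPhysAddr phys;
//    TInt r = Epoc::AllocPhysicalRam(size, phys);
//    if (r==KErrNone)
//        {
//        DPlatChunkHw* hwChunk;
//        r = DPlatChunkHw::New(hwChunk, phys, size, EMapAttrSupRw|EMapAttrFullyBlocking);
//        // on success the buffer is accessible at hwChunk->LinearAddress()
//        }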
void DMemModelChunk::BTracePrime(TInt aCategory)
    {
    DChunk::BTracePrime(aCategory);
#ifdef BTRACE_CHUNKS
    if (aCategory == BTrace::EChunks || aCategory == -1)
        {
        BTrace12(BTrace::EChunks, BTrace::EChunkMemoryAllocated,this,0,this->iSize);
        }
#endif
    }