// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\direct\mchunk.cpp
//
//

#include <memmodel.h>

DMemModelChunk::~DMemModelChunk()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
	if (iRegionSize)
		{
		MM::WaitRamAlloc();
		MM::FreeRegion(iRegionBase,iRegionSize);
		__KTRACE_OPT(KMEMTRACE, Kern::Printf("MT:D %d %x %O",NTickCount(),this,this););
		MM::SignalRamAlloc();
#ifdef BTRACE_CHUNKS
		BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
		}
	iRegionSize=0;

	TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
	if(dfc)
		dfc->Enque();
	}


TUint8* DMemModelChunk::Base(DProcess* aProcess)
	{
	return iBase;
	}


TInt DMemModelChunk::DoCreate(SChunkCreateInfo& anInfo)
	{
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	if(iAttributes&EMemoryNotOwned)
		return KErrNotSupported;
	if (anInfo.iMaxSize<=0)
		return KErrArgument;
	TInt r=KErrNone;
	iMaxSize=MM::RoundToBlockSize(anInfo.iMaxSize);
	switch (anInfo.iType)
		{
	case EDll:
	case EUserCode:
	case EUserSelfModCode:
	case EUserData:
	case EDllData:
	case ESharedKernelSingle:
	case ESharedKernelMultiple:
	case ESharedIo:
	case EKernelMessage:
		MM::WaitRamAlloc();
		r=MM::AllocRegion(iRegionBase, iMaxSize);
		if (r==KErrNone)
			iRegionSize=iMaxSize;
		else
			MM::AllocFailed=ETrue;
		MM::SignalRamAlloc();
		iBase=(TUint8*)iRegionBase;
		iSize=iMaxSize;
		if(r==KErrNone)
			{
			iMapAttr = EMapAttrCachedMax;
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate clear %x+%x",iRegionBase,iRegionSize));

			// Clear memory to value determined by chunk member
			memset((TAny*)iRegionBase, iClearByte, MM::RoundToBlockSize(iRegionSize));
			}
		break;
	default:
		break;
		}

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate %O ret %d",this,r));
	__KTRACE_OPT(KMMU,Kern::Printf("RegionBase=%08x, RegionSize=%08x",iRegionBase,iRegionSize));
	__KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);MM::SignalRamAlloc();});
#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
	return r;
	}
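/*
Note: on the direct memory model DoCreate() above reserves the chunk's
entire maximum size up front (iSize==iMaxSize), so chunk creation commits
the whole region immediately. Illustrative user-side sketch only (standard
RChunk API assumed; not part of this file):

	RChunk chunk;
	TInt r=chunk.CreateLocal(0x1000, 0x100000);	// the full 1MB region is
												// allocated here, not on demand
	// ...
	chunk.Close();
*/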
void DMemModelChunk::SetFixedAddress(TLinAddr anAddr, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08X size %08X",this,anAddr,aSize));
	iSize=MM::RoundToBlockSize(aSize);
	if (iSize>iMaxSize)
		iMaxSize=iSize;
	iBase=(TUint8*)anAddr;
	}

TInt DMemModelChunk::Adjust(TInt aNewSize)
//
// Adjust a standard chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
	if (iAttributes & (EDoubleEnded|EDisconnected))
		return KErrGeneral;
	if (aNewSize<0 || aNewSize>iMaxSize)
		return KErrArgument;

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x",this,iSize));
	__KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
	return KErrNone;
	}

TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
//
// Adjust a double-ended chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
		return KErrGeneral;
	if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
		return KErrArgument;
	TInt newSize=aTop-aBottom;
	if (newSize>iMaxSize)
		return KErrArgument;
	iStartPos=aBottom;

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x",this,iStartPos,iSize));
	__KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
	return KErrNone;
	}

TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
	{
	if(TUint(aOffset)>=TUint(iMaxSize))
		return KErrArgument;
	if(TUint(aOffset+aSize)>TUint(iMaxSize))
		return KErrArgument;
	if(aSize<=0)
		return KErrArgument;
	aKernelAddress = (TLinAddr)iBase+aOffset;
	return KErrNone;
	}

TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
	{
	TInt r=Address(aOffset,aSize,aKernelAddress);
	if(r!=KErrNone)
		return r;

	TPhysAddr physStart = Epoc::LinearToPhysical(aKernelAddress);

	TInt pageShift = 12;
	TUint32 page = aKernelAddress>>pageShift<<pageShift;
	TUint32 lastPage = (aKernelAddress+aSize-1)>>pageShift<<pageShift;
	TUint32* pageList = aPhysicalPageList;
	TUint32 nextPhys = Epoc::LinearToPhysical(page);
	TUint32 pageSize = 1<<pageShift;
	while(page<=lastPage)
		{
		TPhysAddr phys = Epoc::LinearToPhysical(page);
		if(pageList)
			*pageList++ = phys;
		if(phys!=nextPhys)
			nextPhys = KPhysAddrInvalid;
		else
			nextPhys += pageSize;
		page += pageSize;
		}
	if(nextPhys==KPhysAddrInvalid)
		{
		// Memory is not physically contiguous...
		aPhysicalAddress = KPhysAddrInvalid;
		return KErrNotFound;
		}
	aPhysicalAddress = physStart;
	return KErrNone;
	}
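/*
Illustrative sketch only (variable names hypothetical): a kernel-side
client can use PhysicalAddress() above to obtain per-page physical
addresses for a chunk region, e.g. when preparing a DMA transfer:

	TLinAddr kernAddr;
	TUint32 physAddr;
	TUint32 pageList[16];	// assumes the region spans at most 16 pages
	TInt r=chunk->PhysicalAddress(offset, size, kernAddr, physAddr, pageList);
	// r==KErrNone: the region is physically contiguous starting at physAddr.
	// Otherwise pageList still holds the address of each individual page.
*/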
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d",aOffset,aSize,aCommitType));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	if(LOGICAL_XOR((TInt)aCommitType&DChunk::ECommitPhysicalMask, iAttributes&DChunk::EMemoryNotOwned))
		return KErrNotSupported;	// Commit type doesn't match 'memory owned' type

	if((TInt)aCommitType&DChunk::ECommitPhysicalMask)
		return KErrNotSupported;
	if(aCommitType==DChunk::ECommitContiguous)
		{
		// We can't commit contiguous memory, we just have to take what's already there.
		// So check to see if memory is contiguous, and if not, return KErrNoMemory -
		// which is what other Memory Models do if they can't find enough contiguous RAM.
		TLinAddr kernAddr;
		if(PhysicalAddress(aOffset,aSize,kernAddr,*aExtraArg)!=KErrNone)
			return KErrNoMemory;
		}
	else if(aCommitType!=DChunk::ECommitDiscontiguous)
		return KErrArgument;

	return KErrNone;
	}

TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
//
// Allocate offset and commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aSize<=0 || aSize>iMaxSize)
		return KErrArgument;
	TInt r=KErrNotSupported;
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
	return r;
	}

TInt DMemModelChunk::Decommit(TInt anOffset, TInt aSize)
//
// Decommit from a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",anOffset,aSize));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
		return KErrArgument;
	return KErrNone;
	}

void DMemModelChunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
	{
	MM::Panic(MM::EUnsupportedOperation);
	}

TInt DMemModelChunk::Unlock(TInt anOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Unlock %x+%x",anOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
		return KErrArgument;
	return KErrNone;
	}

TInt DMemModelChunk::Lock(TInt anOffset, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Lock %x+%x",anOffset,aSize));
	if (!(iAttributes&ECache))
		return KErrGeneral;
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
		return KErrArgument;
	return KErrNone;
	}

TInt DMemModelChunk::CheckAccess()
	{
	DProcess* pP=TheCurrentThread->iOwningProcess;
	if (iAttributes&EPrivate)
		{
		if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
			return KErrAccessDenied;
		}
	return KErrNone;
	}

TUint32 MM::RoundToBlockSize(TUint32 aSize)
	{
	TUint32 m=MM::RamBlockSize-1;	// RamBlockSize is a power of two
	return (aSize+m)&~m;			// round up to the next block boundary
	}

void MM::FreeRegion(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MM::FreeRegion base %08x size %08x",aBase,aSize));
	aSize=MM::RoundToBlockSize(aSize);
	__ASSERT_ALWAYS(aBase>=MM::UserDataSectionBase && aBase+aSize<=MM::UserDataSectionEnd, MM::Panic(MM::EFreeInvalidRegion));
	TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
	TInt nBlocks=aSize>>MM::RamBlockShift;
	MM::RamAllocator->Free(block, nBlocks);
	}

TInt MM::AllocRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion size 0x%x align %d",aSize,aAlign));
	TInt align=Max(aAlign-MM::RamBlockShift, 0);
	TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
	TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
	TInt block=MM::RamAllocator->AllocAligned(nBlocks, align, base, ETrue);	// returns first block number or -1
	if (block<0)
		return KErrNoMemory;
	MM::RamAllocator->Alloc(block,nBlocks);
	aBase=MM::UserDataSectionBase+(block<<MM::RamBlockShift);
	return KErrNone;
	}
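/*
Worked example (illustrative, assuming RamBlockSize==0x1000 and
RamBlockShift==12, i.e. 4KB blocks):

	MM::RoundToBlockSize(0x1234) == 0x2000	// (0x1234+0xFFF)&~0xFFF

A 0x2000-byte region therefore occupies 0x2000>>12 == 2 allocator blocks,
the first being block (aBase-MM::UserDataSectionBase)>>12.
*/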
TInt MM::ClaimRegion(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("MM::ClaimRegion base %08x size %08x",aBase,aSize));
	TUint32 m=MM::RamBlockSize-1;
	aSize=MM::RoundToBlockSize(aSize+(aBase&m));
	aBase&=~m;
	if (aBase<MM::UserDataSectionBase || TUint32(aSize)>MM::UserDataSectionEnd-aBase)
		return KErrArgument;
	TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
	TInt nBlocks=aSize>>MM::RamBlockShift;
	if (MM::RamAllocator->NotFree(block, nBlocks))
		return KErrInUse;
	MM::RamAllocator->Alloc(block, nBlocks);
	return KErrNone;
	}

// Allocate a physically contiguous region
TInt MM::AllocContiguousRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
	{
#ifndef __CPU_HAS_MMU
	return MM::AllocRegion(aBase, aSize, aAlign);
#else
	__KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion size 0x%x align %d",aSize,aAlign));
	TBitMapAllocator* sa = MM::SecondaryAllocator;
	if (!sa)
		return MM::AllocRegion(aBase, aSize, aAlign);	// only one physical bank

	TBitMapAllocator* ra = MM::RamAllocator;
	TInt align=Max(aAlign-MM::RamBlockShift, 0);
	TUint32 alignmask = (1u<<align)-1;
	TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
	TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
	const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
	const SRamBank* pB = banks;
	TInt bnum = 0;
	TInt block = -1;
	for (; pB->iSize; ++pB)
		{
		TInt nb = pB->iSize >> MM::RamBlockShift;
		sa->CopyAlignedRange(ra, bnum, nb);
		TInt basealign = (base + bnum) & alignmask;
		block = sa->AllocAligned(nBlocks, align, basealign, ETrue);	// returns first block number or -1
		if (block>=0)
			break;
		bnum += nb;
		}
	if (pB->iSize == 0)
		return KErrNoMemory;
	MM::RamAllocator->Alloc(block + bnum, nBlocks);
	aBase = MM::UserDataSectionBase + ((block + bnum)<<MM::RamBlockShift);
	return KErrNone;
#endif
	}

TInt MM::BlockNumber(TPhysAddr aAddr)
	{
	const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
	const SRamBank* pB = banks;
	TInt bnum = 0;
	for (; pB->iSize; ++pB)
		{
		if (aAddr >= pB->iBase)
			{
			TUint32 offset = aAddr - pB->iBase;
			if (offset < pB->iSize)
				{
				TInt bn = bnum + TInt(offset>>MM::RamBlockShift);
				__KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x->%x",aAddr,bn));
				return bn;
				}
			}
		TInt nb = pB->iSize >> MM::RamBlockShift;
		bnum += nb;
		}
	return KErrNotFound;
	}

/********************************************
 * Hardware chunk abstraction
 ********************************************/

/**
@pre Call in a thread context.
@pre Interrupts must be enabled.
@pre Kernel must be unlocked.
@pre No fast mutex can be held.
@pre Calling thread must be in a critical section.
*/
EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aAttribs)
	{
	CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
	__KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aAttribs));
	aChunk=NULL;
	if (aSize<=0)
		return KErrArgument;
	DPlatChunkHw* pC=new DPlatChunkHw;
	if (!pC)
		return KErrNoMemory;
	__KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw created at %08x",pC));

	pC->iPhysAddr=aAddr;
	pC->iLinAddr=aAddr;	// direct model: linear address equals physical address
	pC->iSize=aSize;
	aChunk=pC;
	return KErrNone;
	}


void DMemModelChunk::BTracePrime(TInt aCategory)
	{
	DChunk::BTracePrime(aCategory);

#ifdef BTRACE_CHUNKS
	if (aCategory == BTrace::EChunks || aCategory == -1)
		{
		BTrace12(BTrace::EChunks, BTrace::EChunkMemoryAllocated,this,0,this->iSize);
		}
#endif
	}
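/*
Illustrative usage of DPlatChunkHw::New() above (constant names and the
mapping attribute are hypothetical, not part of this file): a device driver
mapping a block of hardware registers might do:

	DPlatChunkHw* hwChunk=NULL;
	NKern::ThreadEnterCS();		// precondition: calling thread in a critical section
	TInt r=DPlatChunkHw::New(hwChunk, KHwRegsPhysBase, KHwRegsSize, EMapAttrSupRw);
	if (r==KErrNone)
		{
		volatile TUint32* regs=(volatile TUint32*)hwChunk->LinearAddress();
		// ... access registers; on this model the linear address is the physical one ...
		hwChunk->Close(NULL);	// release the chunk when finished
		}
	NKern::ThreadLeaveCS();
*/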