1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/direct/mchunk.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,451 @@
1.4 +// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\memmodel\epoc\direct\mchunk.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +#include <memmodel.h>
1.22 +
// Destructor: release the chunk's RAM region (if one was allocated) and
// run any destruction-notification DFC that was registered on the chunk.
DMemModelChunk::~DMemModelChunk()
	{
	__KTRACE_OPT(KTHREAD,Kern::Printf("DMemModelChunk destruct %O",this));
	// iRegionSize is non-zero only if DoCreate successfully allocated a region.
	if (iRegionSize)
		{
		// Region bookkeeping must be done under the RAM allocator mutex.
		MM::WaitRamAlloc();
		MM::FreeRegion(iRegionBase,iRegionSize);
		__KTRACE_OPT(KMEMTRACE, Kern::Printf("MT:D %d %x %O",NTickCount(),this,this););
		MM::SignalRamAlloc();
#ifdef BTRACE_CHUNKS
		BTraceContext4(BTrace::EChunks,BTrace::EChunkDestroyed,this);
#endif
		}
	iRegionSize=0;

	// Atomically claim the 'destroyed' DFC pointer so it is queued at most
	// once, even if something races with the destructor.
	TDfc* dfc = (TDfc*)__e32_atomic_swp_ord_ptr(&iDestroyedDfc, 0);
	if(dfc)
		dfc->Enque();
	}
1.42 +
1.43 +
// Return the base address of the chunk. The address does not depend on the
// process argument here, so aProcess is ignored.
TUint8* DMemModelChunk::Base(DProcess* aProcess)
	{
	return iBase;
	}
1.48 +
1.49 +
// Second-phase construction. For chunk types that own memory, the whole
// region (rounded up to RAM blocks) is allocated immediately and its linear
// address is used directly as the chunk base, so the chunk is always fully
// committed at its maximum size.
TInt DMemModelChunk::DoCreate(SChunkCreateInfo& anInfo)
	{
	__ASSERT_COMPILE(!(EMMChunkAttributesMask & EChunkAttributesMask));

	// Chunks over memory this model does not own are not supported (see
	// also Commit, which relies on this rejection).
	if(iAttributes&EMemoryNotOwned)
		return KErrNotSupported;
	if (anInfo.iMaxSize<=0)
		return KErrArgument;
	TInt r=KErrNone;
	// Round the requested maximum size up to a whole number of RAM blocks.
	iMaxSize=MM::RoundToBlockSize(anInfo.iMaxSize);
	switch (anInfo.iType)
		{
	case EDll:
	case EUserCode:
	case EUserSelfModCode:
	case EUserData:
	case EDllData:
	case ESharedKernelSingle:
	case ESharedKernelMultiple:
	case ESharedIo:
	case EKernelMessage:
		// Region allocation happens under the RAM allocator mutex.
		MM::WaitRamAlloc();
		r=MM::AllocRegion(iRegionBase, iMaxSize);
		if (r==KErrNone)
			iRegionSize=iMaxSize;
		else
			MM::AllocFailed=ETrue;	// record the failed allocation for the MM layer
		MM::SignalRamAlloc();
		iBase=(TUint8*)iRegionBase;
		iSize=iMaxSize;
		if(r==KErrNone)
			{
			iMapAttr = EMapAttrCachedMax;
			__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate clear %x+%x",iRegionBase,iRegionSize));

			// Clear memory to value determined by chunk member
			memset((TAny*)iRegionBase, iClearByte, MM::RoundToBlockSize(iRegionSize));
			}
		break;
	default:
		// Other chunk types allocate nothing here.
		break;
		}

	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::DoCreate %O ret %d",this,r));
	__KTRACE_OPT(KMMU,Kern::Printf("RegionBase=%08x, RegionSize=%08x",iRegionBase,iRegionSize));
	__KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:C %d %x %O",NTickCount(),this,this);MM::SignalRamAlloc();});
#ifdef BTRACE_CHUNKS
	TKName nameBuf;
	Name(nameBuf);
	BTraceContextN(BTrace::EChunks,BTrace::EChunkCreated,this,iMaxSize,nameBuf.Ptr(),nameBuf.Size());
	if(iOwningProcess)
		BTrace8(BTrace::EChunks,BTrace::EChunkOwner,this,iOwningProcess);
	BTraceContext12(BTrace::EChunks,BTrace::EChunkInfo,this,iChunkType,iAttributes);
#endif
	return r;
	}
1.106 +
1.107 +void DMemModelChunk::SetFixedAddress(TLinAddr anAddr, TInt aSize)
1.108 + {
1.109 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O SetFixedAddress %08X size %08X",this,anAddr,aSize));
1.110 + iSize=MM::RoundToBlockSize(aSize);
1.111 + if (iSize>iMaxSize)
1.112 + iMaxSize=iSize;
1.113 + iBase=(TUint8*)anAddr;
1.114 + }
1.115 +
1.116 +TInt DMemModelChunk::Adjust(TInt aNewSize)
1.117 +//
1.118 +// Adjust a standard chunk.
1.119 +//
1.120 + {
1.121 +
1.122 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Adjust %08x",aNewSize));
1.123 + if (iAttributes & (EDoubleEnded|EDisconnected))
1.124 + return KErrGeneral;
1.125 + if (aNewSize<0 || aNewSize>iMaxSize)
1.126 + return KErrArgument;
1.127 +
1.128 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x",this,iSize));
1.129 + __KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
1.130 + return KErrNone;
1.131 + }
1.132 +
1.133 +TInt DMemModelChunk::AdjustDoubleEnded(TInt aBottom, TInt aTop)
1.134 +//
1.135 +// Adjust a double-ended chunk.
1.136 +//
1.137 + {
1.138 +
1.139 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::AdjustDoubleEnded %x-%x",aBottom,aTop));
1.140 + if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDoubleEnded)
1.141 + return KErrGeneral;
1.142 + if (aTop<0 || aBottom<0 || aTop<aBottom || aTop>iMaxSize)
1.143 + return KErrArgument;
1.144 + TInt newSize=aTop-aBottom;
1.145 + if (newSize>iMaxSize)
1.146 + return KErrArgument;
1.147 + iStartPos=aBottom;
1.148 +
1.149 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk %O adjusted to %x+%x",this,iStartPos,iSize));
1.150 + __KTRACE_OPT(KMEMTRACE, {MM::WaitRamAlloc();Kern::Printf("MT:A %d %x %x %O",NTickCount(),this,iSize,this);MM::SignalRamAlloc();});
1.151 + return KErrNone;
1.152 + }
1.153 +
1.154 +TInt DMemModelChunk::Address(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress)
1.155 + {
1.156 + if(TUint(aOffset)>=TUint(iMaxSize))
1.157 + return KErrArgument;
1.158 + if(TUint(aOffset+aSize)>TUint(iMaxSize))
1.159 + return KErrArgument;
1.160 + if(aSize<=0)
1.161 + return KErrArgument;
1.162 + aKernelAddress = (TLinAddr)iBase+aOffset;
1.163 + return KErrNone;
1.164 + }
1.165 +
// Resolve the kernel linear address of a range within the chunk, fill in the
// optional per-page physical address list, and report whether the range is
// physically contiguous (returns KErrNone if contiguous, 1 if not).
TInt DMemModelChunk::PhysicalAddress(TInt aOffset, TInt aSize, TLinAddr& aKernelAddress, TUint32& aPhysicalAddress, TUint32* aPhysicalPageList)
	{
	TInt r=Address(aOffset,aSize,aKernelAddress);
	if(r!=KErrNone)
		return r;

	// Physical address of the (possibly unaligned) start of the range.
	TPhysAddr physStart = Epoc::LinearToPhysical(aKernelAddress);

	TInt pageShift = 12;	// assumes 4KB pages -- TODO confirm for this platform
	TUint32 page = aKernelAddress>>pageShift<<pageShift;			// first page, rounded down
	TUint32 lastPage = (aKernelAddress+aSize-1)>>pageShift<<pageShift;	// last page touched by the range
	TUint32* pageList = aPhysicalPageList;
	TUint32 nextPhys = Epoc::LinearToPhysical(page);	// expected physical address if contiguous
	TUint32 pageSize = 1<<pageShift;
	while(page<=lastPage)
		{
		TPhysAddr phys = Epoc::LinearToPhysical(page);
		if(pageList)
			*pageList++ = phys;	// caller-optional page list
		if(phys!=nextPhys)
			nextPhys = KPhysAddrInvalid;	// discontiguity found; latched for the final check
		else
			nextPhys += pageSize;
		page += pageSize;
		}
	if(nextPhys==KPhysAddrInvalid)
		{
		// Memory is discontiguous...
		aPhysicalAddress = KPhysAddrInvalid;
		return 1;	// positive: not an error, but not contiguous either
		}
	else
		{
		// Memory is contiguous...
		aPhysicalAddress = physStart;
		return KErrNone;
		}
	}
1.204 +
TInt DMemModelChunk::Commit(TInt aOffset, TInt aSize, TCommitType aCommitType, TUint32* aExtraArg)
//
// Commit to a disconnected chunk.
//
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Commit %x+%x type=%d extra=%08x",aOffset,aSize,aCommitType,aExtraArg));
	if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
		return KErrGeneral;
	if (aOffset<0 || aSize<0 || (aOffset+aSize)>iMaxSize)
		return KErrArgument;
	// 'Commit physical' requests are only valid on memory-not-owned chunks
	// and vice versa; a mismatch is rejected.
	if(LOGICAL_XOR((TInt)aCommitType&DChunk::ECommitPhysicalMask, iAttributes&DChunk::EMemoryNotOwned))
		return KErrNotSupported; // Commit type doesn't match 'memory owned' type

	// Memory-not-owned chunks are rejected at creation (see DoCreate), so a
	// physical commit can never be serviced here.
	if((TInt)aCommitType&DChunk::ECommitPhysicalMask)
		return KErrNotSupported;
	if(aCommitType==DChunk::ECommitContiguous)
		{
		// We can't commit contiguous memory, we just have to take what's already there.
		// So check to see if memory is contiguous, and if not, return KErrNoMemory -
		// which is what other Memory Models do if they can't find enough contiguous RAM.
		// NOTE(review): assumes aExtraArg is non-NULL for ECommitContiguous
		// (it receives the physical address) -- confirm against callers.
		TLinAddr kernAddr;
		if(PhysicalAddress(aOffset,aSize,kernAddr,*aExtraArg)!=KErrNone)
			return KErrNoMemory;
		}
	else if(aCommitType!=DChunk::ECommitDiscontiguous)
		return KErrArgument;

	return KErrNone;
	}
1.234 +
1.235 +TInt DMemModelChunk::Allocate(TInt aSize, TInt aGuard, TInt aAlign)
1.236 +//
1.237 +// Allocate offset and commit to a disconnected chunk.
1.238 +//
1.239 + {
1.240 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate %x %x %d",aSize,aGuard,aAlign));
1.241 + if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
1.242 + return KErrGeneral;
1.243 + if (aSize<=0 || aSize>iMaxSize)
1.244 + return KErrArgument;
1.245 + TInt r=KErrNotSupported;
1.246 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Allocate returns %x",r));
1.247 + return r;
1.248 + }
1.249 +
1.250 +TInt DMemModelChunk::Decommit(TInt anOffset, TInt aSize)
1.251 +//
1.252 +// Decommit from a disconnected chunk.
1.253 +//
1.254 + {
1.255 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",anOffset,aSize));
1.256 + if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
1.257 + return KErrGeneral;
1.258 + if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
1.259 + return KErrArgument;
1.260 + return KErrNone;
1.261 + }
1.262 +
// Page substitution is not supported on this memory model; any call is a
// programming error and panics the kernel.
void DMemModelChunk::Substitute(TInt /*aOffset*/, TPhysAddr /*aOldAddr*/, TPhysAddr /*aNewAddr*/)
	{
	MM::Panic(MM::EUnsupportedOperation);
	}
1.267 +
1.268 +TInt DMemModelChunk::Unlock(TInt anOffset, TInt aSize)
1.269 + {
1.270 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",anOffset,aSize));
1.271 + if (!(iAttributes&ECache))
1.272 + return KErrGeneral;
1.273 + if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
1.274 + return KErrGeneral;
1.275 + if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
1.276 + return KErrArgument;
1.277 + return KErrNone;
1.278 + }
1.279 +
1.280 +TInt DMemModelChunk::Lock(TInt anOffset, TInt aSize)
1.281 + {
1.282 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunk::Decommit %x+%x",anOffset,aSize));
1.283 + if (!(iAttributes&ECache))
1.284 + return KErrGeneral;
1.285 + if ((iAttributes & (EDoubleEnded|EDisconnected))!=EDisconnected)
1.286 + return KErrGeneral;
1.287 + if (anOffset<0 || aSize<0 || (anOffset+aSize)>iMaxSize)
1.288 + return KErrArgument;
1.289 + return KErrNone;
1.290 + }
1.291 +
1.292 +TInt DMemModelChunk::CheckAccess()
1.293 + {
1.294 + DProcess* pP=TheCurrentThread->iOwningProcess;
1.295 + if (iAttributes&EPrivate)
1.296 + {
1.297 + if (iOwningProcess && iOwningProcess!=pP && pP!=K::TheKernelProcess)
1.298 + return KErrAccessDenied;
1.299 + }
1.300 + return KErrNone;
1.301 + }
1.302 +
1.303 +TUint32 MM::RoundToBlockSize(TUint32 aSize)
1.304 + {
1.305 + TUint32 m=MM::RamBlockSize-1;
1.306 + return (aSize+m)&~m;
1.307 + }
1.308 +
1.309 +void MM::FreeRegion(TLinAddr aBase, TInt aSize)
1.310 + {
1.311 + __KTRACE_OPT(KMMU,Kern::Printf("MM::FreeRegion base %08x size %08x",aBase,aSize));
1.312 + aSize=MM::RoundToBlockSize(aSize);
1.313 + __ASSERT_ALWAYS(aBase>=MM::UserDataSectionBase && aBase+aSize<=MM::UserDataSectionEnd, MM::Panic(MM::EFreeInvalidRegion));
1.314 + TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
1.315 + TInt nBlocks=aSize>>MM::RamBlockShift;
1.316 + MM::RamAllocator->Free(block, nBlocks);
1.317 + }
1.318 +
1.319 +TInt MM::AllocRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
1.320 + {
1.321 + __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion size 0x%x align %d",aSize,aAlign));
1.322 + TInt align=Max(aAlign-MM::RamBlockShift, 0);
1.323 + TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
1.324 + TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
1.325 + TInt block=MM::RamAllocator->AllocAligned(nBlocks, align, base, ETrue); // returns first block number or -1
1.326 + if (block<0)
1.327 + return KErrNoMemory;
1.328 + MM::RamAllocator->Alloc(block,nBlocks);
1.329 + aBase=MM::UserDataSectionBase+(block<<MM::RamBlockShift);
1.330 + __KTRACE_OPT(KMMU,Kern::Printf("MM::AllocRegion address %08x",aBase));
1.331 + return KErrNone;
1.332 + }
1.333 +
1.334 +TInt MM::ClaimRegion(TLinAddr aBase, TInt aSize)
1.335 + {
1.336 + __KTRACE_OPT(KMMU,Kern::Printf("MM::ClaimRegion base %08x size %08x",aBase,aSize));
1.337 + TUint32 m=MM::RamBlockSize-1;
1.338 + aSize=MM::RoundToBlockSize(aSize+(aBase&m));
1.339 + aBase&=~m;
1.340 + if (aBase<MM::UserDataSectionBase || TUint32(aSize)>MM::UserDataSectionEnd-aBase)
1.341 + return KErrArgument;
1.342 + TInt block=(aBase-MM::UserDataSectionBase)>>MM::RamBlockShift;
1.343 + TInt nBlocks=aSize>>MM::RamBlockShift;
1.344 + if (MM::RamAllocator->NotFree(block, nBlocks))
1.345 + return KErrInUse;
1.346 + MM::RamAllocator->Alloc(block, nBlocks);
1.347 + return KErrNone;
1.348 + }
1.349 +
// Allocate a physically contiguous region
TInt MM::AllocContiguousRegion(TLinAddr& aBase, TInt aSize, TInt aAlign)
	{
#ifndef __CPU_HAS_MMU
	// No MMU: a plain region allocation suffices.
	return MM::AllocRegion(aBase, aSize, aAlign);
#else
	__KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion size 0x%x align %d",aSize,aAlign));
	TBitMapAllocator* sa = MM::SecondaryAllocator;
	if (!sa)
		return MM::AllocRegion(aBase, aSize, aAlign); // only one physical bank
	// Multiple physical banks: search each bank separately so the allocated
	// run of blocks cannot straddle a bank boundary (which would make it
	// physically discontiguous).
	TBitMapAllocator* ra = MM::RamAllocator;
	TInt align=Max(aAlign-MM::RamBlockShift, 0);
	TUint32 alignmask = (1u<<align)-1;
	TInt nBlocks=MM::RoundToBlockSize(aSize)>>MM::RamBlockShift;
	TInt base=(TInt)(MM::UserDataSectionBase>>MM::RamBlockShift);
	const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
	const SRamBank* pB = banks;
	TInt bnum = 0;		// running block index of the current bank's first block
	TInt block = -1;	// block found within the current bank (bank-relative)
	for (; pB->iSize; ++pB)
		{
		TInt nb = pB->iSize >> MM::RamBlockShift;
		// Copy this bank's allocation state into the scratch allocator and
		// search only within it.
		sa->CopyAlignedRange(ra, bnum, nb);
		TInt basealign = (base + bnum) & alignmask;	// keep alignment relative to global numbering
		block = sa->AllocAligned(nBlocks, align, basealign, ETrue); // returns first block number or -1
		if (block>=0)
			break;
		bnum += nb;
		}
	if (pB->iSize == 0)
		return KErrNoMemory;	// ran off the end of the bank table without a fit
	// Commit the find in the real allocator, translating back to global numbering.
	MM::RamAllocator->Alloc(block + bnum, nBlocks);
	aBase = MM::UserDataSectionBase + ((block + bnum)<<MM::RamBlockShift);
	__KTRACE_OPT(KMMU,Kern::Printf("MM::AllocContiguousRegion address %08x",aBase));
	return KErrNone;
#endif
	}
1.388 +
1.389 +TInt MM::BlockNumber(TPhysAddr aAddr)
1.390 + {
1.391 + __KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x",aAddr));
1.392 + const SRamBank* banks = (const SRamBank*)TheSuperPage().iRamBootData;
1.393 + const SRamBank* pB = banks;
1.394 + TInt bnum = 0;
1.395 + for (; pB->iSize; ++pB)
1.396 + {
1.397 + if (aAddr >= pB->iBase)
1.398 + {
1.399 + TUint32 offset = aAddr - pB->iBase;
1.400 + if (offset < pB->iSize)
1.401 + {
1.402 + TInt bn = bnum + TInt(offset>>MM::RamBlockShift);
1.403 + __KTRACE_OPT(KMMU,Kern::Printf("MM::BlockNumber %08x->%x",aAddr,bn));
1.404 + return bn;
1.405 + }
1.406 + }
1.407 + TInt nb = pB->iSize >> MM::RamBlockShift;
1.408 + bnum += nb;
1.409 + }
1.410 + return KErrNotFound;
1.411 + }
1.412 +
1.413 +/********************************************
1.414 + * Hardware chunk abstraction
1.415 + ********************************************/
1.416 +
1.417 +/**
1.418 + @pre Call in a thread context.
1.419 + @pre Interrupts must be enabled.
1.420 + @pre Kernel must be unlocked.
1.421 + @pre No fast mutex can be held.
1.422 + @pre Calling thread must be in a critical section.
1.423 + */
1.424 +EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aAttribs)
1.425 + {
1.426 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
1.427 + __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aAttribs));
1.428 + aChunk=NULL;
1.429 + if (aSize<=0)
1.430 + return KErrArgument;
1.431 + DPlatChunkHw* pC=new DPlatChunkHw;
1.432 + if (!pC)
1.433 + return KErrNoMemory;
1.434 + __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw created at %08x",pC));
1.435 +
1.436 + pC->iPhysAddr=aAddr;
1.437 + pC->iLinAddr=aAddr;
1.438 + pC->iSize=aSize;
1.439 + aChunk=pC;
1.440 + return KErrNone;
1.441 + }
1.442 +
1.443 +
// Re-emit this chunk's BTrace records when tracing is primed for the
// EChunks category (or for all categories, aCategory == -1).
void DMemModelChunk::BTracePrime(TInt aCategory)
	{
	DChunk::BTracePrime(aCategory);

#ifdef BTRACE_CHUNKS
	if (aCategory == BTrace::EChunks || aCategory == -1)
		{
		// Report the whole committed size starting at offset 0.
		BTrace12(BTrace::EChunks, BTrace::EChunkMemoryAllocated,this,0,this->iSize);
		}
#endif
	}