--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mprocess.cpp	Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,1138 @@
+// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+//
+
+#include <memmodel.h>
+#include "mmu/mm.h"
+#include "mmu/maddrcont.h"
+#include "mmboot.h"
+#include <kernel/cache.h>
+#include "execs.h"
+
+#define iMState		iWaitLink.iSpare1
+
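+// Fast mutex guarding each process's iSharedChunks container: held while the
+// container is searched (DThread::OpenSharedChunk) and while the container
+// pointer itself is replaced or deleted (DoAddChunk / DoRemoveChunk).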
+NFastMutex TheSharedChunkLock;
+
+#ifndef _DEBUG
+const TInt KChunkGranularity = 4; // amount to grow SChunkInfo list by
+const TInt KMaxChunkInfosInOneGo = 100; // max number of SChunkInfo objects to copy with System Lock held
+#else // if debug...
+const TInt KChunkGranularity = 1;
+const TInt KMaxChunkInfosInOneGo = 1;
+#endif
+
+
+
+/********************************************
+ * Process
+ ********************************************/
+
+DMemModelProcess::~DMemModelProcess()
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelProcess destruct"));
+	Destruct();
+	}
+
+
+void DMemModelProcess::Destruct()
+	{
+	__ASSERT_ALWAYS(!iOsAsidRefCount, MM::Panic(MM::EProcessDestructOsAsidRemaining));
+	__ASSERT_ALWAYS(!iChunkCount, MM::Panic(MM::EProcessDestructChunksRemaining));
+	Kern::Free(iChunks);
+	__ASSERT_ALWAYS(!iSharedChunks || iSharedChunks->Count()==0, MM::Panic(MM::EProcessDestructChunksRemaining));
+	delete iSharedChunks;
+
+	DProcess::Destruct();
+	}
+
+
+TInt DMemModelProcess::TryOpenOsAsid()
+	{
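+	// __e32_atomic_tas_ord32(&c,1,1,0) atomically does: if (c>=1) c+=1; else c+=0;
+	// and returns the original value. So the reference count is only incremented
+	// while it is still non-zero, i.e. the address space cannot be re-opened once
+	// the last reference has been closed.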
+	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, 1, 0))
+		{
+		return iOsAsid;
+		}
+	return KErrDied;
+	}
+
+
+void DMemModelProcess::CloseOsAsid()
+	{
+	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
+		{// Last reference has been closed so free the asid.
+		MM::AddressSpaceFree(iOsAsid);
+		}
+	}
+
+
+void DMemModelProcess::AsyncCloseOsAsid()
+	{
+	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
+		{// Last reference has been closed so free the asid asynchronously.
+		MM::AsyncAddressSpaceFree(iOsAsid);
+		}
+	}
+
+
+TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
+	{
+	aChunk=NULL;
+
+	DMemModelChunk* pC=new DMemModelChunk;
+	if (!pC)
+		return KErrNoMemory;
+
+	TChunkType type = aInfo.iType;
+	pC->iChunkType=type;
+	TInt r=pC->SetAttributes(aInfo);
+	if (r!=KErrNone)
+		{
+		pC->Close(NULL);
+		return r;
+		}
+
+	pC->iOwningProcess=(pC->iAttributes&DMemModelChunk::EPublic)?NULL:this;
+	r=pC->Create(aInfo);
+	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
+		{
+		if (aInfo.iRunAddress!=0)
+			pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
+		if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
+			{
+			if (pC->iAttributes & DChunk::EDisconnected)
+				{
+				r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
+				}
+			else if (pC->iAttributes & DChunk::EDoubleEnded)
+				{
+				r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
+				}
+			else
+				{
+				r=pC->Adjust(aInfo.iInitialTop);
+				}
+			}
+		}
+	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
+		{
+		r = AddChunk(pC, EFalse);
+		}
+	if (r==KErrNone)
+		{
+		if(pC->iKernelMapping)
+			aRunAddr = (TLinAddr)MM::MappingBase(pC->iKernelMapping);
+		pC->iDestroyedDfc = aInfo.iDestroyedDfc;
+		aChunk=(DChunk*)pC;
+		}
+	else
+		pC->Close(NULL); // NULL since chunk can't have been added to process
+	return r;
+	}
+
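+// Illustrative sketch (not part of the original source): a typical kernel-side
+// caller fills in an SChunkCreateInfo and lets NewChunk() create, commit and
+// add the chunk in one call. The field values below are assumptions chosen
+// only to show the shape of the API:
+//
+//   SChunkCreateInfo info;
+//   info.iType = EUserData;
+//   info.iOperations = SChunkCreateInfo::EAdjust | SChunkCreateInfo::EAdd;
+//   info.iInitialBottom = 0;
+//   info.iInitialTop = 0x1000;      // commit one page
+//   TLinAddr runAddr;
+//   DChunk* chunk;
+//   TInt r = process->NewChunk(chunk, info, runAddr);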
+
+/**
+Determine whether this process should be data paged.
+
+@param aInfo A reference to the create info for this process.
+@return KErrNone if successful, or KErrCorrupt if the image sets both the
+		paged and unpaged flags.
+ */
+TInt DMemModelProcess::SetPaging(const TProcessCreateInfo& aInfo)
+	{
+	TUint pagedFlags = aInfo.iFlags & TProcessCreateInfo::EDataPagingMask;
+	// If both the KImageDataPaged and KImageDataUnpaged flags are present the image is corrupt.
+	// Check this first to ensure that it is always verified.
+	if (pagedFlags == TProcessCreateInfo::EDataPagingMask)
+		{
+		return KErrCorrupt;
+		}
+
+	if (aInfo.iAttr & ECodeSegAttKernel ||
+		!(K::MemModelAttributes & EMemModelAttrDataPaging))
+		{// Kernel process shouldn't be data paged or no data paging device installed.
+		return KErrNone;
+		}
+
+	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
+	if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
+		{
+		iAttributes |= EDataPaged;
+		return KErrNone;
+		}
+	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
+		{// No paging allowed so just return.
+		return KErrNone;
+		}
+	if (pagedFlags == TProcessCreateInfo::EDataPaged)
+		{
+		iAttributes |= EDataPaged;
+		return KErrNone;
+		}
+	if (pagedFlags == TProcessCreateInfo::EDataUnpaged)
+		{// No paging set so just return.
+		return KErrNone;
+		}
+	// Neither paged nor unpaged set so use the default paging policy.
+	// dataPolicy must be EKernelConfigDataPagingPolicyDefaultUnpaged or
+	// EKernelConfigDataPagingPolicyDefaultPaged.
+	__NK_ASSERT_DEBUG(pagedFlags == TProcessCreateInfo::EDataPagingUnspecified);
+	__NK_ASSERT_DEBUG(	dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged ||
+						dataPolicy == EKernelConfigDataPagingPolicyDefaultUnpaged);
+	if (dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged)
+		{
+		iAttributes |= EDataPaged;
+		}
+	return KErrNone;
+	}
+
+
+TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
+	{
+	// Required so we can detect whether a process has been created and added
+	// to its object container by checking for iContainerID!=EProcess.
+	__ASSERT_COMPILE(EProcess != 0);
+	__KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));
+	TInt r=KErrNone;
+
+	if (aKernelProcess)
+		{
+		iAttributes |= ESupervisor;
+		iOsAsid = KKernelOsAsid;
+		}
+	else
+		{
+		r = MM::AddressSpaceAlloc(iPageDir);
+		if (r>=0)
+			{
+			iOsAsid = r;
+			r = KErrNone;
+			}
+		}
+	if (r == KErrNone)
+		{// Add this process's own reference to its os asid.
+		__e32_atomic_store_ord32(&iOsAsidRefCount, 1);
+		}
+
+#ifdef BTRACE_FLEXIBLE_MEM_MODEL
+	BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);
+#endif
+
+	__KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, PD=%08x",iOsAsid,iPageDir));
+	__KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
+	return r;
+	}
+
+TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
+	{
+	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
+	TInt r = KErrNone;
+	TInt dataBssSize = MM::RoundToPageSize(aInfo.iTotalDataSize);
+	if(dataBssSize)
+		{
+		DMemoryObject* memory;
+		TMemoryObjectType memoryType = iAttributes&EDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
+		r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(dataBssSize));
+		if(r==KErrNone)
+			{
+			r = MM::MemoryAlloc(memory,0,MM::BytesToPages(dataBssSize));
+			if(r==KErrNone)
+				{
+				r = MM::MappingNew(iDataBssMapping,memory,EUserReadWrite,OsAsid());
+				}
+			if(r!=KErrNone)
+				MM::MemoryDestroy(memory);
+			else
+				{
+				iDataBssRunAddress = MM::MappingBase(iDataBssMapping);
+#ifdef BTRACE_FLEXIBLE_MEM_MODEL
+				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
+#endif
+				}
+			}
+		}
+	__KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, ",dataBssSize));
+
+	return r;
+	}
+
+
+TInt DMemModelProcess::AttachExistingCodeSeg(TProcessCreateInfo& aInfo)
+	{
+	TInt r = DEpocProcess::AttachExistingCodeSeg(aInfo);
+	if(r==KErrNone)
+		{
+		// allocate virtual memory for the EXE's codeseg...
+		DMemModelCodeSeg* seg = (DMemModelCodeSeg*)iTempCodeSeg;
+		if(seg->iAttr&ECodeSegAttAddrNotUnique)
+			{
+			TUint codeSize = seg->iSize;
+			TLinAddr codeAddr = seg->RamInfo().iCodeRunAddr;
+			TBool isDemandPaged = seg->iAttr&ECodeSegAttCodePaged;
+			// Allocate virtual memory for the code seg using the os asid.
+			// No need to open a reference on the os asid as the process is not
+			// fully created yet, so it can't die and free the os asid.
+			r = MM::VirtualAlloc(OsAsid(),codeAddr,codeSize,isDemandPaged);
+			if(r==KErrNone)
+				{
+				iCodeVirtualAllocSize = codeSize;
+				iCodeVirtualAllocAddress = codeAddr;
+				}
+			}
+		}
+
+	return r;
+	}
+
+
+TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool aIsReadOnly)
+	{
+	DMemModelChunk* pC=(DMemModelChunk*)aChunk;
+	if(pC->iOwningProcess && this!=pC->iOwningProcess)
+		return KErrAccessDenied;
+
+	TInt r = WaitProcessLock();
+	if(r==KErrNone)
+		{
+		TInt i = ChunkIndex(pC);
+		if(i>=0) // Found the chunk in this process, just up its count
+			{
+			iChunks[i].iAccessCount++;
+			__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[i].iAccessCount));
+			SignalProcessLock();
+			return KErrNone;
+			}
+		r = DoAddChunk(pC,aIsReadOnly);
+		SignalProcessLock();
+		}
+	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
+	return r;
+	}
+
+
+void M::FsRegisterThread()
+	{
+	TInternalRamDrive::Unlock();
+	}
+
+
+void ExecHandler::UnlockRamDrive()
+	{
+	}
+
+
+EXPORT_C TLinAddr TInternalRamDrive::Base()
+	{
+	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
+	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
+	NKern::LockSystem();
+	TLinAddr addr = (TLinAddr)pC->Base(pP);
+	NKern::UnlockSystem();
+	if(!addr)
+		{
+		Unlock();
+		NKern::LockSystem();
+		addr = (TLinAddr)pC->Base(pP);
+		NKern::UnlockSystem();
+		}
+	return addr;
+	}
+
+
+EXPORT_C void TInternalRamDrive::Unlock()
+	{
+	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
+	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
+
+	TInt r = pP->WaitProcessLock();
+	if(r==KErrNone)
+		if(pP->ChunkIndex(pC)==KErrNotFound)
+			r = pP->DoAddChunk(pC,EFalse);
+	__ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
+	pP->SignalProcessLock();
+	}
+
+
+EXPORT_C void TInternalRamDrive::Lock()
+	{
+	}
+
+
+TInt DMemModelProcess::DoAddChunk(DMemModelChunk* aChunk, TBool aIsReadOnly)
+	{
+	//
+	// Must hold the process $LOCK mutex before calling this.
+	// As the process lock is held it is safe to access iOsAsid without a reference.
+	//
+
+	__NK_ASSERT_DEBUG(ChunkIndex(aChunk)==KErrNotFound); // shouldn't be adding a chunk which is already added
+
+	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoAddChunk %O to %O",aChunk,this));
+
+	// create mapping for chunk...
+	DMemoryMapping* mapping;
+	TMappingPermissions perm = MM::MappingPermissions
+		(
+		iOsAsid!=(TInt)KKernelOsAsid,	// user?
+		aIsReadOnly==false, // write?
+		aChunk->iAttributes&DMemModelChunk::ECode // execute?
+		);
+	TInt r;
+	if(aChunk->iFixedBase) // HACK, kernel chunk has a fixed iBase
+		r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid,EMappingCreateExactVirtual,(TLinAddr)aChunk->iFixedBase);
+	else
+		r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid);
+	if(r!=KErrNone)
+		return r;
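+	// Remember the kernel process's mapping: DMemModelChunk::Base() uses it as a
+	// shortcut for shared chunks and DoRemoveChunk() clears it again on removal.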
+	if(iOsAsid==0)
+		aChunk->iKernelMapping = mapping;
+	TLinAddr base = MM::MappingBase(mapping);
+
+	// expand chunk info memory if required...
+	if(iChunkCount==iChunkAlloc)
+		{
+		TInt newAlloc = iChunkAlloc+KChunkGranularity;
+		r = Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
+		if(r!=KErrNone)
+			{
+			MM::MappingDestroy(mapping);
+			return r;
+			}
+		iChunkAlloc = newAlloc;
+		}
+
+	// insert new chunk info...
+	TUint i = ChunkInsertIndex(aChunk);
+	SChunkInfo* info = iChunks+i;
+	SChunkInfo* infoEnd = iChunks+iChunkCount;
+	NKern::LockSystem();
+	++iChunkCount;
+	for(;;)
+		{
+		// make space for new chunk info by shuffling along
+		// existing infos KMaxChunkInfosInOneGo at a time...
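+		// (at most KMaxChunkInfosInOneGo entries are moved per System Lock hold;
+		// FlashSystem() below briefly releases the lock between blocks)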
+		SChunkInfo* infoPtr = infoEnd-KMaxChunkInfosInOneGo;
+		if(infoPtr<info)
+			infoPtr = info;
+		memmove(infoPtr+1,infoPtr,(TLinAddr)infoEnd-(TLinAddr)infoPtr);
+		infoEnd = infoPtr;
+		if(infoEnd<=info)
+			break;
+		NKern::FlashSystem();
+		}
+	info->iChunk = aChunk;
+	info->iMapping = mapping;
+	info->iAccessCount = 1;
+	info->iIsReadOnly = aIsReadOnly;
+	NKern::UnlockSystem();
+
+	// add chunk to list of Shared Chunks...
+	if(aChunk->iChunkType==ESharedKernelSingle || aChunk->iChunkType==ESharedKernelMultiple)
+		{
+		if(!iSharedChunks)
+			iSharedChunks = new RAddressedContainer(&TheSharedChunkLock,iProcessLock);
+		if(!iSharedChunks)
+			r = KErrNoMemory;
+		else
+			r = iSharedChunks->Add(base,aChunk);
+		if(r!=KErrNone)
+			{
+			DoRemoveChunk(i);
+			return r;
+			}
+		}
+
+	// done OK...
+	__DEBUG_EVENT(EEventUpdateProcess, this);
+	return KErrNone;
+	}
+
+
+void DMemModelProcess::DoRemoveChunk(TInt aIndex)
+	{
+	__DEBUG_EVENT(EEventUpdateProcess, this);
+
+	DMemModelChunk* chunk = iChunks[aIndex].iChunk;
+	DMemoryMapping* mapping = iChunks[aIndex].iMapping;
+
+	if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
+		{
+		// remove chunk from list of Shared Chunks...
+		if(iSharedChunks)
+			{
+			iSharedChunks->Remove(MM::MappingBase(mapping));
+#ifdef _DEBUG
+			// delete iSharedChunks if it's empty, so memory leak test code passes...
+			if(iSharedChunks->Count()==0)
+				{
+				NKern::FMWait(&TheSharedChunkLock);
+				RAddressedContainer* s = iSharedChunks;
+				iSharedChunks = 0;
+				NKern::FMSignal(&TheSharedChunkLock);
+				delete s;
+				}
+#endif
+			}
+		}
+
+	// remove chunk from array...
+	SChunkInfo* infoStart = iChunks+aIndex+1;
+	SChunkInfo* infoEnd = iChunks+iChunkCount;
+	NKern::LockSystem();
+	for(;;)
+		{
+		// shuffle existing infos down KMaxChunkInfosInOneGo at a time...
+		SChunkInfo* infoPtr = infoStart+KMaxChunkInfosInOneGo;
+		if(infoPtr>infoEnd)
+			infoPtr = infoEnd;
+		memmove(infoStart-1,infoStart,(TLinAddr)infoPtr-(TLinAddr)infoStart);
+		infoStart = infoPtr;
+		if(infoStart>=infoEnd)
+			break;
+		NKern::FlashSystem();
+		}
+	--iChunkCount;
+	NKern::UnlockSystem();
+
+	if(mapping==chunk->iKernelMapping)
+		chunk->iKernelMapping = 0;
+
+	MM::MappingDestroy(mapping);
+	}
+
+
+/**
+Final chance for process to release resources during its death.
+
+Called with process $LOCK mutex held (if it exists).
+This mutex will not be released before it is deleted.
+I.e. no other thread will ever hold the mutex again.
+*/
+void DMemModelProcess::FinalRelease()
+	{
+	// Clean up any left over chunks (such as SharedIo buffers)
+	if(iProcessLock)
+		while(iChunkCount)
+			DoRemoveChunk(0);
+	// Destroy the remaining mappings and memory objects owned by this process
+	MM::MappingAndMemoryDestroy(iDataBssMapping);
+	if(iCodeVirtualAllocSize)
+		MM::VirtualFree(iOsAsid,iCodeVirtualAllocAddress,iCodeVirtualAllocSize);
+
+	// Close the original reference on the os asid.
+	CloseOsAsid();
+	}
+
+
+void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
+	{
+	// note that this can't be called after the process $LOCK mutex has been deleted
+	// since it can only be called by a thread in this process doing a handle close or
+	// dying, or by the process handles array being deleted due to the process dying,
+	// all of which happen before $LOCK is deleted.
+	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
+	Kern::MutexWait(*iProcessLock);
+	TInt i = ChunkIndex(aChunk);
+	if(i>=0) // Found the chunk
+		{
+		__KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[i].iAccessCount));
+		if(--iChunks[i].iAccessCount==0)
+			{
+			DoRemoveChunk(i);
+			}
+		}
+	Kern::MutexSignal(*iProcessLock);
+	}
+
+
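+// Return the address at which this chunk is mapped in aProcess's address space,
+// or NULL if the chunk is not currently mapped into that process.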
+TUint8* DMemModelChunk::Base(DProcess* aProcess)
+	{
+	DMemModelProcess* pP = (DMemModelProcess*)aProcess;
+	DMemoryMapping* mapping = 0;
+
+	if(iKernelMapping && pP==K::TheKernelProcess)
+		{
+		// shortcut for shared chunks...
+		mapping = iKernelMapping;
+		}
+	else
+		{
+		// find chunk in process...
+		TInt i = pP->ChunkIndex(this);
+		if(i>=0)
+			mapping = pP->iChunks[i].iMapping;
+		}
+
+	if(!mapping)
+		return 0;
+
+	return (TUint8*)MM::MappingBase(mapping);
+	}
+
+
+DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
+	{
+	DMemModelChunk* chunk = 0;
+
+	NKern::FMWait(&TheSharedChunkLock);
+	RAddressedContainer* list = ((DMemModelProcess*)iOwningProcess)->iSharedChunks;
+	if(list)
+		{
+		// search list...
+		TUint offset;
+		chunk = (DMemModelChunk*)list->Find((TLinAddr)aAddress,offset);
+		if(chunk && offset<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
+			aOffset = offset; // chunk found and opened successfully
+		else
+			chunk = 0; // failed
+		}
+	NKern::FMSignal(&TheSharedChunkLock);
+
+	return chunk;
+	}
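+
+// Illustrative sketch (assumed usage, not from the original source): a device
+// driver normally reaches this through the public wrapper Kern::OpenSharedChunk()
+// to translate a user-supplied address into a safely opened chunk:
+//
+//   TInt offset;
+//   DChunk* chunk = Kern::OpenSharedChunk(aThread, aUserPtr, EFalse, offset);
+//   if (chunk)
+//       {
+//       // ... access the chunk's memory at its kernel base address + offset ...
+//       chunk->Close(NULL);
+//       }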
+
+
+TUint DMemModelProcess::ChunkInsertIndex(DMemModelChunk* aChunk)
+	{
+	// need to hold iProcessLock or System Lock...
+#ifdef _DEBUG
+	if(K::Initialising==false && iProcessLock!=NULL && iProcessLock->iCleanup.iThread!=&Kern::CurrentThread())
+		{
+		// don't hold iProcessLock, so...
+		__ASSERT_SYSTEM_LOCK;
+		}
+#endif
+
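+	// iChunks is kept sorted by chunk pointer; return the index of the first
+	// entry above aChunk, i.e. the position at which aChunk would be inserted.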
+	// binary search...
+	SChunkInfo* list = iChunks;
+	TUint l = 0;
+	TUint r = iChunkCount;
+	TUint m;
+	while(l<r)
+		{
+		m = (l+r)>>1;
+		DChunk* x = list[m].iChunk;
+		if(x<=aChunk)
+			l = m+1;
+		else
+			r = m;
+		}
+	return r;
+	}
+
+
+TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk)
+	{
+	TUint i = ChunkInsertIndex(aChunk);
+	if(i && iChunks[--i].iChunk==aChunk)
+		return i;
+	return KErrNotFound;
+	}
+
+
+TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
+	{
+	__ASSERT_CRITICAL; // Must be in critical section so can't leak os asid references.
+
+	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
+	__KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
+	TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
+	TBool user_local=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
+	if (kernel_only && !(iAttributes&ESupervisor))
+		return KErrNotSupported;
+	if (seg.iAttr&ECodeSegAttKernel)
+		return KErrNone; // no extra mappings needed for kernel code
+
+	// Attempt to open a reference on the os asid; this is required so that
+	// MapUserRamCode() and CommitDllData() can use iOsAsid safely.
+	TInt osAsid = TryOpenOsAsid();
+	if (osAsid < 0)
+		{// The process has died.
+		return KErrDied;
+		}
+
+	TInt r=KErrNone;
+	if (user_local)
+		r=MapUserRamCode(seg.Memory());
+	if (seg.IsDll())
+		{
+		TInt total_data_size;
+		TLinAddr data_base;
+		seg.GetDataSizeAndBase(total_data_size, data_base);
+		if (r==KErrNone && total_data_size)
+			{
+			TInt size=MM::RoundToPageSize(total_data_size);
+			r=CommitDllData(data_base, size, aSeg);
+			if (r!=KErrNone && user_local)
+				UnmapUserRamCode(seg.Memory());
+			}
+		}
+	CloseOsAsid();
+
+	return r;
+	}
+
+
+void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
+	{
+	__ASSERT_CRITICAL; // Must be in critical section so can't leak os asid references.
+
+	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
+	__KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
+	if (seg.iAttr&ECodeSegAttKernel)
+		return; // no extra mappings needed for kernel code
+
+	// Attempt to open a reference on the os asid; this is required so that
+	// UnmapUserRamCode() and DecommitDllData() can use iOsAsid safely.
+	TInt osAsid = TryOpenOsAsid();
+	if (osAsid < 0)
+		{// The process has died and will have cleaned up any code segs.
+		return;
+		}
+
+	if (seg.IsDll())
+		{
+		TInt total_data_size;
+		TLinAddr data_base;
+		seg.GetDataSizeAndBase(total_data_size, data_base);
+		if (total_data_size)
+			DecommitDllData(data_base, MM::RoundToPageSize(total_data_size));
+		}
+	if (seg.Memory())
+		UnmapUserRamCode(seg.Memory());
+
+	CloseOsAsid();
+	}
+
+void DMemModelProcess::RemoveDllData()
+//
+// Call with CodeSegLock held
+//
+	{
+	}
+
+
+TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory)
+	{
+	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d",
+									this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
+	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);
+
+	TMappingCreateFlags createFlags = EMappingCreateExactVirtual;
+
+	if(!(aMemory->iCodeSeg->iAttr&ECodeSegAttAddrNotUnique))
+		{
+		// codeseg memory address is globally unique, (common address across all processes)...
+		FlagSet(createFlags,EMappingCreateCommonVirtual);
+		}
+
+	if(aMemory->iCodeSeg->IsExe())
+		{
+		// EXE codesegs have already had their virtual address allocated so we must adopt that...
+		__NK_ASSERT_DEBUG(iCodeVirtualAllocSize);
+		__NK_ASSERT_DEBUG(iCodeVirtualAllocAddress==aMemory->iRamInfo.iCodeRunAddr);
+		iCodeVirtualAllocSize = 0;
+		iCodeVirtualAllocAddress = 0;
+		FlagSet(createFlags,EMappingCreateAdoptVirtual);
+		}
+
+	DMemoryMapping* mapping;
+	return MM::MappingNew(mapping,aMemory->iCodeMemoryObject,EUserExecute,iOsAsid,createFlags,aMemory->iRamInfo.iCodeRunAddr);
+	}
+
+
+void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory)
+	{
+	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
+									this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
+
+	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);
+	MM::MappingDestroy(aMemory->iRamInfo.iCodeRunAddr,iOsAsid);
+	}
+
+
+TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize, DCodeSeg* aCodeSeg)
+	{
+	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
+
+	DMemoryObject* memory;
+	TMemoryObjectType memoryType = aCodeSeg->iAttr&ECodeSegAttDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
+	TInt r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(aSize));
+	if(r==KErrNone)
+		{
+		r = MM::MemoryAlloc(memory,0,MM::BytesToPages(aSize));
+		if(r==KErrNone)
+			{
+			DMemoryMapping* mapping;
+			r = MM::MappingNew(mapping,memory,EUserReadWrite,iOsAsid,EMappingCreateCommonVirtual,aBase);
+			}
+		if(r!=KErrNone)
+			MM::MemoryDestroy(memory);
+		else
+			{
+#ifdef BTRACE_FLEXIBLE_MEM_MODEL
+			BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,aCodeSeg,this);
+#endif
+			}
+
+		}
+	__KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
+	return r;
+	}
+
+
+void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
+	{
+	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
+	MM::MappingAndMemoryDestroy(aBase,iOsAsid);
+	}
+
+void DMemModelProcess::BTracePrime(TInt aCategory)
+	{
+	DProcess::BTracePrime(aCategory);
+
+#ifdef BTRACE_FLEXIBLE_MEM_MODEL
+	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
+		{
+		BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);
+
+		if (iDataBssMapping)
+			{
+			DMemoryObject* memory = MM::MappingGetAndOpenMemory(iDataBssMapping);
+			if (memory)
+				{
+				MM::MemoryBTracePrime(memory);
+				BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
+				MM::MemoryClose(memory);
+				}
+			}
+
+		// Trace memory objects for DLL static data
+		SDblQue cs_list;
+		DCodeSeg::UnmarkAll(DCodeSeg::EMarkListDeps|DCodeSeg::EMarkUnListDeps);
+		TraverseCodeSegs(&cs_list, NULL, DCodeSeg::EMarkListDeps, 0);
+		SDblQueLink* anchor=&cs_list.iA;
+		SDblQueLink* pL=cs_list.First();
+		for(; pL!=anchor; pL=pL->iNext)
+			{
+			DMemModelCodeSeg* seg = _LOFF(pL,DMemModelCodeSeg,iTempLink);
+			if (seg->IsDll())
+				{
+				TInt total_data_size;
+				TLinAddr data_base;
+				seg->GetDataSizeAndBase(total_data_size, data_base);
+				if (total_data_size)
+					{
+					TUint offset;
+					// The instance count can be ignored as a dll data mapping is only ever
+					// used with a single memory object.
+					TUint mappingInstanceCount;
+					NKern::ThreadEnterCS();
+					DMemoryMapping* mapping = MM::FindMappingInAddressSpace(iOsAsid, data_base, 0, offset, mappingInstanceCount);
+					if (mapping)
+						{
+						DMemoryObject* memory = MM::MappingGetAndOpenMemory(mapping);
+						if (memory)
+							{
+							MM::MemoryBTracePrime(memory);
+							BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,seg,this);
+							MM::MemoryClose(memory);
+							}
+						MM::MappingClose(mapping);
+						}
+					NKern::ThreadLeaveCS();
+					}
+				}
+			}
+		DCodeSeg::EmptyQueue(cs_list, 0); // leave cs_list empty
+		}
+#endif
+	}
+
+
+TInt DMemModelProcess::NewShPool(DShPool*& aPool, TShPoolCreateInfo& aInfo)
+	{
+	aPool = NULL;
+	DMemModelShPool* pC = NULL;
+
+	if (aInfo.iInfo.iFlags & TShPoolCreateInfo::EPageAlignedBuffer)
+		{
+		pC = new DMemModelAlignedShPool();
+		}
+	else
+		{
+		pC = new DMemModelNonAlignedShPool();
+		}
+
+	if (pC == NULL)
+		{
+		return KErrNoMemory;
+		}
+
+	TInt r = pC->Create(this, aInfo);
+
+	if (r == KErrNone)
+		{
+		aPool = pC;
+		}
+	else
+		{
+		pC->Close(NULL);
+		}
+
+	return r;
+	}
+
+
+TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
+//
+// Read from the thread's process.
+// aSrc		Run address of memory to read
+// aDest	Current address of destination
+// aExcTrap	Exception trap object to be updated if the actual memory access is performed
+//			on a different memory area than the one specified. This happens when the read
+//			is performed on an unaligned memory area.
+//
+	{
+	(void)aExcTrap;
+	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
+	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
+	TLinAddr src=(TLinAddr)aSrc;
+	TLinAddr dest=(TLinAddr)aDest;
+	TInt result = KErrNone;
+	TBool have_taken_fault = EFalse;
+
+	while (aLength)
+		{
+		if (iMState==EDead)
+			{
+			result = KErrDied;
+			break;
+			}
+		TLinAddr alias_src;
+		TUint alias_size;
+
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+		TInt pagingTrap;
+		XTRAP_PAGING_START(pagingTrap);
+#endif
+
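+		// If a previous pass took a paging fault (which removes the alias), limit
+		// this pass to the remainder of the current page so that each retry is
+		// guaranteed to make forward progress.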
+		TInt len = have_taken_fault ? Min(aLength, KPageSize - (src & KPageMask)) : aLength;
+		TInt alias_result=t.Alias(src, pP, len, alias_src, alias_size);
+		if (alias_result<0)
+			{
+			result = KErrBadDescriptor; // bad permissions
+			break;
+			}
+
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+		// need to let the trap handler know where we are accessing in case we take a page fault
+		// and the alias gets removed
+		aExcTrap->iRemoteBase = alias_src;
+		aExcTrap->iSize = alias_size;
+#endif
+
+		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));
+
+		CHECK_PAGING_SAFE;
+
+		if(aFlags&KCheckLocalAddress)
+			MM::ValidateLocalIpcAddress(dest,alias_size,ETrue);
+		UNLOCK_USER_MEMORY();
+		memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
+		LOCK_USER_MEMORY();
+
+		src+=alias_size;
+		dest+=alias_size;
+		aLength-=alias_size;
+
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+		XTRAP_PAGING_END;
+		if(pagingTrap)
+			have_taken_fault = ETrue;
+#endif
+		}
+	t.RemoveAlias();
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+	t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
+#endif
+
+	return result;
+	}
+
+
+TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* /*anOriginatingThread*/, TIpcExcTrap* aExcTrap)
+//
+// Write to the thread's process.
+// aDest	Run address of memory to write
+// aSrc		Current address of source
+// aExcTrap	Exception trap object to be updated if the actual memory access is performed
+//			on a different memory area than the one specified. This happens when the write
+//			is performed on an unaligned memory area.
+//
+	{
+	(void)aExcTrap;
+	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
+	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
+	TLinAddr src=(TLinAddr)aSrc;
+	TLinAddr dest=(TLinAddr)aDest;
+	TInt result = KErrNone;
+	TBool have_taken_fault = EFalse;
+
+	while (aLength)
+		{
+		if (iMState==EDead)
+			{
+			result = KErrDied;
+			break;
+			}
+		TLinAddr alias_dest;
+		TUint alias_size;
+
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+		TInt pagingTrap;
+		XTRAP_PAGING_START(pagingTrap);
+#endif
+
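+		// As in RawRead() above: after a paging fault, limit the retry to the
+		// remainder of the current destination page to guarantee forward progress.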
+		TInt len = have_taken_fault ? Min(aLength, KPageSize - (dest & KPageMask)) : aLength;
+		TInt alias_result=t.Alias(dest, pP, len, alias_dest, alias_size);
+		if (alias_result<0)
+			{
+			result = KErrBadDescriptor; // bad permissions
+			break;
+			}
+
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+		// need to let the trap handler know where we are accessing in case we take a page fault
+		// and the alias gets removed
+		aExcTrap->iRemoteBase = alias_dest;
+		aExcTrap->iSize = alias_size;
+#endif
+
+		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));
+
+		// Must check that it is safe to page, unless we are reading from unpaged ROM in which case
+		// we allow it.
+		CHECK_PAGING_SAFE_RANGE(src, aLength);
+		CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);
+
+		if(aFlags&KCheckLocalAddress)
+			MM::ValidateLocalIpcAddress(src,alias_size,EFalse);
+		UNLOCK_USER_MEMORY();
+		memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
+		LOCK_USER_MEMORY();
+
+		src+=alias_size;
+		dest+=alias_size;
+		aLength-=alias_size;
+
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+		XTRAP_PAGING_END;
+		if(pagingTrap)
+			have_taken_fault = ETrue;
+#endif
+		}
+	t.RemoveAlias();
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+	t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
+#endif
+
+	return result;
+	}
+
+
+#ifndef __MARM__
+
+TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
+//
+// Read the header of a remote descriptor.
+//
+	{
+	static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};
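+	// Header length in bytes for each valid descriptor type (EBufC, EPtrC, EPtr,
+	// EBuf, EBufCPtr); the zero entries mark invalid types and force the parse
+	// loop below to terminate with length < 0.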
+
+	CHECK_PAGING_SAFE;
+
+	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
+	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
+	TLinAddr src=(TLinAddr)aSrc;
+
+	__NK_ASSERT_DEBUG(t.iIpcClient==NULL);
+	t.iIpcClient = this;
+
+	TLinAddr pAlias;
+	TUint8* pDest = (TUint8*)&aDest;
+	TUint alias_size = 0;
+	TInt length = 12;
+	TInt type = KErrBadDescriptor;
+	while (length > 0)
+		{
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+		TInt pagingTrap;
+		XTRAP_PAGING_START(pagingTrap);
+#endif
+
+		if (alias_size == 0)
+			{
+			// no alias present, so must create one here
+			if (t.Alias(src, pP, length, pAlias, alias_size) != KErrNone)
+				break;
+			__NK_ASSERT_DEBUG(alias_size >= sizeof(TUint32));
+			}
+
+		// read either the first word, or as much as aliased of the remainder
+		TInt l = length == 12 ? sizeof(TUint32) : Min(length, alias_size);
+		if (Kern::SafeRead((TAny*)pAlias, (TAny*)pDest, l))
+			break; // exception reading from user space
+
+		if (length == 12)
+			{
+			// we have just read the first word, so decode the descriptor type
+			type = *(TUint32*)pDest >> KShiftDesType8;
+			length = LengthLookup[type];
+			// an invalid descriptor type will have length 0, which will get decreased
+			// by 'l' and terminate the loop with length < 0
+			}
+
+		src += l;
+		alias_size -= l;
+		pAlias += l;
+		pDest += l;
+		length -= l;
+
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+		XTRAP_PAGING_END;
+		if (pagingTrap)
+			alias_size = 0; // a page fault caused the alias to be removed
+#endif
+		}
+
+	t.RemoveAlias();
+	t.iIpcClient = NULL;
+#ifdef __BROADCAST_CACHE_MAINTENANCE__
+	t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
+#endif
+	return length == 0 ? K::ParseDesHeader(aSrc, (TRawDesHeader&)aDest, aDest) : KErrBadDescriptor;
+	}
+
+
+#endif
+
+
+TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
+	{
+	// not supported, new Physical Pinning APIs should be used for DMA
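+	// (the pinning APIs are e.g. Kern::CreatePhysicalPinObject(), Kern::PinPhysicalMemory()
+	// and Kern::UnpinPhysicalMemory(); the same applies to ReleaseMemoryFromDMA below)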
+	return KErrNotSupported;
+	}
+
+TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
+	{
+	// not supported, new Physical Pinning APIs should be used for DMA
+	return KErrNotSupported;
+	}
+