// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <memmodel.h>
#include "mmu/mm.h"
#include "mmu/maddrcont.h"
#include "mmboot.h"
#include <kernel/cache.h>
#include "execs.h"

#define iMState iWaitLink.iSpare1

NFastMutex TheSharedChunkLock;

#ifndef _DEBUG
const TInt KChunkGranularity = 4; // amount to grow the SChunkInfo list by
const TInt KMaxChunkInfosInOneGo = 100; // max number of SChunkInfo objects to copy with the System Lock held
#else // if debug...
const TInt KChunkGranularity = 1;
const TInt KMaxChunkInfosInOneGo = 1;
#endif



/********************************************
 * Process
 ********************************************/

DMemModelProcess::~DMemModelProcess()
    {
    __KTRACE_OPT(KMMU,Kern::Printf("DMemModelProcess destruct"));
    Destruct();
    }


void DMemModelProcess::Destruct()
    {
    __ASSERT_ALWAYS(!iOsAsidRefCount, MM::Panic(MM::EProcessDestructOsAsidRemaining));
    __ASSERT_ALWAYS(!iChunkCount, MM::Panic(MM::EProcessDestructChunksRemaining));
    Kern::Free(iChunks);
    __ASSERT_ALWAYS(!iSharedChunks || iSharedChunks->Count()==0, MM::Panic(MM::EProcessDestructChunksRemaining));
    delete iSharedChunks;

    DProcess::Destruct();
    }


TInt DMemModelProcess::TryOpenOsAsid()
    {
    if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, 1, 0))
        {
        return iOsAsid;
        }
    return KErrDied;
    }


void DMemModelProcess::CloseOsAsid()
    {
    if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
        {// Last reference has been closed so free the asid.
        MM::AddressSpaceFree(iOsAsid);
        }
    }


void DMemModelProcess::AsyncCloseOsAsid()
    {
    if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
        {// Last reference has been closed so free the asid asynchronously.
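        // (The actual free is deferred; presumably this path can be reached
        // from contexts where it would not be safe to free the address space
        // synchronously.)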
        MM::AsyncAddressSpaceFree(iOsAsid);
        }
    }


TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
    {
    aChunk=NULL;

    DMemModelChunk* pC=new DMemModelChunk;
    if (!pC)
        return KErrNoMemory;

    TChunkType type = aInfo.iType;
    pC->iChunkType=type;
    TInt r=pC->SetAttributes(aInfo);
    if (r!=KErrNone)
        {
        pC->Close(NULL);
        return r;
        }

    pC->iOwningProcess=(pC->iAttributes&DMemModelChunk::EPublic)?NULL:this;
    r=pC->Create(aInfo);
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
        {
        if (aInfo.iRunAddress!=0)
            pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
        if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
            {
            if (pC->iAttributes & DChunk::EDisconnected)
                {
                r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
                }
            else if (pC->iAttributes & DChunk::EDoubleEnded)
                {
                r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
                }
            else
                {
                r=pC->Adjust(aInfo.iInitialTop);
                }
            }
        }
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
        {
        r = AddChunk(pC, EFalse);
        }
    if (r==KErrNone)
        {
        if(pC->iKernelMapping)
            aRunAddr = (TLinAddr)MM::MappingBase(pC->iKernelMapping);
        pC->iDestroyedDfc = aInfo.iDestroyedDfc;
        aChunk=(DChunk*)pC;
        }
    else
        pC->Close(NULL); // NULL since chunk can't have been added to process
    return r;
    }


/**
Determine whether this process should be data paged.

@param aInfo A reference to the create info for this process.
*/
TInt DMemModelProcess::SetPaging(const TProcessCreateInfo& aInfo)
    {
    TUint pagedFlags = aInfo.iFlags & TProcessCreateInfo::EDataPagingMask;
    // If both the KImageDataPaged and KImageDataUnpaged flags are present the image is corrupt.
    // Check this first to ensure that it is always verified.
    if (pagedFlags == TProcessCreateInfo::EDataPagingMask)
        {
        return KErrCorrupt;
        }

    if (aInfo.iAttr & ECodeSegAttKernel ||
        !(K::MemModelAttributes & EMemModelAttrDataPaging))
        {// The kernel process shouldn't be data paged, and without a data paging device nothing can be.
        return KErrNone;
        }

    TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
    if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
        {
        iAttributes |= EDataPaged;
        return KErrNone;
        }
    if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
        {// No paging allowed so just return.
        return KErrNone;
        }
    if (pagedFlags == TProcessCreateInfo::EDataPaged)
        {
        iAttributes |= EDataPaged;
        return KErrNone;
        }
    if (pagedFlags == TProcessCreateInfo::EDataUnpaged)
        {// No paging requested so just return.
        return KErrNone;
        }
    // Neither paged nor unpaged is set, so use the default paging policy.
    // dataPolicy must be EKernelConfigDataPagingPolicyDefaultUnpaged or
    // EKernelConfigDataPagingPolicyDefaultPaged.
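    // To summarise the precedence implemented above: a corrupt flag
    // combination fails process creation; kernel processes and systems with
    // no data paging device are never data paged; the AlwaysPage/NoPaging
    // global policies override the image flags; and the image flags override
    // the Default* policies, which only apply when the image leaves the
    // choice unspecified.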
    __NK_ASSERT_DEBUG(pagedFlags == TProcessCreateInfo::EDataPagingUnspecified);
    __NK_ASSERT_DEBUG(dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged ||
                      dataPolicy == EKernelConfigDataPagingPolicyDefaultUnpaged);
    if (dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged)
        {
        iAttributes |= EDataPaged;
        }
    return KErrNone;
    }


TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
    {
    // Required so we can detect whether a process has been created and added
    // to its object container by checking for iContainerID!=EProcess.
    __ASSERT_COMPILE(EProcess != 0);
    __KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));
    TInt r=KErrNone;

    if (aKernelProcess)
        {
        iAttributes |= ESupervisor;
        iOsAsid = KKernelOsAsid;
        }
    else
        {
        r = MM::AddressSpaceAlloc(iPageDir);
        if (r>=0)
            {
            iOsAsid = r;
            r = KErrNone;
            }
        }
    if (r == KErrNone)
        {// Add this process's own reference to its os asid.
        __e32_atomic_store_ord32(&iOsAsidRefCount, 1);
        }

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
    BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);
#endif

    __KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, PD=%08x",iOsAsid,iPageDir));
    __KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
    return r;
    }


TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
    {
    TUint dataBssSize = MM::RoundToPageSize(aInfo.iTotalDataSize);
    if(!dataBssSize)
        return KErrNone;

    DMemoryObject* memory;
    TMemoryObjectType memoryType = iAttributes&EDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
    TInt r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(dataBssSize));
    if(r==KErrNone)
        {
        r = MM::MemoryAlloc(memory,0,MM::BytesToPages(dataBssSize));
        if(r==KErrNone)
            r = MM::MappingNew(iDataBssMapping,memory,EUserReadWrite,OsAsid());
        if(r!=KErrNone)
            MM::MemoryDestroy(memory);
        else
            {
            iDataBssRunAddress = MM::MappingBase(iDataBssMapping);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
            BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
#endif
            }
        }
    return r;
    }


TInt DMemModelProcess::AttachExistingCodeSeg(TProcessCreateInfo& aInfo)
    {
    TInt r = DEpocProcess::AttachExistingCodeSeg(aInfo);
    if(r==KErrNone)
        {
        // reserve virtual memory for the EXE's code seg if its run address is
        // not globally unique, i.e. if other processes may use the same
        // address range for different code...
        DMemModelCodeSeg* seg = (DMemModelCodeSeg*)iTempCodeSeg;
        if(seg->iAttr&ECodeSegAttAddrNotUnique)
            {
            TUint codeSize = seg->iSize;
            TLinAddr codeAddr = seg->RamInfo().iCodeRunAddr;
            TBool isDemandPaged = seg->iAttr&ECodeSegAttCodePaged;
            // Allocate virtual memory for the code seg using the os asid.
            // No need to open a reference on the os asid as the process is not
            // fully created yet, so it can't die and free the os asid.
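            // This reservation is later picked up by MapUserRamCode(), which
            // adopts it (EMappingCreateAdoptVirtual) when the EXE code seg is
            // actually mapped into the process.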
            r = MM::VirtualAlloc(OsAsid(),codeAddr,codeSize,isDemandPaged);
            if(r==KErrNone)
                {
                iCodeVirtualAllocSize = codeSize;
                iCodeVirtualAllocAddress = codeAddr;
                }
            }
        }

    return r;
    }


TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool aIsReadOnly)
    {
    DMemModelChunk* pC=(DMemModelChunk*)aChunk;
    if(pC->iOwningProcess && this!=pC->iOwningProcess)
        return KErrAccessDenied;

    TInt r = WaitProcessLock();
    if(r==KErrNone)
        {
        TInt i = ChunkIndex(pC);
        if(i>=0) // Found the chunk in this process, just up its count
            {
            iChunks[i].iAccessCount++;
            __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[i].iAccessCount));
            SignalProcessLock();
            return KErrNone;
            }
        r = DoAddChunk(pC,aIsReadOnly);
        SignalProcessLock();
        }
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
    return r;
    }


void M::FsRegisterThread()
    {
    TInternalRamDrive::Unlock();
    }


void ExecHandler::UnlockRamDrive()
    {
    }


EXPORT_C TLinAddr TInternalRamDrive::Base()
    {
    DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
    DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
    NKern::LockSystem();
    TLinAddr addr = (TLinAddr)pC->Base(pP);
    NKern::UnlockSystem();
    if(!addr)
        {
        Unlock();
        NKern::LockSystem();
        addr = (TLinAddr)pC->Base(pP);
        NKern::UnlockSystem();
        }
    return addr;
    }


EXPORT_C void TInternalRamDrive::Unlock()
    {
    DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
    DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;

    TInt r = pP->WaitProcessLock();
    if(r==KErrNone)
        if(pP->ChunkIndex(pC)==KErrNotFound)
            r = pP->DoAddChunk(pC,EFalse);
    __ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
    pP->SignalProcessLock();
    }


EXPORT_C void TInternalRamDrive::Lock()
    {
    }


TInt DMemModelProcess::DoAddChunk(DMemModelChunk* aChunk, TBool aIsReadOnly)
    {
    //
    // Must hold the process $LOCK mutex before calling this.
    // As the process lock is held it is safe to access iOsAsid without a reference.
    //

    __NK_ASSERT_DEBUG(ChunkIndex(aChunk)==KErrNotFound); // shouldn't be adding a chunk which is already added

    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoAddChunk %O to %O",aChunk,this));

    // create mapping for chunk...
    DMemoryMapping* mapping;
    TMappingPermissions perm = MM::MappingPermissions
        (
        iOsAsid!=(TInt)KKernelOsAsid, // user?
        aIsReadOnly==false, // write?
        aChunk->iAttributes&DMemModelChunk::ECode // execute?
        );
    TInt r;
    if(aChunk->iFixedBase) // HACK, kernel chunk has a fixed iBase
        r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid,EMappingCreateExactVirtual,(TLinAddr)aChunk->iFixedBase);
    else
        r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid);
    if(r!=KErrNone)
        return r;
    if(iOsAsid==0)
        aChunk->iKernelMapping = mapping;
    TLinAddr base = MM::MappingBase(mapping);

    // expand chunk info memory if required...
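    // (In debug builds KChunkGranularity and KMaxChunkInfosInOneGo are both 1,
    // so the realloc path here and the shuffle loops below are exercised on
    // almost every add and remove.)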
    if(iChunkCount==iChunkAlloc)
        {
        TInt newAlloc = iChunkAlloc+KChunkGranularity;
        r = Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
        if(r!=KErrNone)
            {
            MM::MappingDestroy(mapping);
            return r;
            }
        iChunkAlloc = newAlloc;
        }

    // insert new chunk info...
    TUint i = ChunkInsertIndex(aChunk);
    SChunkInfo* info = iChunks+i;
    SChunkInfo* infoEnd = iChunks+iChunkCount;
    NKern::LockSystem();
    ++iChunkCount;
    for(;;)
        {
        // make space for the new chunk info by shuffling along
        // the existing infos KMaxChunkInfosInOneGo at a time...
        SChunkInfo* infoPtr = infoEnd-KMaxChunkInfosInOneGo;
        if(infoPtr<info)
            infoPtr = info;
        memmove(infoPtr+1,infoPtr,(TLinAddr)infoEnd-(TLinAddr)infoPtr);
        infoEnd = infoPtr;
        if(infoEnd<=info)
            break;
        NKern::FlashSystem();
        }
    info->iChunk = aChunk;
    info->iMapping = mapping;
    info->iAccessCount = 1;
    info->iIsReadOnly = aIsReadOnly;
    NKern::UnlockSystem();

    // add chunk to the list of Shared Chunks...
    if(aChunk->iChunkType==ESharedKernelSingle || aChunk->iChunkType==ESharedKernelMultiple)
        {
        if(!iSharedChunks)
            iSharedChunks = new RAddressedContainer(&TheSharedChunkLock,iProcessLock);
        if(!iSharedChunks)
            r = KErrNoMemory;
        else
            r = iSharedChunks->Add(base,aChunk);
        if(r!=KErrNone)
            {
            DoRemoveChunk(i);
            return r;
            }
        }

    // done OK...
    __DEBUG_EVENT(EEventUpdateProcess, this);
    return KErrNone;
    }


void DMemModelProcess::DoRemoveChunk(TInt aIndex)
    {
    __DEBUG_EVENT(EEventUpdateProcess, this);

    DMemModelChunk* chunk = iChunks[aIndex].iChunk;
    DMemoryMapping* mapping = iChunks[aIndex].iMapping;

    if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
        {
        // remove chunk from the list of Shared Chunks...
        if(iSharedChunks)
            {
            iSharedChunks->Remove(MM::MappingBase(mapping));
#ifdef _DEBUG
            // delete iSharedChunks if it's empty, so memory leak test code passes...
            if(iSharedChunks->Count()==0)
                {
                NKern::FMWait(&TheSharedChunkLock);
                RAddressedContainer* s = iSharedChunks;
                iSharedChunks = 0;
                NKern::FMSignal(&TheSharedChunkLock);
                delete s;
                }
#endif
            }
        }

    // remove chunk from the array...
    SChunkInfo* infoStart = iChunks+aIndex+1;
    SChunkInfo* infoEnd = iChunks+iChunkCount;
    NKern::LockSystem();
    for(;;)
        {
        // shuffle the existing infos down KMaxChunkInfosInOneGo at a time...
        SChunkInfo* infoPtr = infoStart+KMaxChunkInfosInOneGo;
        if(infoPtr>infoEnd)
            infoPtr = infoEnd;
        memmove(infoStart-1,infoStart,(TLinAddr)infoPtr-(TLinAddr)infoStart);
        infoStart = infoPtr;
        if(infoStart>=infoEnd)
            break;
        NKern::FlashSystem();
        }
    --iChunkCount;
    NKern::UnlockSystem();

    if(mapping==chunk->iKernelMapping)
        chunk->iKernelMapping = 0;

    MM::MappingDestroy(mapping);
    }


/**
Final chance for the process to release resources during its death.

Called with the process $LOCK mutex held (if it exists).
This mutex will not be released before it is deleted.
I.e. no other thread will ever hold the mutex again.
*/
void DMemModelProcess::FinalRelease()
    {
    // Clean up any left over chunks (such as SharedIo buffers)
    if(iProcessLock)
        while(iChunkCount)
            DoRemoveChunk(0);
    // Destroy the remaining mappings and memory objects owned by this process
    MM::MappingAndMemoryDestroy(iDataBssMapping);
    if(iCodeVirtualAllocSize)
        MM::VirtualFree(iOsAsid,iCodeVirtualAllocAddress,iCodeVirtualAllocSize);

    // Close the original reference on the os asid.
    CloseOsAsid();
    }


void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
    {
    // Note that this can't be called after the process $LOCK mutex has been deleted
    // since it can only be called by a thread in this process doing a handle close or
    // dying, or by the process handles array being deleted due to the process dying,
    // all of which happen before $LOCK is deleted.
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
    Kern::MutexWait(*iProcessLock);
    TInt i = ChunkIndex(aChunk);
    if(i>=0) // Found the chunk
        {
        __KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[i].iAccessCount));
        if(--iChunks[i].iAccessCount==0)
            {
            DoRemoveChunk(i);
            }
        }
    Kern::MutexSignal(*iProcessLock);
    }


TUint8* DMemModelChunk::Base(DProcess* aProcess)
    {
    DMemModelProcess* pP = (DMemModelProcess*)aProcess;
    DMemoryMapping* mapping = 0;

    if(iKernelMapping && pP==K::TheKernelProcess)
        {
        // shortcut for shared chunks...
        mapping = iKernelMapping;
        }
    else
        {
        // find the chunk in the process...
        TInt i = pP->ChunkIndex(this);
        if(i>=0)
            mapping = pP->iChunks[i].iMapping;
        }

    if(!mapping)
        return 0;

    return (TUint8*)MM::MappingBase(mapping);
    }


DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
    {
    DMemModelChunk* chunk = 0;

    NKern::FMWait(&TheSharedChunkLock);
    RAddressedContainer* list = ((DMemModelProcess*)iOwningProcess)->iSharedChunks;
    if(list)
        {
        // search the list...
        TUint offset;
        chunk = (DMemModelChunk*)list->Find((TLinAddr)aAddress,offset);
        if(chunk && offset<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
            aOffset = offset; // chunk found and opened successfully
        else
            chunk = 0; // failed
        }
    NKern::FMSignal(&TheSharedChunkLock);

    return chunk;
    }


TUint DMemModelProcess::ChunkInsertIndex(DMemModelChunk* aChunk)
    {
    // need to hold iProcessLock or the System Lock...
#ifdef _DEBUG
    if(K::Initialising==false && iProcessLock!=NULL && iProcessLock->iCleanup.iThread!=&Kern::CurrentThread())
        {
        // don't hold iProcessLock, so...
        __ASSERT_SYSTEM_LOCK;
        }
#endif

    // binary search...
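    // iChunks is kept sorted by DMemModelChunk pointer value; return the index
    // of the first entry whose chunk pointer is greater than aChunk, i.e. the
    // slot a new entry for aChunk would occupy. ChunkIndex() then checks the
    // entry just before the returned slot.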
    SChunkInfo* list = iChunks;
    TUint l = 0;
    TUint r = iChunkCount;
    TUint m;
    while(l<r)
        {
        m = (l+r)>>1;
        DChunk* x = list[m].iChunk;
        if(x<=aChunk)
            l = m+1;
        else
            r = m;
        }
    return r;
    }


TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk)
    {
    TUint i = ChunkInsertIndex(aChunk);
    if(i && iChunks[--i].iChunk==aChunk)
        return i;
    return KErrNotFound;
    }


TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
    {
    __ASSERT_CRITICAL; // Must be in a critical section so we can't leak os asid references.

    DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
    __KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
    TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
    TBool user_local=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
    if (kernel_only && !(iAttributes&ESupervisor))
        return KErrNotSupported;
    if (seg.iAttr&ECodeSegAttKernel)
        return KErrNone; // no extra mappings needed for kernel code

    // Attempt to open a reference on the os asid; it is required so that
    // MapUserRamCode() and CommitDllData() can use iOsAsid safely.
    TInt osAsid = TryOpenOsAsid();
    if (osAsid < 0)
        {// The process has died.
        return KErrDied;
        }

    TInt r=KErrNone;
    if (user_local)
        r=MapUserRamCode(seg.Memory());
    if (seg.IsDll())
        {
        TInt total_data_size;
        TLinAddr data_base;
        seg.GetDataSizeAndBase(total_data_size, data_base);
        if (r==KErrNone && total_data_size)
            {
            TInt size=MM::RoundToPageSize(total_data_size);
            r=CommitDllData(data_base, size, aSeg);
            if (r!=KErrNone && user_local)
                UnmapUserRamCode(seg.Memory());
            }
        }
    CloseOsAsid();

    return r;
    }


void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
    {
    __ASSERT_CRITICAL; // Must be in a critical section so we can't leak os asid references.

    DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
    __KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
    if (seg.iAttr&ECodeSegAttKernel)
        return; // no extra mappings needed for kernel code

    // Attempt to open a reference on the os asid; it is required so that
    // UnmapUserRamCode() and DecommitDllData() can use iOsAsid safely.
    TInt osAsid = TryOpenOsAsid();
    if (osAsid < 0)
        {// The process has died and will already have cleaned up any code segs.
        return;
        }

    if (seg.IsDll())
        {
        TInt total_data_size;
        TLinAddr data_base;
        seg.GetDataSizeAndBase(total_data_size, data_base);
        if (total_data_size)
            DecommitDllData(data_base, MM::RoundToPageSize(total_data_size));
        }
    if (seg.Memory())
        UnmapUserRamCode(seg.Memory());

    CloseOsAsid();
    }

void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
    {
    }


TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory)
    {
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d",
                                    this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
    __ASSERT_MUTEX(DCodeSeg::CodeSegLock);

    TMappingCreateFlags createFlags = EMappingCreateExactVirtual;

    if(!(aMemory->iCodeSeg->iAttr&ECodeSegAttAddrNotUnique))
        {
        // the codeseg memory address is globally unique (a common address across all processes)...
        FlagSet(createFlags,EMappingCreateCommonVirtual);
        }

    if(aMemory->iCodeSeg->IsExe())
        {
        // EXE codesegs have already had their virtual address allocated so we must adopt that...
        __NK_ASSERT_DEBUG(iCodeVirtualAllocSize);
        __NK_ASSERT_DEBUG(iCodeVirtualAllocAddress==aMemory->iRamInfo.iCodeRunAddr);
        iCodeVirtualAllocSize = 0;
        iCodeVirtualAllocAddress = 0;
        FlagSet(createFlags,EMappingCreateAdoptVirtual);
        }

    DMemoryMapping* mapping;
    return MM::MappingNew(mapping,aMemory->iCodeMemoryObject,EUserExecute,iOsAsid,createFlags,aMemory->iRamInfo.iCodeRunAddr);
    }


void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory)
    {
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
                                    this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));

    __ASSERT_MUTEX(DCodeSeg::CodeSegLock);
    MM::MappingDestroy(aMemory->iRamInfo.iCodeRunAddr,iOsAsid);
    }


TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize, DCodeSeg* aCodeSeg)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));

    DMemoryObject* memory;
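    // The DLL's static data is movable memory unless the code seg was built
    // with paged data, in which case it is backed by the demand paging system.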
    TMemoryObjectType memoryType = aCodeSeg->iAttr&ECodeSegAttDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
    TInt r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(aSize));
    if(r==KErrNone)
        {
        r = MM::MemoryAlloc(memory,0,MM::BytesToPages(aSize));
        if(r==KErrNone)
            {
            DMemoryMapping* mapping;
            r = MM::MappingNew(mapping,memory,EUserReadWrite,iOsAsid,EMappingCreateCommonVirtual,aBase);
            }
        if(r!=KErrNone)
            MM::MemoryDestroy(memory);
        else
            {
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
            BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,aCodeSeg,this);
#endif
            }
        }
    __KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
    return r;
    }


void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
    {
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
    MM::MappingAndMemoryDestroy(aBase,iOsAsid);
    }

void DMemModelProcess::BTracePrime(TInt aCategory)
    {
    DProcess::BTracePrime(aCategory);

#ifdef BTRACE_FLEXIBLE_MEM_MODEL
    if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
        {
        BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);

        if (iDataBssMapping)
            {
            DMemoryObject* memory = MM::MappingGetAndOpenMemory(iDataBssMapping);
            if (memory)
                {
                MM::MemoryBTracePrime(memory);
                BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
                MM::MemoryClose(memory);
                }
            }

        // Trace the memory objects for DLL static data
        SDblQue cs_list;
        DCodeSeg::UnmarkAll(DCodeSeg::EMarkListDeps|DCodeSeg::EMarkUnListDeps);
        TraverseCodeSegs(&cs_list, NULL, DCodeSeg::EMarkListDeps, 0);
        SDblQueLink* anchor=&cs_list.iA;
        SDblQueLink* pL=cs_list.First();
        for(; pL!=anchor; pL=pL->iNext)
            {
            DMemModelCodeSeg* seg = _LOFF(pL,DMemModelCodeSeg,iTempLink);
            if (seg->IsDll())
                {
                TInt total_data_size;
                TLinAddr data_base;
                seg->GetDataSizeAndBase(total_data_size, data_base);
                if (total_data_size)
                    {
                    TUint offset;
                    // The instance count can be ignored as a dll data mapping is only ever
                    // used with a single memory object.
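                    // Enter a critical section so this thread can't be killed
                    // while it holds references on the mapping and memory object.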
                    TUint mappingInstanceCount;
                    NKern::ThreadEnterCS();
                    DMemoryMapping* mapping = MM::FindMappingInAddressSpace(iOsAsid, data_base, 0, offset, mappingInstanceCount);
                    if (mapping)
                        {
                        DMemoryObject* memory = MM::MappingGetAndOpenMemory(mapping);
                        if (memory)
                            {
                            MM::MemoryBTracePrime(memory);
                            BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,seg,this);
                            MM::MemoryClose(memory);
                            }
                        MM::MappingClose(mapping);
                        }
                    NKern::ThreadLeaveCS();
                    }
                }
            }
        DCodeSeg::EmptyQueue(cs_list, 0); // leave cs_list empty
        }
#endif
    }


TInt DMemModelProcess::NewShPool(DShPool*& aPool, TShPoolCreateInfo& aInfo)
    {
    aPool = NULL;
    DMemModelShPool* pC = NULL;

    if (aInfo.iInfo.iFlags & TShPoolCreateInfo::EPageAlignedBuffer)
        {
        pC = new DMemModelAlignedShPool();
        }
    else
        {
        pC = new DMemModelNonAlignedShPool();
        }

    if (pC == NULL)
        {
        return KErrNoMemory;
        }

    TInt r = pC->Create(this, aInfo);

    if (r == KErrNone)
        {
        aPool = pC;
        }
    else
        {
        pC->Close(NULL);
        }

    return r;
    }


TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
//
// Read from the thread's process.
// aSrc     Run address of the memory to read
// aDest    Current address of the destination
// aExcTrap Exception trap object to be updated if the actual memory access is performed on a different
//          memory area than specified. This happens when the read is performed on an unaligned memory area.
//
    {
    (void)aExcTrap;
    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;
    TLinAddr dest=(TLinAddr)aDest;
    TInt result = KErrNone;
    TBool have_taken_fault = EFalse;

    while (aLength)
        {
        if (iMState==EDead)
            {
            result = KErrDied;
            break;
            }
        TLinAddr alias_src;
        TUint alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        TInt pagingTrap;
        XTRAP_PAGING_START(pagingTrap);
#endif

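        // Once a paging fault has been taken, copy at most up to the next page
        // boundary on each attempt so forward progress is made one page at a time.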
        TInt len = have_taken_fault ? Min(aLength, KPageSize - (src & KPageMask)) : aLength;
        TInt alias_result=t.Alias(src, pP, len, alias_src, alias_size);
        if (alias_result<0)
            {
            result = KErrBadDescriptor; // bad permissions
            break;
            }

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        // need to let the trap handler know where we are accessing in case we take a page fault
        // and the alias gets removed
        aExcTrap->iRemoteBase = alias_src;
        aExcTrap->iSize = alias_size;
#endif

        __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));

        CHECK_PAGING_SAFE;

        if(aFlags&KCheckLocalAddress)
            MM::ValidateLocalIpcAddress(dest,alias_size,ETrue);
        UNLOCK_USER_MEMORY();
        memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
        LOCK_USER_MEMORY();

        src+=alias_size;
        dest+=alias_size;
        aLength-=alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        XTRAP_PAGING_END;
        if(pagingTrap)
            have_taken_fault = ETrue;
#endif
        }
    t.RemoveAlias();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
    t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif

    return result;
    }


TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* /*anOriginatingThread*/, TIpcExcTrap* aExcTrap)
//
// Write to the thread's process.
// aDest    Run address of the memory to write
// aSrc     Current address of the source
// aExcTrap Exception trap object to be updated if the actual memory access is performed on a different
//          memory area than specified. This happens when the write is performed on an unaligned memory area.
//
    {
    (void)aExcTrap;
    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;
    TLinAddr dest=(TLinAddr)aDest;
    TInt result = KErrNone;
    TBool have_taken_fault = EFalse;

    while (aLength)
        {
        if (iMState==EDead)
            {
            result = KErrDied;
            break;
            }
        TLinAddr alias_dest;
        TUint alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        TInt pagingTrap;
        XTRAP_PAGING_START(pagingTrap);
#endif

        TInt len = have_taken_fault ? Min(aLength, KPageSize - (dest & KPageMask)) : aLength;
        TInt alias_result=t.Alias(dest, pP, len, alias_dest, alias_size);
        if (alias_result<0)
            {
            result = KErrBadDescriptor; // bad permissions
            break;
            }

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        // need to let the trap handler know where we are accessing in case we take a page fault
        // and the alias gets removed
        aExcTrap->iRemoteBase = alias_dest;
        aExcTrap->iSize = alias_size;
#endif

        __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));

        // Must check that it is safe to page, unless we are reading from unpaged ROM in which case
        // we allow it.
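        // (CHECK_PAGING_SAFE_RANGE covers the source, which may be paged code
        // or ROM; CHECK_DATA_PAGING_SAFE_RANGE presumably exists to catch a
        // destination that lives in paged data memory.)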
        CHECK_PAGING_SAFE_RANGE(src, aLength);
        CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);

        if(aFlags&KCheckLocalAddress)
            MM::ValidateLocalIpcAddress(src,alias_size,EFalse);
        UNLOCK_USER_MEMORY();
        memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
        LOCK_USER_MEMORY();

        src+=alias_size;
        dest+=alias_size;
        aLength-=alias_size;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        XTRAP_PAGING_END;
        if(pagingTrap)
            have_taken_fault = ETrue;
#endif
        }
    t.RemoveAlias();
#ifdef __BROADCAST_CACHE_MAINTENANCE__
    t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif

    return result;
    }


#ifndef __MARM__

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read the header of a remote descriptor.
//
    {
    static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};

    CHECK_PAGING_SAFE;

    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;

    __NK_ASSERT_DEBUG(t.iIpcClient==NULL);
    t.iIpcClient = this;

    TLinAddr pAlias;
    TUint8* pDest = (TUint8*)&aDest;
    TUint alias_size = 0;
    TInt length = 12;
    TInt type = KErrBadDescriptor;
    while (length > 0)
        {
#ifdef __BROADCAST_CACHE_MAINTENANCE__
        TInt pagingTrap;
        XTRAP_PAGING_START(pagingTrap);
#endif

        if (alias_size == 0)
            {
            // no alias is present, so one must be created here
            if (t.Alias(src, pP, length, pAlias, alias_size) != KErrNone)
                break;
            __NK_ASSERT_DEBUG(alias_size >= sizeof(TUint32));
            }

        // read either the first word, or as much of the remainder as is aliased
        TInt l = length == 12 ? sizeof(TUint32) : Min(length, alias_size);
        if (Kern::SafeRead((TAny*)pAlias, (TAny*)pDest, l))
            break; // exception reading from user space

        if (length == 12)
            {
            // we have just read the first word, so decode the descriptor type
            type = *(TUint32*)pDest >> KShiftDesType8;
            length = LengthLookup[type];
            // an invalid descriptor type has length 0, which gets decreased by 'l'
            // and terminates the loop with length < 0
            }

        src += l;
        alias_size -= l;
        pAlias += l;
        pDest += l;
        length -= l;

#ifdef __BROADCAST_CACHE_MAINTENANCE__
        XTRAP_PAGING_END;
        if (pagingTrap)
            alias_size = 0; // a page fault caused the alias to be removed
#endif
        }

    t.RemoveAlias();
    t.iIpcClient = NULL;
#ifdef __BROADCAST_CACHE_MAINTENANCE__
    t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif
    return length == 0 ? K::ParseDesHeader(aSrc, (TRawDesHeader&)aDest, aDest) : KErrBadDescriptor;
    }


#endif


TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
    {
    // not supported; the newer physical pinning APIs should be used for DMA
    return KErrNotSupported;
    }

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
    {
    // not supported; the newer physical pinning APIs should be used for DMA
    return KErrNotSupported;
    }