// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mprocess.cpp
//
//

#include "memmodel.h"
#include "mmboot.h"
#include "cache_maintenance.h"
#include <demand_paging.h>	// assumed: the angle-bracket header name was lost; this header provides the CHECK_PAGING_SAFE* macros used below

#define iMState iWaitLink.iSpare1

// just for convenience...
#define KAmSelfMod (DMemModelChunk::ECode | DMemModelChunk::EAddressLocal)

_LIT(KDollarDat,"$DAT");
_LIT(KLitDollarCode,"$CODE");
_LIT(KLitDllDollarData,"DLL$DATA");

#ifdef __CPU_HAS_BTB
extern void __FlushBtb();
#endif

const TInt KChunkGranularity=4;

/********************************************
 * Process
 ********************************************/
void DMemModelProcess::Destruct()
	{
	__ASSERT_ALWAYS(!iChunkCount && !iCodeChunk && !iDllDataChunk, MM::Panic(MM::EProcessDestructChunksRemaining));
	Kern::Free(iChunks);
	Kern::Free(iLocalSection);
	if (iOsAsid)
		{
		Mmu& m=Mmu::Get();
		MmuBase::Wait();
		m.FreeOsAsid(iOsAsid);
		iOsAsid=0;
		MmuBase::Signal();
#ifndef __SMP__
		LastUserSelfMod=0;	// must force a BTB flush when the next self-mod chunk is switched in
#endif
		}
#ifdef __CPU_HAS_BTB
	__FlushBtb();
#endif
	DProcess::Destruct();
	}

TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
	{
	aChunk=NULL;
	DMemModelChunk* pC=NULL;
	TInt r=GetNewChunk(pC,aInfo);
	if (r!=KErrNone)
		{
		if (pC)
			pC->Close(NULL);
		return r;
		}
	TInt mapType=pC->iAttributes & DMemModelChunk::EMapTypeMask;
	pC->iOwningProcess=(mapType==DMemModelChunk::EMapTypeLocal)?this:NULL;
#ifdef __CPU_HAS_BTB
	if ((pC->iAttributes & KAmSelfMod) == KAmSelfMod)	// it's a potentially overlapping self-mod
		{
		iSelfModChunks++;
#ifndef __SMP__
		LastUserSelfMod = this;	// we become the last self-modding process
#endif
		__FlushBtb();	// needed because there may already be bad branches in the BTB
		}
#endif
	r=pC->Create(aInfo);
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
		{
		if (aInfo.iRunAddress!=0)
			pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
		if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
			{
			if (pC->iAttributes & DChunk::EDisconnected)
				{
				r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
				}
			else if (pC->iAttributes & DChunk::EDoubleEnded)
				{
				r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
				}
			else
				{
				r=pC->Adjust(aInfo.iInitialTop);
				}
			}
		}
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
		{
//		if (pC->iAttributes & DMemModelChunk::ECode)
//			MM::TheMmu->SyncCodeMappings();
		if (mapType!=DMemModelChunk::EMapTypeGlobal)
			{
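			// Non-global chunks must be entered into this process's chunk list and
			// address space; AddChunk() below requires the process $LOCK mutex to
			// be held, hence the WaitProcessLock()/SignalProcessLock() pair.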
			r=WaitProcessLock();
			if (r==KErrNone)
				{
				r=AddChunk(pC,aRunAddr,EFalse);
				SignalProcessLock();
				}
			}
		else
			aRunAddr=(TLinAddr)pC->Base();
		}
	if (r==KErrNone)
		{
		if (pC->iKernelMirror)
			aRunAddr = (TLinAddr)pC->iKernelMirror->Base();
		pC->iDestroyedDfc = aInfo.iDestroyedDfc;
		aChunk=(DChunk*)pC;
		}
	else
		pC->Close(NULL);	// NULL since chunk can't have been added to process
	return r;
	}

TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
	{
	__KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));

	Mmu& m=Mmu::Get();
	TInt r=KErrNone;

	iSelfModChunks=0;	// we don't have any yet

	if (aKernelProcess)
		{
		iAttributes |= ESupervisor;
		//iOsAsid=0;
		// Leave these till Mmu::Init2
		//	if (m.iLocalPdSize)
		//		iLocalPageDir=m.LinearToPhysical(TLinAddr(m.LocalPageDir(0)));
		//	iGlobalPageDir=m.LinearToPhysical(TLinAddr(m.GlobalPageDir(0)));
		m.iAsidInfo[0]=((TUint32)this)|1;
		iAddressCheckMaskR=0xffffffff;
		iAddressCheckMaskW=0xffffffff;
		}
	else
		{
		MmuBase::Wait();
		r=m.NewOsAsid(EFalse);
		if (r>=0)
			{
			iOsAsid=r;
			if (m.iLocalPdSize)
				iLocalPageDir=m.LinearToPhysical(TLinAddr(m.LocalPageDir(r)));
			else
				iGlobalPageDir=m.LinearToPhysical(TLinAddr(m.GlobalPageDir(r)));
			m.iAsidInfo[r] |= (TUint32)this;
			r=KErrNone;
			}
		MmuBase::Signal();
		if (r==KErrNone && 0==(iLocalSection=TLinearSection::New(m.iUserLocalBase, m.iUserLocalEnd)) )
			r=KErrNoMemory;
		}

	__KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, LPD=%08x, GPD=%08x, ASID info=%08x",iOsAsid,iLocalPageDir,
									iGlobalPageDir,m.iAsidInfo[iOsAsid]));
	__KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
	return r;
	}

TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool isReadOnly)
	{
	DMemModelChunk* pC=(DMemModelChunk*)aChunk;
	if ((pC->iAttributes & DMemModelChunk::EPrivate) && this!=pC->iOwningProcess)
		return KErrAccessDenied;
	TInt r=WaitProcessLock();
	if (r==KErrNone)
		{
		TInt pos=0;
		r=ChunkIndex(pC,pos);
		TLinAddr dataSectionBase=0;
		if (r==0)	// Found the chunk in this process, just up its count
			{
			iChunks[pos].iAccessCount++;
			__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[pos].iAccessCount));
			SignalProcessLock();
			return KErrNone;
			}
		r=AddChunk(pC,dataSectionBase,isReadOnly);
		SignalProcessLock();
		}
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
	return r;
	}

void M::FsRegisterThread()
	{
	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
	TInt mapType=pC->iAttributes & DMemModelChunk::EMapTypeMask;
	if (mapType!=DMemModelChunk::EMapTypeLocal)
		{
		DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
		TLinAddr dataSectionBase;
		TInt r=pP->WaitProcessLock();
		if (r==KErrNone)
			r=pP->AddChunk(pC,dataSectionBase,EFalse);
		__ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
		pP->SignalProcessLock();
		}
	}

TInt DMemModelProcess::AddChunk(DMemModelChunk* aChunk, TLinAddr& aDataSectionBase, TBool isReadOnly)
	{
	//
	// Must hold the process $LOCK mutex before calling this
	//
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %O to %O",aChunk,this));
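	// iChunks is kept sorted by chunk base address: find the insertion point for
	// this chunk and reject it if it would overlap an existing mapping.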
%O",aChunk,this)); sl@0: SChunkInfo *pC=iChunks; sl@0: SChunkInfo *pE=pC+iChunkCount-1; sl@0: TLinAddr base=TLinAddr(aChunk->iBase); sl@0: TInt i=0; sl@0: sl@0: #ifdef __CPU_HAS_BTB sl@0: if ((aChunk->iAttributes & KAmSelfMod)==KAmSelfMod) // it's a potentially overlapping self-mod sl@0: { sl@0: iSelfModChunks++; sl@0: #ifndef __SMP__ sl@0: LastUserSelfMod = this; // we become the last selfmodding process sl@0: #endif sl@0: __FlushBtb(); // we need to do this, as there may be bad branches already in the btb sl@0: } sl@0: #endif sl@0: if (iChunkCount) sl@0: { sl@0: for (; pE>=pC && TLinAddr(pE->iChunk->iBase)>base; --pE); sl@0: if (pE>=pC && TLinAddr(pE->iChunk->iBase)+pE->iChunk->iMaxSize>base) sl@0: return KErrInUse; sl@0: pC=pE+1; sl@0: if (pCiMaxSize>TLinAddr(pC->iChunk->iBase)) sl@0: return KErrInUse; sl@0: i=pC-iChunks; sl@0: } sl@0: if (iChunkCount==iChunkAlloc) sl@0: { sl@0: TInt newAlloc=iChunkAlloc+KChunkGranularity; sl@0: TInt r=Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo)); sl@0: if (r!=KErrNone) sl@0: return r; sl@0: pC=iChunks+i; sl@0: iChunkAlloc=newAlloc; sl@0: } sl@0: memmove(pC+1,pC,(iChunkCount-i)*sizeof(SChunkInfo)); sl@0: ++iChunkCount; sl@0: pC->isReadOnly=isReadOnly; sl@0: pC->iAccessCount=1; sl@0: pC->iChunk=aChunk; sl@0: aDataSectionBase=base; sl@0: Mmu& m=Mmu::Get(); sl@0: if (aChunk->iOsAsids) sl@0: { sl@0: // only need to do address space manipulation for shared chunks sl@0: MmuBase::Wait(); sl@0: aChunk->iOsAsids->Alloc(iOsAsid,1); sl@0: TLinAddr a; sl@0: TInt i=0; sl@0: for (a=TLinAddr(aChunk->iBase); aiBase)+aChunk->iMaxSize; a+=m.iChunkSize, ++i) sl@0: { sl@0: TInt ptid=aChunk->iPageTables[i]; sl@0: if (ptid!=0xffff) sl@0: m.DoAssignPageTable(ptid,a,aChunk->iPdePermissions,(const TAny*)iOsAsid); sl@0: } sl@0: MmuBase::Signal(); sl@0: } sl@0: if (aChunk->iChunkType==ERamDrive) sl@0: { sl@0: NKern::LockSystem(); sl@0: iAddressCheckMaskR |= m.iRamDriveMask; sl@0: iAddressCheckMaskW |= m.iRamDriveMask; sl@0: NKern::UnlockSystem(); sl@0: } sl@0: __DEBUG_EVENT(EEventUpdateProcess, this); sl@0: return KErrNone; sl@0: } sl@0: sl@0: void DMemModelProcess::DoRemoveChunk(TInt aIndex) sl@0: { sl@0: __DEBUG_EVENT(EEventUpdateProcess, this); sl@0: DMemModelChunk* chunk = iChunks[aIndex].iChunk; sl@0: memmove(iChunks+aIndex, iChunks+aIndex+1, (iChunkCount-aIndex-1)*sizeof(SChunkInfo)); sl@0: --iChunkCount; sl@0: Mmu& m=Mmu::Get(); sl@0: if (chunk->iOsAsids) sl@0: { sl@0: // only need to do address space manipulation for shared chunks sl@0: MmuBase::Wait(); sl@0: chunk->iOsAsids->Free(iOsAsid); sl@0: TLinAddr a; sl@0: for (a=TLinAddr(chunk->iBase); aiBase)+chunk->iMaxSize; a+=m.iChunkSize) sl@0: m.DoUnassignPageTable(a,(const TAny*)iOsAsid); sl@0: TUint32 mask=(chunk->iAttributes&DMemModelChunk::ECode)?Mmu::EFlushITLB:0; sl@0: m.GenericFlush(mask|Mmu::EFlushDTLB); sl@0: sl@0: MmuBase::Signal(); sl@0: } sl@0: if (chunk->iChunkType==ERamDrive) sl@0: { sl@0: NKern::LockSystem(); sl@0: iAddressCheckMaskR &= ~m.iRamDriveMask; sl@0: iAddressCheckMaskW &= ~m.iRamDriveMask; sl@0: NKern::UnlockSystem(); sl@0: } sl@0: } sl@0: sl@0: /** sl@0: Final chance for process to release resources during its death. sl@0: sl@0: Called with process $LOCK mutex held (if it exists). sl@0: This mutex will not be released before it is deleted. sl@0: I.e. no other thread will ever hold the mutex again. 
*/
void DMemModelProcess::FinalRelease()
	{
	// Clean up any left-over chunks (such as SharedIo buffers)
	if (iProcessLock)
		while (iChunkCount)
			DoRemoveChunk(0);
	}

void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
	{
	// Note that this can't be called after the process $LOCK mutex has been deleted,
	// since it can only be called by a thread in this process doing a handle close or
	// dying, or by the process handles array being deleted due to the process dying,
	// all of which happen before $LOCK is deleted.
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
	Kern::MutexWait(*iProcessLock);
	TInt pos=0;
	TInt r=ChunkIndex(aChunk,pos);

	if (r==KErrNone)	// Found the chunk
		{
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[pos].iAccessCount));
		if (--iChunks[pos].iAccessCount==0)
			{
			DoRemoveChunk(pos);
#ifdef __CPU_HAS_BTB
			if ((aChunk->iAttributes & KAmSelfMod)==KAmSelfMod)	// was a self-mod code chunk
				if (iSelfModChunks)
					iSelfModChunks--;
#endif
			}
		}
	Kern::MutexSignal(*iProcessLock);
	}

TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk,TInt& aPos)
	{
	if (!aChunk)
		return KErrNotFound;
	SChunkInfo *pC=iChunks;
	SChunkInfo *pE=pC+iChunkCount;
	for (; pC<pE && pC->iChunk!=aChunk; ++pC);
	if (pC==pE)
		return KErrNotFound;
	aPos=pC-iChunks;
	return KErrNone;
	}

TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
	{
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
	TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	if (kernel_only && !(iAttributes&ESupervisor))
		return KErrNotSupported;
	if (seg.iAttr&ECodeSegAttKernel)
		return KErrNone;	// no extra mappings needed for kernel code
	TInt r=KErrNone;
	if (seg.Pages())
		r=MapUserRamCode(seg.Memory(),EFalse);
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (r==KErrNone && total_data_size)
			{
			TInt size=Mmu::RoundToPageSize(total_data_size);
			r=CommitDllData(data_base, size);
			if (r!=KErrNone && seg.Pages())
				UnmapUserRamCode(seg.Memory(), EFalse);
			}
		}
	return r;
	}

void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
	{
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
	if (seg.iAttr&ECodeSegAttKernel)
		return;	// no extra mappings needed for kernel code
	if (seg.IsDll())
		{
		TInt total_data_size;
		TLinAddr data_base;
		seg.GetDataSizeAndBase(total_data_size, data_base);
		if (total_data_size)
			DecommitDllData(data_base, Mmu::RoundToPageSize(total_data_size));
		}
	if (seg.Pages())
		UnmapUserRamCode(seg.Memory(), EFalse);
	}

void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held
//
	{
	}

TInt DMemModelProcess::CreateCodeChunk()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateCodeChunk",this));
	TBool kernel=iAttributes&ESupervisor;
	Mmu& m=Mmu::Get();
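	// Describe the process's $CODE chunk: a disconnected chunk used to map
	// executable code, placed at m.iUserCodeBase for user processes. It is
	// marked EMemoryNotOwned for user processes because the committed pages
	// are owned by the code segments mapped into it, not by the chunk itself.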
	SChunkCreateInfo c;
	c.iGlobal=kernel;
	c.iAtt = TChunkCreate::EDisconnected | (kernel ? 0 : TChunkCreate::EMemoryNotOwned);
	c.iForceFixed=EFalse;
	c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	c.iRunAddress=kernel ? 0 : m.iUserCodeBase;
	c.iPreallocated=0;
	c.iType=kernel ? EKernelCode : EUserCode;
	c.iMaxSize=m.iMaxUserCodeSize;
	c.iName.Set(KLitDollarCode);
	c.iOwner=this;
	c.iInitialTop=0;
	TLinAddr runAddr;
	TInt r = NewChunk((DChunk*&)iCodeChunk,c,runAddr);
	return r;
	}

void DMemModelProcess::FreeCodeChunk()
	{
	iCodeChunk->Close(this);
	iCodeChunk=NULL;
	}

TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory, TBool aLoading)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d %d",
									this, aMemory->iCodeSeg, aLoading, iOsAsid, aMemory->iIsDemandPaged));
	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);

	TInt r;

	if (!iCodeChunk)
		{
		r=CreateCodeChunk();
		__KTRACE_OPT(KPROC,Kern::Printf("CreateCodeChunk returns %d", r));
		if (r!=KErrNone)
			return r;
		}

	MmuBase::Wait();

	Mmu& m=Mmu::Get();
	TInt offset=aMemory->iRamInfo.iCodeRunAddr-TLinAddr(iCodeChunk->iBase);
	TInt codeSize = aMemory->iPageCount<<m.iPageShift;
	TBool paged = aMemory->iIsDemandPaged;
	DChunk::TCommitType commitType = paged ? DChunk::ECommitVirtual : DChunk::ECommitDiscontiguousPhysical;
	r=iCodeChunk->Commit(offset, codeSize, commitType, aMemory->iPages);
	__KTRACE_OPT(KPROC,Kern::Printf("Commit Pages returns %d", r));
	if (r==KErrNone)
		{
		if (aLoading && !paged)
			{
			iCodeChunk->ApplyPermissions(offset, codeSize, m.iUserCodeLoadPtePerm);
			UNLOCK_USER_MEMORY();
			memset((TAny*)(aMemory->iRamInfo.iCodeLoadAddr+aMemory->iRamInfo.iCodeSize+aMemory->iRamInfo.iDataSize), 0x03, codeSize-(aMemory->iRamInfo.iCodeSize+aMemory->iRamInfo.iDataSize));
			LOCK_USER_MEMORY();
			}
		if (aLoading && aMemory->iDataPageCount)
			{
			TInt dataSize = aMemory->iDataPageCount<<m.iPageShift;
			r=iCodeChunk->Commit(offset+codeSize, dataSize, DChunk::ECommitDiscontiguousPhysical, aMemory->iPages+aMemory->iPageCount);
			if (r==KErrNone)
				{
				iCodeChunk->ApplyPermissions(offset+codeSize, dataSize, m.iUserCodeLoadPtePerm);
				UNLOCK_USER_MEMORY();
				memset((TAny*)(aMemory->iRamInfo.iDataLoadAddr+aMemory->iRamInfo.iDataSize), 0x03, dataSize-aMemory->iRamInfo.iDataSize);
				LOCK_USER_MEMORY();
				}
			}
		if (r!=KErrNone)
			{
			// error, so decommit any code pages we had already committed...
			DChunk::TDecommitType decommitType = paged ? DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
			iCodeChunk->Decommit(offset, codeSize, decommitType);
			}
		else
			{
			// indicate codeseg is now successfully mapped into the process...
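			// (DMemModelCodeSegMemory::iOsAsids tracks the per-address-space
			// mapping state: the Free() here records the mapping for this ASID,
			// and the matching Alloc() in UnmapUserRamCode() reverses it.)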
			NKern::LockSystem();
			aMemory->iOsAsids->Free(iOsAsid);
			NKern::UnlockSystem();
			}
		}

	MmuBase::Signal();

	if (r!=KErrNone && iCodeChunk->iSize==0)
		FreeCodeChunk();	// clean up any unused code chunk we would otherwise leave lying around

	return r;
	}

void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory, TBool aLoading)
	{
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
									this, aMemory->iCodeSeg, iOsAsid, aMemory->iIsDemandPaged != 0));

	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);

	MmuBase::Wait();

	NKern::LockSystem();
	aMemory->iOsAsids->Alloc(iOsAsid, 1);
	NKern::UnlockSystem();

	Mmu& m=Mmu::Get();
	__NK_ASSERT_DEBUG(iCodeChunk);
	TInt offset=aMemory->iRamInfo.iCodeRunAddr-TLinAddr(iCodeChunk->iBase);
	TInt codeSize = aMemory->iPageCount<<m.iPageShift;
	TBool paged = aMemory->iIsDemandPaged;
	DChunk::TDecommitType decommitType = paged ? DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
	TInt r=iCodeChunk->Decommit(offset, codeSize, decommitType);
	__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
	(void)r;	// suppress the warning in urel build

	if (aLoading && aMemory->iDataPageCount)
		{
		// decommit pages used to store data section...
		TInt dataSize = aMemory->iDataPageCount<<m.iPageShift;
		r=iCodeChunk->Decommit(offset+codeSize, dataSize);
		__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
		(void)r;	// suppress the warning in urel build
		}
	__NK_ASSERT_DEBUG(iCodeChunk->iSize >= 0);

	MmuBase::Signal();

	if (iCodeChunk->iSize==0)
		FreeCodeChunk();
	}

TInt DMemModelProcess::CreateDllDataChunk()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateDllDataChunk",this));
	Mmu& m=Mmu::Get();
	SChunkCreateInfo c;
	c.iGlobal=EFalse;
	c.iAtt=TChunkCreate::EDisconnected;
	c.iForceFixed=EFalse;
	c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
	c.iRunAddress=m.iDllDataBase;
	c.iPreallocated=0;
	c.iType=EDllData;
	c.iMaxSize=m.iMaxDllDataSize;
	c.iName.Set(KLitDllDollarData);
	c.iOwner=this;
	c.iInitialTop=0;
	TLinAddr runAddr;
	return NewChunk((DChunk*&)iDllDataChunk,c,runAddr);
	}

void DMemModelProcess::FreeDllDataChunk()
	{
	iDllDataChunk->Close(this);
	iDllDataChunk=NULL;
	}

TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
	TInt r=KErrNone;
	if (!iDllDataChunk)
		r=CreateDllDataChunk();
	if (r==KErrNone)
		{
		TInt offset=aBase-(TLinAddr)iDllDataChunk->iBase;
		__ASSERT_ALWAYS(TUint32(offset)<TUint32(iDllDataChunk->iMaxSize),MM::Panic(MM::ECommitInvalidDllDataAddress));
		r=iDllDataChunk->Commit(offset, aSize);
		if (r!=KErrNone && iDllDataChunk->iSize==0)
			FreeDllDataChunk();
		}
	__KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
	return r;
	}

void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
	TInt offset=aBase-(TLinAddr)iDllDataChunk->iBase;
	TInt r=iDllDataChunk->Decommit(offset, aSize);
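	// A failing decommit means aBase/aSize did not describe a committed region of
	// DLL$DATA; that is a kernel inconsistency, hence the panic below.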
	__ASSERT_ALWAYS(r==KErrNone,MM::Panic(MM::EDecommitInvalidDllDataAddress));
	if (iDllDataChunk->iSize==0)
		FreeDllDataChunk();
	}

TInt DMemModelProcess::NewShPool(DShPool*& /* aPool */, TShPoolCreateInfo& /* aInfo */)
	{
	return KErrNotSupported;
	}


TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* /*aExcTrap*/)
//
// Read from the thread's process.
// Enter and return with system locked.
// aSrc		Run address of memory to read
// aDest	Current address of destination
// aExcTrap	Exception trap object to be updated if the actual memory access is performed on a different memory area than specified.
//			This happens when reading is performed on an unaligned memory area.
//
	{
	Mmu& m=Mmu::Get();
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TBool localIsSafe=ETrue;
	TInt result = KErrNone;

	while (aLength)
		{
		if (iMState==EDead)
			{
			result = KErrDied;
			break;
			}
		TLinAddr alias_src;
		TInt alias_size;
		TInt alias_result=t.Alias(src, pP, aLength, EMapAttrReadUser, alias_src, alias_size);
		if (alias_result<0)
			{
			result = KErrBadDescriptor;	// bad permissions
			break;
			}
		NKern::UnlockSystem();

		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));
		if (aFlags&KCheckLocalAddress)
			localIsSafe = m.ValidateLocalIpcAddress(dest,alias_size,ETrue);

		CHECK_PAGING_SAFE;

		COND_UNLOCK_USER_MEMORY(localIsSafe);

		if (alias_result)
			{
			// remote address is safe for direct access...
			if (localIsSafe)
				memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
			else
				umemput( (TAny*)dest, (const TAny*)alias_src, alias_size);
			}
		else
			{
			// remote address is NOT safe for direct access, so use user permission checks when reading...
			if (localIsSafe)
				umemget( (TAny*)dest, (const TAny*)alias_src, alias_size);
			else
				uumemcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
			}

		LOCK_USER_MEMORY();

		src+=alias_size;
		dest+=alias_size;
		aLength-=alias_size;
		NKern::LockSystem();
		}
	t.RemoveAlias();
	return result;
	}

TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* anOriginatingThread, TIpcExcTrap* /*aExcTrap*/)
//
// Write to the thread's process.
// Enter and return with system locked.
// aDest				Run address of memory to write
// aSrc					Current address of source
// anOriginatingThread	The thread on behalf of which this operation is performed (eg client of device driver).
// aExcTrap				Exception trap object to be updated if the actual memory access is performed on a different memory area than specified.
//						This happens when writing is performed on an unaligned memory area.
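// Note: when both this process and the originating thread's process are the file
// server, the alias is created with supervisor write permissions (the 'special'
// case below); all other writes use user write permissions.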
//
	{
	Mmu& m=Mmu::Get();
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TBool localIsSafe=ETrue;
	DThread* pO=anOriginatingThread?anOriginatingThread:&t;
	DProcess* pF=K::TheFileServerProcess;
	TBool special=(iOwningProcess==pF && pO->iOwningProcess==pF);
	TUint32 perm=special ? EMapAttrWriteSup : EMapAttrWriteUser;
	TInt result = KErrNone;

	while (aLength)
		{
		if (iMState==EDead)
			{
			result = KErrDied;
			break;
			}
		TLinAddr alias_dest;
		TInt alias_size;
		TInt alias_result=t.Alias(dest, pP, aLength, perm, alias_dest, alias_size);
		if (alias_result<0)
			{
			result = KErrBadDescriptor;	// bad permissions
			break;
			}
		NKern::UnlockSystem();

		__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));
		if (aFlags&KCheckLocalAddress)
			localIsSafe = m.ValidateLocalIpcAddress(src,alias_size,EFalse);

		// Must check that it is safe to page, unless we are reading from unpaged ROM in which case
		// we allow it. umemget and uumemcpy do this anyway, so we just need to check if
		// localIsSafe is set.
		if (localIsSafe)
			{
			CHECK_PAGING_SAFE_RANGE(src, aLength);
			CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);
			}

		COND_UNLOCK_USER_MEMORY(localIsSafe);

		if (alias_result)
			{
			// remote address is safe for direct access...
			if (localIsSafe)
				memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
			else
				umemget( (TAny*)alias_dest, (const TAny*)src, alias_size);
			}
		else
			{
			// remote address is NOT safe for direct access, so use user permission checks when writing...
			if (localIsSafe)
				umemput( (TAny*)alias_dest, (const TAny*)src, alias_size);
			else
				uumemcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
			}

		LOCK_USER_MEMORY();

		src+=alias_size;
		dest+=alias_size;
		aLength-=alias_size;
		NKern::LockSystem();
		}
	t.RemoveAlias();
	return result;
	}

#ifdef __DEBUGGER_SUPPORT__

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	Mmu& m=Mmu::Get();
	MmuBase::Wait();

	NKern::LockSystem();

	// Find the physical address of the page the breakpoint belongs to
	TPhysAddr physAddr = m.LinearToPhysical(aAddress,((DMemModelProcess*)aProcess)->iOsAsid);
	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));
	if (physAddr==KPhysAddrInvalid)
		{
		__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
		NKern::UnlockSystem();
		MmuBase::Signal();
		return KErrBadDescriptor;
		}

	// Temporarily map the physical page
	TLinAddr tempAddr = m.MapTemp(physAddr&~m.iPageMask, aAddress);
	tempAddr |= aAddress & m.iPageMask;
	__KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));

	// Set the exception handler. Make sure the boundaries cover the worst case (aSize = 4)
	TIpcExcTrap xt;
	xt.iLocalBase=0;
	xt.iRemoteBase=(TLinAddr)tempAddr&~3;	// word aligned
	xt.iSize=sizeof(TInt);
	xt.iDir=1;

	TInt r=xt.Trap(NULL);
	if (r==0)
		{
		r = WriteCode(tempAddr, aSize, aValue, aOldValue);
		xt.UnTrap();
		}

	m.UnmapTemp();
	NKern::UnlockSystem();
	MmuBase::Signal();
	return r;
	}

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::WriteCode(TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
	{
	// We do not want to be interrupted by e.g. an ISR that would run altered code before IMB-Range.
	// Therefore, copy data and clean/invalidate caches with interrupts disabled.
	TInt irq=NKern::DisableAllInterrupts();
	switch (aSize)
		{
		case 1:
			*(TUint8*)aOldValue = *(TUint8*)aAddress;
			*(TUint8*)aAddress = (TUint8)aValue;
			break;
		case 2:
			*(TUint16*)aOldValue = *(TUint16*)aAddress;
			*(TUint16*)aAddress = (TUint16)aValue;
			break;
		default:	// it is 4 otherwise
			*(TUint32*)aOldValue = *(TUint32*)aAddress;
			*(TUint32*)aAddress = (TUint32)aValue;
			break;
		}
	CacheMaintenance::CodeChanged(aAddress, aSize, CacheMaintenance::ECodeModifier);
	NKern::RestoreInterrupts(irq);
	return KErrNone;
	}
#endif //__DEBUGGER_SUPPORT__


#ifdef __MARM__

// the body of ReadDesHeader is machine coded on ARM...
extern TInt ThreadDoReadAndParseDesHeader(DThread* aThread, const TAny* aSrc, TUint32* aDest);

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read and parse the header of a remote descriptor.
// Enter and return with system locked.
//
	{
	// todo: remove use of system lock from callers, when they have been un-exported from the kernel
	NKern::UnlockSystem();
	TInt r = ThreadDoReadAndParseDesHeader(this,aSrc,(TUint32*)&aDest);
	NKern::LockSystem();
	return r;
	}


#else // !__MARM__


TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read and parse the header of a remote descriptor.
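// The header is one, two or three words long depending on the descriptor type
// (held in the top four bits of the first word); LengthLookup below maps each
// type to its header length in bytes, with 0 marking an invalid type.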
// Enter and return with system locked.
//
	{
	static const TUint8 LengthLookup[16] = {4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};

	DMemModelThread& t = *(DMemModelThread*)TheCurrentThread;
	TInt r = KErrBadDescriptor;

	CHECK_PAGING_SAFE;

	DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
	TLinAddr src = (TLinAddr)aSrc;
	const TUint32* pAlias;
	TInt alias_size;
	TInt alias_result = t.Alias(src, pP, 12, EMapAttrReadUser, (TLinAddr&)pAlias, alias_size);
	if (alias_result<0)
		return KErrBadDescriptor;	// bad permissions
	NKern::UnlockSystem();
	t.iIpcClient = this;
	TUint32* dest = (TUint32*)&aDest;
	if (Kern::SafeRead(pAlias, dest, sizeof(TUint32)))
		goto fail;

	{
	TInt type=*dest>>KShiftDesType8;

	src += sizeof(TUint32);
	alias_size -= sizeof(TUint32);
	++pAlias;
	++dest;

	TInt l=LengthLookup[type];
	if (l==0)
		goto fail;

	l -= sizeof(TUint32);	// we've already read one word
	if (l>0 && alias_size)
		{
get_more:
		// more to go - get the rest, or as much as is currently aliased
		TInt ll = alias_size>=l ? l : alias_size;
		if (Kern::SafeRead(pAlias, dest, ll))
			goto fail;
		l -= ll;
		src += TLinAddr(ll);
		dest = (TUint32*)(TLinAddr(dest) + TLinAddr(ll));
		}
	if (l>0)
		{
		// more to go - need to step the alias on
		NKern::LockSystem();
		alias_result = t.Alias(src, pP, l, EMapAttrReadUser, (TLinAddr&)pAlias, alias_size);
		if (alias_result<0)
			goto fail_locked;
		NKern::UnlockSystem();
		goto get_more;
		}

	r = K::ParseDesHeader(aSrc, *(TRawDesHeader*)&aDest, aDest);
	}

fail:
	NKern::LockSystem();
fail_locked:
	t.RemoveAlias();
	t.iIpcClient = NULL;
	return r;
	}


#endif


DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
	{
	NKern::LockSystem();

	DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
	DMemModelProcess::SChunkInfo* pS=pP->iChunks;
	DMemModelProcess::SChunkInfo* pC=pS+pP->iChunkCount;
	while (--pC>=pS && TUint(pC->iChunk->Base())>TUint(aAddress)) {}
	if (pC>=pS)
		{
		DMemModelChunk* chunk = pC->iChunk;
		if (chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
			{
			TInt offset = (TInt)aAddress-(TInt)chunk->Base();
			if (TUint(offset)<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
				{
				aOffset = offset;
				NKern::UnlockSystem();
				return chunk;
				}
			}
		}
	NKern::UnlockSystem();
	return 0;
	}

TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	TInt asid = ((DMemModelProcess*)iOwningProcess)->iOsAsid;
	Mmu& m=(Mmu&)*MmuBase::TheMmu;
	return m.PreparePagesForDMA((TLinAddr)aLinAddr, aSize, asid, aPhysicalPageList);
	}

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	{
	TInt pageCount = (((TInt)aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
	Mmu& m=(Mmu&)*MmuBase::TheMmu;
	return m.ReleasePagesFromDMA(aPhysicalPageList, pageCount);
	}