// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mprocess.cpp
//
20 #include "cache_maintenance.h"
21 #include <demand_paging.h>
#define iMState iWaitLink.iSpare1

// just for convenience...
#define KAmSelfMod (DMemModelChunk::ECode | DMemModelChunk::EAddressLocal)

_LIT(KDollarDat,"$DAT");
_LIT(KLitDollarCode,"$CODE");
_LIT(KLitDllDollarData,"DLL$DATA");

extern void __FlushBtb();

const TInt KChunkGranularity=4;
/********************************************
 * Process
 ********************************************/
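// Final destruction of the process's address space. By this point every chunk
// (including $CODE and DLL$DATA) must already have been removed; the local
// linear section is freed and the OS ASID is handed back to the MMU.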
void DMemModelProcess::Destruct()
    __ASSERT_ALWAYS(!iChunkCount && !iCodeChunk && !iDllDataChunk, MM::Panic(MM::EProcessDestructChunksRemaining));
    Kern::Free(iLocalSection);
    Mmu& m=Mmu::Get();
    m.FreeOsAsid(iOsAsid);
    LastUserSelfMod=0;  // must force a BTB flush when the next self-mod chunk is switched in
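// Create a new chunk on behalf of this process: allocate the DMemModelChunk,
// perform any initial commit/adjust requested in aInfo, and add it to this
// process's address space unless it is a global chunk. Chunks that may hold
// self-modifying code force a BTB flush, since stale branch predictions could
// otherwise hit the chunk's address range.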
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
    DMemModelChunk* pC=NULL;
    TInt r=GetNewChunk(pC,aInfo);
    TInt mapType=pC->iAttributes & DMemModelChunk::EMapTypeMask;
    pC->iOwningProcess=(mapType==DMemModelChunk::EMapTypeLocal)?this:NULL;
    if ((pC->iAttributes & KAmSelfMod) == KAmSelfMod)  // it's a potentially overlapping self-mod
        iSelfModChunks++;
        LastUserSelfMod = this;  // we become the last self-modifying process
        __FlushBtb();  // required: there may be bad branches already in the BTB
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
        if (aInfo.iRunAddress!=0)
            pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
        if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
            if (pC->iAttributes & DChunk::EDisconnected)
                r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
            else if (pC->iAttributes & DChunk::EDoubleEnded)
                r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
            else
                r=pC->Adjust(aInfo.iInitialTop);
    if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
//      if (pC->iAttributes & DMemModelChunk::ECode)
//          MM::TheMmu->SyncCodeMappings();
        if (mapType!=DMemModelChunk::EMapTypeGlobal)
            r=AddChunk(pC,aRunAddr,EFalse);
        else
            aRunAddr=(TLinAddr)pC->Base();
    if (r==KErrNone)
        if(pC->iKernelMirror)
            aRunAddr = (TLinAddr)pC->iKernelMirror->Base();
        pC->iDestroyedDfc = aInfo.iDestroyedDfc;
        aChunk=(DChunk*)pC;
    else
        pC->Close(NULL);  // NULL since chunk can't have been added to process
    return r;
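// Second-phase construction of the process. The kernel process claims OS ASID
// 0 and the global page directory; a user process allocates a fresh OS ASID,
// records the physical addresses of its local and global page directories,
// and creates the local linear section that will hold its private chunks.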
TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
    __KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));
    Mmu& m=Mmu::Get();
    TInt r=KErrNone;
    iSelfModChunks=0;  // we don't have any yet
    if (aKernelProcess)
        iAttributes |= ESupervisor;
        // Leave these till Mmu::Init2
        // if (m.iLocalPdSize)
        //     iLocalPageDir=m.LinearToPhysical(TLinAddr(m.LocalPageDir(0)));
        // iGlobalPageDir=m.LinearToPhysical(TLinAddr(m.GlobalPageDir(0)));
        m.iAsidInfo[0]=((TUint32)this)|1;
        iAddressCheckMaskR=0xffffffff;
        iAddressCheckMaskW=0xffffffff;
    else
        r=m.NewOsAsid(EFalse);
        if (r>=0)
            iOsAsid=r;
            iLocalPageDir=m.LinearToPhysical(TLinAddr(m.LocalPageDir(r)));
            iGlobalPageDir=m.LinearToPhysical(TLinAddr(m.GlobalPageDir(r)));
            m.iAsidInfo[r] |= (TUint32)this;
    if (r==KErrNone && 0==(iLocalSection=TLinearSection::New(m.iUserLocalBase, m.iUserLocalEnd)) )
        r=KErrNoMemory;
    __KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, LPD=%08x, GPD=%08x, ASID info=%08x",iOsAsid,iLocalPageDir,
        iGlobalPageDir,m.iAsidInfo[iOsAsid]));
    __KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));
    return r;
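// Create the process's $DAT chunk: a disconnected chunk at the bottom of the
// user-local section holding .data/.bss, with room reserved above for thread
// stacks. For example (assuming a 4KB page size), iTotalDataSize=0x1234 rounds
// up to dataBssSize=0x2000, giving a chunk max size of 0x2000 plus
// PP::MaxStackSpacePerProcess.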
TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
    Mmu& m=Mmu::Get();
    TInt dataBssSize=Mmu::RoundToPageSize(aInfo.iTotalDataSize);
    TInt maxSize=dataBssSize+PP::MaxStackSpacePerProcess;
    TLinAddr dataRunAddress=m.iUserLocalBase;
    iDataBssRunAddress=dataRunAddress;
    __KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, chunk max size %x",dataBssSize,maxSize));
    SChunkCreateInfo cinfo;
    cinfo.iGlobal=EFalse;
    cinfo.iAtt=TChunkCreate::EDisconnected;
    cinfo.iForceFixed=EFalse;
    cinfo.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
    cinfo.iType=EUserData;
    cinfo.iMaxSize=maxSize;
    cinfo.iInitialBottom=0;
    cinfo.iInitialTop=dataBssSize;
    cinfo.iPreallocated=0;
    cinfo.iName.Set(KDollarDat);
    TLinAddr cb;
    TInt r=NewChunk((DChunk*&)iDataBssStackChunk,cinfo,cb);
    return r;
TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool isReadOnly)
    DMemModelChunk* pC=(DMemModelChunk*)aChunk;
    if ((pC->iAttributes & DMemModelChunk::EPrivate) && this!=pC->iOwningProcess)
        return KErrAccessDenied;
    TInt r=WaitProcessLock();
    TInt pos;
    r=ChunkIndex(pC,pos);
    TLinAddr dataSectionBase=0;
    if (r==0)  // Found the chunk in this process, just up its count
        iChunks[pos].iAccessCount++;
        __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[pos].iAccessCount));
    else
        r=AddChunk(pC,dataSectionBase,isReadOnly);
    SignalProcessLock();
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
    return r;
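// Called when the file server registers a thread: unless the RAM drive chunk
// is process-local, it is added to the calling thread's process so that the
// RAM drive becomes addressable there.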
void M::FsRegisterThread()
    DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
    TInt mapType=pC->iAttributes & DMemModelChunk::EMapTypeMask;
    if (mapType!=DMemModelChunk::EMapTypeLocal)
        DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
        TLinAddr dataSectionBase;
        TInt r=pP->WaitProcessLock();
        r=pP->AddChunk(pC,dataSectionBase,EFalse);
        __ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
        pP->SignalProcessLock();
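// Insert aChunk into this process's iChunks array, which is kept sorted by
// base address. The scan below locates the insertion point and rejects
// overlapping mappings; the array grows by KChunkGranularity entries at a
// time. For shared chunks, this process's OS ASID is also recorded in the
// chunk's ASID set and the chunk's page tables are assigned to this address
// space.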
TInt DMemModelProcess::AddChunk(DMemModelChunk* aChunk, TLinAddr& aDataSectionBase, TBool isReadOnly)
//
// Must hold the process $LOCK mutex before calling this.
//
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %O to %O",aChunk,this));
    Mmu& m=Mmu::Get();
    SChunkInfo *pC=iChunks;
    SChunkInfo *pE=pC+iChunkCount-1;
    TLinAddr base=TLinAddr(aChunk->iBase);
    if ((aChunk->iAttributes & KAmSelfMod)==KAmSelfMod)  // it's a potentially overlapping self-mod
        iSelfModChunks++;
        LastUserSelfMod = this;  // we become the last self-modifying process
        __FlushBtb();  // required: there may be bad branches already in the BTB
    // find the insertion point in the address-ordered array, checking for overlap
    for (; pE>=pC && TLinAddr(pE->iChunk->iBase)>base; --pE);
    if (pE>=pC && TLinAddr(pE->iChunk->iBase)+pE->iChunk->iMaxSize>base)
        return KErrInUse;  // assumed error code: overlap with the preceding chunk
    pC=pE+1;
    if (pC<iChunks+iChunkCount && base+aChunk->iMaxSize>TLinAddr(pC->iChunk->iBase))
        return KErrInUse;  // assumed error code: overlap with the following chunk
    TInt i=pC-iChunks;
    if (iChunkCount==iChunkAlloc)
        TInt newAlloc=iChunkAlloc+KChunkGranularity;
        TInt r=Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
        if (r!=KErrNone)
            return r;
        iChunkAlloc=newAlloc;
        pC=iChunks+i;  // the array may have moved
    memmove(pC+1,pC,(iChunkCount-i)*sizeof(SChunkInfo));
    ++iChunkCount;
    pC->iChunk=aChunk;
    pC->iAccessCount=1;
    pC->isReadOnly=isReadOnly;
    aDataSectionBase=base;
    if (aChunk->iOsAsids)
        // only need to do address space manipulation for shared chunks
        NKern::LockSystem();
        aChunk->iOsAsids->Alloc(iOsAsid,1);
        TLinAddr a;
        i=0;
        for (a=TLinAddr(aChunk->iBase); a<TLinAddr(aChunk->iBase)+aChunk->iMaxSize; a+=m.iChunkSize, ++i)
            TInt ptid=aChunk->iPageTables[i];
            m.DoAssignPageTable(ptid,a,aChunk->iPdePermissions,(const TAny*)iOsAsid);
        NKern::UnlockSystem();
    if (aChunk->iChunkType==ERamDrive)
        NKern::LockSystem();
        iAddressCheckMaskR |= m.iRamDriveMask;
        iAddressCheckMaskW |= m.iRamDriveMask;
        NKern::UnlockSystem();
    __DEBUG_EVENT(EEventUpdateProcess, this);
    return KErrNone;
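// Remove the chunk at aIndex from the iChunks array. For shared chunks the
// OS ASID is freed from the chunk's ASID set and its page tables are
// unassigned, after which the data TLB (plus the instruction TLB for code
// chunks) is flushed.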
void DMemModelProcess::DoRemoveChunk(TInt aIndex)
    __DEBUG_EVENT(EEventUpdateProcess, this);
    DMemModelChunk* chunk = iChunks[aIndex].iChunk;
    memmove(iChunks+aIndex, iChunks+aIndex+1, (iChunkCount-aIndex-1)*sizeof(SChunkInfo));
    --iChunkCount;
    Mmu& m=Mmu::Get();
    if (chunk->iOsAsids)
        // only need to do address space manipulation for shared chunks
        NKern::LockSystem();
        chunk->iOsAsids->Free(iOsAsid);
        TLinAddr a;
        for (a=TLinAddr(chunk->iBase); a<TLinAddr(chunk->iBase)+chunk->iMaxSize; a+=m.iChunkSize)
            m.DoUnassignPageTable(a,(const TAny*)iOsAsid);
        TUint32 mask=(chunk->iAttributes&DMemModelChunk::ECode)?Mmu::EFlushITLB:0;
        m.GenericFlush(mask|Mmu::EFlushDTLB);
        NKern::UnlockSystem();
    if (chunk->iChunkType==ERamDrive)
        NKern::LockSystem();
        iAddressCheckMaskR &= ~m.iRamDriveMask;
        iAddressCheckMaskW &= ~m.iRamDriveMask;
        NKern::UnlockSystem();
/**
Final chance for the process to release resources during its death.

Called with the process $LOCK mutex held (if it exists).
This mutex will not be released before it is deleted,
i.e. no other thread will ever hold the mutex again.
*/
void DMemModelProcess::FinalRelease()
    // Clean up any left-over chunks (such as SharedIo buffers)
    while (iChunkCount)  // assumed loop: the original statements here were elided
        DoRemoveChunk(0);
void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
    // note that this can't be called after the process $LOCK mutex has been deleted
    // since it can only be called by a thread in this process doing a handle close or
    // dying, or by the process handles array being deleted due to the process dying,
    // all of which happen before $LOCK is deleted.
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
    Kern::MutexWait(*iProcessLock);
    TInt pos;
    TInt r=ChunkIndex(aChunk,pos);
    if (r==KErrNone)  // found the chunk
        __KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[pos].iAccessCount));
        if (--iChunks[pos].iAccessCount==0)
            DoRemoveChunk(pos);
            if ((aChunk->iAttributes & KAmSelfMod)==KAmSelfMod)  // was a self-mod code chunk
                iSelfModChunks--;
    Kern::MutexSignal(*iProcessLock);
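// Linear search of the (small, address-ordered) iChunks array; on success
// returns KErrNone and writes the chunk's index to aPos.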
TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk,TInt& aPos)
    SChunkInfo *pC=iChunks;
    SChunkInfo *pE=pC+iChunkCount;
    for (; pC<pE && pC->iChunk!=aChunk; ++pC);
    if (pC==pE)
        return KErrNotFound;
    aPos=pC-iChunks;
    return KErrNone;
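// Map a code segment into this process. Kernel code needs no per-process
// mapping; user RAM-loaded code is committed into the process's $CODE chunk,
// and any DLL writable static data is committed into the DLL$DATA chunk. If
// committing the data fails, the code mapping is rolled back.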
TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
    DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
    __KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
    TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
    if (kernel_only && !(iAttributes&ESupervisor))
        return KErrNotSupported;
    if (seg.iAttr&ECodeSegAttKernel)
        return KErrNone;  // no extra mappings needed for kernel code
    TInt r=KErrNone;
    if (seg.Pages())
        r=MapUserRamCode(seg.Memory(),EFalse);
    TInt total_data_size;
    TLinAddr data_base;
    seg.GetDataSizeAndBase(total_data_size, data_base);
    if (r==KErrNone && total_data_size)
        TInt size=Mmu::RoundToPageSize(total_data_size);
        r=CommitDllData(data_base, size);
        if (r!=KErrNone && seg.Pages())
            UnmapUserRamCode(seg.Memory(), EFalse);
    return r;
void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
    DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
    __KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
    if (seg.iAttr&ECodeSegAttKernel)
        return;  // no extra mappings needed for kernel code
    TInt total_data_size;
    TLinAddr data_base;
    seg.GetDataSizeAndBase(total_data_size, data_base);
    if (total_data_size)
        DecommitDllData(data_base, Mmu::RoundToPageSize(total_data_size));
    if (seg.Pages())
        UnmapUserRamCode(seg.Memory(), EFalse);
void DMemModelProcess::RemoveDllData()
//
// Call with CodeSegLock held.
//
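// Create the $CODE chunk on demand: a disconnected chunk covering the user
// code area (or the kernel code area for the kernel process). User code chunks
// are marked EMemoryNotOwned because the physical pages belong to the code
// segments mapped into them, not to the chunk itself.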
TInt DMemModelProcess::CreateCodeChunk()
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateCodeChunk",this));
    TBool kernel=iAttributes&ESupervisor;
    Mmu& m=Mmu::Get();
    SChunkCreateInfo c;
    c.iAtt = TChunkCreate::EDisconnected | (kernel? 0 : TChunkCreate::EMemoryNotOwned);
    c.iForceFixed=EFalse;
    c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
    c.iRunAddress=kernel ? 0 : m.iUserCodeBase;
    c.iType=kernel ? EKernelCode : EUserCode;
    c.iMaxSize=m.iMaxUserCodeSize;
    c.iName.Set(KLitDollarCode);
    TLinAddr runAddr;
    TInt r = NewChunk((DChunk*&)iCodeChunk,c,runAddr);
    return r;
void DMemModelProcess::FreeCodeChunk()
    iCodeChunk->Close(this);
    iCodeChunk=NULL;
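// Commit a code segment's memory into the $CODE chunk. Demand-paged code only
// commits virtual space (pages arrive later via the paging system); otherwise
// the segment's physical pages are committed directly. While the loader still
// owns the segment (aLoading), the mapping is made writable and the slack
// beyond the loaded image is filled with a 0x03 byte pattern.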
TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory, TBool aLoading)
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d %d",
        this, aMemory->iCodeSeg, aLoading, iOsAsid, aMemory->iIsDemandPaged));
    __ASSERT_MUTEX(DCodeSeg::CodeSegLock);
    Mmu& m=Mmu::Get();
    TInt r=KErrNone;
    if (!iCodeChunk)
        r=CreateCodeChunk();
    __KTRACE_OPT(KPROC,Kern::Printf("CreateCodeChunk returns %d", r));
    TInt offset=aMemory->iRamInfo.iCodeRunAddr-TLinAddr(iCodeChunk->iBase);
    TInt codeSize = aMemory->iPageCount<<m.iPageShift;
    TBool paged = aMemory->iIsDemandPaged;
    DChunk::TCommitType commitType = paged ? DChunk::ECommitVirtual : DChunk::ECommitDiscontiguousPhysical;
    r=iCodeChunk->Commit(offset, codeSize, commitType, aMemory->iPages);
    __KTRACE_OPT(KPROC,Kern::Printf("Commit Pages returns %d", r));
    if (aLoading && !paged)
        iCodeChunk->ApplyPermissions(offset, codeSize, m.iUserCodeLoadPtePerm);
        UNLOCK_USER_MEMORY();
        memset((TAny*)(aMemory->iRamInfo.iCodeLoadAddr+aMemory->iRamInfo.iCodeSize+aMemory->iRamInfo.iDataSize), 0x03, codeSize-(aMemory->iRamInfo.iCodeSize+aMemory->iRamInfo.iDataSize));
        LOCK_USER_MEMORY();
    if(aLoading && aMemory->iDataPageCount)
        TInt dataSize = aMemory->iDataPageCount<<m.iPageShift;
        r=iCodeChunk->Commit(offset+codeSize, dataSize, DChunk::ECommitDiscontiguousPhysical, aMemory->iPages+aMemory->iPageCount);
        iCodeChunk->ApplyPermissions(offset+codeSize, dataSize, m.iUserCodeLoadPtePerm);
        UNLOCK_USER_MEMORY();
        memset((TAny*)(aMemory->iRamInfo.iDataLoadAddr+aMemory->iRamInfo.iDataSize), 0x03, dataSize-aMemory->iRamInfo.iDataSize);
        LOCK_USER_MEMORY();
    if (r!=KErrNone)
        // error, so decommit the code pages we had already committed...
        DChunk::TDecommitType decommitType = paged ? DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
        iCodeChunk->Decommit(offset, codeSize, decommitType);
    if (r==KErrNone)
        // indicate the codeseg is now successfully mapped into the process...
        NKern::LockSystem();
        aMemory->iOsAsids->Free(iOsAsid);
        NKern::UnlockSystem();
    if(r!=KErrNone && iCodeChunk->iSize==0)
        FreeCodeChunk();  // clean up any unused code chunk we would otherwise leave lying around
    return r;
void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory, TBool aLoading)
    __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
        this, aMemory->iCodeSeg, iOsAsid, aMemory->iIsDemandPaged != 0));
    __ASSERT_MUTEX(DCodeSeg::CodeSegLock);
    Mmu& m=Mmu::Get();
    NKern::LockSystem();
    aMemory->iOsAsids->Alloc(iOsAsid, 1);  // indicate the codeseg is no longer mapped into this process
    NKern::UnlockSystem();
    __NK_ASSERT_DEBUG(iCodeChunk);
    TInt offset=aMemory->iRamInfo.iCodeRunAddr-TLinAddr(iCodeChunk->iBase);
    TInt codeSize = aMemory->iPageCount<<m.iPageShift;
    TBool paged = aMemory->iIsDemandPaged;
    DChunk::TDecommitType decommitType = paged ? DChunk::EDecommitVirtual : DChunk::EDecommitNormal;
    TInt r=iCodeChunk->Decommit(offset, codeSize, decommitType);
    __ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
    (void)r;  // suppress the warning in urel build
    if(aLoading && aMemory->iDataPageCount)
        // decommit pages used to store the data section...
        TInt dataSize = aMemory->iDataPageCount<<m.iPageShift;
        r=iCodeChunk->Decommit(offset+codeSize, dataSize);
        __ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
        (void)r;  // suppress the warning in urel build
    __NK_ASSERT_DEBUG(iCodeChunk->iSize >= 0);
    if (iCodeChunk->iSize==0)
        FreeCodeChunk();
TInt DMemModelProcess::CreateDllDataChunk()
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateDllDataChunk",this));
    Mmu& m=Mmu::Get();
    SChunkCreateInfo c;
    c.iAtt=TChunkCreate::EDisconnected;
    c.iForceFixed=EFalse;
    c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
    c.iRunAddress=m.iDllDataBase;
    c.iMaxSize=m.iMaxDllDataSize;
    c.iName.Set(KLitDllDollarData);
    TLinAddr runAddr;
    return NewChunk((DChunk*&)iDllDataChunk,c,runAddr);
void DMemModelProcess::FreeDllDataChunk()
    iDllDataChunk->Close(this);
    iDllDataChunk=NULL;
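// Commit aSize bytes at aBase for a DLL's writable static data, creating the
// DLL$DATA chunk on first use and freeing it again if the commit fails while
// the chunk is still empty.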
TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize)
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
    TInt r=KErrNone;
    if (!iDllDataChunk)
        r=CreateDllDataChunk();
    TInt offset=aBase-(TLinAddr)iDllDataChunk->iBase;
    __ASSERT_ALWAYS(TUint32(offset)<TUint32(iDllDataChunk->iMaxSize),MM::Panic(MM::ECommitInvalidDllDataAddress));
    r=iDllDataChunk->Commit(offset, aSize);
    if (r!=KErrNone && iDllDataChunk->iSize==0)
        FreeDllDataChunk();
    __KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
    return r;
void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
    __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
    TInt offset=aBase-(TLinAddr)iDllDataChunk->iBase;
    TInt r=iDllDataChunk->Decommit(offset, aSize);
    __ASSERT_ALWAYS(r==KErrNone,MM::Panic(MM::EDecommitInvalidDllDataAddress));
    if (iDllDataChunk->iSize==0)
        FreeDllDataChunk();
TInt DMemModelProcess::NewShPool(DShPool*& /* aPool */, TShPoolCreateInfo& /* aInfo */)
    return KErrNotSupported;
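// Cross-process read used by IPC: the current thread temporarily aliases part
// of the target process's address space into its own, then picks a copy
// routine according to whether each side is safe to touch directly:
//   remote safe,   local safe    -> memcpy
//   remote safe,   local unsafe  -> umemput  (permission-checked write)
//   remote unsafe, local safe    -> umemget  (permission-checked read)
//   remote unsafe, local unsafe  -> uumemcpy (permission-checked both ways)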
TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* /*aExcTrap*/)
//
// Read from the thread's process.
// Enter and return with system locked.
// aSrc      Run address of memory to read
// aDest     Current address of destination
// aExcTrap  Exception trap object to be updated if the actual memory access is performed on a
//           memory area other than the one specified. This happens when the read is performed
//           on an unaligned memory area.
//
    Mmu& m=Mmu::Get();
    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;
    TLinAddr dest=(TLinAddr)aDest;
    TBool localIsSafe=ETrue;
    TInt result = KErrNone;
    TLinAddr alias_src;
    TInt alias_size;
    TInt alias_result=t.Alias(src, pP, aLength, EMapAttrReadUser, alias_src, alias_size);
    if (alias_result<0)
        result = KErrBadDescriptor;  // bad permissions
    NKern::UnlockSystem();
    __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));
    if(aFlags&KCheckLocalAddress)
        localIsSafe = m.ValidateLocalIpcAddress(dest,alias_size,ETrue);
    COND_UNLOCK_USER_MEMORY(localIsSafe);
    // remote address is safe for direct access...
    memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
    umemput( (TAny*)dest, (const TAny*)alias_src, alias_size);
    // remote address is NOT safe for direct access, so use user permission checks when reading...
    umemget( (TAny*)dest, (const TAny*)alias_src, alias_size);
    uumemcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
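// Cross-process write, the mirror image of RawRead. One special case: when
// both this thread's process and the originating client are the file server,
// the alias is created with supervisor write permission (EMapAttrWriteSup)
// rather than user write permission. Paging-safety checks guard against
// taking a demand-paging fault at a point where it cannot be serviced.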
TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* anOriginatingThread, TIpcExcTrap* /*aExcTrap*/)
//
// Write to the thread's process.
// Enter and return with system locked.
// aDest                Run address of memory to write
// aSrc                 Current address of source
// anOriginatingThread  The thread on behalf of which this operation is performed (e.g. the client of a device driver)
// aExcTrap             Exception trap object to be updated if the actual memory access is performed on a
//                      memory area other than the one specified. This happens when the write is performed
//                      on an unaligned memory area.
//
    Mmu& m=Mmu::Get();
    DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
    DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
    TLinAddr src=(TLinAddr)aSrc;
    TLinAddr dest=(TLinAddr)aDest;
    TBool localIsSafe=ETrue;
    DThread* pO=anOriginatingThread?anOriginatingThread:&t;
    DProcess* pF=K::TheFileServerProcess;
    TBool special=(iOwningProcess==pF && pO->iOwningProcess==pF);
    TUint32 perm=special ? EMapAttrWriteSup : EMapAttrWriteUser;
    TInt result = KErrNone;
    TLinAddr alias_dest;
    TInt alias_size;
    TInt alias_result=t.Alias(dest, pP, aLength, perm, alias_dest, alias_size);
    if (alias_result<0)
        result = KErrBadDescriptor;  // bad permissions
    NKern::UnlockSystem();
    __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));
    if(aFlags&KCheckLocalAddress)
        localIsSafe = m.ValidateLocalIpcAddress(src,alias_size,EFalse);
    // Must check that it is safe to page, unless we are reading from unpaged ROM in which case
    // we allow it. umemget and uumemcpy do this anyway, so we just need to check if
    // localIsSafe is set.
    CHECK_PAGING_SAFE_RANGE(src, aLength);
    CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);
    COND_UNLOCK_USER_MEMORY(localIsSafe);
    // remote address is safe for direct access...
    memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
    umemget( (TAny*)alias_dest, (const TAny*)src, alias_size);
    // remote address is NOT safe for direct access, so use user permission checks when writing...
    umemput( (TAny*)alias_dest, (const TAny*)src, alias_size);
    uumemcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
#ifdef __DEBUGGER_SUPPORT__

/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
    Mmu& m=Mmu::Get();
    NKern::LockSystem();
    // Find the physical address of the page the breakpoint belongs to
    TPhysAddr physAddr = m.LinearToPhysical(aAddress,((DMemModelProcess*)aProcess)->iOsAsid);
    __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));
    if (physAddr==KPhysAddrInvalid)
        __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
        NKern::UnlockSystem();
        return KErrBadDescriptor;
    // Temporarily map the physical page
    TLinAddr tempAddr = m.MapTemp (physAddr&~m.iPageMask, aAddress);
    tempAddr |= aAddress & m.iPageMask;
    __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));
    // Set the exception handler. Make sure the boundaries cover the worst case (aSize = 4)
    TIpcExcTrap xt;
    xt.iRemoteBase=(TLinAddr)tempAddr&~3;  // word aligned
    xt.iSize=sizeof(TInt);
    TInt r=xt.Trap(NULL);
    if (r==0)
        r = WriteCode(tempAddr, aSize, aValue, aOldValue);
        xt.UnTrap();
    m.UnmapTemp();
    NKern::UnlockSystem();
    return r;
/**
@pre Calling thread must be in critical section
@pre CodeSeg mutex held
*/
TInt CodeModifier::WriteCode(TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
    // We do not want to be interrupted by e.g. an ISR that would run the altered code before the
    // IMB-Range completes. Therefore, copy the data and clean/invalidate the caches with
    // interrupts disabled.
    TInt irq=NKern::DisableAllInterrupts();
    switch (aSize)
        case 1:
            *(TUint8*) aOldValue = *(TUint8*)aAddress;
            *(TUint8*) aAddress = (TUint8)aValue;
            break;
        case 2:
            *(TUint16*) aOldValue = *(TUint16*)aAddress;
            *(TUint16*) aAddress = (TUint16)aValue;
            break;
        default:  // it is 4 otherwise
            *(TUint32*) aOldValue = *(TUint32*)aAddress;
            *(TUint32*) aAddress = (TUint32)aValue;
            break;
    CacheMaintenance::CodeChanged(aAddress, aSize, CacheMaintenance::ECodeModifier);
    NKern::RestoreInterrupts(irq);
    return KErrNone;
#endif //__DEBUGGER_SUPPORT__
#ifdef __MARM__  // assumed guard: the machine-coded ARM version is used below

// the body of ReadDesHeader is machine coded on ARM...
extern TInt ThreadDoReadAndParseDesHeader(DThread* aThread, const TAny* aSrc, TUint32* aDest);

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read and parse the header of a remote descriptor.
// Enter and return with system locked.
//
    // todo: remove use of system lock from callers, when they have been un-exported from the kernel
    NKern::UnlockSystem();
    TInt r = ThreadDoReadAndParseDesHeader(this,aSrc,(TUint32*)&aDest);
    NKern::LockSystem();
    return r;
#else  // !__MARM__ (assumed: generic C++ implementation)

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
//
// Read and parse the header of a remote descriptor.
// Enter and return with system locked.
//
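    // Bytes of header to fetch for each descriptor type (TDesType order:
    // EBufC, EPtrC, EPtr, EBuf, EBufCPtr); zero marks an invalid type.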
    static const TUint8 LengthLookup[16] = {4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};
    DMemModelThread& t = *(DMemModelThread*)TheCurrentThread;
    TInt r = KErrBadDescriptor;
    DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
    TLinAddr src = (TLinAddr)aSrc;
    const TUint32* pAlias;
    TInt alias_size;
    TInt alias_result = t.Alias(src, pP, 12, EMapAttrReadUser, (TLinAddr&)pAlias, alias_size);
    if (alias_result<0)
        return KErrBadDescriptor;  // bad permissions
    NKern::UnlockSystem();
    TUint32* dest = (TUint32*)&aDest;
    if (Kern::SafeRead(pAlias, dest, sizeof(TUint32)))
    TInt type=*dest>>KShiftDesType8;
    src += sizeof(TUint32);
    alias_size -= sizeof(TUint32);
    TInt l=LengthLookup[type];
    l -= sizeof(TUint32);  // we've already read one word
    if (l>0 && alias_size)
        // more to go - get rest or as much as is currently aliased
        TInt ll = alias_size>=l ? l : alias_size;
        if(Kern::SafeRead(pAlias, dest, ll))
        dest = (TUint32*)(TLinAddr(dest) + TLinAddr(ll));
    // more to go - need to step alias on
    alias_result = t.Alias(src, pP, l, EMapAttrReadUser, (TLinAddr&)pAlias, alias_size);
    NKern::UnlockSystem();
    r = K::ParseDesHeader(aSrc, *(TRawDesHeader*)&aDest, aDest);
    return r;

#endif  // __MARM__ (assumed)
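// Locate the shared chunk (if any) containing aAddress in this thread's
// process: walk the address-ordered chunk array back to the candidate chunk,
// then open it only if it is a shared kernel chunk and aAddress lies inside
// its maximum size.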
DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
    NKern::LockSystem();
    DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
    DMemModelProcess::SChunkInfo* pS=pP->iChunks;
    DMemModelProcess::SChunkInfo* pC=pS+pP->iChunkCount;
    while(--pC>=pS && TUint(pC->iChunk->Base())>TUint(aAddress)) {};
    if (pC>=pS)
        DMemModelChunk* chunk = pC->iChunk;
        if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
            TInt offset = (TInt)aAddress-(TInt)chunk->Base();
            if(TUint(offset)<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
                aOffset = offset;
                NKern::UnlockSystem();
                return chunk;
    NKern::UnlockSystem();
    return NULL;
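// DMA helpers: resolve and prepare the pages spanning [aLinAddr, aLinAddr+aSize)
// in this thread's address space, returning their physical addresses in
// aPhysicalPageList; ReleaseMemoryFromDMA undoes the preparation.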
TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
    TInt asid = ((DMemModelProcess*)iOwningProcess)->iOsAsid;
    Mmu& m=(Mmu&)*MmuBase::TheMmu;
    return m.PreparePagesForDMA((TLinAddr)aLinAddr, aSize, asid, aPhysicalPageList);

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
    TInt pageCount = (((TInt)aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
    Mmu& m=(Mmu&)*MmuBase::TheMmu;
    return m.ReleasePagesFromDMA(aPhysicalPageList, pageCount);