// Update contrib.
1 // Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
14 // e32\memmodel\epoc\moving\mprocess.cpp
19 #include "cache_maintenance.h"
22 #define iMState iWaitLink.iSpare1
24 _LIT(KDollarDat,"$DAT");
25 _LIT(KLitDllDollarData,"DLL$DATA");
27 /********************************************
29 ********************************************/
30 void DMemModelProcess::Destruct()
33 if (this==TheCurrentAddressSpace)
34 TheCurrentAddressSpace=NULL;
35 if (this==TheCurrentVMProcess)
36 TheCurrentVMProcess=NULL;
37 if (this==TheCurrentDataSectionProcess)
38 TheCurrentDataSectionProcess=NULL;
39 if (this==TheCompleteDataSectionProcess)
40 TheCompleteDataSectionProcess=NULL;
41 NKern::UnlockSystem();
45 TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
48 DMemModelChunk* pC=NULL;
49 TInt r=GetNewChunk(pC,aInfo);
56 if (aInfo.iForceFixed || iAttributes & DMemModelProcess::EFixedAddress)
57 pC->iAttributes |= DMemModelChunk::EFixedAddress;
58 if (!aInfo.iGlobal && (iAttributes & DMemModelProcess::EPrivate)!=0)
59 pC->iAttributes |= DMemModelChunk::EPrivate;
60 if (pC->iChunkType==EDll || pC->iChunkType==EUserCode || pC->iChunkType==EUserSelfModCode || pC->iChunkType==EKernelCode)
61 pC->iAttributes |= (DMemModelChunk::EFixedAddress|DMemModelChunk::ECode);
62 pC->iOwningProcess=(aInfo.iGlobal)?NULL:this;
64 if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
66 if (aInfo.iRunAddress!=0)
67 pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
68 if (aInfo.iPreallocated==0)
70 if (pC->iAttributes & DChunk::EDisconnected)
72 r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
74 else if (pC->iAttributes & DChunk::EDoubleEnded)
76 r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
80 r=pC->Adjust(aInfo.iInitialTop);
83 if (r==KErrNone && pC->iHomeRegionBase==0 && (pC->iAttributes&DMemModelChunk::EFixedAddress)!=0)
86 aRunAddr=(TLinAddr)pC->Base();
89 if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
91 if (pC->iAttributes & DMemModelChunk::ECode)
92 Mmu::Get().SyncCodeMappings();
93 if (pC->iChunkType!=EUserCode)
98 r=AddChunk(pC,aRunAddr,EFalse);
103 aRunAddr=(TLinAddr)pC->Base(); // code chunks always fixed address
107 pC->iDestroyedDfc = aInfo.iDestroyedDfc;
111 pC->Close(NULL); // NULL since chunk can't have been added to process
115 TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
117 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoCreate %O",this));
120 iAttributes=ESupervisor|EFixedAddress|EPrivate;
121 else if (aInfo.iAttr & ECodeSegAttFixed)
122 iAttributes=EFixedAddress|EPrivate;
125 if ((iAttributes & ESupervisor)==0 && (iAttributes & EFixedAddress)!=0)
127 CheckForFixedAccess();
132 TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
134 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
135 TInt dataBssSize=Mmu::RoundToPageSize(aInfo.iTotalDataSize);
136 TInt maxSize=dataBssSize+PP::MaxStackSpacePerProcess;
137 TBool fixed=(iAttributes & EFixedAddress);
139 __KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, chunk max size %x",dataBssSize,maxSize));
141 SChunkCreateInfo cinfo;
142 cinfo.iGlobal=EFalse;
143 cinfo.iAtt=TChunkCreate::EDisconnected;
144 cinfo.iForceFixed=EFalse;
145 cinfo.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
146 cinfo.iType=EUserData;
147 cinfo.iMaxSize=maxSize;
148 cinfo.iInitialBottom=0;
149 cinfo.iInitialTop=dataBssSize;
150 cinfo.iPreallocated=0;
151 cinfo.iName.Set(KDollarDat);
153 if (fixed && dataBssSize!=0 && aInfo.iCodeLoadAddress)
155 const TRomImageHeader& rih=*(const TRomImageHeader*)aInfo.iCodeLoadAddress;
156 cinfo.iRunAddress=rih.iDataBssLinearBase;
160 TInt r=NewChunk((DChunk*&)iDataBssStackChunk,cinfo,iDataBssRunAddress);
164 TInt DMemModelProcess::AddChunk(DChunk* aChunk,TBool isReadOnly)
166 DMemModelChunk* pC=(DMemModelChunk*)aChunk;
167 TInt r=WaitProcessLock();
171 r=ChunkIndex(pC,pos);
172 TLinAddr dataSectionBase=0;
173 if (r==0) // Found the chunk in this process, just up its count
175 iChunks[pos].iAccessCount++;
176 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[pos].iAccessCount));
180 r=AddChunk(pC,dataSectionBase,isReadOnly);
183 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));
187 void FlushBeforeChunkMove(DMemModelChunk* aChunk)
190 TUint32 ff=Mmu::EFlushDMove|Mmu::EFlushDPermChg;
191 if (aChunk->iAttributes & DMemModelChunk::ECode) // assumption here that code chunks don't move
192 ff |= Mmu::EFlushIPermChg;
196 TInt DMemModelProcess::AddChunk(DMemModelChunk* aChunk, TLinAddr& aDataSectionBase, TBool isReadOnly)
199 // Must hold the process $LOCK mutex before calling this
201 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (for first time)",aChunk,this));
202 TInt r=AllocateDataSectionBase(*((DMemModelChunk*)aChunk),(TUint&)aDataSectionBase);
206 if (iNumChunks==KMaxChunksInProcess)
207 return KErrOverflow; // too many chunks in the process
209 SChunkInfo *pC=iChunks;
210 SChunkInfo *pE=pC+iNumChunks-1;
212 while(pE>=pC && TUint(pE->iDataSectionBase)>TUint(aDataSectionBase))
218 pC->iDataSectionBase=aDataSectionBase;
219 pC->isReadOnly=isReadOnly;
224 if(!(iAttributes&ESupervisor))
226 TInt attribs=aChunk->iAttributes;
227 if (!(attribs&DMemModelChunk::EFixedAddress))
230 iAttributes |= EMoving;
233 if (attribs&DMemModelChunk::EFixedAccess)
235 NKern::UnlockSystem();
236 AddFixedAccessChunk(aChunk);
237 goto done; // FINISHED
240 iAttributes |= EVariableAccess;
241 if (attribs & DMemModelChunk::ECode)
243 iNumNonFixedAccessCodeChunks++;
244 iAttributes |= EVariableCode;
246 if (++iNumNonFixedAccessChunks==1)
248 NKern::UnlockSystem();
249 DoAttributeChange(); // change process from fixed to variable access
253 if (this!=TheCurrentThread->iOwningProcess)
255 // Adding chunk to another process
256 if (this==TheCurrentDataSectionProcess && !(attribs&DMemModelChunk::EFixedAddress))
257 TheCompleteDataSectionProcess=NULL; // just set partial state change flag and leave chunk alone
258 if (this==TheCurrentAddressSpace)
259 TheCurrentAddressSpace=NULL;
260 NKern::UnlockSystem();
261 goto done; // FINISHED
264 // Adding chunk to currently active user process
266 TheCurrentAddressSpace=NULL;
268 TUint32 ff=0; // flush flags
269 DMemModelChunk::TChunkState state=isReadOnly?DMemModelChunk::ERunningRO:DMemModelChunk::ERunningRW;
270 if (attribs&DMemModelChunk::EFixedAddress)
272 // Fixed address chunk, just change permissions
273 ff|=aChunk->ApplyTopLevelPermissions(state);
275 else if (this==TheCurrentDataSectionProcess)
278 // This process is already in the data section, so just move the chunk down.
279 // Must do flushing first
280 TheCompleteDataSectionProcess=NULL;
281 FlushBeforeChunkMove(aChunk);
282 aChunk->MoveToRunAddress(aDataSectionBase,state); // idempotent
283 TheCompleteDataSectionProcess=this;
285 else if (iNumMovingChunks==1)
287 // The first moving chunk being added to a process with the data section occupied by another process.
288 // This is the problematic case - we must displace the other process from the data section.
289 // However we must allow preemption after each chunk is moved. Note that if a reschedule does
290 // occur the necessary chunk moves will have been done by the scheduler, so we can finish
292 // Must do cache flushing first
293 m.GenericFlush(Mmu::EFlushDMove);
294 if (TheCurrentDataSectionProcess)
296 if (TheCurrentDataSectionProcess->iAttributes & EVariableCode)
297 ff |= Mmu::EFlushIPermChg;
298 SChunkInfo* pOtherProcChunks=TheCurrentDataSectionProcess->iChunks;
299 SChunkInfo* pEndOtherProcChunks=pOtherProcChunks+TheCurrentDataSectionProcess->iNumChunks;
300 NKern::FlashSystem();
301 // if a reschedule occurs, TheCompleteDataSectionProcess will become equal to this
302 while (TheCompleteDataSectionProcess!=this && pOtherProcChunks<pEndOtherProcChunks)
304 DMemModelChunk *pChunk=pOtherProcChunks->iChunk;
305 pChunk->MoveToHomeSection();
307 TheCompleteDataSectionProcess=NULL;
308 NKern::FlashSystem();
311 if (TheCompleteDataSectionProcess!=this)
313 if (attribs & DMemModelChunk::ECode)
314 ff |= Mmu::EFlushIPermChg;
315 aChunk->MoveToRunAddress(aDataSectionBase,state);
316 TheCurrentDataSectionProcess=this;
317 TheCompleteDataSectionProcess=this;
320 TheCurrentAddressSpace=this;
321 TheCurrentVMProcess=this;
326 NKern::UnlockSystem();
328 __KTRACE_OPT(KPROC,Kern::Printf("Added array entry for %x",aDataSectionBase));
329 __KTRACE_OPT(KPROC,Kern::Printf("Chunks maxsize %x",pC->iChunk->MaxSize()));
330 __DEBUG_EVENT(EEventUpdateProcess, this);
334 TInt DMemModelProcess::AllocateDataSectionBase(DMemModelChunk& aChunk, TUint& aBase)
336 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AllocateDataSectionBase"));
338 if ((aChunk.iAttributes & DMemModelChunk::EPrivate) && this!=aChunk.iOwningProcess)
339 return KErrAccessDenied;
340 if (aChunk.iAttributes & DMemModelChunk::EFixedAddress)
342 aBase=aChunk.iHomeRegionBase;
348 switch (aChunk.iChunkType)
351 base=m.iDataSectionBase;
352 maxBase=m.iDllDataBase;
355 case EUserSelfModCode:
356 MM::Panic(MM::EUserCodeNotFixed);
359 aBase=m.iDllDataBase;
362 __KTRACE_OPT(KPANIC,Kern::Printf("DMemModelProcess::AllocateDataSectionBase BadChunkType %d",aChunk.iChunkType));
363 return KErrAccessDenied;
366 TLinAddr lastBase=base;
367 SChunkInfo *pS=iChunks;
368 SChunkInfo *pE=pS+iNumChunks;
371 TLinAddr thisBase=pS->iDataSectionBase;
372 __KTRACE_OPT(KPROC,Kern::Printf("Chunk already at %x",thisBase));
373 if (thisBase>=maxBase)
375 if (thisBase>=base) // Within the range we are allocating
377 TInt gap=thisBase-lastBase;
378 if (gap>=aChunk.MaxSize())
380 lastBase=thisBase+pS->iChunk->MaxSize();
384 if (lastBase+aChunk.MaxSize()>maxBase)
386 __KTRACE_OPT(KPROC,Kern::Printf("ERROR - none allocated, out of memory"));
390 __KTRACE_OPT(KPROC,Kern::Printf("User allocated %x",aBase));
394 TUint8* DMemModelProcess::DataSectionBase(DMemModelChunk* aChunk)
396 // this can't be called after $LOCK is deleted
397 Kern::MutexWait(*iProcessLock);
399 TInt r=ChunkIndex(aChunk,pos);
400 if (r==0) // Found the chunk
402 TUint8* answer=((TUint8*)iChunks[pos].iDataSectionBase);
403 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DataSectionBase %x",answer));
404 Kern::MutexSignal(*iProcessLock);
407 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DataSectionBase chunk %08x not present in %08x",aChunk,this));
408 Kern::MutexSignal(*iProcessLock);
412 void DMemModelProcess::DoRemoveChunk(TInt aIndex)
414 // Must be called with process $LOCK mutex held
415 __DEBUG_EVENT(EEventUpdateProcess, this);
416 DMemModelChunk* chunk = iChunks[aIndex].iChunk;
419 TInt attribs=chunk->iAttributes;
420 __KTRACE_OPT(KPROC,Kern::Printf("Removing Chunk attribs=%08x, Process attribs=%08x",attribs,iAttributes));
421 if (!(attribs&DMemModelChunk::EFixedAccess))
423 // Must leave chunk in process chunk list until we have flushed the cache if necessary
424 if (this==TheCurrentVMProcess && (attribs&DMemModelChunk::EFixedAddress))
426 TUint32 ff=chunk->ApplyTopLevelPermissions(DMemModelChunk::ENotRunning);
428 // the system must now remain locked until the chunk is removed from the process chunk list
430 if (this==TheCurrentDataSectionProcess && !(attribs&DMemModelChunk::EFixedAddress))
432 // must do cache flush first
433 FlushBeforeChunkMove(chunk); // preemptible, but on return cache is free of chunk data
434 chunk->MoveToHomeSection();
435 // the system must now remain locked until the chunk is removed from the process chunk list
439 // Remove the chunk from the process chunk list
440 SChunkInfo *pD=iChunks+aIndex;
441 SChunkInfo *pS=iChunks+aIndex+1;
442 SChunkInfo *pE=iChunks+iNumChunks;
447 // Update the process attribute flags
448 if (!(attribs&DMemModelChunk::EFixedAddress))
450 if (--iNumMovingChunks==0)
451 iAttributes &= ~EMoving;
453 if (!(attribs&DMemModelChunk::EFixedAccess))
455 if ((attribs&DMemModelChunk::ECode) && --iNumNonFixedAccessCodeChunks==0)
456 iAttributes &= ~EVariableCode;
457 if (this==TheCurrentDataSectionProcess && !(iAttributes&EMoving))
459 TheCurrentDataSectionProcess=NULL;
460 TheCompleteDataSectionProcess=NULL;
462 if (--iNumNonFixedAccessChunks==0)
464 iAttributes &= ~EVariableAccess;
465 if (this==TheCurrentVMProcess)
467 TheCurrentVMProcess=NULL;
468 TheCurrentAddressSpace=NULL;
470 NKern::UnlockSystem();
471 DoAttributeChange(); // change process from variable to fixed access
474 NKern::UnlockSystem();
478 NKern::UnlockSystem();
479 RemoveFixedAccessChunk(chunk);
484 Final chance for process to release resources during its death.
486 Called with process $LOCK mutex held (if it exists).
487 This mutex will not be released before it is deleted.
488 I.e. no other thread will ever hold the mutex again.
490 void DMemModelProcess::FinalRelease()
492 // Clean up any left over chunks (such as SharedIo buffers)
497 void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
499 // note that this can't be called after the process $LOCK mutex has been deleted
500 // since it can only be called by a thread in this process doing a handle close or
501 // dying, or by the process handles array being deleted due to the process dying,
502 // all of which happen before $LOCK is deleted.
503 __KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::RemoveChunk %08x from %08x",aChunk,this));
504 Kern::MutexWait(*iProcessLock);
506 TInt r=ChunkIndex(aChunk,pos);
507 __KTRACE_OPT(KPROC,if(r) Kern::Printf("Chunk lookup failed with %d",r));
508 if (r==0) // Found the chunk
510 __KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[pos].iAccessCount));
511 if (--iChunks[pos].iAccessCount==0)
514 Kern::MutexSignal(*iProcessLock);
517 TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk,TInt& aPos)
520 return(KErrNotFound);
522 SChunkInfo *pC=iChunks;
523 SChunkInfo *pE=pC+iNumChunks;
524 while(pC<pE && (pC->iChunk!=aChunk))
535 void DMemModelProcess::RemoveDllData()
537 // Call with CodeSegLock held
540 Kern::SafeClose((DObject*&)iDllDataChunk, this);
543 TInt DMemModelProcess::CreateDllDataChunk()
545 // Call with CodeSegLock held
548 __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CreateDllDataChunk",this));
552 c.iAtt=TChunkCreate::EDisconnected;
553 c.iForceFixed=EFalse;
554 c.iOperations=SChunkCreateInfo::EAdjust|SChunkCreateInfo::EAdd;
558 c.iMaxSize=(iAttributes&EFixedAddress) ? 1 : m.iMaxDllDataSize; // minimal size for fixed processes
559 c.iName.Set(KLitDllDollarData);
564 return NewChunk((DChunk*&)iDllDataChunk,c,runAddr);
567 void DMemModelProcess::FreeDllDataChunk()
569 iDllDataChunk->Close(this);
573 TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize)
575 __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
578 r=CreateDllDataChunk();
582 TLinAddr dll_data_base=(iAttributes & EFixedAddress) ? (TLinAddr)iDllDataChunk->Base()
583 : TLinAddr(m.iDllDataBase);
584 TInt offset=aBase-dll_data_base;
585 __ASSERT_ALWAYS(TUint32(offset)<TUint32(iDllDataChunk->iMaxSize),MM::Panic(MM::ECommitInvalidDllDataAddress));
586 r=iDllDataChunk->Commit(offset, aSize);
587 if (r!=KErrNone && iDllDataChunk->iSize==0)
590 __KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));
594 void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
596 __KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
598 TLinAddr dll_data_base=(iAttributes & EFixedAddress) ? (TLinAddr)iDllDataChunk->Base()
599 : TLinAddr(m.iDllDataBase);
600 TInt offset=aBase-dll_data_base;
601 TInt r=iDllDataChunk->Decommit(offset, aSize);
602 __ASSERT_ALWAYS(r==KErrNone,MM::Panic(MM::EDecommitInvalidDllDataAddress));
603 if (iDllDataChunk->iSize==0)
607 TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
609 DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
610 __KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
611 TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
612 if (kernel_only && !(iAttributes&ESupervisor))
613 return KErrNotSupported;
614 if (seg.iAttr&ECodeSegAttKernel || seg.iDataAllocBase==-1)
615 return KErrNone; // no extra mappings needed for kernel code or code with fixed data address
619 TInt total_data_size;
621 seg.GetDataSizeAndBase(total_data_size, data_base);
622 if (r==KErrNone && total_data_size)
624 TInt size=Mmu::RoundToPageSize(total_data_size);
625 r=CommitDllData(data_base, size);
631 void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
633 DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
634 __KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
635 if (seg.iAttr&ECodeSegAttKernel || seg.iDataAllocBase==-1)
636 return; // no extra mappings needed for kernel code or code with fixed data address
639 TInt total_data_size;
641 seg.GetDataSizeAndBase(total_data_size, data_base);
643 DecommitDllData(data_base, Mmu::RoundToPageSize(total_data_size));
647 TInt DMemModelProcess::NewShPool(DShPool*& /* aPool */, TShPoolCreateInfo& /* aInfo */)
649 return KErrNotSupported;
652 TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
654 // Read from the thread's process.
655 // aSrc is run address of memory to read. The memory is in aThread's address space.
656 // aDest is the address of destination. The memory is in the current process's address space.
657 // aExcTrap, exception trap object to be updated if the actual memory access is performed on another memory area. It happens
658 // when reading is performed in chunks or if home adress is read instead of the provided run address.
659 // Enter and return with system locked.
661 const TUint8* pS=(const TUint8*)aSrc;
662 TUint8* pD=(TUint8*)aDest;
663 const TUint8* pC=NULL;
665 TBool suspect=EFalse;
666 DThread* pT=TheCurrentThread;
671 suspect=((aFlags & KCheckLocalAddress) && !MM::CurrentAddress(pT,pD,aLength,ETrue));
674 pC=(const TUint8*)MM::CurrentAddress(this,pS,aLength,EFalse);
675 __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-[%08x::%08x]%08x+%x",pD,this,pS,pC,aLength));
677 return KErrBadDescriptor;
679 TInt len=Min(aLength,K::MaxMemCopyInOneGo);
682 aExcTrap->iSize = (len + 2*(sizeof(TInt32)-1));//+6 is for the worst case. We do not have to be precise here.
683 aExcTrap->iRemoteBase = (TLinAddr)pC & ~(sizeof(TInt32)-1);
684 if (aExcTrap->iLocalBase)
685 aExcTrap->iLocalBase = (TLinAddr)pD & ~(sizeof(TInt32)-1);
686 __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead exc. update: %08x %08x %08x",aExcTrap->iLocalBase,aExcTrap->iRemoteBase,aExcTrap->iSize));
689 #ifdef __DEMAND_PAGING__
690 XTRAP_PAGING_START(check);
694 suspect?(void)umemput(pD,pC,len):(void)memcpy(pD,pC,len);
696 #ifdef __DEMAND_PAGING__
699 return check; // paging error caused by bad client (I.e. 'this' thread was bad)
702 __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead paging trap, suspect %d, dest %08x, source %08x, length %d\n", suspect, pD, pC, len));
712 check=NKern::FlashSystem();
717 TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* aOriginatingThread, TIpcExcTrap* aExcTrap)
719 // Write to the thread's process.
720 // aDest is run address of memory to write. It resides in this thread's address space.
721 // aSrc is address of the source buffer. It resides in the current process's address space.
722 // aOriginatingThread is the thread on behalf of which this operation is performed (eg client of device driver).
723 // Enter and return with system locked
724 // aExcTrap, exception trap object to be updated if the actual memory access is performed on another memory area. It happens
725 // when reading is performed in chunks or if home adress is read instead of the provided run address.
728 TUint8* pD=(TUint8*)aDest;
729 const TUint8* pS=(const TUint8*)aSrc;
732 TBool suspect=EFalse;
733 DThread* pT=TheCurrentThread;
734 DThread* pO=aOriginatingThread;
737 DProcess* pF=K::TheFileServerProcess;
738 TBool special=(iOwningProcess==pF && pO->iOwningProcess==pF);
743 suspect=((aFlags & KCheckLocalAddress) && !MM::CurrentAddress(pT,pS,aLength,EFalse));
746 pC=(TUint8*)MM::CurrentAddress(this,pD,aLength,ETrue);
747 __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead [%08x::%08x]%08x<-%08x+%x",this,pD,pC,pS,aLength));
753 return KErrBadDescriptor;
756 TInt len=Min(aLength,K::MaxMemCopyInOneGo);
759 aExcTrap->iSize = (len + 2*(sizeof(TInt32)-1));//+6 is for the worst case. We do not have to be precise here.
760 aExcTrap->iRemoteBase = (TLinAddr)pC & ~(sizeof(TInt32)-1);
761 if (aExcTrap->iLocalBase)
762 aExcTrap->iLocalBase = (TLinAddr)pS & ~(sizeof(TInt32)-1);
763 __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite exc. update %08x %08x %08x",aExcTrap->iLocalBase,aExcTrap->iRemoteBase,aExcTrap->iSize));
766 #ifdef __DEMAND_PAGING__
767 XTRAP_PAGING_START(check);
768 // Must check that it is safe to page, unless we are reading from unpaged ROM in which case
769 // we allow it. umemget does this anyway, so we just need to check if suspect is not set.
772 CHECK_PAGING_SAFE_RANGE((TLinAddr)aSrc, aLength);
773 CHECK_DATA_PAGING_SAFE_RANGE((TLinAddr)aDest, aLength);
777 suspect?(void)umemget(pC,pS,len):(void)memcpy(pC,pS,len);
779 #ifdef __DEMAND_PAGING__
782 return check; // paging error caused by bad client (I.e. 'this' thread was bad)
785 __KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite paging trap, suspect %d, dest %08x, src %08x, length %d\n", suspect, pC, pD, len));
795 check=NKern::FlashSystem();
800 #ifdef __DEBUGGER_SUPPORT__
802 TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
804 //Set exception handler. Make sure the boundaries cover the worst case (aSize = 4)
807 xt.iRemoteBase=(TLinAddr)aAddress&~3; //word aligned.
808 xt.iSize=sizeof(TInt);
811 TInt r=xt.Trap(NULL);
814 r = WriteCode(aAddress, aSize, aValue, aOldValue);
817 NKern::UnlockSystem();
821 TInt CodeModifier::WriteCode(TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
823 TUint userChunkBase = (TUint)MM::UserCodeChunk->Base();
824 TRomHeader romHeader = Epoc::RomHeader();
826 if (!((aAddress >= romHeader.iRomBase ) && (aAddress < (romHeader.iRomBase + romHeader.iUncompressedSize)))) //if not in ROM
827 if ( (aAddress<userChunkBase) || (aAddress) > (userChunkBase+MM::UserCodeChunk->MaxSize()) ) //and not in non-XIP code
828 return KErrBadDescriptor;
830 // if page was moved by defrag there may be a cache line with the
831 // wrong, old physical address, so we must invalidate this first.
832 InternalCache::Invalidate(KCacheSelectD, (TLinAddr)aAddress, 4);
834 //Copy data and clean/invalidate caches with interrupts disabled.
835 TInt irq=NKern::DisableAllInterrupts();
839 *(TUint8*) aOldValue = *(TUint8*)aAddress;
840 *(TUint8*) aAddress = (TUint8)aValue;
843 *(TUint16*) aOldValue = *(TUint16*)aAddress;
844 *(TUint16*) aAddress = (TUint16)aValue;
846 default://It is 4 otherwise
847 *(TUint32*) aOldValue = *(TUint32*)aAddress;
848 *(TUint32*) aAddress = (TUint32)aValue;
851 CacheMaintenance::CodeChanged(aAddress, aSize, CacheMaintenance::ECodeModifier);
852 NKern::RestoreInterrupts(irq);
856 #endif //__DEBUGGER_SUPPORT__
858 TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
860 // Read the header of a remote descriptor.
861 // Enter and return with system locked
864 TInt r=KErrBadDescriptor;
865 DThread* thread = TheCurrentThread;
866 TRawDesHeader& header = (TRawDesHeader&)aDest;
868 #ifdef __DEMAND_PAGING__
871 XTRAP_PAGING_START(pagingFault);
873 thread->iIpcClient = this;
876 const TUint32* pS=(const TUint32*)MM::CurrentAddress(this,aSrc,sizeof(TDesC8),EFalse);
877 if (pS && KErrNone==Kern::SafeRead(pS,&header[0],sizeof(TUint32)))
879 TInt type=header[0]>>KShiftDesType8;
880 static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};
881 TInt len=LengthLookup[type];
882 if(len>(TInt)sizeof(TUint32))
884 if(KErrNone==Kern::SafeRead(pS+1,&header[1],len-sizeof(TUint32)))
886 // else, bad descriptor
890 // else, bad descriptor
893 #ifdef __DEMAND_PAGING__
894 thread->iIpcClient = NULL;
897 return pagingFault; // paging error caused by bad client (I.e. 'this' thread was bad)
902 return K::ParseDesHeader(aSrc, header, aDest);
905 DMemModelChunk* ChunkFromAddress(DThread* aThread, const TAny* aAddress)
907 DMemModelProcess* pP = (DMemModelProcess*)aThread->iOwningProcess;
908 DMemModelProcess::SChunkInfo* pS=pP->iChunks;
909 DMemModelProcess::SChunkInfo* pC=pS+pP->iNumChunks;
910 while(--pC>=pS && TUint(pC->iDataSectionBase)>TUint(aAddress)) {};
917 Open a shared chunk in which a remote address range is located.
919 DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
923 DMemModelProcess* pP = (DMemModelProcess*)iOwningProcess;
924 DMemModelProcess::SChunkInfo* pS=pP->iChunks;
925 DMemModelProcess::SChunkInfo* pC=pS+pP->iNumChunks;
926 while(--pC>=pS && TUint(pC->iDataSectionBase)>TUint(aAddress)) {};
929 DMemModelChunk* chunk = pC->iChunk;
930 if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
932 TInt offset = (TInt)aAddress-(TInt)chunk->Base();
933 if(TUint(offset)<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
936 NKern::UnlockSystem();
941 NKern::UnlockSystem();
945 TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
947 if ((iOwningProcess->iAttributes & DMemModelProcess::EFixedAddress )==0)
948 return KErrNotSupported;
949 Mmu& m=(Mmu&)*MmuBase::TheMmu;
950 return m.PreparePagesForDMA((TLinAddr)aLinAddr, aSize, aPhysicalPageList);
953 TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
955 if ((iOwningProcess->iAttributes & DMemModelProcess::EFixedAddress )==0)
956 return KErrNotSupported;
957 TInt pageCount = (((TInt)aLinAddr & KPageMask) + aSize + KPageMask) >> KPageShift;
958 Mmu& m=(Mmu&)*MmuBase::TheMmu;
959 return m.ReleasePagesFromDMA(aPhysicalPageList, pageCount);