Update contrib.
// Copyright (c) 1994-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.

#include "mmu/maddrcont.h"
#include <kernel/cache.h>

#define iMState iWaitLink.iSpare1

NFastMutex TheSharedChunkLock;

#ifndef _DEBUG
const TInt KChunkGranularity = 4; // amount to grow the SChunkInfo list by
const TInt KMaxChunkInfosInOneGo = 100; // maximum number of SChunkInfo objects to copy with the System Lock held
#else
const TInt KChunkGranularity = 1;
const TInt KMaxChunkInfosInOneGo = 1;
#endif
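// The debug values are deliberately minimal so that the array grow and shuffle
// paths in DoAddChunk()/DoRemoveChunk() below are exercised on almost every
// operation: every insertion reallocates the SChunkInfo list, and the System
// Lock is flashed after every copied entry.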
/********************************************
 * Process
 ********************************************/

DMemModelProcess::~DMemModelProcess()
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelProcess destruct"));

void DMemModelProcess::Destruct()
	__ASSERT_ALWAYS(!iOsAsidRefCount, MM::Panic(MM::EProcessDestructOsAsidRemaining));
	__ASSERT_ALWAYS(!iChunkCount, MM::Panic(MM::EProcessDestructChunksRemaining));
	__ASSERT_ALWAYS(!iSharedChunks || iSharedChunks->Count()==0, MM::Panic(MM::EProcessDestructChunksRemaining));

TInt DMemModelProcess::TryOpenOsAsid()
	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, 1, 0))
		return iOsAsid;
	return KErrDied;

void DMemModelProcess::CloseOsAsid()
	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
		{// Last reference has been closed so free the asid.
		MM::AddressSpaceFree(iOsAsid);

void DMemModelProcess::AsyncCloseOsAsid()
	if (__e32_atomic_tas_ord32(&iOsAsidRefCount, 1, -1, 0) == 1)
		{// Last reference has been closed so free the asid asynchronously.
		MM::AsyncAddressSpaceFree(iOsAsid);
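// Note on the os asid reference counting above: __e32_atomic_tas_ord32(&a,t,u,v)
// atomically performs "old=*a; if (old>=t) *a+=u; else *a+=v; return old".
// TryOpenOsAsid() therefore only increments the count while it is already
// non-zero (a dying process can't be revived), and the Close variants free the
// address space only when the old value was exactly 1, i.e. when the caller
// held the last reference.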
TInt DMemModelProcess::NewChunk(DChunk*& aChunk, SChunkCreateInfo& aInfo, TLinAddr& aRunAddr)
	DMemModelChunk* pC=new DMemModelChunk;
	TChunkType type = aInfo.iType;
	TInt r=pC->SetAttributes(aInfo);
	pC->iOwningProcess=(pC->iAttributes&DMemModelChunk::EPublic)?NULL:this;
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdjust))
		if (aInfo.iRunAddress!=0)
			pC->SetFixedAddress(aInfo.iRunAddress,aInfo.iPreallocated);
		if (aInfo.iPreallocated==0 && aInfo.iInitialTop!=0)
			if (pC->iAttributes & DChunk::EDisconnected)
				r=pC->Commit(aInfo.iInitialBottom,aInfo.iInitialTop-aInfo.iInitialBottom);
			else if (pC->iAttributes & DChunk::EDoubleEnded)
				r=pC->AdjustDoubleEnded(aInfo.iInitialBottom,aInfo.iInitialTop);
			else
				r=pC->Adjust(aInfo.iInitialTop);
	if (r==KErrNone && (aInfo.iOperations & SChunkCreateInfo::EAdd))
		r = AddChunk(pC, EFalse);
	if(pC->iKernelMapping)
		aRunAddr = (TLinAddr)MM::MappingBase(pC->iKernelMapping);
	pC->iDestroyedDfc = aInfo.iDestroyedDfc;
	pC->Close(NULL); // NULL since chunk can't have been added to process
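// Chunk creation sequence: allocate the DMemModelChunk, apply the creation
// attributes, optionally perform the initial commit (disconnected, double-ended
// or normal heap semantics as requested), then map the chunk into this process
// via AddChunk(). On failure the creation reference is dropped with Close(NULL),
// which is safe because the chunk was never added to the process.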
/**
Determine whether this process should be data paged.

@param aInfo A reference to the create info for this process.
*/
TInt DMemModelProcess::SetPaging(const TProcessCreateInfo& aInfo)
	TUint pagedFlags = aInfo.iFlags & TProcessCreateInfo::EDataPagingMask;
	// If both the KImageDataPaged and KImageDataUnpaged flags are present then the image is corrupt.
	// Check this first to ensure that it is always verified.
	if (pagedFlags == TProcessCreateInfo::EDataPagingMask)
	if (aInfo.iAttr & ECodeSegAttKernel ||
		!(K::MemModelAttributes & EMemModelAttrDataPaging))
		{// A kernel process can't be data paged, and nothing can be when no data paging device is installed.
	TUint dataPolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigDataPagingPolicyMask;
	if (dataPolicy == EKernelConfigDataPagingPolicyAlwaysPage)
		iAttributes |= EDataPaged;
	if (dataPolicy == EKernelConfigDataPagingPolicyNoPaging)
		{// No paging allowed so just return.
	if (pagedFlags == TProcessCreateInfo::EDataPaged)
		iAttributes |= EDataPaged;
	if (pagedFlags == TProcessCreateInfo::EDataUnpaged)
		{// Paging explicitly disabled for this image so just return.
	// Neither paged nor unpaged is set, so use the default paging policy.
	// dataPolicy must be EKernelConfigDataPagingPolicyDefaultUnpaged or
	// EKernelConfigDataPagingPolicyDefaultPaged.
	__NK_ASSERT_DEBUG(pagedFlags == TProcessCreateInfo::EDataPagingUnspecified);
	__NK_ASSERT_DEBUG(dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged ||
					  dataPolicy == EKernelConfigDataPagingPolicyDefaultUnpaged);
	if (dataPolicy == EKernelConfigDataPagingPolicyDefaultPaged)
		iAttributes |= EDataPaged;
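	// Summary of the decision order above: both paging flags set => corrupt
	// image; kernel process or no data paging device => unpaged; an
	// AlwaysPage/NoPaging device policy overrides the image flags; otherwise an
	// explicit EDataPaged/EDataUnpaged image flag wins; failing all that, the
	// DefaultPaged/DefaultUnpaged policy decides.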
TInt DMemModelProcess::DoCreate(TBool aKernelProcess, TProcessCreateInfo& aInfo)
	// Required so we can detect whether a process has been created and added
	// to its object container by checking for iContainerID!=EProcess.
	__ASSERT_COMPILE(EProcess != 0);
	__KTRACE_OPT(KPROC,Kern::Printf(">DMemModelProcess::DoCreate %O",this));
	iAttributes |= ESupervisor;
	iOsAsid = KKernelOsAsid;
	r = MM::AddressSpaceAlloc(iPageDir);
	{// Add this process's own reference to its os asid.
	__e32_atomic_store_ord32(&iOsAsidRefCount, 1);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);
#endif
	__KTRACE_OPT(KPROC,Kern::Printf("OS ASID=%d, PD=%08x",iOsAsid,iPageDir));
	__KTRACE_OPT(KPROC,Kern::Printf("<DMemModelProcess::DoCreate %d",r));

TInt DMemModelProcess::CreateDataBssStackArea(TProcessCreateInfo& aInfo)
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::CreateDataBssStackArea %O",this));
	TInt dataBssSize = MM::RoundToPageSize(aInfo.iTotalDataSize);
	DMemoryObject* memory;
	TMemoryObjectType memoryType = iAttributes&EDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
	r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(dataBssSize));
	r = MM::MemoryAlloc(memory,0,MM::BytesToPages(dataBssSize));
	r = MM::MappingNew(iDataBssMapping,memory,EUserReadWrite,OsAsid());
	MM::MemoryDestroy(memory);
	iDataBssRunAddress = MM::MappingBase(iDataBssMapping);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
#endif
	__KTRACE_OPT(KPROC,Kern::Printf("DataBssSize=%x, ",dataBssSize));

TInt DMemModelProcess::AttachExistingCodeSeg(TProcessCreateInfo& aInfo)
	TInt r = DEpocProcess::AttachExistingCodeSeg(aInfo);
	// allocate virtual memory for the EXE's codeseg...
	DMemModelCodeSeg* seg = (DMemModelCodeSeg*)iTempCodeSeg;
	if(seg->iAttr&ECodeSegAttAddrNotUnique)
		TUint codeSize = seg->iSize;
		TLinAddr codeAddr = seg->RamInfo().iCodeRunAddr;
		TBool isDemandPaged = seg->iAttr&ECodeSegAttCodePaged;
		// Allocate virtual memory for the code seg using the os asid.
		// There is no need to open a reference on the os asid because the process
		// is not fully created yet, so it can't die and free the os asid.
		r = MM::VirtualAlloc(OsAsid(),codeAddr,codeSize,isDemandPaged);
		iCodeVirtualAllocSize = codeSize;
		iCodeVirtualAllocAddress = codeAddr;
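// Note: the virtual region for the EXE's code is only reserved here and
// recorded in iCodeVirtualAllocAddress/iCodeVirtualAllocSize; the mapping
// itself is created later by MapUserRamCode(), which adopts the reservation
// via EMappingCreateAdoptVirtual.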
TInt DMemModelProcess::AddChunk(DChunk* aChunk, TBool aIsReadOnly)
	DMemModelChunk* pC=(DMemModelChunk*)aChunk;
	if(pC->iOwningProcess && this!=pC->iOwningProcess)
		return KErrAccessDenied;
	TInt r = WaitProcessLock();
	TInt i = ChunkIndex(pC);
	if(i>=0) // Found the chunk in this process, so just increment its access count
		iChunks[i].iAccessCount++;
		__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk %08x to %08x (Access count incremented to %d)",aChunk,this,iChunks[i].iAccessCount));
	r = DoAddChunk(pC,aIsReadOnly);
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::AddChunk returns %d",r));

void M::FsRegisterThread()
	TInternalRamDrive::Unlock();

void ExecHandler::UnlockRamDrive()

EXPORT_C TLinAddr TInternalRamDrive::Base()
	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	TLinAddr addr = (TLinAddr)pC->Base(pP);
	NKern::UnlockSystem();
	addr = (TLinAddr)pC->Base(pP);
	NKern::UnlockSystem();

EXPORT_C void TInternalRamDrive::Unlock()
	DMemModelChunk* pC=(DMemModelChunk*)PP::TheRamDriveChunk;
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	TInt r = pP->WaitProcessLock();
	if(pP->ChunkIndex(pC)==KErrNotFound)
		r = pP->DoAddChunk(pC,EFalse);
	__ASSERT_ALWAYS(r==KErrNone, MM::Panic(MM::EFsRegisterThread));
	pP->SignalProcessLock();

EXPORT_C void TInternalRamDrive::Lock()

TInt DMemModelProcess::DoAddChunk(DMemModelChunk* aChunk, TBool aIsReadOnly)
	// Must hold the process $LOCK mutex before calling this.
	// As the process lock is held it is safe to access iOsAsid without a reference.
	__NK_ASSERT_DEBUG(ChunkIndex(aChunk)==KErrNotFound); // shouldn't be adding a chunk which is already added
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess::DoAddChunk %O to %O",aChunk,this));

	// create a mapping for the chunk...
	DMemoryMapping* mapping;
	TMappingPermissions perm = MM::MappingPermissions
		(
		iOsAsid!=(TInt)KKernelOsAsid, // user?
		aIsReadOnly==false, // write?
		aChunk->iAttributes&DMemModelChunk::ECode // execute?
		);
	if(aChunk->iFixedBase) // HACK, kernel chunk has a fixed iBase
		r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid,EMappingCreateExactVirtual,(TLinAddr)aChunk->iFixedBase);
	else
		r = MM::MappingNew(mapping,aChunk->iMemoryObject,perm,iOsAsid);
	aChunk->iKernelMapping = mapping;
	TLinAddr base = MM::MappingBase(mapping);

	// expand the chunk info memory if required...
	if(iChunkCount==iChunkAlloc)
		TInt newAlloc = iChunkAlloc+KChunkGranularity;
		r = Kern::SafeReAlloc((TAny*&)iChunks,iChunkAlloc*sizeof(SChunkInfo),newAlloc*sizeof(SChunkInfo));
		MM::MappingDestroy(mapping);
		iChunkAlloc = newAlloc;

	// insert the new chunk info...
	TUint i = ChunkInsertIndex(aChunk);
	SChunkInfo* info = iChunks+i;
	SChunkInfo* infoEnd = iChunks+iChunkCount;

	// make space for the new chunk info by shuffling the existing
	// infos along KMaxChunkInfosInOneGo at a time...
	SChunkInfo* infoPtr = infoEnd-KMaxChunkInfosInOneGo;
	memmove(infoPtr+1,infoPtr,(TLinAddr)infoEnd-(TLinAddr)infoPtr);
	NKern::FlashSystem();
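	// Flashing the System Lock between batches of KMaxChunkInfosInOneGo entries
	// bounds how long the lock is held in one go while a large iChunks array is
	// shuffled, which keeps worst-case kernel thread latency small.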
	info->iChunk = aChunk;
	info->iMapping = mapping;
	info->iAccessCount = 1;
	info->iIsReadOnly = aIsReadOnly;
	NKern::UnlockSystem();

	// add the chunk to the list of Shared Chunks...
	if(aChunk->iChunkType==ESharedKernelSingle || aChunk->iChunkType==ESharedKernelMultiple)
		iSharedChunks = new RAddressedContainer(&TheSharedChunkLock,iProcessLock);
		r = iSharedChunks->Add(base,aChunk);
	__DEBUG_EVENT(EEventUpdateProcess, this);

void DMemModelProcess::DoRemoveChunk(TInt aIndex)
	__DEBUG_EVENT(EEventUpdateProcess, this);
	DMemModelChunk* chunk = iChunks[aIndex].iChunk;
	DMemoryMapping* mapping = iChunks[aIndex].iMapping;
	if(chunk->iChunkType==ESharedKernelSingle || chunk->iChunkType==ESharedKernelMultiple)
		// remove the chunk from the list of Shared Chunks...
		iSharedChunks->Remove(MM::MappingBase(mapping));
		// delete iSharedChunks if it's empty, so the memory leak test code passes...
		if(iSharedChunks->Count()==0)
			NKern::FMWait(&TheSharedChunkLock);
			RAddressedContainer* s = iSharedChunks;
			iSharedChunks = NULL;
			NKern::FMSignal(&TheSharedChunkLock);
			delete s;

	// remove the chunk from the array...
	SChunkInfo* infoStart = iChunks+aIndex+1;
	SChunkInfo* infoEnd = iChunks+iChunkCount;

	// shuffle the existing infos down KMaxChunkInfosInOneGo at a time...
	SChunkInfo* infoPtr = infoStart+KMaxChunkInfosInOneGo;
	memmove(infoStart-1,infoStart,(TLinAddr)infoPtr-(TLinAddr)infoStart);
	if(infoStart>=infoEnd)
		break;
	NKern::FlashSystem();
	NKern::UnlockSystem();

	if(mapping==chunk->iKernelMapping)
		chunk->iKernelMapping = 0;
	MM::MappingDestroy(mapping);

/**
Final chance for the process to release resources during its death.

Called with the process $LOCK mutex held (if it exists).
This mutex will not be released before it is deleted,
i.e. no other thread will ever hold the mutex again.
*/
void DMemModelProcess::FinalRelease()
	// Clean up any left-over chunks (such as SharedIo buffers)
	// Destroy the remaining mappings and memory objects owned by this process
	MM::MappingAndMemoryDestroy(iDataBssMapping);
	if(iCodeVirtualAllocSize)
		MM::VirtualFree(iOsAsid,iCodeVirtualAllocAddress,iCodeVirtualAllocSize);
	// Close the original reference on the os asid.

void DMemModelProcess::RemoveChunk(DMemModelChunk *aChunk)
	// Note that this can't be called after the process $LOCK mutex has been deleted,
	// since it can only be called by a thread in this process doing a handle close or
	// dying, or by the process handles array being deleted due to the process dying,
	// all of which happen before $LOCK is deleted.
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O RemoveChunk %O",this,aChunk));
	Kern::MutexWait(*iProcessLock);
	TInt i = ChunkIndex(aChunk);
	if(i>=0) // Found the chunk
		__KTRACE_OPT(KPROC,Kern::Printf("Chunk access count %d",iChunks[i].iAccessCount));
		if(--iChunks[i].iAccessCount==0)
	Kern::MutexSignal(*iProcessLock);

TUint8* DMemModelChunk::Base(DProcess* aProcess)
	DMemModelProcess* pP = (DMemModelProcess*)aProcess;
	DMemoryMapping* mapping = 0;
	if(iKernelMapping && pP==K::TheKernelProcess)
		// shortcut for shared chunks...
		mapping = iKernelMapping;
	else
		// find the chunk in the process...
		TInt i = pP->ChunkIndex(this);
		if(i>=0)
			mapping = pP->iChunks[i].iMapping;
	return (TUint8*)MM::MappingBase(mapping);

DChunk* DThread::OpenSharedChunk(const TAny* aAddress, TBool aWrite, TInt& aOffset)
	DMemModelChunk* chunk = 0;
	NKern::FMWait(&TheSharedChunkLock);
	RAddressedContainer* list = ((DMemModelProcess*)iOwningProcess)->iSharedChunks;
	TUint offset;
	chunk = (DMemModelChunk*)list->Find((TLinAddr)aAddress,offset);
	if(chunk && offset<TUint(chunk->iMaxSize) && chunk->Open()==KErrNone)
		aOffset = offset; // chunk found and opened successfully
	else
		chunk = 0; // failed
	NKern::FMSignal(&TheSharedChunkLock);
	return chunk;
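// Note: iSharedChunks is keyed on each shared chunk's mapping base address in
// this process, so Find() translates an arbitrary user address into the owning
// chunk plus an offset in a single search, all under TheSharedChunkLock.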
TUint DMemModelProcess::ChunkInsertIndex(DMemModelChunk* aChunk)
	// need to hold iProcessLock or the System Lock...
	if(K::Initialising==false && iProcessLock!=NULL && iProcessLock->iCleanup.iThread!=&Kern::CurrentThread())
		// don't hold iProcessLock, so...
		__ASSERT_SYSTEM_LOCK;
	// binary search the pointer-ordered chunk list...
	SChunkInfo* list = iChunks;
	TUint r = iChunkCount;
	DChunk* x = list[m].iChunk;

TInt DMemModelProcess::ChunkIndex(DMemModelChunk* aChunk)
	TUint i = ChunkInsertIndex(aChunk);
	if(i && iChunks[--i].iChunk==aChunk)
		return i;
	return KErrNotFound;
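// Note: ChunkInsertIndex() returns the position at which aChunk would be
// inserted into the pointer-ordered iChunks array, so when the chunk is
// already present it is the entry immediately before that position; this is
// what the decrement above checks.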
TInt DMemModelProcess::MapCodeSeg(DCodeSeg* aSeg)
	__ASSERT_CRITICAL; // Must be in a critical section so we can't leak os asid references.
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O MapCodeSeg %C", this, aSeg));
	TBool kernel_only=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool user_local=( (seg.iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
	if (kernel_only && !(iAttributes&ESupervisor))
		return KErrNotSupported;
	if (seg.iAttr&ECodeSegAttKernel)
		return KErrNone; // no extra mappings needed for kernel code

	// Attempt to open a reference on the os asid; this is required so that
	// MapUserRamCode() and CommitDllData() can use iOsAsid safely.
	TInt osAsid = TryOpenOsAsid();
		{// The process has died.
	r=MapUserRamCode(seg.Memory());
	TInt total_data_size;
	TLinAddr data_base;
	seg.GetDataSizeAndBase(total_data_size, data_base);
	if (r==KErrNone && total_data_size)
		TInt size=MM::RoundToPageSize(total_data_size);
		r=CommitDllData(data_base, size, aSeg);
		if (r!=KErrNone && user_local)
			UnmapUserRamCode(seg.Memory());
	CloseOsAsid();
	return r;

void DMemModelProcess::UnmapCodeSeg(DCodeSeg* aSeg)
	__ASSERT_CRITICAL; // Must be in a critical section so we can't leak os asid references.
	DMemModelCodeSeg& seg=*(DMemModelCodeSeg*)aSeg;
	__KTRACE_OPT(KDLL,Kern::Printf("Process %O UnmapCodeSeg %C", this, aSeg));
	if (seg.iAttr&ECodeSegAttKernel)
		return; // no extra mappings needed for kernel code

	// Attempt to open a reference on the os asid; this is required so that
	// UnmapUserRamCode() and DecommitDllData() can use iOsAsid safely.
	TInt osAsid = TryOpenOsAsid();
		{// The process has died and will already have cleaned up any code segs.
	TInt total_data_size;
	TLinAddr data_base;
	seg.GetDataSizeAndBase(total_data_size, data_base);
	DecommitDllData(data_base, MM::RoundToPageSize(total_data_size));
	UnmapUserRamCode(seg.Memory());
	CloseOsAsid();

void DMemModelProcess::RemoveDllData()
	// Call with CodeSegLock held

TInt DMemModelProcess::MapUserRamCode(DMemModelCodeSegMemory* aMemory)
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O MapUserRamCode %C %d %d",
		this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);

	TMappingCreateFlags createFlags = EMappingCreateExactVirtual;

	if(!(aMemory->iCodeSeg->iAttr&ECodeSegAttAddrNotUnique))
		// the codeseg memory address is globally unique, i.e. a common address across all processes...
		FlagSet(createFlags,EMappingCreateCommonVirtual);

	if(aMemory->iCodeSeg->IsExe())
		// EXE codesegs have already had their virtual address allocated, so we must adopt that...
		__NK_ASSERT_DEBUG(iCodeVirtualAllocSize);
		__NK_ASSERT_DEBUG(iCodeVirtualAllocAddress==aMemory->iRamInfo.iCodeRunAddr);
		iCodeVirtualAllocSize = 0;
		iCodeVirtualAllocAddress = 0;
		FlagSet(createFlags,EMappingCreateAdoptVirtual);
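		// Adopting the reservation transfers ownership of the virtual region to
		// the new mapping; zeroing iCodeVirtualAllocSize/Address above ensures
		// FinalRelease() won't free the region a second time.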
	DMemoryMapping* mapping;
	return MM::MappingNew(mapping,aMemory->iCodeMemoryObject,EUserExecute,iOsAsid,createFlags,aMemory->iRamInfo.iCodeRunAddr);

void DMemModelProcess::UnmapUserRamCode(DMemModelCodeSegMemory* aMemory)
	__KTRACE_OPT(KPROC,Kern::Printf("DMemModelProcess %O UnmapUserRamCode %C %d %d",
		this, aMemory->iCodeSeg, iOsAsid, aMemory->iPagedCodeInfo!=0));
	__ASSERT_MUTEX(DCodeSeg::CodeSegLock);
	MM::MappingDestroy(aMemory->iRamInfo.iCodeRunAddr,iOsAsid);

TInt DMemModelProcess::CommitDllData(TLinAddr aBase, TInt aSize, DCodeSeg* aCodeSeg)
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O CommitDllData %08x+%x",this,aBase,aSize));
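	// DLL writable static data: each process loading the DLL commits its own
	// memory object here and maps it at the common run address aBase with
	// EMappingCreateCommonVirtual, so the static data appears at the same
	// address in every process that has the DLL loaded.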
	DMemoryObject* memory;
	TMemoryObjectType memoryType = aCodeSeg->iAttr&ECodeSegAttDataPaged ? EMemoryObjectPaged : EMemoryObjectMovable;
	TInt r = MM::MemoryNew(memory,memoryType,MM::BytesToPages(aSize));
	r = MM::MemoryAlloc(memory,0,MM::BytesToPages(aSize));
	DMemoryMapping* mapping;
	r = MM::MappingNew(mapping,memory,EUserReadWrite,iOsAsid,EMappingCreateCommonVirtual,aBase);
	MM::MemoryDestroy(memory);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,aCodeSeg,this);
#endif
	__KTRACE_OPT(KDLL,Kern::Printf("CommitDllData returns %d",r));

void DMemModelProcess::DecommitDllData(TLinAddr aBase, TInt aSize)
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelProcess %O DecommitDllData %08x+%x",this,aBase,aSize));
	MM::MappingAndMemoryDestroy(aBase,iOsAsid);

void DMemModelProcess::BTracePrime(TInt aCategory)
	DProcess::BTracePrime(aCategory);
#ifdef BTRACE_FLEXIBLE_MEM_MODEL
	if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
		BTrace8(BTrace::EFlexibleMemModel,BTrace::EAddressSpaceId,this,iOsAsid);
		DMemoryObject* memory = MM::MappingGetAndOpenMemory(iDataBssMapping);
		MM::MemoryBTracePrime(memory);
		BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsProcessStaticData,memory,this);
		MM::MemoryClose(memory);

		// Trace the memory objects for DLL static data...
		SDblQue cs_list;
		DCodeSeg::UnmarkAll(DCodeSeg::EMarkListDeps|DCodeSeg::EMarkUnListDeps);
		TraverseCodeSegs(&cs_list, NULL, DCodeSeg::EMarkListDeps, 0);
		SDblQueLink* anchor=&cs_list.iA;
		SDblQueLink* pL=cs_list.First();
		for(; pL!=anchor; pL=pL->iNext)
			DMemModelCodeSeg* seg = _LOFF(pL,DMemModelCodeSeg,iTempLink);
			TInt total_data_size;
			TLinAddr data_base;
			seg->GetDataSizeAndBase(total_data_size, data_base);
			// The mapping instance count can be ignored as a dll data mapping is
			// only ever used with a single memory object.
			TUint offset;
			TUint mappingInstanceCount;
			NKern::ThreadEnterCS();
			DMemoryMapping* mapping = MM::FindMappingInAddressSpace(iOsAsid, data_base, 0, offset, mappingInstanceCount);
			DMemoryObject* memory = MM::MappingGetAndOpenMemory(mapping);
			MM::MemoryBTracePrime(memory);
			BTrace12(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsDllStaticData,memory,seg,this);
			MM::MemoryClose(memory);
			MM::MappingClose(mapping);
			NKern::ThreadLeaveCS();
		DCodeSeg::EmptyQueue(cs_list, 0); // leave cs_list empty
#endif

TInt DMemModelProcess::NewShPool(DShPool*& aPool, TShPoolCreateInfo& aInfo)
	DMemModelShPool* pC = NULL;
	if (aInfo.iInfo.iFlags & TShPoolCreateInfo::EPageAlignedBuffer)
		pC = new DMemModelAlignedShPool();
	else
		pC = new DMemModelNonAlignedShPool();
	TInt r = pC->Create(this, aInfo);

TInt DThread::RawRead(const TAny* aSrc, TAny* aDest, TInt aLength, TInt aFlags, TIpcExcTrap* aExcTrap)
	// Read from the thread's process.
	// aSrc     Run address of the memory to read.
	// aDest    Current address of the destination buffer.
	// aExcTrap Exception trap object to be updated if the actual memory access is performed
	//          on a different memory area than specified. This happens when the access is
	//          performed on an unaligned memory area.
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TInt result = KErrNone;
	TBool have_taken_fault = EFalse;
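	// Copy strategy (applies to RawWrite() below as well): optimistically alias
	// and copy the whole remaining length in one go; if a paging fault removes
	// the alias mid-copy, the XTRAP_PAGING machinery restarts the iteration and
	// have_taken_fault limits each retry to the remainder of the current page,
	// so forward progress is made one page at a time.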
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	XTRAP_PAGING_START(pagingTrap);
#endif

	TLinAddr alias_src;
	TUint alias_size;
	TInt len = have_taken_fault ? Min(aLength, KPageSize - (src & KPageMask)) : aLength;
	TInt alias_result=t.Alias(src, pP, len, alias_src, alias_size);
	if (alias_result<0)
		result = KErrBadDescriptor; // bad permissions
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	// Need to let the trap handler know where we are accessing in case we take a page fault
	// and the alias gets removed.
	aExcTrap->iRemoteBase = alias_src;
	aExcTrap->iSize = alias_size;
#endif
	__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawRead %08x<-%08x+%x",dest,alias_src,alias_size));
	if(aFlags&KCheckLocalAddress)
		MM::ValidateLocalIpcAddress(dest,alias_size,ETrue);
	UNLOCK_USER_MEMORY();
	memcpy( (TAny*)dest, (const TAny*)alias_src, alias_size);
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	have_taken_fault = ETrue;
#endif
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif
	return result;

TInt DThread::RawWrite(const TAny* aDest, const TAny* aSrc, TInt aLength, TInt aFlags, DThread* /*anOriginatingThread*/, TIpcExcTrap* aExcTrap)
	// Write to the thread's process.
	// aDest    Run address of the memory to write.
	// aSrc     Current address of the source buffer.
	// aExcTrap Exception trap object to be updated if the actual memory access is performed
	//          on a different memory area than specified. This happens when the access is
	//          performed on an unaligned memory area.
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	TLinAddr dest=(TLinAddr)aDest;
	TInt result = KErrNone;
	TBool have_taken_fault = EFalse;
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	XTRAP_PAGING_START(pagingTrap);
#endif

	TLinAddr alias_dest;
	TUint alias_size;
	TInt len = have_taken_fault ? Min(aLength, KPageSize - (dest & KPageMask)) : aLength;
	TInt alias_result=t.Alias(dest, pP, len, alias_dest, alias_size);
	if (alias_result<0)
		result = KErrBadDescriptor; // bad permissions
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	// Need to let the trap handler know where we are accessing in case we take a page fault
	// and the alias gets removed.
	aExcTrap->iRemoteBase = alias_dest;
	aExcTrap->iSize = alias_size;
#endif
	__KTRACE_OPT(KTHREAD2,Kern::Printf("DThread::RawWrite %08x+%x->%08x",src,alias_size,alias_dest));

	// Must check that it is safe to page, unless we are reading from unpaged ROM in
	// which case the read cannot page fault.
	CHECK_PAGING_SAFE_RANGE(src, aLength);
	CHECK_DATA_PAGING_SAFE_RANGE(dest, aLength);

	if(aFlags&KCheckLocalAddress)
		MM::ValidateLocalIpcAddress(src,alias_size,EFalse);
	UNLOCK_USER_MEMORY();
	memcpy( (TAny*)alias_dest, (const TAny*)src, alias_size);
	aLength-=alias_size;
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	have_taken_fault = ETrue;
#endif
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif
	return result;

TInt DThread::ReadAndParseDesHeader(const TAny* aSrc, TDesHeader& aDest)
	// Read the header of a remote descriptor.
	static const TUint8 LengthLookup[16]={4,8,12,8,12,0,0,0,0,0,0,0,0,0,0,0};
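	// Assuming the standard TDesC type codes (EBufC=0, EPtrC=1, EPtr=2, EBuf=3,
	// EBufCPtr=4), the header sizes are: EBufC just the type/length word (4),
	// EPtrC and EBuf one extra word (8), EPtr and EBufCPtr a max-length word
	// plus a pointer (12). The remaining entries are 0, so an invalid type
	// terminates the parse with KErrBadDescriptor.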
	DMemModelThread& t=*(DMemModelThread*)TheCurrentThread;
	DMemModelProcess* pP=(DMemModelProcess*)iOwningProcess;
	TLinAddr src=(TLinAddr)aSrc;
	__NK_ASSERT_DEBUG(t.iIpcClient==NULL);
	t.iIpcClient = this;
	TUint8* pDest = (TUint8*)&aDest;
	TLinAddr pAlias;
	TUint alias_size = 0;
	TInt length = 12;
	TInt type = KErrBadDescriptor;
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	XTRAP_PAGING_START(pagingTrap);
#endif
	if (alias_size == 0)
		// no alias is present, so one must be created here
		if (t.Alias(src, pP, length, pAlias, alias_size) != KErrNone)
			break;
	__NK_ASSERT_DEBUG(alias_size >= sizeof(TUint32));

	// read either the first word, or as much of the remainder as is aliased
	TInt l = length == 12 ? sizeof(TUint32) : Min(length, alias_size);
	if (Kern::SafeRead((TAny*)pAlias, (TAny*)pDest, l))
		break; // exception reading from user space

	// we have just read the first word, so decode the descriptor type
	type = *(TUint32*)pDest >> KShiftDesType8;
	length = LengthLookup[type];
	// an invalid descriptor type has length 0, which will be decreased by 'l'
	// and so terminate the loop with length < 0
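	// The parse is therefore two-phase: read the first word, decode the type to
	// obtain the real header length, then keep reading (re-aliasing across page
	// faults as needed) until 'length' reaches exactly 0; anything else means a
	// malformed or inaccessible descriptor.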
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	alias_size = 0; // a page fault caused the alias to be removed
#endif
	t.iIpcClient = NULL;
#ifdef __BROADCAST_CACHE_MAINTENANCE__
	t.iPagingExcTrap = NULL; // in case we broke out of the loop and skipped XTRAP_PAGING_END
#endif
	return length == 0 ? K::ParseDesHeader(aSrc, (TRawDesHeader&)aDest, aDest) : KErrBadDescriptor;

TInt DThread::PrepareMemoryForDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	// Not supported; the new physical pinning APIs should be used for DMA.
	return KErrNotSupported;

TInt DThread::ReleaseMemoryFromDMA(const TAny* aLinAddr, TInt aSize, TPhysAddr* aPhysicalPageList)
	// Not supported; the new physical pinning APIs should be used for DMA.
	return KErrNotSupported;