Update contrib.
// Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\multiple\mcodeseg.cpp
//
#include "memmodel.h"
#include <mmubase.inl>
#include "cache_maintenance.h"
#include <demand_paging.h>

DCodeSeg* M::NewCodeSeg(TCodeSegCreateInfo&)
//
// Create a new instance of this class.
//
	{
	__KTRACE_OPT(KDLL,Kern::Printf("M::NewCodeSeg"));
	return new DMemModelCodeSeg;
	}

//
// DMemModelCodeSegMemory
//

DEpocCodeSegMemory* DEpocCodeSegMemory::New(DEpocCodeSeg* aCodeSeg)
	{
	return new DMemModelCodeSegMemory(aCodeSeg);
	}
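
// The generic code-segment layer obtains the memory model's concrete
// DEpocCodeSegMemory object through this factory; the multiple memory model
// supplies DMemModelCodeSegMemory.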

DMemModelCodeSegMemory::DMemModelCodeSegMemory(DEpocCodeSeg* aCodeSeg)
	: DMmuCodeSegMemory(aCodeSeg)
	{
	}

TInt DMemModelCodeSegMemory::Create(TCodeSegCreateInfo& aInfo)
	{
	TInt r = DMmuCodeSegMemory::Create(aInfo);
	if (r!=KErrNone)
		return r;

	Mmu& m=Mmu::Get();

	iOsAsids = TBitMapAllocator::New(m.iNumOsAsids, EFalse);
	if (!iOsAsids)
		return KErrNoMemory;

	TInt totalPages = iPageCount+iDataPageCount;
	iPages = (TPhysAddr*)Kern::Alloc(totalPages*sizeof(TPhysAddr));
	if (!iPages)
		return KErrNoMemory;
	TInt i;
	for (i=0; i<totalPages; ++i)
		iPages[i] = KPhysAddrInvalid;

	// allocate RAM pages...
	__KTRACE_OPT(KDLL,Kern::Printf("Alloc DLL pages %x,%x", iPageCount,iDataPageCount));
	TInt startPage = iIsDemandPaged ? iPageCount : 0;	// if demand paged, skip pages for code
	TInt endPage = iPageCount+iDataPageCount;
	r=m.AllocRamPages(iPages+startPage, endPage-startPage, EPageMovable);
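
	// For a demand-paged segment only the iDataPageCount pages backing the data
	// section are committed here; the code pages are faulted in later by the
	// paging system, so their iPages[] entries stay KPhysAddrInvalid until then.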

	// initialise SPageInfo objects for allocated pages...
	if (r!=KErrNone)
		return r;
	NKern::LockSystem();
	for (i=startPage; i<endPage; ++i)
		{
		SPageInfo* info = SPageInfo::FromPhysAddr(iPages[i]);
		info->SetCodeSegMemory(this,i);
		}
	NKern::UnlockSystem();

#ifdef BTRACE_CODESEGS
	BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryAllocated,iCodeSeg,iPageCount<<m.iPageShift);
#endif
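
	// Reserve user code address space in alias-sized granules; the shift/mask
	// arithmetic below rounds the byte size up. Illustrative numbers (assumed,
	// not from this source): with 4KB pages (iPageShift==12) and 1MB granules
	// (iAliasShift==20, iAliasMask==0xFFFFF), totalPages==3 gives
	// ((3<<12)+0xFFFFF)>>20 == 1 granule.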
	TInt code_alloc=((totalPages<<m.iPageShift)+m.iAliasMask)>>m.iAliasShift;
	r=MM::UserCodeAllocator->AllocConsecutive(code_alloc, ETrue);
	if (r<0)
		return KErrNoMemory;
	MM::UserCodeAllocator->Alloc(r, code_alloc);
	iCodeAllocBase=r;
	iRamInfo.iCodeRunAddr=m.iUserCodeBase+(r<<m.iAliasShift);
	iRamInfo.iCodeLoadAddr=iRamInfo.iCodeRunAddr;
	if (iRamInfo.iDataSize)
		{
		if (iDataPageCount)
			iRamInfo.iDataLoadAddr=iRamInfo.iCodeLoadAddr+Mmu::RoundToPageSize(iRamInfo.iCodeSize);
		else
			iRamInfo.iDataLoadAddr=iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize;
		}

	// map the code into the loading process...
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	r=pP->MapUserRamCode(this, ETrue);
	if (r==KErrNone)
		iCreator=pP;

	return r;
	}
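
// Substitute swaps the physical page backing the code at aOffset: the caller
// (presumably the RAM defragmenter, since the pages are allocated EPageMovable)
// supplies the old and new physical addresses, and every live process mapping
// is updated via RemapPageByAsid.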
void DMemModelCodeSegMemory::Substitute(TInt aOffset, TPhysAddr aOld, TPhysAddr aNew)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DMemModelCodeSegMemory::Substitute %x %08x %08x",aOffset,aOld,aNew));
	Mmu& m=Mmu::Get();

	if (iPages[aOffset>>KPageShift] != aOld)
		MM::Panic(MM::ECodeSegRemapWrongPage);

	iPages[aOffset>>KPageShift] = aNew;
	m.RemapPageByAsid(iOsAsids, iRamInfo.iCodeRunAddr+aOffset, aOld, aNew, m.PtePermissions(EUserCode));
	}

TInt DMemModelCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
	{
	__NK_ASSERT_DEBUG(iPages);

	TInt r = DMmuCodeSegMemory::Loaded(aInfo);
	if (r!=KErrNone)
		return r;

	Mmu& m=Mmu::Get();
	if (!iIsDemandPaged)
		{
		UNLOCK_USER_MEMORY();
		CacheMaintenance::CodeChanged(iRamInfo.iCodeLoadAddr, iRamInfo.iCodeSize);
		LOCK_USER_MEMORY();
		}
	else
		{
		// apply code fixups to pages which have already been loaded...
		TInt pageShift = m.iPageShift;
		for (TInt i = 0 ; i < iPageCount ; ++i)
			{
			if (iPages[i] != KPhysAddrInvalid)
				{
				r = ApplyCodeFixupsOnLoad((TUint32*)(iRamInfo.iCodeLoadAddr+(i<<pageShift)),iRamInfo.iCodeRunAddr+(i<<pageShift));
				if (r!=KErrNone)
					return r;
				}
			}
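
		// The copy below starts one TLinAddr before iExportDir because the export
		// directory is preceded by its entry count; the extra word added to
		// exportDirSize keeps that count together with the copied directory.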
		// copy export directory (this will now have fixups applied)...
		TInt exportDirSize = iRamInfo.iExportDirCount * sizeof(TLinAddr);
		if (exportDirSize > 0 || (exportDirSize==0 && (iCodeSeg->iAttr&ECodeSegAttNmdExpData)) )
			{
			exportDirSize += sizeof(TLinAddr);
			TLinAddr* expDir = (TLinAddr*)Kern::Alloc(exportDirSize);
			if (!expDir)
				return KErrNoMemory;
			iCopyOfExportDir = expDir;
			UNLOCK_USER_MEMORY();
			memcpy(expDir,(TAny*)(iRamInfo.iExportDir-sizeof(TLinAddr)),exportDirSize);
			LOCK_USER_MEMORY();
			}
		}

	// unmap code from loading process...
	DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
	__ASSERT_ALWAYS(iCreator==pP, MM::Panic(MM::ECodeSegLoadedNotCreator));
	pP->UnmapUserRamCode(this, ETrue);
	iCreator=NULL;

	// discard any temporary pages used to store the loaded data section...
	if (iDataPageCount)
		{
		TPhysAddr* pages = iPages+iPageCount;
		m.FreePages(pages,iDataPageCount, EPageMovable);
		for (TInt i = 0 ; i < iDataPageCount ; ++i)
			pages[i] = KPhysAddrInvalid;

		// see if we can free any virtual address space now we don't need any for loading data
		TInt data_start = ((iPageCount << m.iPageShift) + m.iAliasMask) >> m.iAliasShift;
		TInt data_end = (((iPageCount + iDataPageCount) << m.iPageShift) + m.iAliasMask) >> m.iAliasShift;
		if (data_end != data_start)
			MM::UserCodeAllocator->Free(iCodeAllocBase + data_start, data_end - data_start);

		iDataPageCount = 0;	// the destructor must not free these pages again

		// reduce the size of the DCodeSeg now the data section has been moved
		iCodeSeg->iSize = iPageCount << m.iPageShift;
		}

	return KErrNone;
	}

void DMemModelCodeSegMemory::Destroy()
	{
	if (iCreator)
		iCreator->UnmapUserRamCode(this, ETrue);	// remove from creating process if not fully loaded
	}

DMemModelCodeSegMemory::~DMemModelCodeSegMemory()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSegMemory::~DMemModelCodeSegMemory %x", this));
	__NK_ASSERT_DEBUG(iAccessCount==0);
	__NK_ASSERT_DEBUG(iOsAsids==0 || iOsAsids->Avail()==0);	// check not mapped (inverted logic: an available bit marks a live mapping)

	Mmu& m=Mmu::Get();

	if (iCodeAllocBase>=0)
		{
		// free allocated virtual memory space...
		TInt size = (iPageCount+iDataPageCount)<<KPageShift;
		TInt code_alloc=(size+m.iAliasMask)>>m.iAliasShift;
		MM::UserCodeAllocator->Free(iCodeAllocBase, code_alloc);
		}

#ifdef __DEMAND_PAGING__
	if (iIsDemandPaged)
		{
		// return any paged memory to the paging system...
		NKern::LockSystem();
		DemandPaging& p = *DemandPaging::ThePager;
		for (TInt i = 0 ; i < iPageCount ; ++i)
			{
			if (iPages[i] != KPhysAddrInvalid)
				p.NotifyPageFree(iPages[i]);
			}
		NKern::UnlockSystem();

		Kern::Free(iCopyOfExportDir);
		iCopyOfExportDir = NULL;
		}
#endif

	if (iPages)
		{
		m.FreePages(iPages,iPageCount+iDataPageCount, EPageMovable);
		Kern::Free(iPages);
		iPages = NULL;

#ifdef BTRACE_CODESEGS
		BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryDeallocated,this,iPageCount<<m.iPageShift);
#endif
		}

	Kern::Free(iOsAsids);
	}

//
// DMemModelCodeSeg
//

DMemModelCodeSeg::DMemModelCodeSeg()
	: iCodeAllocBase(-1),
	  iDataAllocBase(-1)
	{
	}

DMemModelCodeSeg::~DMemModelCodeSeg()
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::Destruct %C", this));
	Mmu& m=Mmu::Get();
	if (iCodeAllocBase>=0)
		{
		TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
		TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
		TInt r=KErrNone;
		if (kernel)
			{
			DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
			r=kproc.iCodeChunk->Decommit(iCodeAllocBase, iSize);
			}
		else if (global)
			{
			r=m.iGlobalCode->Decommit(iCodeAllocBase, iSize);
			}
		__ASSERT_DEBUG(r==KErrNone, MM::Panic(MM::EDecommitFailed));
		(void)r;	// stop compiler warning in release builds
		}
	if (iDataAllocBase>=0 && !iXIP)
		{
		SRamCodeInfo& ri=RamInfo();
		TInt data_alloc=(ri.iDataSize+ri.iBssSize+m.iPageMask)>>m.iPageShift;
		MM::DllDataAllocator->Free(iDataAllocBase, data_alloc);
		}
	Kern::Free(iKernelData);
	DEpocCodeSeg::Destruct();
	}
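
// DoCreateRam distinguishes three kinds of RAM-loaded code via iAttr. An
// illustrative decoding, mirroring the tests used below:
//
//   TUint32 kind = iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal);
//   TBool kernel = (kind == ECodeSegAttKernel);	// lives in the kernel's code chunk
//   TBool global = (kind == ECodeSegAttGlobal);	// lives in the global code chunk
//   // neither flag set: ordinary user code, managed by DMemModelCodeSegMemory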

TInt DMemModelCodeSeg::DoCreateRam(TCodeSegCreateInfo& aInfo, DProcess*)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateRam %C", this));
	TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
	Mmu& m=Mmu::Get();
	SRamCodeInfo& ri=RamInfo();
	iSize = Mmu::RoundToPageSize(ri.iCodeSize+ri.iDataSize);
	if (iSize==0)
		return KErrCorrupt;
	TInt total_data_size=ri.iDataSize+ri.iBssSize;
	TInt r=KErrNone;
	if (kernel)
		{
		DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
		if (!kproc.iCodeChunk)
			r=kproc.CreateCodeChunk();
		if (r!=KErrNone)
			return r;
		r=kproc.iCodeChunk->Allocate(iSize, 0, m.iAliasShift);
		if (r<0)
			return KErrNoMemory;
		iCodeAllocBase=r;
		ri.iCodeRunAddr=(TUint32)kproc.iCodeChunk->Base()+r;
		ri.iCodeLoadAddr=ri.iCodeRunAddr;
		ri.iDataLoadAddr=ri.iCodeLoadAddr+ri.iCodeSize;
		if (total_data_size)
			{
			iKernelData=Kern::Alloc(total_data_size);
			if (!iKernelData)
				return KErrNoMemory;
			ri.iDataRunAddr=(TLinAddr)iKernelData;
			}
		return KErrNone;
		}
	if (global)
		{
		if (!m.iGlobalCode)
			r=m.CreateGlobalCodeChunk();
		if (r==KErrNone)
			r=m.iGlobalCode->Allocate(iSize, 0, m.iAliasShift);
		if (r<0)
			return KErrNoMemory;
		iCodeAllocBase=r;
		ri.iCodeRunAddr=(TUint32)m.iGlobalCode->Base()+r;
		ri.iCodeLoadAddr=ri.iCodeRunAddr;
		ri.iDataLoadAddr=0;	// we don't allow static data in global code
		TInt loadSize = ri.iCodeSize+ri.iDataSize;
		memset((TAny*)(ri.iCodeRunAddr+loadSize), 0x03, iSize-loadSize);
		return KErrNone;
		}
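
	// Filling the slack between the end of the loaded image and the end of the
	// committed pages with a distinctive byte (0x03) makes stray jumps or reads
	// past the image easier to spot; the choice of value is assumed arbitrary.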

	// user code...
	if (total_data_size && !IsExe())
		{
		// allocate virtual address space for DLL static data...
		TInt data_alloc=(total_data_size+m.iPageMask)>>m.iPageShift;
		__KTRACE_OPT(KDLL,Kern::Printf("Alloc DLL data %x", data_alloc));
		r=MM::DllDataAllocator->AllocConsecutive(data_alloc, ETrue);
		if (r<0)
			return KErrNoMemory;
		MM::DllDataAllocator->Alloc(r, data_alloc);
		iDataAllocBase=r;
		ri.iDataRunAddr=m.iDllDataBase+m.iMaxDllDataSize-((r+data_alloc)<<m.iPageShift);
		}

	r = Memory()->Create(aInfo);
	return r;
	}
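
// DLL static data addresses are carved top-down from the DLL data region. A
// worked example with assumed values (not from this source): with
// iDllDataBase==0x38000000, iMaxDllDataSize==0x800000, 4KB pages, and a 2-page
// allocation at bitmap position r==0, iDataRunAddr becomes
// 0x38000000+0x800000-(2<<12) == 0x387FE000, the block at the top of the region.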

TInt DMemModelCodeSeg::DoCreateXIP(DProcess* aProcess)
	{
//	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateXIP %C proc %O", this, aProcess));
	return KErrNone;	// XIP code executes in place, so no RAM needs allocating
	}

TInt DMemModelCodeSeg::Loaded(TCodeSegCreateInfo& aInfo)
	{
	if (iXIP)
		return DEpocCodeSeg::Loaded(aInfo);

	TBool kernel=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
	TBool global=( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
	if (Pages())
		{
		TInt r = Memory()->Loaded(aInfo);
		if (r!=KErrNone)
			return r;
		}
	else if (kernel && iExeCodeSeg!=this)
		{
		Mmu& m=Mmu::Get();
		DMemModelProcess& kproc=*(DMemModelProcess*)K::TheKernelProcess;
		SRamCodeInfo& ri=RamInfo();

		// NOTE: Must do the IMB before changing permissions, since ARMv6 is very pedantic and
		// doesn't let you clean a cache line which is marked as read-only.
		CacheMaintenance::CodeChanged(ri.iCodeRunAddr, ri.iCodeSize);

		TInt offset=ri.iCodeRunAddr-TLinAddr(kproc.iCodeChunk->iBase);
		kproc.iCodeChunk->ApplyPermissions(offset, iSize, m.iKernelCodePtePerm);
		}
	else if (global)
		{
		Mmu& m=Mmu::Get();
		SRamCodeInfo& ri=RamInfo();
		CacheMaintenance::CodeChanged(ri.iCodeRunAddr, ri.iCodeSize);
		TInt offset=ri.iCodeRunAddr-TLinAddr(m.iGlobalCode->iBase);
		m.iGlobalCode->ApplyPermissions(offset, iSize, m.iGlobalCodePtePerm);
		}
	return DEpocCodeSeg::Loaded(aInfo);
	}

void DMemModelCodeSeg::ReadExportDir(TUint32* aDest)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::ReadExportDir %C %08x",this, aDest));

	if (!iXIP)
		{
		SRamCodeInfo& ri=RamInfo();
		TInt size=(ri.iExportDirCount+1)*sizeof(TLinAddr);

		if (Memory()->iCopyOfExportDir)
			{
			kumemput(aDest, Memory()->iCopyOfExportDir, size);
			return;
			}

		// the code chunk may not be mapped in the current process, so copy the
		// directory page by page through a temporary kernel mapping...
		NKern::ThreadEnterCS();
		Mmu& m=Mmu::Get();
		TLinAddr src=ri.iExportDir-sizeof(TLinAddr);

		TInt offset=src-ri.iCodeRunAddr;
		TPhysAddr* physArray = Pages();
		TPhysAddr* ppa=physArray+(offset>>m.iPageShift);
		while (size)
			{
			TInt pageOffset = src&m.iPageMask;
			TInt l=Min(size, m.iPageSize-pageOffset);
			TLinAddr alias_src = m.MapTemp(*ppa++,src-pageOffset)+pageOffset;
			// Note, the following memory access isn't XTRAP'ed, because...
			// a) This function is only called by the loader thread, so even if
			//    exceptions were trapped the system is doomed anyway
			// b) Any exception will cause the crash debugger/logger to be called,
			//    which will provide more information than trapping the exception
			//    and returning an error code would.
			kumemput32(aDest, (const TAny*)alias_src, l);
			m.UnmapTemp();
			size-=l;
			src+=l;
			aDest+=l/sizeof(TUint32);
			}
		NKern::ThreadLeaveCS();
		}
	}

TBool DMemModelCodeSeg::OpenCheck(DProcess* aProcess)
	{
	return FindCheck(aProcess);
	}

TBool DMemModelCodeSeg::FindCheck(DProcess* aProcess)
	{
	__KTRACE_OPT(KDLL,Kern::Printf("CSEG:%08x Compat? proc=%O",this,aProcess));
	if (aProcess)
		{
		DMemModelProcess& p=*(DMemModelProcess*)aProcess;
		DCodeSeg* pPSeg=p.CodeSeg();
		if (iAttachProcess && iAttachProcess!=aProcess)
			return EFalse;
		if (iExeCodeSeg && iExeCodeSeg!=pPSeg)
			return EFalse;
		}
	return ETrue;
	}
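
// FindCheck enforces loader visibility rules: a segment pinned to a single
// process (iAttachProcess) is invisible to all others, and a segment linked
// against a particular EXE (iExeCodeSeg) is only visible to processes whose
// own code segment is that EXE.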

void DMemModelCodeSeg::BTracePrime(TInt aCategory)
	{
#ifdef BTRACE_CODESEGS
	if (aCategory == BTrace::ECodeSegs || aCategory == -1)
		{
		DCodeSeg::BTracePrime(aCategory);
		DMemModelCodeSegMemory* codeSegMemory = Memory();
		if (codeSegMemory && codeSegMemory->iPages && codeSegMemory->iPageCount)
			BTrace8(BTrace::ECodeSegs,BTrace::ECodeSegMemoryAllocated,this,codeSegMemory->iPageCount<<Mmu::Get().iPageShift);
		}
#endif
	}