First public contribution.

// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//

#include <plat_priv.h>
#include "cache_maintenance.h"
#include "mcodepaging.h"

/**
Manager for memory objects containing demand paged executable code.
This is the memory used by DCodeSegMemory objects to store the contents of RAM-loaded
EXEs and DLLs which are to be demand paged.

This memory has associated information, supplied by the Loader, which enables
the executable's code to be located in the file system and its contents
relocated and fixed up when demand loaded.
*/
class DCodePagedMemoryManager : public DPagedMemoryManager
    {
private:
    // from DMemoryManager...
    virtual TInt New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags);
    virtual void Destruct(DMemoryObject* aMemory);
    virtual void Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount);
    virtual TInt CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry);

    // from DPagedMemoryManager...
    virtual void Init3();
    virtual TInt InstallPagingDevice(DPagingDevice* aDevice);
    virtual TInt AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount);
    virtual TInt ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest);
    virtual TBool IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount);

private:
    /**
    Array of paging devices used for each media drive.
    This is initialised by #InstallPagingDevice.
    Drives without paging devices have a null pointer in their entry.
    */
    DPagingDevice* iDevice[KMaxLocalDrives];

public:
    /**
    The single instance of this manager class.
    */
    static DCodePagedMemoryManager TheManager;

    friend DPagingDevice* CodePagingDevice(TInt aDriveNum);
    };
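
/*
Where these overrides are expected to be called from, as far as can be inferred from
this file alone (the generic DPagedMemoryManager/DPager flow lives elsewhere, so the
call sites sketched here are assumptions, for documentation only):

    // media attach:          InstallPagingDevice(device)   - one iDevice[] slot per supported drive
    // page fault on code:    AcquirePageReadRequest(...)   - pick the drive's DPagingDevice
    //                        ReadPages(...)                - read, decompress, fix up
    // dirty page stolen:     CleanPage(...)                - code modifications are simply dropped
    // code segment unload:   Free(...) / Destruct(...)     - release pages and the DPagedCodeInfo
*/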

/**
Reference counted object containing a #TPagedCodeInfo.
This is a structure containing the information about a demand paged code segment
which is required to load and fix up its code section.

An instance of this object is created for each memory object being managed by
#DCodePagedMemoryManager, and a pointer to it is stored in the memory object's
DMemoryObject::iManagerData member.

@see MM::PagedCodeLoaded
*/
class DPagedCodeInfo : public DReferenceCountedObject
    {
public:
    /**
    Return a reference to the embedded #TPagedCodeInfo.
    */
    inline TPagedCodeInfo& Info()
        { return iInfo; }
private:
    /**
    @copybrief TPagedCodeInfo
    */
    TPagedCodeInfo iInfo;
    };
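
/*
Summary sketch (documentation only) of the DPagedCodeInfo lifetime as used in this
file, assuming DReferenceCountedObject starts life with a single reference:

    // New():       aMemory->iManagerData = new DPagedCodeInfo;  // reference held by the memory object
    // ReadPages(): pagedCodeInfo->Open();                       // extra reference while paging in
    //              ... read, decompress and fix up the pages ...
    //              pagedCodeInfo->AsyncClose();                 // release it; AsyncClose presumably because
    //                                                           // this may be the last reference and we are
    //                                                           // on the paging path (assumption)
    // Destruct():  aMemory->iManagerData = 0;
    //              pagedCodeInfo->Close();                      // drop the memory object's reference
*/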

DCodePagedMemoryManager DCodePagedMemoryManager::TheManager;
DPagedMemoryManager* TheCodePagedMemoryManager = &DCodePagedMemoryManager::TheManager;

DPagingDevice* CodePagingDevice(TInt aDriveNum)
    {
    __NK_ASSERT_DEBUG(aDriveNum<KMaxLocalDrives);
    return DCodePagedMemoryManager::TheManager.iDevice[aDriveNum];
    }

void DCodePagedMemoryManager::Init3()
    {
    TRACEB(("DCodePagedMemoryManager::Init3()"));
    }

TInt DCodePagedMemoryManager::InstallPagingDevice(DPagingDevice* aDevice)
    {
    TRACEB(("DCodePagedMemoryManager::InstallPagingDevice(0x%08x)",aDevice));

    TUint codePolicy = TheSuperPage().KernelConfigFlags() & EKernelConfigCodePagingPolicyMask;
    TRACEB(("Code Paging Policy = %d", codePolicy >> EKernelConfigCodePagingPolicyShift));
    if(codePolicy == EKernelConfigCodePagingPolicyNoPaging)
        {
        // no paging allowed so end now...
        return KErrNone;
        }
    TInt i;
    for(i=0; i<KMaxLocalDrives; ++i)
        if(aDevice->iDrivesSupported&(1<<i))
            {
            TRACEB(("DCodePagedMemoryManager::InstallPagingDevice drive=%d",i));
            TAny* null = 0;
            if(!__e32_atomic_cas_ord_ptr(&iDevice[i], &null, aDevice)) // set iDevice[i]=aDevice if it was originally 0
                {
                // paging device already registered...
                TRACEB(("DCodePagedMemoryManager::InstallPagingDevice returns ALREADY EXISTS!"));
                return KErrAlreadyExists;
                }
            // flag code paging is supported...
            __e32_atomic_ior_ord32(&K::MemModelAttributes, (TUint32)EMemModelAttrCodePaging);
            }

    return KErrNone;
    }
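
/*
The per-drive registration above relies on an atomic compare-and-swap so that two
paging devices racing to claim the same drive cannot both succeed: the slot is only
written if it still holds the null pointer. Below is a minimal standalone
illustration of the same claim-a-slot-once pattern using standard C++ atomics
(documentation only, not part of this translation unit; kMaxDrives, gDeviceSlot and
RegisterDevice are made-up names):

    #include <atomic>

    const int kMaxDrives = 16;                      // stand-in for KMaxLocalDrives
    std::atomic<void*> gDeviceSlot[kMaxDrives];     // stand-in for iDevice[]

    // Returns false if another device already owns the drive's slot.
    bool RegisterDevice(int aDrive, void* aDevice)
        {
        void* expected = nullptr;
        // succeeds only if the slot is still null, mirroring
        // __e32_atomic_cas_ord_ptr(&iDevice[i], &null, aDevice)
        return gDeviceSlot[aDrive].compare_exchange_strong(expected, aDevice);
        }
*/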

TInt DCodePagedMemoryManager::AcquirePageReadRequest(DPageReadRequest*& aRequest, DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    DPagingDevice* device = 0;
    DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
    if(pagedCodeInfo)
        {
        TPagedCodeInfo& info = pagedCodeInfo->Info();
        device = iDevice[info.iCodeLocalDrive];
        }
    if(!device)
        return KErrNotSupported; // no code info (memory being destroyed) or no paging device
    aRequest = device->iRequestPool->AcquirePageReadRequest(aMemory,aIndex,aCount);
    return KErrNone;
    }

TInt DCodePagedMemoryManager::New(DMemoryObject*& aMemory, TUint aSizeInPages, TMemoryAttributes aAttributes, TMemoryCreateFlags aCreateFlags)
    {
    DPagedCodeInfo* pagedCodeInfo = new DPagedCodeInfo;
    if(!pagedCodeInfo)
        return KErrNoMemory;
    TInt r = DPagedMemoryManager::New(aMemory, aSizeInPages, aAttributes, aCreateFlags);
    if(r!=KErrNone)
        pagedCodeInfo->Close();
    else
        aMemory->iManagerData = pagedCodeInfo;
    return r;
    }

void DCodePagedMemoryManager::Destruct(DMemoryObject* aMemory)
    {
    DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
    aMemory->iManagerData = 0;
    if(pagedCodeInfo)
        pagedCodeInfo->Close();

    // base call to free memory and close object...
    DPagedMemoryManager::Destruct(aMemory);
    }

void DCodePagedMemoryManager::Free(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    DoFree(aMemory,aIndex,aCount);
    }

TInt DCodePagedMemoryManager::CleanPage(DMemoryObject* aMemory, SPageInfo* aPageInfo, TPhysAddr*& aPageArrayEntry)
    {
    if(aPageInfo->IsDirty()==false)
        return KErrNone;

    // shouldn't be asked to clean a page which is writable...
    __NK_ASSERT_DEBUG(aPageInfo->IsWritable()==false);

    // Note, memory may have been modified by the CodeModifier class.

    // just mark page as clean as we don't try and preserve code modifications...
    ThePager.SetClean(*aPageInfo);
    return KErrNone;
    }

TInt ReadFunc(TAny* aArg1, TAny* aArg2, TLinAddr aBuffer, TInt aBlockNumber, TInt aBlockCount)
    {
    START_PAGING_BENCHMARK;
    TInt drive = (TInt)aArg1;
    TThreadMessage* msg = (TThreadMessage*)aArg2;
    DPagingDevice* device = CodePagingDevice(drive);
    TInt r = device->Read(msg, aBuffer, aBlockNumber, aBlockCount, drive);
    __NK_ASSERT_DEBUG(r!=KErrNoMemory); // not allowed to allocate memory, therefore can't fail with KErrNoMemory
    END_PAGING_BENCHMARK(EPagingBmReadMedia);
    return r;
    }

TInt DCodePagedMemoryManager::ReadPages(DMemoryObject* aMemory, TUint aIndex, TUint aCount, TPhysAddr* aPages, DPageReadRequest* aRequest)
    {
    TRACE2(("DCodePagedMemoryManager::ReadPage(0x%08x,0x%08x,0x%08x,?,?)",aMemory,aIndex,aCount));

    __NK_ASSERT_DEBUG(aRequest->CheckUse(aMemory,aIndex,aCount));

    START_PAGING_BENCHMARK;

    DPagedCodeInfo* pagedCodeInfo = (DPagedCodeInfo*)aMemory->iManagerData;
    if(!pagedCodeInfo)
        return KErrDied; // the memory object is being destroyed
    pagedCodeInfo->Open();

    TPagedCodeInfo& info = pagedCodeInfo->Info();
    DPagingDevice& device = *iDevice[info.iCodeLocalDrive];

    TLinAddr linAddr = aRequest->MapPages(aIndex,aCount,aPages);
    TInt r = KErrNone;

    if(!info.iCodeSize)
        {
        // no blockmap yet, use blank pages...
        memset((TAny*)linAddr, 0x03, aCount*KPageSize);
        CacheMaintenance::CodeChanged(linAddr, aCount*KPageSize);
        }
    else
        {
        for(; aCount; ++aIndex, --aCount, linAddr+=KPageSize)
            {
            // work out which bit of the file to read
            TInt codeOffset = aIndex<<KPageShift;
            TInt dataOffset;
            TInt dataSize;
            TInt decompressedSize = Min(KPageSize, info.iCodeSize-codeOffset);
            if(info.iCompressionType)
                {
                dataOffset = info.iCodePageOffsets[aIndex];
                dataSize = info.iCodePageOffsets[aIndex+1] - dataOffset;
                __KTRACE_OPT(KPAGING,Kern::Printf(" compressed, file offset == %x, size == %d", dataOffset, dataSize));
                }
            else
                {
                dataOffset = codeOffset + info.iCodeStartInFile;
                dataSize = Min(KPageSize, info.iBlockMap.DataLength()-dataOffset);
                __NK_ASSERT_DEBUG(dataSize==decompressedSize);
                __KTRACE_OPT(KPAGING,Kern::Printf(" uncompressed, file offset == %x, size == %d", dataOffset, dataSize));
                }
            TInt bufferStart = info.iBlockMap.Read(aRequest->iBuffer, dataOffset, dataSize,
                                                   device.iReadUnitShift, ReadFunc,
                                                   (TAny*)info.iCodeLocalDrive, (TAny*)&aRequest->iMessage);
            if(bufferStart<0)
                {
                r = bufferStart; // return error
                __NK_ASSERT_DEBUG(0);
                break;
                }
            TLinAddr data = aRequest->iBuffer + bufferStart;
            r = Decompress(info.iCompressionType, linAddr, decompressedSize, data, dataSize);
            if(r!=decompressedSize)
                {
                __KTRACE_OPT(KPANIC, Kern::Printf("DCodePagedMemoryManager::ReadPage: error decompressing page at %08x + %x: %d", dataOffset, dataSize, r));
                __NK_ASSERT_DEBUG(0);
                r = r<0 ? r : KErrCorrupt; // keep a real error code, otherwise report corruption
                break;
                }
            r = KErrNone;

            // fill any remainder of the page beyond the end of the code...
            if(decompressedSize<KPageSize)
                memset((TAny*)(linAddr+decompressedSize), 0x03, KPageSize-decompressedSize);
            if(info.iLoaded)
                info.ApplyFixups(linAddr, aIndex);
            }
        }

    aRequest->UnmapPages(true);

    pagedCodeInfo->AsyncClose();

    END_PAGING_BENCHMARK(EPagingBmReadCodePage);
    return r;
    }
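
/*
For each page, ReadPages() works out where that page's bytes live in the executable
file: a compressed binary carries a per-page offset table (iCodePageOffsets), so page
i occupies [offsets[i], offsets[i+1]) in the file, while an uncompressed binary is a
flat image starting at iCodeStartInFile, so page i sits at a fixed offset and its
on-media size equals its in-memory size. The standalone sketch below shows only that
arithmetic; PagedCodeLayout, PageExtent and ExtentOfPage are simplified, made-up
stand-ins for TPagedCodeInfo (documentation only, not compiled here):

    #include <algorithm>
    #include <cstdint>
    #include <vector>

    const std::int32_t kPageSize = 4096;

    struct PagedCodeLayout
        {
        bool iCompressed;
        std::int32_t iCodeSize;                      // expanded size of the code section
        std::int32_t iCodeStartInFile;               // file offset of the code (uncompressed case)
        std::vector<std::int32_t> iCodePageOffsets;  // per-page file offsets (compressed case), pages+1 entries
        };

    struct PageExtent { std::int32_t iOffset; std::int32_t iSize; std::int32_t iExpandedSize; };

    PageExtent ExtentOfPage(const PagedCodeLayout& aInfo, std::int32_t aIndex)
        {
        const std::int32_t codeOffset = aIndex * kPageSize;
        const std::int32_t expanded = std::min(kPageSize, aInfo.iCodeSize - codeOffset);
        if(aInfo.iCompressed)
            {
            const std::int32_t offset = aInfo.iCodePageOffsets[aIndex];
            return PageExtent{ offset, aInfo.iCodePageOffsets[aIndex + 1] - offset, expanded };
            }
        // uncompressed: the data read from the file is exactly the page's expanded size
        return PageExtent{ codeOffset + aInfo.iCodeStartInFile, expanded, expanded };
        }
*/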

TBool DCodePagedMemoryManager::IsAllocated(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
    {
    // all pages allocated if memory not destroyed (iManagerData!=0)...
    return aMemory->iManagerData!=0;
    }

TInt MM::PagedCodeNew(DMemoryObject*& aMemory, TUint aPageCount, TPagedCodeInfo*& aInfo)
    {
    TRACE(("MM::PagedCodeNew(?,0x%08x,0x%08x)",aPageCount,aInfo));
    TMemoryCreateFlags createFlags = (TMemoryCreateFlags)(EMemoryCreateNoWipe | EMemoryCreateAllowExecution);
    TInt r = TheCodePagedMemoryManager->New(aMemory,aPageCount,EMemoryAttributeStandard,createFlags);
    if(r==KErrNone)
        aInfo = &((DPagedCodeInfo*)aMemory->iManagerData)->Info();
    TRACE(("MM::PagedCodeNew returns %d, aMemory=0x%08x",r,aMemory));
    return r;
    }
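
/*
How a caller is expected to drive the two MM entry points in this file, pieced
together from the code itself (the surrounding loader steps are assumptions and the
local names are made up; documentation only, not compiled here):

    DMemoryObject* memory;
    TPagedCodeInfo* info;
    TInt r = MM::PagedCodeNew(memory, codeSegPageCount, info);
    if(r==KErrNone)
        {
        // (assumed) populate *info with the Loader-supplied data: local drive,
        // block map, compression type, code start offset, fixup information...
        // (assumed) the Loader then loads and relocates whatever pages it needs
        // through its own writable mapping.
        MM::PagedCodeLoaded(memory, loadAddress);
        // PagedCodeLoaded() (below) fixes up the pages already present, marks them
        // clean and read-only, and sets info->iLoaded so that ReadPages() applies
        // fixups itself when later page faults occur.
        }
*/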

void MM::PagedCodeLoaded(DMemoryObject* aMemory, TLinAddr aLoadAddress)
    {
    TRACE(("MM::PagedCodeLoaded(0x%08x,0x%08x)",aMemory,aLoadAddress));

    TPagedCodeInfo& info = ((DPagedCodeInfo*)aMemory->iManagerData)->Info();

    // we need to apply fixups for all memory already paged in.
    // Note, if this memory is subsequently discarded it should not be paged-in again
    // until after this function has completed, because the Loader won't touch the memory
    // and it has not yet been mapped into any other process.

    // make iterator for memory...
    RPageArray::TIter pageIter;
    aMemory->iPages.FindStart(0,aMemory->iSizeInPages,pageIter);

    for(;;)
        {
        // find some pages...
        RPageArray::TIter pageList;
        TUint n = pageIter.Find(pageList);
        if(!n)
            break;

        // fix up each page found...
        UNLOCK_USER_MEMORY();
        do
            {
            TUint i = pageList.Index();
            TLinAddr a = aLoadAddress+i*KPageSize;
            info.ApplyFixups(a,i);
            CacheMaintenance::CodeChanged(a, KPageSize);

            // now we've finished updating the page, mark it as read only and
            // clean as we don't need to save changes if it is stolen.
            TPhysAddr* pages;
            if(pageList.Pages(pages,1)==1)
                if(RPageArray::IsPresent(*pages))
                    {// The loader page still has a writable mapping but it won't
                    // touch the page again so this is safe. No use restricting the
                    // page to be read only as if the loader did write to it again
                    // it would just be rejuvenated as writable and made dirty.
                    SPageInfo& pageInfo = *SPageInfo::FromPhysAddr(*pages);
                    pageInfo.SetReadOnly();
                    ThePager.SetClean(pageInfo);
                    }
            pageList.Skip(1);
            }
        while(pageList.Count());
        LOCK_USER_MEMORY();

        pageIter.FindRelease(n);
        }

    aMemory->iPages.FindEnd(0,aMemory->iSizeInPages);
    info.iLoaded = true; // allow ReadPages to start applying fixups when handling page faults
    }
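
/*
TPagedCodeInfo::ApplyFixups() (implemented elsewhere) rewrites the words inside a
freshly paged-in page so the code is valid at the address it actually loaded at.
The standalone sketch below shows the general shape of such a pass: add the
load-address delta to every word recorded for that page. The table layout and names
here are invented for illustration and are not the real Symbian relocation format
(documentation only, not compiled here):

    #include <cstdint>
    #include <cstring>
    #include <vector>

    // Hypothetical fixup list for one page: offsets (within the page) of 32-bit
    // words that were linked against aLinkBase.
    void ApplyPageFixups(std::uint8_t* aPage,
                         const std::vector<std::uint32_t>& aOffsetsInPage,
                         std::uint32_t aLinkBase,
                         std::uint32_t aLoadBase)
        {
        const std::uint32_t delta = aLoadBase - aLinkBase;
        for(std::uint32_t offset : aOffsetsInPage)
            {
            std::uint32_t value;
            std::memcpy(&value, aPage + offset, sizeof value);  // unaligned-safe read
            value += delta;
            std::memcpy(aPage + offset, &value, sizeof value);  // write back the adjusted address
            }
        }
*/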