// First public contribution.
1 // Copyright (c) 1995-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
19 #include "mmu/mcodepaging.h"
21 #include "cache_maintenance.h"
24 DCodeSeg* M::NewCodeSeg(TCodeSegCreateInfo&)
26 __KTRACE_OPT(KDLL,Kern::Printf("M::NewCodeSeg"));
27 return new DMemModelCodeSeg;
32 // DMemModelCodeSegMemory
35 DEpocCodeSegMemory* DEpocCodeSegMemory::New(DEpocCodeSeg* aCodeSeg)
37 return new DMemModelCodeSegMemory(aCodeSeg);
41 DMemModelCodeSegMemory::DMemModelCodeSegMemory(DEpocCodeSeg* aCodeSeg)
42 : DEpocCodeSegMemory(aCodeSeg)
47 TInt DMemModelCodeSegMemory::Create(TCodeSegCreateInfo& aInfo, DMemModelProcess* aProcess)
54 if(!aInfo.iUseCodePaging)
57 codePageCount = MM::RoundToPageCount(iRamInfo.iCodeSize+iRamInfo.iDataSize);
63 codePageCount = MM::RoundToPageCount(iRamInfo.iCodeSize);
64 dataPageCount = MM::RoundToPageCount(iRamInfo.iDataSize);
66 iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize);
67 if(!iDataSectionMemory)
71 iCodeSeg->iSize = codePageCount<<KPageShift;
73 // allocate virtual address for code to run at...
74 const TUint codeSize = codePageCount<<KPageShift;
76 {// Get the os asid without opening a reference on it as aProcess isn't fully
77 // created yet so won't free its os asid.
78 r = MM::VirtualAlloc(aProcess->OsAsid(),iRamInfo.iCodeRunAddr,codeSize,isDemandPaged);
81 aProcess->iCodeVirtualAllocSize = codeSize;
82 aProcess->iCodeVirtualAllocAddress = iRamInfo.iCodeRunAddr;
83 iCodeSeg->iAttr |= ECodeSegAttAddrNotUnique;
87 r = MM::VirtualAllocCommon(iRamInfo.iCodeRunAddr,codeSize,isDemandPaged);
90 iVirtualAllocCommonSize = codeSize;
93 // create memory object for codeseg...
96 // create memory object...
97 r = MM::PagedCodeNew(iCodeMemoryObject, codePageCount, iPagedCodeInfo);
101 // get file blockmap for codeseg contents...
102 r = iPagedCodeInfo->ReadBlockMap(aInfo);
108 // create memory object...
109 TMemoryCreateFlags flags = (TMemoryCreateFlags)(EMemoryCreateNoWipe | EMemoryCreateAllowExecution);
110 r = MM::MemoryNew(iCodeMemoryObject, EMemoryObjectMovable, codePageCount, flags);
115 r = MM::MemoryAlloc(iCodeMemoryObject,0,codePageCount);
120 // create a mapping of the memory for the loader...
121 // No need to open reference on os asid it is the current thread/process's.
122 DMemModelProcess* pP = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
123 r = MM::MappingNew(iCodeLoadMapping,iCodeMemoryObject,EUserReadWrite,pP->OsAsid());
127 iRamInfo.iCodeLoadAddr = MM::MappingBase(iCodeLoadMapping);
129 // work out where the loader is to put the loaded data section...
130 TInt loadSize = iRamInfo.iCodeSize; // size of memory filled by loader
131 if(iRamInfo.iDataSize)
135 // data loaded immediately after code...
136 iRamInfo.iDataLoadAddr = iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize;
137 loadSize += iRamInfo.iDataSize;
141 // create memory object for data...
142 DMemoryObject* dataMemory;
143 r = MM::MemoryNew(dataMemory, EMemoryObjectMovable, dataPageCount, EMemoryCreateNoWipe);
148 r = MM::MemoryAlloc(dataMemory,0,dataPageCount);
151 // create a mapping of the memory for the loader...
152 // No need to open reference on os asid it is the current thread/process's.
153 r = MM::MappingNew(iDataLoadMapping,dataMemory,EUserReadWrite,pP->OsAsid());
158 MM::MemoryDestroy(dataMemory);
162 iRamInfo.iDataLoadAddr = MM::MappingBase(iDataLoadMapping);
168 // wipe memory that the loader wont fill...
169 UNLOCK_USER_MEMORY();
170 memset((TAny*)(iRamInfo.iCodeLoadAddr+loadSize), 0x03, codeSize-loadSize);
181 TInt DMemModelCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
185 // get information needed to fixup code for it's run address...
186 TInt r = iPagedCodeInfo->ReadFixupTables(aInfo);
189 MM::PagedCodeLoaded(iCodeMemoryObject, iRamInfo.iCodeLoadAddr);
193 // make code visible to instruction cache...
194 UNLOCK_USER_MEMORY();
195 CacheMaintenance::CodeChanged(iRamInfo.iCodeLoadAddr, iRamInfo.iCodeSize);
199 // adjust iDataLoadAddr to point to address contents for initial data section
200 // in running process...
201 if(iRamInfo.iDataLoadAddr)
203 TAny* dataSection = iDataSectionMemory;
206 // contents for initial data section to be stored in iDataSectionMemory...
207 UNLOCK_USER_MEMORY();
208 memcpy(dataSection,(TAny*)iRamInfo.iDataLoadAddr,iRamInfo.iDataSize);
210 iRamInfo.iDataLoadAddr = (TLinAddr)dataSection;
214 // contents for initial data section stored after code...
215 __NK_ASSERT_DEBUG(iRamInfo.iDataLoadAddr==iRamInfo.iCodeLoadAddr+iRamInfo.iCodeSize); // check data loaded at end of code
216 iRamInfo.iDataLoadAddr = iRamInfo.iCodeRunAddr+iRamInfo.iCodeSize;
220 // copy export directory (this will now have fixups applied)...
221 TInt exportDirSize = iRamInfo.iExportDirCount * sizeof(TLinAddr);
222 if(exportDirSize > 0 || (exportDirSize==0 && (iCodeSeg->iAttr&ECodeSegAttNmdExpData)) )
224 exportDirSize += sizeof(TLinAddr);
225 TLinAddr* expDir = (TLinAddr*)Kern::Alloc(exportDirSize);
228 iCopyOfExportDir = expDir;
229 TLinAddr expDirLoad = iRamInfo.iExportDir-iRamInfo.iCodeRunAddr+iRamInfo.iCodeLoadAddr;
230 UNLOCK_USER_MEMORY();
231 memcpy(expDir,(TAny*)(expDirLoad-sizeof(TLinAddr)),exportDirSize);
235 // unmap code from loading process...
236 DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
237 __ASSERT_ALWAYS(iCreator==pP, MM::Panic(MM::ECodeSegLoadedNotCreator));
238 MM::MappingDestroy(iCodeLoadMapping);
239 MM::MappingAndMemoryDestroy(iDataLoadMapping);
242 // Mark the code memory object read only to prevent malicious code modifying it.
243 TInt r = MM::MemorySetReadOnly(iCodeMemoryObject);
244 __ASSERT_ALWAYS(r == KErrNone, MM::Panic(MM::ECodeSegSetReadOnlyFailure));
250 void DMemModelCodeSegMemory::Destroy()
252 MM::MappingDestroy(iCodeLoadMapping);
253 MM::MappingAndMemoryDestroy(iDataLoadMapping);
257 DMemModelCodeSegMemory::~DMemModelCodeSegMemory()
259 __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSegMemory::~DMemModelCodeSegMemory %x", this));
260 __NK_ASSERT_DEBUG(iAccessCount==0);
262 MM::MappingDestroy(iCodeLoadMapping);
263 MM::MappingAndMemoryDestroy(iDataLoadMapping);
264 MM::MemoryDestroy(iCodeMemoryObject);
266 if(iVirtualAllocCommonSize)
267 MM::VirtualFreeCommon(iRamInfo.iCodeRunAddr, iVirtualAllocCommonSize);
269 Kern::Free(iCopyOfExportDir);
270 Kern::Free(iDataSectionMemory);
278 DMemModelCodeSeg::DMemModelCodeSeg()
283 DMemModelCodeSeg::~DMemModelCodeSeg()
285 __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::Destruct %C", this));
288 MM::MappingDestroy(iCodeLoadMapping);
289 MM::MappingDestroy(iCodeGlobalMapping);
290 MM::MemoryDestroy(iCodeMemoryObject);
296 MM::VirtualFreeCommon(iDataAllocBase,iDataAllocSize);
300 Kern::Free(iKernelData);
302 DEpocCodeSeg::Destruct();
306 TInt DMemModelCodeSeg::DoCreateRam(TCodeSegCreateInfo& aInfo, DProcess* aProcess)
308 __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateRam %C", this));
310 SRamCodeInfo& ri = RamInfo();
311 iSize = MM::RoundToPageSize(ri.iCodeSize+ri.iDataSize);
315 TBool kernel = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
316 // TBool user_global = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
317 TBool user_local = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
319 TUint total_data_size = ri.iDataSize+ri.iBssSize;
323 // setup paging attribute for code...
324 if(aInfo.iUseCodePaging)
325 iAttr |= ECodeSegAttCodePaged;
327 if(total_data_size && !IsExe())
329 // setup paging attribute for data section...
330 if(aInfo.iUseCodePaging)
331 if(K::MemModelAttributes & EMemModelAttrDataPaging)
332 iAttr |= ECodeSegAttDataPaged;
334 // allocate virtual address for data section...
335 TInt r = MM::VirtualAllocCommon(iDataAllocBase,total_data_size,iAttr&ECodeSegAttDataPaged);
338 iDataAllocSize = total_data_size;
339 ri.iDataRunAddr = iDataAllocBase;
342 // create DCodeSegMemory for RAM loaded user local code...
343 TInt r = Memory()->Create(aInfo,(DMemModelProcess*)aProcess);
345 #ifdef BTRACE_FLEXIBLE_MEM_MODEL
348 BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,Memory()->iCodeMemoryObject,this);
355 // kernel or user-global code...
357 // create memory object for codeseg...
358 TMemoryCreateFlags flags = EMemoryCreateAllowExecution;
361 flags = (TMemoryCreateFlags)(flags|EMemoryCreateNoWipe);
363 TInt r = MM::MemoryNew(iCodeMemoryObject, EMemoryObjectMovable, MM::BytesToPages(iSize), flags);
368 r = MM::MemoryAlloc(iCodeMemoryObject,0,MM::BytesToPages(iSize));
372 // create a mapping of the memory for the loader...
373 // No need to open reference on os asid it is the current thread/process's.
374 DMemModelProcess* pP = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
375 r = MM::MappingNew(iCodeLoadMapping,iCodeMemoryObject,EUserReadWrite,pP->OsAsid());
378 ri.iCodeLoadAddr = MM::MappingBase(iCodeLoadMapping);
380 // create a global mapping of the memory for the codeseg to run at...
381 r = MM::MappingNew(iCodeGlobalMapping,iCodeMemoryObject,kernel?ESupervisorExecute:EUserExecute,KKernelOsAsid);
384 ri.iCodeRunAddr = MM::MappingBase(iCodeGlobalMapping);
388 // setup data section memory...
390 ri.iDataLoadAddr = ri.iCodeLoadAddr+ri.iCodeSize;
393 iKernelData = Kern::Alloc(total_data_size);
396 ri.iDataRunAddr = (TLinAddr)iKernelData;
401 // we don't allow static data in global code...
402 ri.iDataLoadAddr = 0;
406 #ifdef BTRACE_FLEXIBLE_MEM_MODEL
407 BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,iCodeMemoryObject,this);
415 TInt DMemModelCodeSeg::DoCreateXIP(DProcess* aProcess)
417 // __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::DoCreateXIP %C proc %O", this, aProcess));
422 TInt DMemModelCodeSeg::Loaded(TCodeSegCreateInfo& aInfo)
425 return DEpocCodeSeg::Loaded(aInfo);
427 TBool kernel = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel );
428 TBool user_global = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttGlobal );
429 TBool user_local = ( (iAttr&(ECodeSegAttKernel|ECodeSegAttGlobal)) == 0 );
432 TInt r = Memory()->Loaded(aInfo);
436 else if((kernel && iExeCodeSeg!=this) || user_global)
438 // user-global or kernel code...
439 SRamCodeInfo& ri = RamInfo();
440 UNLOCK_USER_MEMORY();
441 CacheMaintenance::CodeChanged(ri.iCodeLoadAddr, ri.iCodeSize);
443 MM::MappingDestroy(iCodeLoadMapping);
444 // adjust iDataLoadAddr to point to address contents for initial data section
445 // in running process...
447 ri.iDataLoadAddr = ri.iCodeRunAddr+ri.iCodeSize;
449 // Mark the code memory object read only to prevent malicious code modifying it.
450 TInt r = MM::MemorySetReadOnly(iCodeMemoryObject);
451 __ASSERT_ALWAYS(r == KErrNone, MM::Panic(MM::ECodeSegSetReadOnlyFailure));
453 return DEpocCodeSeg::Loaded(aInfo);
457 void DMemModelCodeSeg::ReadExportDir(TUint32* aDest)
459 __KTRACE_OPT(KDLL,Kern::Printf("DMemModelCodeSeg::ReadExportDir %C %08x",this, aDest));
463 // This is not XIP code so the loader can't access the export directory.
464 if (Memory()->iCopyOfExportDir)
465 {// This must be local user side code.
466 __NK_ASSERT_DEBUG((iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal)) == 0);
467 // Copy the kernel's copy of the export directory for this code seg to the loader's buffer.
468 SRamCodeInfo& ri = RamInfo();
469 TInt size = (ri.iExportDirCount + 1) * sizeof(TLinAddr);
470 kumemput(aDest, Memory()->iCopyOfExportDir, size);
473 {// This must be kernel side code.
474 __NK_ASSERT_DEBUG((iAttr & (ECodeSegAttKernel|ECodeSegAttGlobal)) == ECodeSegAttKernel);
475 // Copy the export directory for this code seg to the loader's buffer.
476 SRamCodeInfo& ri = RamInfo();
477 TInt size = (ri.iExportDirCount + 1) * sizeof(TLinAddr);
478 TAny* expDirLoad = (TAny*)(ri.iExportDir - sizeof(TLinAddr));
479 kumemput(aDest, expDirLoad, size);
485 TBool DMemModelCodeSeg::OpenCheck(DProcess* aProcess)
487 return FindCheck(aProcess);
491 TBool DMemModelCodeSeg::FindCheck(DProcess* aProcess)
493 __KTRACE_OPT(KDLL,Kern::Printf("CSEG:%08x Compat? proc=%O",this,aProcess));
496 DMemModelProcess& p=*(DMemModelProcess*)aProcess;
497 DCodeSeg* pPSeg=p.CodeSeg();
498 if (iAttachProcess && iAttachProcess!=aProcess)
500 if (iExeCodeSeg && iExeCodeSeg!=pPSeg)
507 void DMemModelCodeSeg::BTracePrime(TInt aCategory)
509 DCodeSeg::BTracePrime(aCategory);
511 #ifdef BTRACE_FLEXIBLE_MEM_MODEL
512 if (aCategory == BTrace::EFlexibleMemModel || aCategory == -1)
514 // code seg mutex is held here, so memory objects cannot be destroyed
515 DMemModelCodeSegMemory* codeSegMemory = Memory();
518 if (codeSegMemory->iCodeMemoryObject)
520 BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,Memory()->iCodeMemoryObject,this);
525 if (iCodeMemoryObject)
527 BTrace8(BTrace::EFlexibleMemModel,BTrace::EMemoryObjectIsCodeSeg,iCodeMemoryObject,this);
539 TPagedCodeInfo::~TPagedCodeInfo()
541 Kern::Free(iCodeRelocTable);
542 Kern::Free(iCodePageOffsets);
546 TInt TPagedCodeInfo::ReadBlockMap(const TCodeSegCreateInfo& aInfo)
548 if(aInfo.iCodeBlockMapEntriesSize <= 0)
549 return KErrArgument; // no block map provided
551 // get compression data...
552 iCompressionType = aInfo.iCompressionType;
553 switch(iCompressionType)
555 case KFormatNotCompressed:
556 __ASSERT_COMPILE(KFormatNotCompressed==0); // Decompress() assumes this
559 case KUidCompressionBytePair:
561 if(!aInfo.iCodePageOffsets)
564 TInt pageCount = MM::RoundToPageCount(aInfo.iCodeSize);
566 TInt size = sizeof(TInt32) * (pageCount + 1);
567 iCodePageOffsets = (TInt32*)Kern::Alloc(size);
568 if(!iCodePageOffsets)
570 kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size);
572 #ifdef __DUMP_BLOCKMAP_INFO
573 Kern::Printf("CodePageOffsets:");
574 for (TInt i = 0 ; i < pageCount + 1 ; ++i)
575 Kern::Printf(" %08x", iCodePageOffsets[i]);
579 for(TInt j=0; j<pageCount+1; ++j)
581 if(iCodePageOffsets[j] < last ||
582 iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile))
584 __NK_ASSERT_DEBUG(0);
587 last = iCodePageOffsets[j];
593 return KErrNotSupported;
596 // Copy block map data itself...
598 #ifdef __DUMP_BLOCKMAP_INFO
599 Kern::Printf("Original block map");
600 Kern::Printf(" block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity);
601 Kern::Printf(" block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset);
602 Kern::Printf(" start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress);
603 Kern::Printf(" local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber);
604 Kern::Printf(" entry size: %d", aInfo.iCodeBlockMapEntriesSize);
607 // Find relevant paging device
608 iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber;
609 if(TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives)
611 __KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number"));
615 DPagingDevice* device = CodePagingDevice(iCodeLocalDrive);
618 __KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive"));
619 return KErrNotSupported;
622 // Set code start offset
623 iCodeStartInFile = aInfo.iCodeStartInFile;
624 if(iCodeStartInFile < 0)
626 __KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset"));
630 // Allocate buffer for block map and copy from user-side
631 TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize);
634 kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize);
636 #ifdef __DUMP_BLOCKMAP_INFO
637 Kern::Printf(" entries:");
638 for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k)
639 Kern::Printf(" %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock);
642 // Initialise block map
643 TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon,
645 aInfo.iCodeBlockMapEntriesSize,
646 device->iReadUnitShift,
647 iCodeStartInFile + aInfo.iCodeLengthInFile);
654 #if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG)
658 iCodeSize = aInfo.iCodeSize;
664 Read code relocation table and import fixup table from user side.
666 TInt TPagedCodeInfo::ReadFixupTables(const TCodeSegCreateInfo& aInfo)
668 iCodeRelocTableSize = aInfo.iCodeRelocTableSize;
669 iImportFixupTableSize = aInfo.iImportFixupTableSize;
670 iCodeDelta = aInfo.iCodeDelta;
671 iDataDelta = aInfo.iDataDelta;
673 // round sizes up to four-byte boundaries...
674 TUint relocSize = (iCodeRelocTableSize + 3) & ~3;
675 TUint fixupSize = (iImportFixupTableSize + 3) & ~3;
677 // copy relocs and fixups...
678 iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize);
679 if (!iCodeRelocTable)
681 iImportFixupTable = iCodeRelocTable + relocSize;
682 kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize);
683 kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize);
689 void TPagedCodeInfo::ApplyFixups(TLinAddr aBuffer, TUint iIndex)
691 // START_PAGING_BENCHMARK;
694 if(iCodeRelocTableSize)
696 TUint8* codeRelocTable = iCodeRelocTable;
697 TUint startOffset = ((TUint32*)codeRelocTable)[iIndex];
698 TUint endOffset = ((TUint32*)codeRelocTable)[iIndex+1];
700 __KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset));
701 __ASSERT_ALWAYS(startOffset<=endOffset && endOffset<=iCodeRelocTableSize, K::Fault(K::ECodeSegBadFixupTables));
703 const TUint32 codeDelta = iCodeDelta;
704 const TUint32 dataDelta = iDataDelta;
706 const TUint16* ptr = (const TUint16*)(codeRelocTable + startOffset);
707 const TUint16* end = (const TUint16*)(codeRelocTable + endOffset);
710 TUint16 entry = *ptr++;
711 TUint32* addr = (TUint32*)(aBuffer+(entry&0x0fff));
712 TUint32 word = *addr;
714 TInt type = entry&0xf000;
715 __NK_ASSERT_DEBUG(type==KTextRelocType || type==KDataRelocType);
717 if(entry<KDataRelocType)
726 if(iImportFixupTableSize)
728 TUint8* importFixupTable = iImportFixupTable;
729 TUint startOffset = ((TUint32*)importFixupTable)[iIndex];
730 TUint endOffset = ((TUint32*)importFixupTable)[iIndex+1];
732 __KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset));
733 __ASSERT_ALWAYS(startOffset<=endOffset && endOffset<=iImportFixupTableSize, K::Fault(K::ECodeSegBadFixupTables));
735 const TUint16* ptr = (const TUint16*)(importFixupTable + startOffset);
736 const TUint16* end = (const TUint16*)(importFixupTable + endOffset);
740 TUint16 offset = *ptr++;
741 TUint32 wordLow = *ptr++;
742 TUint32 wordHigh = *ptr++;
743 TUint32 word = (wordHigh << 16) | wordLow;
744 // __KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word));
745 *(TUint32*)(aBuffer+offset) = word;
749 // END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmFixupCodePage);