os/kernelhwsrv/kernel/eka/memmodel/epoc/moving/mdefrag.cpp
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\mdefrag.cpp
//
//

#include <memmodel.h>
#include <defrag.h>
#include "mmboot.h"
#include <mmubase.inl>
#include <ramalloc.h>
#include "cache_maintenance.h"

/*
 * Move a kernel page from aOld to aNew, updating the page table in aChunk.
 * Enter with system locked, exit with system unlocked (!!)
 * Must hold RAM alloc mutex.
 */
TInt Mmu::MoveKernelPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::MoveKernelPage() off=%08x old=%08x",aOffset,aOld));
	Mmu& m=Mmu::Get();

	// Release the system lock - the kernel chunks can't ever be freed
	// and the ramalloc mutex protects us from decommit.
	NKern::UnlockSystem();

	DMemModelChunk* chunk = (DMemModelChunk*)aChunk;

	// Allocate new page, map it uncached but buffered, and find old mapping
	TPhysAddr newPage;
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
		return KErrNoMemory;
	TLinAddr vOld = (TLinAddr)chunk->iBase + aOffset;
	TLinAddr vNew = m.MapTemp(newPage, EFalse);

	// Find page table for mapping
	TInt ptid=m.GetPageTableId(vOld);
	if(ptid<0)
		Panic(EDefragKernelChunkNoPageTable);

	// With the system lock held, ask the Mmu to remap the page.
	// This will copy and remap it with interrupts disabled, while
	// avoiding touching any cache lines on the heap.
	NKern::LockSystem();
	m.RemapKernelPage(ptid, vOld, vNew, newPage, chunk->iPtePermissions);

	// Update the new page's SPageInfo, clear the old one, then we're done with the system lock.
	SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOld);
	SPageInfo* pi = SPageInfo::FromPhysAddr(newPage);
	pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
	oldpi->SetUnused();
	NKern::UnlockSystem();

	// Remove temporary new page mapping
	m.UnmapTemp();

	// Remove old page from external cache - RemapKernelPage has already removed it from internal cache.
	CacheMaintenance::PageToReusePhysicalCache(aOld);

	// Free old page
#ifdef _DEBUG
	m.ClearPages(1, (TPhysAddr*)(aOld|1));
#endif
	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);

	aNew = newPage;
	return KErrNone;
	}

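/*
 * Illustrative sketch only (not part of the original file): one way a caller
 * might drive MoveKernelPage() while honouring the contract above. The wrapper
 * name and its parameters are hypothetical, and MmuBase::Wait()/Signal() are
 * assumed to be the usual helpers that take/release the RAM alloc mutex. Note
 * that MoveKernelPage() is entered with the system lock held but returns with
 * it released.
 *
 *	TInt DoMoveOneKernelPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld,
 *							 TPhysAddr& aNew, TUint aZoneId, TBool aBlockRest)
 *		{
 *		Mmu& m = Mmu::Get();
 *		MmuBase::Wait();			// take the RAM alloc mutex (assumed helper)
 *		NKern::LockSystem();		// enter with the system lock held
 *		TInt r = m.MoveKernelPage(aChunk, aOffset, aOld, aNew, aZoneId, aBlockRest);
 *		// the system lock has already been released by MoveKernelPage()
 *		MmuBase::Signal();			// release the RAM alloc mutex
 *		return r;
 *		}
 */
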
/*
 * These pages don't exist on the moving memory model, so there is nothing to
 * move, but this function must exist for the kernel to link.
 */
TInt Mmu::MoveCodeSegMemoryPage(DMemModelCodeSegMemory* /*aCodeSegMemory*/, TUint32 /*aOffset*/, TPhysAddr /*aOld*/,
		TPhysAddr& /*aNew*/, TUint /*aBlockZoneId*/, TBool /*aBlockRest*/)
	{
	NKern::UnlockSystem();
	return KErrNotSupported;
	}

/*
 * Move a code chunk page from aOld to aNew, updating the page table in aChunk.
 * Enter with system locked, exit with system unlocked (!!)
 * Must hold RAM alloc mutex.
 */
TInt Mmu::MoveCodeChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::MoveCodeChunkPage() off=%08x old=%08x",aOffset,aOld));
	Mmu& m=Mmu::Get();

	// look up the code seg that corresponds to this page
	TLinAddr aLinearAddress = (TLinAddr)(aChunk->Base() + aOffset);
	DMemModelCodeSeg* codeseg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aLinearAddress);

	// If the code segment hasn't finished loading yet we can't move the page the easy way;
	// and if it's being unloaded, the code segment will already have gone.
	if (!codeseg || !(codeseg->iMark & DCodeSeg::EMarkLoaded))
		{
		NKern::UnlockSystem();
		return KErrInUse;
		}

	// Release system lock as page can't be decommitted while we hold ramalloc mutex
	NKern::UnlockSystem();

	// Allocate new page, map it uncached but buffered, and find old mapping
	TPhysAddr newPage;
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
		return KErrNoMemory;
	TLinAddr vOld = aLinearAddress;
	TLinAddr vNew = m.MapTemp(newPage, EFalse);

	// Copy the page and remap it
	pagecpy((TAny*)vNew, (TAny*)vOld);
	NKern::LockSystem();
	// Substitute drains the write buffer for us during the remap.
	aChunk->Substitute(aOffset, aOld, newPage);
	NKern::UnlockSystem();

	// Remove temporary new page mapping
	m.UnmapTemp();

	// Remove old page from physical cache
	CacheMaintenance::PageToReusePhysicalCache(aOld);

	// Free old page
#ifdef _DEBUG
	m.ClearPages(1, (TPhysAddr*)(aOld|1));
#endif
	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);

	aNew = newPage;
	return KErrNone;
	}

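/*
 * Illustrative note (not in the original source): KErrInUse from
 * MoveCodeChunkPage() means the owning code segment was still loading or was
 * already being unloaded. A hypothetical defrag scan would normally skip such
 * a page rather than treat the whole pass as failed; the names chunk, offset,
 * old, newAddr and zoneId below are invented for this loop-body fragment.
 *
 *	NKern::LockSystem();
 *	TInt r = m.MoveCodeChunkPage(chunk, offset, old, newAddr, zoneId, EFalse);
 *	// the system lock is released again on every return path
 *	if (r == KErrInUse)
 *		continue;		// page busy - move on to the next candidate
 *	if (r != KErrNone)
 *		return r;		// e.g. KErrNoMemory
 */
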
/*
 * Move a data chunk page from aOld to aNew, updating the page table in aChunk.
 * Enter with system locked, exit with system unlocked (!!)
 * Must hold RAM alloc mutex.
 */
TInt Mmu::MoveDataChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::MoveDataChunkPage() off=%08x old=%08x",aOffset,aOld));
	Mmu& m=Mmu::Get();
	TInt r;

	// Release system lock as page can't be decommitted while we hold ramalloc mutex
	NKern::UnlockSystem();

	// Allocate new page, map it uncached but buffered
	TPhysAddr newPage;
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
		return KErrNoMemory;
	TLinAddr vNew = m.MapTemp(newPage, EFalse);

	// Mark the PTE as inaccessible to avoid the data being overwritten while we copy
	// This also takes care of the cache requirements to alias the page elsewhere,
	// since it can't be copied from an inaccessible PTE
	DisablePageModification((DMemModelChunk*)aChunk, aOffset);
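	// Editorial note: if access to the page gets re-enabled while we copy
	// (because something faulted on it), iDisabledPte is cleared again; that
	// case is detected below and the move is abandoned with KErrInUse.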
	TLinAddr vOldAlias = m.MapSecondTemp(aOld, ETrue);

	// Copy the page's contents and remap its PTE
	pagecpy((TAny*)vNew, (TAny*)vOldAlias);
	NKern::LockSystem();
	if (iDisabledPte != NULL)
		{
		// Access wasn't re-enabled, so we can continue
		aChunk->Substitute(aOffset, aOld, newPage);
		iDisabledAddr = 0;
		iDisabledPte = NULL;
		iDisabledOldVal = 0;
		r = KErrNone;
		}
	else
		r = KErrInUse;
	NKern::UnlockSystem();

	// Remove temporary page mappings
	CacheMaintenance::PageToReuseVirtualCache(vOldAlias);
	m.UnmapTemp();
	m.UnmapSecondTemp();

	if (r == KErrNone)
		{
		// Remove old page from physical cache - DisablePageModification removed it from L1 already
		CacheMaintenance::PageToReusePhysicalCache(aOld);

		// Free old page
#ifdef _DEBUG
		m.ClearPages(1, (TPhysAddr*)(aOld|1));
#endif
		m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);
		aNew = newPage;
		}
	else
		{
		// Free new page
		m.iRamPageAllocator->FreeRamPage(newPage, EPageMovable);
		}

	return r;
	}
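
/*
 * Illustrative sketch only (not part of the original file): a hypothetical
 * loop moving the committed pages of a data chunk one by one. The names
 * count, physAddrs, chunk and zoneId are invented for the example. What it
 * demonstrates is the locking contract: the system lock must be re-taken
 * before every call, because each Move*Page() function above returns with it
 * released, while the RAM alloc mutex is held across the whole loop.
 *
 *	for (TInt i = 0; i < count; ++i)
 *		{
 *		TPhysAddr newAddr;
 *		NKern::LockSystem();
 *		TInt r = m.MoveDataChunkPage(chunk, i << KPageShift, physAddrs[i], newAddr, zoneId, EFalse);
 *		if (r == KErrInUse)
 *			continue;			// page was touched during the copy - leave it where it is
 *		if (r != KErrNone)
 *			return r;
 *		physAddrs[i] = newAddr;
 *		}
 */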