os/kernelhwsrv/kernel/eka/memmodel/epoc/moving/mdefrag.cpp
author sl
Tue, 10 Jun 2014 14:32:02 +0200
changeset 1 260cb5ec6c19
permissions -rw-r--r--
Update contrib.
sl@0
     1
// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\moving\mdefrag.cpp
// 
//
sl@0
    18
#include <memmodel.h>
sl@0
    19
#include <defrag.h>
sl@0
    20
#include "mmboot.h"
sl@0
    21
#include <mmubase.inl>
sl@0
    22
#include <ramalloc.h>
sl@0
    23
#include "cache_maintenance.h"
sl@0
    24
sl@0
    25
/*
 * Move a kernel page from physical address aOld to a freshly allocated RAM page,
 * updating the page table entry for its mapping in aChunk and transferring the
 * SPageInfo state to the new page.
 *
 * @param aChunk        Kernel chunk containing the mapping of the page to move.
 * @param aOffset       Byte offset of the page within the chunk.
 * @param aOld          Physical address of the page to move.
 * @param aNew          On success, receives the physical address of the replacement page.
 * @param aBlockZoneId  RAM zone id passed through to the RAM allocator.
 * @param aBlockRest    Flag passed through to the RAM allocator.
 * @return KErrNone on success, KErrNoMemory if no replacement page could be allocated.
 *
 * Enter with system locked, exit with system unlocked (!!)
 * Must hold RAM alloc mutex.
 */
TInt Mmu::MoveKernelPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::MoveKernelPage() off=%08x old=%08x",aOffset,aOld));
	Mmu& m=Mmu::Get();
	
	// Release the system lock - the kernel chunks can't ever be freed
	// and the ramalloc mutex protects us from decommit.
	NKern::UnlockSystem();

	DMemModelChunk* chunk = (DMemModelChunk*)aChunk;

	// Allocate new page, map it uncached but buffered, and find old mapping
	TPhysAddr newPage;
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
		return KErrNoMemory;	// system lock already released, so a bare return is safe here
	TLinAddr vOld = (TLinAddr)chunk->iBase + aOffset;
	TLinAddr vNew = m.MapTemp(newPage, EFalse);
	
	// Find page table for mapping
	TInt ptid=m.GetPageTableId(vOld);
	if(ptid<0)
		Panic(EDefragKernelChunkNoPageTable);	// a mapped kernel page must have a page table

	// With the system lock, ask Mmu to remap the page.
	// This will copy and remap it with interrupts disabled, while
	// avoiding touching any cache lines on the heap.
	NKern::LockSystem();
	m.RemapKernelPage(ptid, vOld, vNew, newPage, chunk->iPtePermissions);

	// update new pageinfo, clear old, then done with system lock.
	SPageInfo* oldpi = SPageInfo::FromPhysAddr(aOld);
	SPageInfo* pi = SPageInfo::FromPhysAddr(newPage);
	pi->Set(oldpi->Type(),oldpi->Owner(),oldpi->Offset());
	oldpi->SetUnused();
	NKern::UnlockSystem();

	// Remove temporary new page mapping
	m.UnmapTemp();

	// Remove old page from external cache - RemapKernelPage has already removed it from internal cache.
	CacheMaintenance::PageToReusePhysicalCache(aOld);

	// Free old page
#ifdef _DEBUG
	// NOTE(review): low bit set on the address presumably marks it as physical
	// rather than linear for ClearPages - confirm against ClearPages' contract.
	m.ClearPages(1, (TPhysAddr*)(aOld|1));
#endif
	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);

	aNew = newPage;
	return KErrNone;
	}
sl@0
    81
sl@0
    82
/* 
 * These pages don't exist on moving memory model, no need to move them
 * but this function must exist to make the kernel link.
 *
 * Entered with the system lock held (matching the contract of the other
 * Move*Page functions); releases it before returning KErrNotSupported.
 */
TInt Mmu::MoveCodeSegMemoryPage(DMemModelCodeSegMemory* /*aCodeSegMemory*/, TUint32 /*aOffset*/, TPhysAddr /*aOld*/,
		TPhysAddr& /*aNew*/, TUint /*aBlockZoneId*/, TBool /*aBlockRest*/)
	{
	NKern::UnlockSystem();
	return KErrNotSupported;
	}
sl@0
    92
sl@0
    93
/*
sl@0
    94
 * Move a code chunk page from aOld to aNew, updating the page table in aChunk.
sl@0
    95
 * Enter with system locked, exit with system unlocked (!!)
sl@0
    96
 * Must hold RAM alloc mutex.
sl@0
    97
 */
sl@0
    98
TInt Mmu::MoveCodeChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
sl@0
    99
	{
sl@0
   100
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::MoveCodeChunkPage() off=%08x old=%08x",aOffset,aOld));
sl@0
   101
	Mmu& m=Mmu::Get();
sl@0
   102
sl@0
   103
	// look up the code seg that corresponds to this page
sl@0
   104
	TLinAddr aLinearAddress = (TLinAddr)(aChunk->Base() + aOffset);
sl@0
   105
	DMemModelCodeSeg* codeseg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(aLinearAddress);
sl@0
   106
sl@0
   107
	// if the code seg is not done loading yet, we can't move it the easy way
sl@0
   108
	// also, if it's being unloaded the codeseg will have gone.
sl@0
   109
	if (!codeseg || !(codeseg->iMark & DCodeSeg::EMarkLoaded))
sl@0
   110
		{
sl@0
   111
		NKern::UnlockSystem();
sl@0
   112
		return KErrInUse;
sl@0
   113
		}
sl@0
   114
sl@0
   115
	// Release system lock as page can't be decommitted while we hold ramalloc mutex
sl@0
   116
	NKern::UnlockSystem();
sl@0
   117
sl@0
   118
	// Allocate new page, map it uncached but buffered, and find old mapping
sl@0
   119
	TPhysAddr newPage;
sl@0
   120
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
sl@0
   121
		return KErrNoMemory;
sl@0
   122
	TLinAddr vOld = aLinearAddress;
sl@0
   123
	TLinAddr vNew = m.MapTemp(newPage, EFalse);
sl@0
   124
	
sl@0
   125
	// Copy the page and remap it
sl@0
   126
	pagecpy((TAny*)vNew, (TAny*)vOld);
sl@0
   127
	NKern::LockSystem();
sl@0
   128
	// Substitute drains the write buffer for us during the remap.
sl@0
   129
	aChunk->Substitute(aOffset, aOld, newPage);
sl@0
   130
	NKern::UnlockSystem();
sl@0
   131
sl@0
   132
	// Remove temporary new page mapping
sl@0
   133
	m.UnmapTemp();
sl@0
   134
sl@0
   135
	// Remove old page from physical cache
sl@0
   136
	CacheMaintenance::PageToReusePhysicalCache(aOld);
sl@0
   137
sl@0
   138
	// Free old page
sl@0
   139
#ifdef _DEBUG
sl@0
   140
	m.ClearPages(1, (TPhysAddr*)(aOld|1));
sl@0
   141
#endif
sl@0
   142
	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);
sl@0
   143
sl@0
   144
	aNew = newPage;
sl@0
   145
	return KErrNone;
sl@0
   146
	}
sl@0
   147
sl@0
   148
/*
sl@0
   149
 * Move a data chunk page from aOld to aNew, updating the page table in aChunk.
sl@0
   150
 * Enter with system locked, exit with system unlocked (!!)
sl@0
   151
 * Must hold RAM alloc mutex.
sl@0
   152
 */
sl@0
   153
TInt Mmu::MoveDataChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
sl@0
   154
	{
sl@0
   155
	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::MoveDataChunkPage() off=%08x old=%08x",aOffset,aOld));
sl@0
   156
	Mmu& m=Mmu::Get();
sl@0
   157
	TInt r;
sl@0
   158
sl@0
   159
	// Release system lock as page can't be decommitted while we hold ramalloc mutex
sl@0
   160
	NKern::UnlockSystem();
sl@0
   161
sl@0
   162
	// Allocate new page, map it uncached but buffered
sl@0
   163
	TPhysAddr newPage;
sl@0
   164
	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
sl@0
   165
		return KErrNoMemory;
sl@0
   166
	TLinAddr vNew = m.MapTemp(newPage, EFalse);
sl@0
   167
sl@0
   168
	// Mark the PTE as inaccessible to avoid the data being overwritten while we copy
sl@0
   169
	// This also takes care of the cache requirements to alias the page elsewhere,
sl@0
   170
	// since it can't be copied from an inaccessible PTE
sl@0
   171
	DisablePageModification((DMemModelChunk*)aChunk, aOffset);
sl@0
   172
	TLinAddr vOldAlias = m.MapSecondTemp(aOld, ETrue);
sl@0
   173
sl@0
   174
	// Copy the page's contents and remap its PTE
sl@0
   175
	pagecpy((TAny*)vNew, (TAny*)vOldAlias);
sl@0
   176
	NKern::LockSystem();
sl@0
   177
	if (iDisabledPte != NULL)
sl@0
   178
		{
sl@0
   179
		// Access wasn't reenabled, so we can continue
sl@0
   180
		aChunk->Substitute(aOffset, aOld, newPage);
sl@0
   181
		iDisabledAddr = 0;
sl@0
   182
		iDisabledPte = NULL;
sl@0
   183
		iDisabledOldVal = 0;
sl@0
   184
		r = KErrNone;
sl@0
   185
		}
sl@0
   186
	else
sl@0
   187
		r = KErrInUse;
sl@0
   188
	NKern::UnlockSystem();
sl@0
   189
sl@0
   190
	// Remove temporary page mappings
sl@0
   191
	CacheMaintenance::PageToReuseVirtualCache(vOldAlias);
sl@0
   192
	m.UnmapTemp();
sl@0
   193
	m.UnmapSecondTemp();
sl@0
   194
sl@0
   195
	if (r == KErrNone)
sl@0
   196
		{
sl@0
   197
		// Remove old page from physical cache - DisablePageModification removed it from L1 already
sl@0
   198
		CacheMaintenance::PageToReusePhysicalCache(aOld);
sl@0
   199
		}
sl@0
   200
sl@0
   201
	if (r == KErrNone)
sl@0
   202
		{
sl@0
   203
		// Free old page
sl@0
   204
#ifdef _DEBUG
sl@0
   205
		m.ClearPages(1, (TPhysAddr*)(aOld|1));
sl@0
   206
#endif
sl@0
   207
		m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);
sl@0
   208
		aNew = newPage;
sl@0
   209
		}
sl@0
   210
	else
sl@0
   211
		{
sl@0
   212
		// Free new page
sl@0
   213
		m.iRamPageAllocator->FreeRamPage(newPage, EPageMovable);
sl@0
   214
		}
sl@0
   215
sl@0
   216
	return r;
sl@0
   217
	}