os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mpdalloc.cpp
author sl@SLION-WIN7.fritz.box
Fri, 15 Jun 2012 03:10:57 +0200 (2012-06-15)
changeset 0 bde4ae8d615e
permissions -rw-r--r--
First public contribution.
sl@0
     1
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
sl@0
     2
// All rights reserved.
sl@0
     3
// This component and the accompanying materials are made available
sl@0
     4
// under the terms of the License "Eclipse Public License v1.0"
sl@0
     5
// which accompanies this distribution, and is available
sl@0
     6
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
sl@0
     7
//
sl@0
     8
// Initial Contributors:
sl@0
     9
// Nokia Corporation - initial contribution.
sl@0
    10
//
sl@0
    11
// Contributors:
sl@0
    12
//
sl@0
    13
// Description:
sl@0
    14
//
sl@0
    15
sl@0
    16
#include "memmodel.h"
sl@0
    17
#include "mm.h"
sl@0
    18
#include "mmu.h"
sl@0
    19
sl@0
    20
#include "mpdalloc.h"
sl@0
    21
#include "mobject.h"
sl@0
    22
#include "cache_maintenance.inl"
sl@0
    23
sl@0
    24
sl@0
    25
// check enough space for page directories...
__ASSERT_COMPILE(KNumOsAsids <= (KPageDirectoryEnd-KPageDirectoryBase)/KPageDirectorySize);


// Singleton allocator managing one page directory per OS ASID.
PageDirectoryAllocator PageDirectories;


// Size of the 'local' (per-process) part of a page directory.
// If a page directory is bigger than one page, the local part is taken to be
// its lower half; otherwise the whole (single) page is local.
const TUint KLocalPdShift = KPageDirectoryShift > KPageShift ? KPageDirectoryShift-1 : KPageShift;
const TUint KLocalPdSize = 1<<KLocalPdShift;
const TUint KLocalPdPages = 1<<(KLocalPdShift-KPageShift);	// pages in the local part


// GlobalPdeChanged() walks PDs in groups of 32 (one bitmap word) and relies on
// the PD region being aligned to 32 page-directory sizes.
__ASSERT_COMPILE((KPageDirectoryBase&(31*KPageDirectorySize))==0); // following code assumes this alignment
sl@0
    38
sl@0
    39
/**
Propagate a change made to a global PDE in one page directory into every
other allocated page directory, so all address spaces see the same global
mappings.

Only used on configurations where each page directory is a full-size copy
(KLocalPdSize==KPageDirectorySize); with split local/global PDs the global
part is physically shared and needs no copying.

@param aPde	Address of the PDE that was modified; must lie within the
			page-directory region [KPageDirectoryBase,KPageDirectoryEnd).

@pre MmuLock held (also keeps the set of extant PDs stable, see Alloc/Free).
*/
void PageDirectoryAllocator::GlobalPdeChanged(TPde* aPde)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(TLinAddr(aPde)>=KPageDirectoryBase);
	__NK_ASSERT_DEBUG(TLinAddr(aPde)<KPageDirectoryEnd);
	__NK_ASSERT_DEBUG(KLocalPdSize==(TUint)KPageDirectorySize); // shouldn't be called if we have separate global PDs

	// Recover the virtual address mapped by this PDE from its offset within
	// its page directory (each PDE maps KChunkSize of address space)...
	TLinAddr addr = (TLinAddr(aPde)&KPageDirectoryMask)*(KChunkSize/sizeof(TPde));
	if(addr<KGlobalMemoryBase)
		return; // change was in local part of PD, so nothing to do
	if(addr-KIPCAlias<KIPCAliasAreaSize)
		return; // change was in IPC alias area, so nothing to do
	if(!iAllocator)
		return; // not yet initialised

	TRACE2(("PageDirectoryAllocator::GlobalPdeChanged(0x%08x)",aPde));
	TPde pde = *aPde;
	TLinAddr pPde = KPageDirectoryBase+(TLinAddr(aPde)&KPageDirectoryMask); // first page directory

	// copy PDE to all allocated page directories
	pPde -= KPageDirectorySize; // start off at PD minus one
	TLinAddr lastPd = KPageDirectoryBase+(KNumOsAsids-1)*KPageDirectorySize;
	// Scan the allocator bitmap a 32-bit word at a time; the complement is
	// taken so a set bit in 'bits' denotes an ALLOCATED page directory.
	// NOTE(review): relies on TBitMapAllocator storing free positions as set
	// bits in iMap — confirm against TBitMapAllocator's definition.
	TUint32* ptr = iAllocator->iMap;
	do
		{
		TUint32 bits = ~*ptr++;
		do
			{
			pPde += KPageDirectorySize; // step to next page directory
			if(bits&0x80000000u)		// MSB-first: bit 31 is the lowest-numbered PD in this group
				{
				TRACE2(("!PDE %x=%x",pPde,pde));
				*(TPde*)pPde = pde;
				CacheMaintenance::SinglePdeUpdated(pPde);
				}
			}
		while(bits<<=1);	// stops early once no allocated PDs remain in this word
		// Round pPde up to the last PD of the current 32-PD group, so the next
		// '+= KPageDirectorySize' lands on the first PD of the next group.
		// Valid because KPageDirectoryBase is 32*KPageDirectorySize aligned
		// (see __ASSERT_COMPILE above).
		pPde |= 31*KPageDirectorySize; // step to next group of 32 PDs
		}
	while(pPde<lastPd);
	}
sl@0
    80
sl@0
    81
sl@0
    82
/**
Second-phase boot initialisation of the page directory allocator.

Creates the fixed kernel memory object covering the whole page-directory
region, records the (already existing) kernel page directory, and constructs
the ASID bitmap allocator with the kernel's ASID pre-allocated.

@panic if any step fails — the kernel cannot run without page directories.
*/
void PageDirectoryAllocator::Init2()
	{
	TRACEB(("PageDirectoryAllocator::Init2()"));

	// construct memory object for page directories...
#if defined(__CPU_PAGE_TABLES_FULLY_CACHED)
	TMemoryAttributes memAttr = EMemoryAttributeStandard;
#else
	// Page-table walks don't snoop the cache on this CPU, so PDs must be
	// mapped uncached for the MMU to see updates.
	TMemoryAttributes memAttr = (TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable);
#endif
	TInt r = MM::InitFixedKernelMemory(iPageDirectoryMemory, KPageDirectoryBase, KPageDirectoryEnd, KPageDirectorySize, EMemoryObjectHardware, EMemoryCreateNoWipe, memAttr, EMappingCreateFixedVirtual);
	__NK_ASSERT_ALWAYS(r==KErrNone);

	// initialise kernel page directory...
	TPhysAddr kernelPd = Mmu::LinearToPhysical((TLinAddr)Mmu::PageDirectory(KKernelOsAsid));
	iKernelPageDirectory = kernelPd;
	((DMemModelProcess*)K::TheKernelProcess)->iPageDir = kernelPd;
	// Claim the kernel PD's pages as owned by iPageDirectoryMemory so their
	// SPageInfo entries are consistent with PDs allocated later via Alloc().
	AssignPages(KKernelOsAsid*(KPageDirectorySize>>KPageShift),KPageDirectorySize>>KPageShift,kernelPd);

	// construct allocator...
	iAllocator = TBitMapAllocator::New(KNumOsAsids,ETrue);
	__NK_ASSERT_ALWAYS(iAllocator);
	iAllocator->Alloc(KKernelOsAsid,1); // kernel page directory already allocated

	TRACEB(("PageDirectoryAllocator::Init2 done"));
	}
sl@0
   108
sl@0
   109
sl@0
   110
/**
Record ownership of a run of physical pages by the page-directory memory
object, updating each page's SPageInfo.

@param aIndex		Page index within iPageDirectoryMemory of the first page.
@param aCount		Number of contiguous pages to assign.
@param aPhysAddr	Physical address of the first page.
*/
void PageDirectoryAllocator::AssignPages(TUint aIndex, TUint aCount, TPhysAddr aPhysAddr)
	{
	// SPageInfo updates must be batched below the MmuLock hold-time limit.
	__NK_ASSERT_DEBUG(aCount<=KMaxPageInfoUpdatesInOneGo);
	MmuLock::Lock();
	SPageInfo* info = SPageInfo::FromPhysAddr(aPhysAddr);
	for(TUint i=0; i<aCount; ++i)
		info[i].SetPhysAlloc(iPageDirectoryMemory,aIndex+i);
	MmuLock::Unlock();
	}
sl@0
   124
sl@0
   125
sl@0
   126
/**
Allocate and initialise a page directory for a new address space.

Allocates physically contiguous RAM for the local part of the PD, maps it at
its fixed virtual address, clears the local entries, then provides the global
entries either by mapping the shared kernel global-PD pages after the local
part (split-PD configurations) or by copying the kernel's global PDEs into
this full-size PD.

@param aOsAsid			The OS ASID the page directory is for; determines its
						fixed virtual position in the PD region.
@param aPageDirectory	On success, set to the physical address of the PD.

@return KErrNone on success, otherwise a system-wide error code.
*/
TInt PageDirectoryAllocator::Alloc(TUint aOsAsid, TPhysAddr& aPageDirectory)
	{
	TRACE(("PageDirectoryAllocator::Alloc(%d)",aOsAsid));

	// get memory for local page directory...
	Mmu& m = TheMmu;
	TUint offset = aOsAsid*KPageDirectorySize;	// byte offset of this PD in the PD region
	TPhysAddr pdPhys;
	RamAllocLock::Lock();
	TInt r = m.AllocContiguousRam(pdPhys, KLocalPdPages, KLocalPdShift-KPageShift, iPageDirectoryMemory->RamAllocFlags());
	if(r==KErrNone)
		AssignPages(offset>>KPageShift,KLocalPdPages,pdPhys);
	RamAllocLock::Unlock();

	if(r==KErrNone)
		{
		TRACE(("PageDirectoryAllocator::Alloc pdPhys = 0x%08x",pdPhys));

		// map local page directory...
		r = MM::MemoryAddContiguous(iPageDirectoryMemory,MM::BytesToPages(offset),KLocalPdPages,pdPhys);
		if(r!=KErrNone)
			{
			// mapping failed — return the freshly allocated RAM
			RamAllocLock::Lock();
			m.FreeContiguousRam(pdPhys,KLocalPdPages);
			RamAllocLock::Unlock();
			}
		else
			{
			aPageDirectory = pdPhys;

			TPde* pd = Mmu::PageDirectory(aOsAsid);
			const TUint globalOffset = (KGlobalMemoryBase>>KChunkShift)*sizeof(TPde); // start of global part

			// clear local entries in page directory...
			memclr(pd,globalOffset);
			CacheMaintenance::PdesInitialised((TLinAddr)pd,globalOffset);

			if(KLocalPdSize<(TUint)KPageDirectorySize)
				{
				// map global page directory after local part...
				// (the global half is the kernel PD's own pages, physically
				// shared by every address space — no copying ever needed)
				__NK_ASSERT_DEBUG(KLocalPdSize==globalOffset);
				r = MM::MemoryAddContiguous(iPageDirectoryMemory, MM::BytesToPages(offset+KLocalPdSize), 
						(KPageDirectorySize-KLocalPdSize)/KPageSize, iKernelPageDirectory+KLocalPdSize);
				__NK_ASSERT_DEBUG(r==KErrNone); // can't fail
				MmuLock::Lock(); // need lock because allocator not otherwise atomic
				iAllocator->Alloc(aOsAsid,1);
				MmuLock::Unlock();
				}
			else
				{
				// copy global entries to local page directory...
				// (full-size PD: future global PDE changes are propagated to
				// this copy by GlobalPdeChanged())
				TPde* globalPd = Mmu::PageDirectory(KKernelOsAsid);
				MmuLock::Lock(); // need lock because allocator not otherwise atomic, also  to make sure GlobalPdeChanged() only accesses extant PDs
				memcpy((TUint8*)pd+globalOffset,(TUint8*)globalPd+globalOffset,KPageDirectorySize-globalOffset);
				iAllocator->Alloc(aOsAsid,1);
				MmuLock::Unlock();
				CacheMaintenance::PdesInitialised((TLinAddr)((TUint8*)pd+globalOffset),KPageDirectorySize-globalOffset);
				}
			}
		}
	TRACE(("PageDirectoryAllocator::Alloc returns %d",r));
	return r;
	}
sl@0
   189
sl@0
   190
sl@0
   191
/**
Free the page directory belonging to an address space that is being
destroyed.

The ASID is released from the bitmap first (under MmuLock) so that
GlobalPdeChanged() stops writing to this PD, then all of the PD's pages are
unmapped. Note that although KPageDirectoryPageCount pages are removed from
the memory object, only KLocalPdPages are returned to the RAM allocator: on
split-PD configurations the remaining pages are the shared kernel global-PD
pages (mapped in by Alloc()), which must not be freed here. When
KLocalPdSize==KPageDirectorySize the two counts are equal and everything is
freed.

@param aOsAsid The OS ASID whose page directory is to be freed.
*/
void PageDirectoryAllocator::Free(TUint aOsAsid)
	{
	TRACE(("PageDirectoryAllocator::Free(%d)",aOsAsid));

	MmuLock::Lock(); // need lock because allocator not otherwise atomic, also to make sure GlobalPdeChanged() only accesses extant PDs
	iAllocator->Free(aOsAsid, 1);
	MmuLock::Unlock();

	const TUint KPageDirectoryPageCount = KPageDirectorySize>>KPageShift;
	TPhysAddr pages[KPageDirectoryPageCount];
	TUint n = MM::MemoryRemovePages(iPageDirectoryMemory,aOsAsid*KPageDirectoryPageCount,KPageDirectoryPageCount,pages);
	(void)n;	// only used by the debug assert below
	__NK_ASSERT_DEBUG(n==KPageDirectoryPageCount);

	RamAllocLock::Lock();
	Mmu& m = TheMmu;
	// Page directories are fixed.
	m.FreeRam(pages, KLocalPdPages, EPageFixed);
	RamAllocLock::Unlock();
	}
sl@0
   211