1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/mmubase/mmubase.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,5507 @@
1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\memmodel\epoc\mmubase\mmubase.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +#include <memmodel/epoc/mmubase/mmubase.h>
1.22 +#include <mmubase.inl>
1.23 +#include <ramcache.h>
1.24 +#include <demand_paging.h>
1.25 +#include "cache_maintenance.h"
1.26 +#include "highrestimer.h"
1.27 +#include <defrag.h>
1.28 +#include <ramalloc.h>
1.29 +
1.30 +
1.31 +__ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift));
1.32 +
1.33 +_LIT(KLitRamAlloc,"RamAlloc");
1.34 +_LIT(KLitHwChunk,"HwChunk");
1.35 +
1.36 +
1.37 +DMutex* MmuBase::HwChunkMutex;
1.38 +DMutex* MmuBase::RamAllocatorMutex;
1.39 +#ifdef BTRACE_KERNEL_MEMORY
1.40 +TInt Epoc::DriverAllocdPhysRam = 0;
1.41 +TInt Epoc::KernelMiscPages = 0;
1.42 +#endif
1.43 +
1.44 +/******************************************************************************
1.45 + * Code common to all MMU memory models
1.46 + ******************************************************************************/
1.47 +
1.48 +const TInt KFreePagesStepSize=16;
1.49 +
1.50 +void MmuBase::Panic(TPanic aPanic)
1.51 + {
1.52 + Kern::Fault("MMUBASE",aPanic);
1.53 + }
1.54 +
1.55 +void SPageInfo::Lock()
1.56 + {
1.57 + CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Lock");
1.58 + ++iLockCount;
1.59 + if(!iLockCount)
1.60 + MmuBase::Panic(MmuBase::EPageLockedTooManyTimes);
1.61 + }
1.62 +
1.63 +TInt SPageInfo::Unlock()
1.64 + {
1.65 + CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Unlock");
1.66 + if(!iLockCount)
1.67 + MmuBase::Panic(MmuBase::EPageUnlockedTooManyTimes);
1.68 + return --iLockCount;
1.69 + }
1.70 +
1.71 +#ifdef _DEBUG
1.72 +void SPageInfo::Set(TType aType, TAny* aOwner, TUint32 aOffset)
1.73 + {
1.74 + CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Set");
1.75 + (TUint16&)iType = aType; // also sets iState to EStateNormal
1.76 +
1.77 + iOwner = aOwner;
1.78 + iOffset = aOffset;
1.79 + iModifier = 0;
1.80 + }
1.81 +
1.82 +void SPageInfo::Change(TType aType,TState aState)
1.83 + {
1.84 + CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::Change");
1.85 + iType = aType;
1.86 + iState = aState;
1.87 + iModifier = 0;
1.88 + }
1.89 +
1.90 +void SPageInfo::SetState(TState aState)
1.91 + {
1.92 + CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::SetState");
1.93 + iState = aState;
1.94 + iModifier = 0;
1.95 + }
1.96 +
1.97 +void SPageInfo::SetModifier(TAny* aModifier)
1.98 + {
1.99 + CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::SetModifier");
1.100 + iModifier = aModifier;
1.101 + }
1.102 +
1.103 +TInt SPageInfo::CheckModified(TAny* aModifier)
1.104 + {
1.105 + CHECK_PRECONDITIONS(MASK_SYSTEM_LOCKED,"SPageInfo::CheckModified");
1.106 + return iModifier!=aModifier;
1.107 + }
1.108 +
1.109 +void SPageInfo::SetZone(TUint8 aZoneIndex)
1.110 + {
1.111 + __ASSERT_ALWAYS(K::Initialising,Kern::Fault("SPageInfo::SetZone",0));
1.112 + iZone = aZoneIndex;
1.113 + }
1.114 +
1.115 +
1.116 +#endif
1.117 +
1.118 +MmuBase::MmuBase()
1.119 + : iRamCache(NULL), iDefrag(NULL)
1.120 + {
1.121 + }
1.122 +
1.123 +TUint32 MmuBase::RoundToPageSize(TUint32 aSize)
1.124 + {
1.125 + return (aSize+KPageMask)&~KPageMask;
1.126 + }
1.127 +
1.128 +TUint32 MmuBase::RoundToChunkSize(TUint32 aSize)
1.129 + {
1.130 + TUint32 mask=TheMmu->iChunkMask;
1.131 + return (aSize+mask)&~mask;
1.132 + }
1.133 +
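+//
+// Round the range [aBase, aBase+aSize) out to page boundaries: aBase is rounded
+// down to the start of its page, aSize is grown so the rounded range still covers
+// all of the original bytes, and the number of pages spanned is returned.
+//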
1.134 +TInt MmuBase::RoundUpRangeToPageSize(TUint32& aBase, TUint32& aSize)
1.135 + {
1.136 + TUint32 mask=KPageMask;
1.137 + TUint32 shift=KPageShift;
1.138 + TUint32 offset=aBase&mask;
1.139 + aBase&=~mask;
1.140 + aSize=(aSize+offset+mask)&~mask;
1.141 + return TInt(aSize>>shift);
1.142 + }
1.143 +
1.144 +void MmuBase::Wait()
1.145 + {
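+	// On the first acquisition of the RAM allocator mutex, snapshot the current free
+	// RAM and clear the allocation-failure flag; Signal() uses these to report the
+	// net change to K::CheckFreeMemoryLevel() when the mutex is finally released.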
1.146 + Kern::MutexWait(*RamAllocatorMutex);
1.147 + if (RamAllocatorMutex->iHoldCount==1)
1.148 + {
1.149 + MmuBase& m=*TheMmu;
1.150 + m.iInitialFreeMemory=Kern::FreeRamInBytes();
1.151 + m.iAllocFailed=EFalse;
1.152 + }
1.153 + }
1.154 +
1.155 +void MmuBase::Signal()
1.156 + {
1.157 + if (RamAllocatorMutex->iHoldCount>1)
1.158 + {
1.159 + Kern::MutexSignal(*RamAllocatorMutex);
1.160 + return;
1.161 + }
1.162 + MmuBase& m=*TheMmu;
1.163 + TInt initial=m.iInitialFreeMemory;
1.164 + TBool failed=m.iAllocFailed;
1.165 + TInt final=Kern::FreeRamInBytes();
1.166 + Kern::MutexSignal(*RamAllocatorMutex);
1.167 + K::CheckFreeMemoryLevel(initial,final,failed);
1.168 + }
1.169 +
1.170 +void MmuBase::WaitHwChunk()
1.171 + {
1.172 + Kern::MutexWait(*HwChunkMutex);
1.173 + }
1.174 +
1.175 +void MmuBase::SignalHwChunk()
1.176 + {
1.177 + Kern::MutexSignal(*HwChunkMutex);
1.178 + }
1.179 +
1.180 +
1.181 +void MmuBase::MapRamPage(TLinAddr aAddr, TPhysAddr aPage, TPte aPtePerm)
1.182 + {
1.183 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapRamPage %08x@%08x perm %08x", aPage, aAddr, aPtePerm));
1.184 + TInt ptid=PageTableId(aAddr);
1.185 + NKern::LockSystem();
1.186 + MapRamPages(ptid,SPageInfo::EInvalid,0,aAddr,&aPage,1,aPtePerm);
1.187 + NKern::UnlockSystem();
1.188 + }
1.189 +
1.190 +//
1.191 +// Unmap and free pages from a global area
1.192 +//
1.193 +void MmuBase::UnmapAndFree(TLinAddr aAddr, TInt aNumPages)
1.194 + {
1.195 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::UnmapAndFree(%08x,%d)",aAddr,aNumPages));
1.196 + while(aNumPages)
1.197 + {
1.198 + TInt pt_np=(iChunkSize-(aAddr&iChunkMask))>>iPageShift;
1.199 + TInt np=Min(aNumPages,pt_np);
1.200 + aNumPages-=np;
1.201 + TInt id=PageTableId(aAddr);
1.202 + if (id>=0)
1.203 + {
1.204 + while(np)
1.205 + {
1.206 + TInt np2=Min(np,KFreePagesStepSize);
1.207 + TPhysAddr phys[KFreePagesStepSize];
1.208 + TInt nptes;
1.209 + TInt nfree;
1.210 + NKern::LockSystem();
1.211 + UnmapPages(id,aAddr,np2,phys,true,nptes,nfree,NULL);
1.212 + NKern::UnlockSystem();
1.213 + if (nfree)
1.214 + {
1.215 + if (iDecommitThreshold)
1.216 + CacheMaintenanceOnDecommit(phys, nfree);
1.217 + iRamPageAllocator->FreeRamPages(phys,nfree,EPageFixed);
1.218 + }
1.219 + np-=np2;
1.220 + aAddr+=(np2<<iPageShift);
1.221 + }
1.222 + }
1.223 + else
1.224 + {
1.225 + aAddr+=(np<<iPageShift);
1.226 + }
1.227 + }
1.228 + }
1.229 +
1.230 +void MmuBase::FreePages(TPhysAddr* aPageList, TInt aCount, TZonePageType aPageType)
1.231 + {
1.232 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePages(%08x,%d)",aPageList,aCount));
1.233 + if (!aCount)
1.234 + return;
1.235 + TBool sync_decommit = (TUint(aCount)<iDecommitThreshold);
1.236 + TPhysAddr* ppa=aPageList;
1.237 + TPhysAddr* ppaE=ppa+aCount;
1.238 + NKern::LockSystem();
1.239 + while (ppa<ppaE)
1.240 + {
1.241 + TPhysAddr pa=*ppa++;
1.242 + SPageInfo* pi=SPageInfo::SafeFromPhysAddr(pa);
1.243 + if (pi)
1.244 + {
1.245 + pi->SetUnused();
1.246 + if (pi->LockCount())
1.247 + ppa[-1]=KPhysAddrInvalid; // don't free page if it's locked down
1.248 + else if (sync_decommit)
1.249 + {
1.250 + NKern::UnlockSystem();
1.251 + CacheMaintenanceOnDecommit(pa);
1.252 + NKern::LockSystem();
1.253 + }
1.254 + }
1.255 + if (!sync_decommit)
1.256 + NKern::FlashSystem();
1.257 + }
1.258 + NKern::UnlockSystem();
1.259 + if (iDecommitThreshold && !sync_decommit)
1.260 + CacheMaintenance::SyncPhysicalCache_All();
1.261 + iRamPageAllocator->FreeRamPages(aPageList,aCount, aPageType);
1.262 + }
1.263 +
1.264 +TInt MmuBase::InitPageTableInfo(TInt aId)
1.265 + {
1.266 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::InitPageTableInfo(%x)",aId));
1.267 + TInt ptb=aId>>iPtBlockShift;
1.268 + if (++iPtBlockCount[ptb]==1)
1.269 + {
1.270 + // expand page table info array
1.271 + TPhysAddr pagePhys;
1.272 + if (AllocRamPages(&pagePhys,1, EPageFixed)!=KErrNone)
1.273 + {
1.274 + __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
1.275 + iPtBlockCount[ptb]=0;
1.276 + iAllocFailed=ETrue;
1.277 + return KErrNoMemory;
1.278 + }
1.279 +#ifdef BTRACE_KERNEL_MEMORY
1.280 + BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
1.281 + ++Epoc::KernelMiscPages;
1.282 +#endif
1.283 + TLinAddr pil=PtInfoBlockLinAddr(ptb);
1.284 + NKern::LockSystem();
1.285 + SPageInfo::FromPhysAddr(pagePhys)->SetPtInfo(ptb);
1.286 + NKern::UnlockSystem();
1.287 + MapRamPage(pil, pagePhys, iPtInfoPtePerm);
1.288 + memclr((TAny*)pil, iPageSize);
1.289 + }
1.290 + return KErrNone;
1.291 + }
1.292 +
1.293 +TInt MmuBase::DoAllocPageTable(TPhysAddr& aPhysAddr)
1.294 +//
1.295 +// Allocate a new page table but don't map it.
1.296 +// Return page table id and page number/phys address of new page if any.
1.297 +//
1.298 + {
1.299 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoAllocPageTable()"));
1.300 +#ifdef _DEBUG
1.301 + if(K::CheckForSimulatedAllocFail())
1.302 + return KErrNoMemory;
1.303 +#endif
1.304 + TInt id=iPageTableAllocator?iPageTableAllocator->Alloc():-1;
1.305 + if (id<0)
1.306 + {
1.307 + // need to allocate a new page
1.308 + if (AllocRamPages(&aPhysAddr,1, EPageFixed)!=KErrNone)
1.309 + {
1.310 + __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
1.311 + iAllocFailed=ETrue;
1.312 + return KErrNoMemory;
1.313 + }
1.314 +
1.315 + // allocate an ID for the new page
1.316 + id=iPageTableLinearAllocator->Alloc();
1.317 + if (id>=0)
1.318 + {
1.319 + id<<=iPtClusterShift;
1.320 + __KTRACE_OPT(KMMU,Kern::Printf("Allocated ID %04x",id));
1.321 + }
1.322 + if (id<0 || InitPageTableInfo(id)!=KErrNone)
1.323 + {
1.324 + __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page table info"));
1.325 + iPageTableLinearAllocator->Free(id>>iPtClusterShift);
1.326 + if (iDecommitThreshold)
1.327 + CacheMaintenanceOnDecommit(aPhysAddr);
1.328 +
1.329 + iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed);
1.330 + iAllocFailed=ETrue;
1.331 + return KErrNoMemory;
1.332 + }
1.333 +
1.334 + // Set up page info for new page
1.335 + NKern::LockSystem();
1.336 + SPageInfo::FromPhysAddr(aPhysAddr)->SetPageTable(id>>iPtClusterShift);
1.337 + NKern::UnlockSystem();
1.338 +#ifdef BTRACE_KERNEL_MEMORY
1.339 + BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
1.340 + ++Epoc::KernelMiscPages;
1.341 +#endif
1.342 + // mark all subpages other than first as free for use as page tables
1.343 + if (iPtClusterSize>1)
1.344 + iPageTableAllocator->Free(id+1,iPtClusterSize-1);
1.345 + }
1.346 + else
1.347 + aPhysAddr=KPhysAddrInvalid;
1.348 +
1.349 + __KTRACE_OPT(KMMU,Kern::Printf("DoAllocPageTable returns %d (%08x)",id,aPhysAddr));
1.350 + PtInfo(id).SetUnused();
1.351 + return id;
1.352 + }
1.353 +
1.354 +TInt MmuBase::MapPageTable(TInt aId, TPhysAddr aPhysAddr, TBool aAllowExpand)
1.355 + {
1.356 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MapPageTable(%d,%08x)",aId,aPhysAddr));
1.357 + TLinAddr ptLin=PageTableLinAddr(aId);
1.358 + TInt ptg=aId>>iPtGroupShift;
1.359 + if (++iPtGroupCount[ptg]==1)
1.360 + {
1.361 + // need to allocate a new page table
1.362 + __ASSERT_ALWAYS(aAllowExpand, Panic(EMapPageTableBadExpand));
1.363 + TPhysAddr xptPhys;
1.364 + TInt xptid=DoAllocPageTable(xptPhys);
1.365 + if (xptid<0)
1.366 + {
1.367 + __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate extra page table"));
1.368 + iPtGroupCount[ptg]=0;
1.369 + return KErrNoMemory;
1.370 + }
1.371 + if (xptPhys==KPhysAddrInvalid)
1.372 + xptPhys=aPhysAddr + ((xptid-aId)<<iPageTableShift);
1.373 + BootstrapPageTable(xptid, xptPhys, aId, aPhysAddr); // initialise XPT and map it
1.374 + }
1.375 + else
1.376 + MapRamPage(ptLin, aPhysAddr, iPtPtePerm);
1.377 + return KErrNone;
1.378 + }
1.379 +
1.380 +TInt MmuBase::AllocPageTable()
1.381 +//
1.382 +// Allocate a new page table, mapped at the correct linear address.
1.383 +// Clear all entries to Not Present. Return page table id.
1.384 +//
1.385 + {
1.386 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::AllocPageTable()"));
1.387 + __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
1.388 +
1.389 + TPhysAddr ptPhys;
1.390 + TInt id=DoAllocPageTable(ptPhys);
1.391 + if (id<0)
1.392 + return KErrNoMemory;
1.393 + if (ptPhys!=KPhysAddrInvalid)
1.394 + {
1.395 + TInt r=MapPageTable(id,ptPhys);
1.396 + if (r!=KErrNone)
1.397 + {
1.398 + DoFreePageTable(id);
1.399 + SPageInfo* pi=SPageInfo::FromPhysAddr(ptPhys);
1.400 + NKern::LockSystem();
1.401 + pi->SetUnused();
1.402 + NKern::UnlockSystem();
1.403 + if (iDecommitThreshold)
1.404 + CacheMaintenanceOnDecommit(ptPhys);
1.405 +
1.406 + iRamPageAllocator->FreeRamPage(ptPhys, EPageFixed);
1.407 + return r;
1.408 + }
1.409 + }
1.410 + ClearPageTable(id);
1.411 + __KTRACE_OPT(KMMU,Kern::Printf("AllocPageTable returns %d",id));
1.412 + return id;
1.413 + }
1.414 +
1.415 +TBool MmuBase::DoFreePageTable(TInt aId)
1.416 +//
1.417 +// Free an empty page table. We assume that all pages mapped by the page table have
1.418 +// already been unmapped and freed.
1.419 +//
1.420 + {
1.421 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::DoFreePageTable(%d)",aId));
1.422 + SPageTableInfo& s=PtInfo(aId);
1.423 + __NK_ASSERT_DEBUG(!s.iCount); // shouldn't have any pages mapped
1.424 + s.SetUnused();
1.425 +
1.426 + TInt id=aId &~ iPtClusterMask;
1.427 + if (iPageTableAllocator)
1.428 + {
1.429 + iPageTableAllocator->Free(aId);
1.430 + if (iPageTableAllocator->NotFree(id,iPtClusterSize))
1.431 + {
1.432 + // some subpages still in use
1.433 + return ETrue;
1.434 + }
1.435 + __KTRACE_OPT(KMMU,Kern::Printf("Freeing whole page, id=%d",id));
1.436 + // whole page is now free
1.437 + // remove it from the page table allocator
1.438 + iPageTableAllocator->Alloc(id,iPtClusterSize);
1.439 + }
1.440 +
1.441 + TInt ptb=aId>>iPtBlockShift;
1.442 + if (--iPtBlockCount[ptb]==0)
1.443 + {
1.444 + // shrink page table info array
1.445 + TLinAddr pil=PtInfoBlockLinAddr(ptb);
1.446 + UnmapAndFree(pil,1); // remove PTE, null page info, free page
1.447 +#ifdef BTRACE_KERNEL_MEMORY
1.448 + BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
1.449 + --Epoc::KernelMiscPages;
1.450 +#endif
1.451 + }
1.452 +
1.453 + // free the page table linear address
1.454 + iPageTableLinearAllocator->Free(id>>iPtClusterShift);
1.455 + return EFalse;
1.456 + }
1.457 +
1.458 +void MmuBase::FreePageTable(TInt aId)
1.459 +//
1.460 +// Free an empty page table. We assume that all pages mapped by the page table have
1.461 +// already been unmapped and freed.
1.462 +//
1.463 + {
1.464 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::FreePageTable(%d)",aId));
1.465 + if (DoFreePageTable(aId))
1.466 + return;
1.467 +
1.468 + TInt id=aId &~ iPtClusterMask;
1.469 +
1.470 + // calculate linear address of page
1.471 + TLinAddr ptLin=PageTableLinAddr(id);
1.472 + __KTRACE_OPT(KMMU,Kern::Printf("Page lin %08x",ptLin));
1.473 +
1.474 + // unmap and free the page
1.475 + UnmapAndFree(ptLin,1);
1.476 +#ifdef BTRACE_KERNEL_MEMORY
1.477 + BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
1.478 + --Epoc::KernelMiscPages;
1.479 +#endif
1.480 +
1.481 + TInt ptg=aId>>iPtGroupShift;
1.482 + --iPtGroupCount[ptg];
1.483 + // don't shrink the page table mapping for now
1.484 + }
1.485 +
1.486 +TInt MmuBase::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
1.487 + {
1.488 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() size=%x align=%d",aSize,aAlign));
1.489 + TInt r=AllocContiguousRam(aSize, aPhysAddr, EPageFixed, aAlign);
1.490 + if (r!=KErrNone)
1.491 + {
1.492 + iAllocFailed=ETrue;
1.493 + return r;
1.494 + }
1.495 + TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
1.496 + SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
1.497 + SPageInfo* pE=pI+n;
1.498 + for (; pI<pE; ++pI)
1.499 + {
1.500 + NKern::LockSystem();
1.501 + __NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused);
1.502 + pI->Lock();
1.503 + NKern::UnlockSystem();
1.504 + }
1.505 + return KErrNone;
1.506 + }
1.507 +
1.508 +/** Attempt to allocate a contiguous block of RAM from the specified RAM zones.
1.509 +
1.510 +@param aZoneIdList An array of the IDs of the RAM zones to allocate from.
1.511 +@param aZoneIdCount The number of RAM zone IDs listed in aZoneIdList.
1.512 +@param aSize The number of contiguous bytes to allocate
1.513 +@param aPhysAddr The physical address of the start of the contiguous block of
1.514 + memory allocated
1.515 +@param aAlign Required alignment
1.516 +@return KErrNone on success; KErrArgument if a zone in aZoneIdList doesn't exist or aSize is larger than the
1.517 +size of the RAM zone; KErrNoMemory if the RAM zone is too full.
1.518 +*/
1.519 +TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
1.520 + {
1.521 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() size=0x%x align=%d", aSize, aAlign));
1.522 + TInt r = ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, EPageFixed, aAlign);
1.523 + if (r!=KErrNone)
1.524 + {
1.525 + iAllocFailed=ETrue;
1.526 + return r;
1.527 + }
1.528 + TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
1.529 + SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
1.530 + SPageInfo* pE=pI+n;
1.531 + for (; pI<pE; ++pI)
1.532 + {
1.533 + NKern::LockSystem();
1.534 + __NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused);
1.535 + pI->Lock();
1.536 + NKern::UnlockSystem();
1.537 + }
1.538 + return KErrNone;
1.539 + }
1.540 +
1.541 +
1.542 +/** Attempt to allocate discontiguous RAM pages.
1.543 +
1.544 +@param aNumPages The number of pages to allocate.
1.545 +@param aPageList Pointer to an array where each element will be the physical
1.546 + address of each page allocated.
1.547 +@return KErrNone on success, KErrNoMemory otherwise
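+
+A minimal kernel-side sketch of the calling convention (the page count and variable
+names are illustrative only); the RAM allocator mutex must be held, as in
+MmuBase::Init3():
+@code
+	TPhysAddr pages[4];
+	MmuBase& m = *MmuBase::TheMmu;
+	MmuBase::Wait();                           // take the RAM allocator mutex
+	TInt r = m.AllocPhysicalRam(4, pages);     // physical addresses returned in pages[]
+	if (r == KErrNone)
+		{
+		// ... map or record the pages, then later ...
+		r = m.FreePhysicalRam(4, pages);       // release them when finished
+		}
+	MmuBase::Signal();                         // release the RAM allocator mutex
+@endcode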
1.548 +*/
1.549 +TInt MmuBase::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
1.550 + {
1.551 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam() numpages=%x", aNumPages));
1.552 + TInt r = AllocRamPages(aPageList, aNumPages, EPageFixed);
1.553 + if (r!=KErrNone)
1.554 + {
1.555 + iAllocFailed=ETrue;
1.556 + return r;
1.557 + }
1.558 + TPhysAddr* pageEnd = aPageList + aNumPages;
1.559 + for (TPhysAddr* page = aPageList; page < pageEnd; page++)
1.560 + {
1.561 + SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
1.562 + NKern::LockSystem();
1.563 + __NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused);
1.564 + pageInfo->Lock();
1.565 + NKern::UnlockSystem();
1.566 + }
1.567 + return KErrNone;
1.568 + }
1.569 +
1.570 +
1.571 +/** Attempt to allocate discontiguous RAM pages from the specified RAM zones.
1.572 +
1.573 +@param aZoneIdList An array of the IDs of the RAM zones to allocate from.
1.574 +@param aZoneIdCount The number of RAM zone IDs listed in aZoneIdList.
1.575 +@param aNumPages The number of pages to allocate.
1.576 +@param aPageList Pointer to an array where each element will be the physical
1.577 + address of each page allocated.
1.578 +@return KErrNone on success; KErrArgument if a zone in aZoneIdList doesn't exist or aNumPages is
1.579 +larger than the total number of pages in the RAM zone; KErrNoMemory if the RAM
1.580 +zone is too full.
1.581 +*/
1.582 +TInt MmuBase::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
1.583 + {
1.584 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam() numpages 0x%x zones 0x%x", aNumPages, aZoneIdCount));
1.585 + TInt r = ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed);
1.586 + if (r!=KErrNone)
1.587 + {
1.588 + iAllocFailed=ETrue;
1.589 + return r;
1.590 + }
1.591 +
1.592 + TPhysAddr* pageEnd = aPageList + aNumPages;
1.593 + for (TPhysAddr* page = aPageList; page < pageEnd; page++)
1.594 + {
1.595 + SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
1.596 + NKern::LockSystem();
1.597 + __NK_ASSERT_DEBUG(pageInfo->Type() == SPageInfo::EUnused);
1.598 + pageInfo->Lock();
1.599 + NKern::UnlockSystem();
1.600 + }
1.601 + return KErrNone;
1.602 + }
1.603 +
1.604 +
1.605 +TInt MmuBase::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
1.606 + {
1.607 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%x)",aPhysAddr,aSize));
1.608 +
1.609 + TInt n=TInt(TUint32(aSize+iPageMask)>>iPageShift);
1.610 + SPageInfo* pI=SPageInfo::FromPhysAddr(aPhysAddr);
1.611 + SPageInfo* pE=pI+n;
1.612 + for (; pI<pE; ++pI)
1.613 + {
1.614 + NKern::LockSystem();
1.615 + __ASSERT_ALWAYS(pI->Type()==SPageInfo::EUnused && pI->Unlock()==0, Panic(EBadFreePhysicalRam));
1.616 + NKern::UnlockSystem();
1.617 + }
1.618 + TInt r=iRamPageAllocator->FreePhysicalRam(aPhysAddr, aSize);
1.619 + return r;
1.620 + }
1.621 +
1.622 +/** Free discontiguous RAM pages that were previously allocated using the discontiguous
1.623 +overloads of MmuBase::AllocPhysicalRam() or MmuBase::ZoneAllocPhysicalRam().
1.624 +
1.625 +Specifying one of the following may cause the system to panic:
1.626 +a) an invalid physical RAM address.
1.627 +b) valid physical RAM addresses where some had not been previously allocated.
1.628 +c) an address not aligned to a page boundary.
1.629 +
1.630 +@param aNumPages Number of pages to free
1.631 +@param aPageList Array of the physical address of each page to free
1.632 +
1.633 +@return KErrNone if the operation was successful.
1.634 +
1.635 +*/
1.636 +TInt MmuBase::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
1.637 + {
1.638 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(%08x,%08x)", aNumPages, aPageList));
1.639 +
1.640 + TPhysAddr* pageEnd = aPageList + aNumPages;
1.641 + TInt r = KErrNone;
1.642 +
1.643 + for (TPhysAddr* page = aPageList; page < pageEnd && r == KErrNone; page++)
1.644 + {
1.645 + SPageInfo* pageInfo = SPageInfo::FromPhysAddr(*page);
1.646 + NKern::LockSystem();
1.647 + __ASSERT_ALWAYS(pageInfo->Type()==SPageInfo::EUnused && pageInfo->Unlock()==0, Panic(EBadFreePhysicalRam));
1.648 + NKern::UnlockSystem();
1.649 +
1.650 + // Free the page
1.651 + r = iRamPageAllocator->FreePhysicalRam(*page, KPageSize);
1.652 + }
1.653 + return r;
1.654 + }
1.655 +
1.656 +
1.657 +TInt MmuBase::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
1.658 + {
1.659 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(%08x,%x)",aPhysAddr,aSize));
1.660 + TUint32 pa=aPhysAddr;
1.661 + TUint32 size=aSize;
1.662 + TInt n=RoundUpRangeToPageSize(pa,size);
1.663 + TInt r=iRamPageAllocator->ClaimPhysicalRam(pa, size);
1.664 + if (r==KErrNone)
1.665 + {
1.666 + SPageInfo* pI=SPageInfo::FromPhysAddr(pa);
1.667 + SPageInfo* pE=pI+n;
1.668 + for (; pI<pE; ++pI)
1.669 + {
1.670 + NKern::LockSystem();
1.671 + __NK_ASSERT_DEBUG(pI->Type()==SPageInfo::EUnused && pI->LockCount()==0);
1.672 + pI->Lock();
1.673 + NKern::UnlockSystem();
1.674 + }
1.675 + }
1.676 + return r;
1.677 + }
1.678 +
1.679 +/**
1.680 +Allocate a set of discontiguous RAM pages from the specified zone.
1.681 +
1.682 +@param aZoneIdList The array of IDs of the RAM zones to allocate from.
1.683 +@param aZoneIdCount The number of RAM zone IDs in aZoneIdList.
1.684 +@param aPageList Preallocated array of TPhysAddr elements that will receive the
1.685 +physical address of each page allocated.
1.686 +@param aNumPages The number of pages to allocate.
1.687 +@param aPageType The type of the pages being allocated.
1.688 +
1.689 +@return KErrNone on success; KErrArgument if a zone in aZoneIdList doesn't exist;
1.690 +KErrNoMemory if there aren't enough free pages in the zones.
1.691 +*/
1.692 +TInt MmuBase::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType)
1.693 + {
1.694 +#ifdef _DEBUG
1.695 + if(K::CheckForSimulatedAllocFail())
1.696 + return KErrNoMemory;
1.697 +#endif
1.698 + __NK_ASSERT_DEBUG(aPageType == EPageFixed);
1.699 +
1.700 + return iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, aPageType);
1.701 + }
1.702 +
1.703 +
1.704 +TInt MmuBase::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aPageType, TUint aBlockedZoneId, TBool aBlockRest)
1.705 + {
1.706 +#ifdef _DEBUG
1.707 + if(K::CheckForSimulatedAllocFail())
1.708 + return KErrNoMemory;
1.709 +#endif
1.710 + TInt missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest);
1.711 +
1.712 +	// If some pages are still missing, ask the RAM cache to donate some of its pages.
1.713 +	// Don't ask it for discardable pages as those are intended for the cache itself.
1.714 + if(missing && aPageType != EPageDiscard && iRamCache->GetFreePages(missing))
1.715 + missing = iRamPageAllocator->AllocRamPages(aPageList, aNumPages, aPageType, aBlockedZoneId, aBlockRest);
1.716 + return missing ? KErrNoMemory : KErrNone;
1.717 + }
1.718 +
1.719 +
1.720 +TInt MmuBase::AllocContiguousRam(TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
1.721 + {
1.722 +#ifdef _DEBUG
1.723 + if(K::CheckForSimulatedAllocFail())
1.724 + return KErrNoMemory;
1.725 +#endif
1.726 + __NK_ASSERT_DEBUG(aPageType == EPageFixed);
1.727 + TUint contigPages = (aSize + KPageSize - 1) >> KPageShift;
1.728 + TInt r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
1.729 + if (r == KErrNoMemory && contigPages > KMaxFreeableContiguousPages)
1.730 +		{// Allocation failed, but as this is a large allocation, flush the RAM cache
1.731 +		// and reattempt; a large allocation wouldn't have discarded cache pages itself.
1.732 + iRamCache->FlushAll();
1.733 + r = iRamPageAllocator->AllocContiguousRam(contigPages, aPhysAddr, aPageType, aAlign, aBlockedZoneId, aBlockRest);
1.734 + }
1.735 + return r;
1.736 + }
1.737 +
1.738 +
1.739 +/**
1.740 +Allocate contiguous RAM from the specified RAM zones.
1.741 +@param aZoneIdList An array of IDs of the RAM zones to allocate from
1.742 +@param aZoneIdCount The number of IDs listed in aZoneIdList
1.743 +@param aSize The number of bytes to allocate
1.744 +@param aPhysAddr Will receive the physical base address of the allocated RAM
1.745 +@param aPageType The type of the pages being allocated
1.746 +@param aAlign The log base 2 alignment required
+@return KErrNone on success, otherwise one of the system wide error codes
1.747 +*/
1.748 +TInt MmuBase::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aPageType, TInt aAlign)
1.749 + {
1.750 +#ifdef _DEBUG
1.751 + if(K::CheckForSimulatedAllocFail())
1.752 + return KErrNoMemory;
1.753 +#endif
1.754 + return iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aPageType, aAlign);
1.755 + }
1.756 +
1.757 +SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
1.758 + {
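+	// The SPageInfo structures themselves live in dedicated pages; one such page holds
+	// 2^(KPageShift-KPageInfoShift) entries and therefore describes
+	// 2^(2*KPageShift-KPageInfoShift) bytes of physical address space. KPageInfoMap is a
+	// bitmap with one bit per page-info page, clear where no page info exists for that
+	// part of the physical address space.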
1.759 + TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift);
1.760 + TUint flags = ((TUint8*)KPageInfoMap)[index>>3];
1.761 + TUint mask = 1<<(index&7);
1.762 + if(!(flags&mask))
1.763 + return 0; // no SPageInfo for aAddress
1.764 + SPageInfo* info = FromPhysAddr(aAddress);
1.765 + if(info->Type()==SPageInfo::EInvalid)
1.766 + return 0;
1.767 + return info;
1.768 + }
1.769 +
1.770 +/** HAL Function wrapper for the RAM allocator.
1.771 + */
1.772 +
1.773 +TInt RamHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
1.774 + {
1.775 + DRamAllocator *pRamAlloc = MmuBase::TheMmu->iRamPageAllocator;
1.776 +
1.777 + if (pRamAlloc)
1.778 + return pRamAlloc->HalFunction(aFunction, a1, a2);
1.779 + return KErrNotSupported;
1.780 + }
1.781 +
1.782 +
1.783 +/******************************************************************************
1.784 + * Initialisation
1.785 + ******************************************************************************/
1.786 +
1.787 +void MmuBase::Init1()
1.788 + {
1.789 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init1"));
1.790 + iInitialFreeMemory=0;
1.791 + iAllocFailed=EFalse;
1.792 + }
1.793 +
1.794 +void MmuBase::Init2()
1.795 + {
1.796 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init2"));
1.797 + TInt total_ram=TheSuperPage().iTotalRamSize;
1.798 + TInt total_ram_pages=total_ram>>iPageShift;
1.799 + iNumPages = total_ram_pages;
1.800 + const SRamInfo& info=*(const SRamInfo*)TheSuperPage().iRamBootData;
1.801 + iRamPageAllocator=DRamAllocator::New(info, RamZoneConfig, RamZoneCallback);
1.802 +
1.803 + TInt max_pt=total_ram>>iPageTableShift;
1.804 + if (max_pt<iMaxPageTables)
1.805 + iMaxPageTables=max_pt;
1.806 + iMaxPageTables &= ~iPtClusterMask;
1.807 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iMaxPageTables=%d",iMaxPageTables));
1.808 + TInt max_ptpg=iMaxPageTables>>iPtClusterShift;
1.809 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptpg=%d",max_ptpg));
1.810 + iPageTableLinearAllocator=TBitMapAllocator::New(max_ptpg,ETrue);
1.811 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableLinearAllocator=%08x",iPageTableLinearAllocator));
1.812 + __ASSERT_ALWAYS(iPageTableLinearAllocator,Panic(EPtLinAllocCreateFailed));
1.813 + if (iPtClusterShift) // if more than one page table per page
1.814 + {
1.815 + iPageTableAllocator=TBitMapAllocator::New(iMaxPageTables,EFalse);
1.816 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPageTableAllocator=%08x",iPageTableAllocator));
1.817 + __ASSERT_ALWAYS(iPageTableAllocator,Panic(EPtAllocCreateFailed));
1.818 + }
1.819 + TInt max_ptb=(iMaxPageTables+iPtBlockMask)>>iPtBlockShift;
1.820 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("max_ptb=%d",max_ptb));
1.821 + iPtBlockCount=(TInt*)Kern::AllocZ(max_ptb*sizeof(TInt));
1.822 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtBlockCount=%08x",iPtBlockCount));
1.823 + __ASSERT_ALWAYS(iPtBlockCount,Panic(EPtBlockCountCreateFailed));
1.824 + TInt max_ptg=(iMaxPageTables+iPtGroupMask)>>iPtGroupShift;
1.825 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("ptg_shift=%d, max_ptg=%d",iPtGroupShift,max_ptg));
1.826 + iPtGroupCount=(TInt*)Kern::AllocZ(max_ptg*sizeof(TInt));
1.827 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("iPtGroupCount=%08x",iPtGroupCount));
1.828 + __ASSERT_ALWAYS(iPtGroupCount,Panic(EPtGroupCountCreateFailed));
1.829 +
1.830 +
1.831 +	// Clear the initial (and only so far) page table info page so all unused
1.832 + // page tables will be marked as unused.
1.833 + memclr((TAny*)KPageTableInfoBase, KPageSize);
1.834 +
1.835 + // look for page tables - assume first page table (id=0) maps page tables
1.836 + TPte* pPte=(TPte*)iPageTableLinBase;
1.837 + TInt i;
1.838 + for (i=0; i<iChunkSize/iPageSize; ++i)
1.839 + {
1.840 + TPte pte=*pPte++;
1.841 + if (!PteIsPresent(pte)) // after boot, page tables are contiguous
1.842 + break;
1.843 + iPageTableLinearAllocator->Alloc(i,1);
1.844 + TPhysAddr ptpgPhys=PtePhysAddr(pte, i);
1.845 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys);
1.846 + __ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot));
1.847 + pi->SetPageTable(i);
1.848 + pi->Lock();
1.849 + TInt id=i<<iPtClusterShift;
1.850 + TInt ptb=id>>iPtBlockShift;
1.851 + ++iPtBlockCount[ptb];
1.852 + TInt ptg=id>>iPtGroupShift;
1.853 + ++iPtGroupCount[ptg];
1.854 + }
1.855 +
1.856 + // look for mapped pages
1.857 + TInt npdes=1<<(32-iChunkShift);
1.858 + TInt npt=0;
1.859 + for (i=0; i<npdes; ++i)
1.860 + {
1.861 + TLinAddr cAddr=TLinAddr(i<<iChunkShift);
1.862 + if (cAddr>=PP::RamDriveStartAddress && TUint32(cAddr-PP::RamDriveStartAddress)<TUint32(PP::RamDriveRange))
1.863 + continue; // leave RAM drive for now
1.864 + TInt ptid=PageTableId(cAddr);
1.865 + TPhysAddr pdePhys = PdePhysAddr(cAddr); // check for whole PDE mapping
1.866 + pPte = NULL;
1.867 + if (ptid>=0)
1.868 + {
1.869 + ++npt;
1.870 + __KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> page table %d", cAddr, ptid));
1.871 + pPte=(TPte*)PageTableLinAddr(ptid);
1.872 + }
1.873 +#ifdef KMMU
1.874 + if (pdePhys != KPhysAddrInvalid)
1.875 + {
1.876 + __KTRACE_OPT(KMMU,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", cAddr, pdePhys));
1.877 + }
1.878 +#endif
1.879 + if (ptid>=0 || pdePhys != KPhysAddrInvalid)
1.880 + {
1.881 + TInt j;
1.882 + TInt np=0;
1.883 + for (j=0; j<iChunkSize/iPageSize; ++j)
1.884 + {
1.885 + TBool present = ETrue; // all pages present if whole PDE mapping
1.886 + TPte pte = 0;
1.887 + if (pPte)
1.888 + {
1.889 + pte = pPte[j];
1.890 + present = PteIsPresent(pte);
1.891 + }
1.892 + if (present)
1.893 + {
1.894 + ++np;
1.895 + TPhysAddr pa = pPte ? PtePhysAddr(pte, j) : (pdePhys + (j<<iPageShift));
1.896 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
1.897 + __KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x PA=%08x",
1.898 + cAddr+(j<<iPageShift), pa));
1.899 + if (pi) // ignore non-RAM mappings
1.900 + {//these pages will never be freed and can't be moved
1.901 + TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed);
1.902 + // allow KErrAlreadyExists since it's possible that a page is doubly mapped
1.903 + __ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
1.904 + SetupInitialPageInfo(pi,cAddr,j);
1.905 +#ifdef BTRACE_KERNEL_MEMORY
1.906 + if(r==KErrNone)
1.907 + ++Epoc::KernelMiscPages;
1.908 +#endif
1.909 + }
1.910 + }
1.911 + }
1.912 + __KTRACE_OPT(KMMU,Kern::Printf("Addr: %08x #PTEs=%d",cAddr,np));
1.913 + if (ptid>=0)
1.914 + SetupInitialPageTableInfo(ptid,cAddr,np);
1.915 + }
1.916 + }
1.917 +
1.918 + TInt oddpt=npt & iPtClusterMask;
1.919 + if (oddpt)
1.920 + oddpt=iPtClusterSize-oddpt;
1.921 + __KTRACE_OPT(KBOOT,Kern::Printf("Total page tables %d, left over subpages %d",npt,oddpt));
1.922 + if (oddpt)
1.923 + iPageTableAllocator->Free(npt,oddpt);
1.924 +
1.925 + DoInit2();
1.926 +
1.927 + // Save current free RAM size - there can never be more free RAM than this
1.928 + TInt max_free = Kern::FreeRamInBytes();
1.929 + K::MaxFreeRam = max_free;
1.930 + if (max_free < PP::RamDriveMaxSize)
1.931 + PP::RamDriveMaxSize = max_free;
1.932 +
1.933 + if (K::ColdStart)
1.934 + ClearRamDrive(PP::RamDriveStartAddress);
1.935 + else
1.936 + RecoverRamDrive();
1.937 +
1.938 + TInt r=K::MutexCreate((DMutex*&)RamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc);
1.939 + if (r!=KErrNone)
1.940 + Panic(ERamAllocMutexCreateFailed);
1.941 + r=K::MutexCreate((DMutex*&)HwChunkMutex, KLitHwChunk, NULL, EFalse, KMutexOrdHwChunk);
1.942 + if (r!=KErrNone)
1.943 + Panic(EHwChunkMutexCreateFailed);
1.944 +
1.945 +#ifdef __DEMAND_PAGING__
1.946 + if (DemandPaging::RomPagingRequested() || DemandPaging::CodePagingRequested())
1.947 + iRamCache = DemandPaging::New();
1.948 + else
1.949 + iRamCache = new RamCache;
1.950 +#else
1.951 + iRamCache = new RamCache;
1.952 +#endif
1.953 + if (!iRamCache)
1.954 + Panic(ERamCacheAllocFailed);
1.955 + iRamCache->Init2();
1.956 + RamCacheBase::TheRamCache = iRamCache;
1.957 +
1.958 + // Get the allocator to signal to the variant which RAM zones are in use so far
1.959 + iRamPageAllocator->InitialCallback();
1.960 + }
1.961 +
1.962 +void MmuBase::Init3()
1.963 + {
1.964 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("MmuBase::Init3"));
1.965 +
1.966 + // Initialise demand paging
1.967 +#ifdef __DEMAND_PAGING__
1.968 + M::DemandPagingInit();
1.969 +#endif
1.970 +
1.971 + // Register a HAL Function for the Ram allocator.
1.972 + TInt r = Kern::AddHalEntry(EHalGroupRam, RamHalFunction, 0);
1.973 + __NK_ASSERT_ALWAYS(r==KErrNone);
1.974 +
1.975 + //
1.976 +	// Perform the initialisation of the page moving and RAM defrag objects.
1.977 + //
1.978 +
1.979 + // allocate a page to use as an alt stack
1.980 + MmuBase::Wait();
1.981 + TPhysAddr stackpage;
1.982 + r = AllocPhysicalRam(KPageSize, stackpage);
1.983 + MmuBase::Signal();
1.984 + if (r!=KErrNone)
1.985 + Panic(EDefragStackAllocFailed);
1.986 +
1.987 + // map it at a predetermined address
1.988 + TInt ptid = PageTableId(KDefragAltStackAddr);
1.989 + TPte perm = PtePermissions(EKernelStack);
1.990 + NKern::LockSystem();
1.991 + MapRamPages(ptid, SPageInfo::EFixed, NULL, KDefragAltStackAddr, &stackpage, 1, perm);
1.992 + NKern::UnlockSystem();
1.993 + iAltStackBase = KDefragAltStackAddr + KPageSize;
1.994 +
1.995 + __KTRACE_OPT(KMMU,Kern::Printf("Allocated defrag alt stack page at %08x, mapped to %08x, base is now %08x", stackpage, KDefragAltStackAddr, iAltStackBase));
1.996 +
1.997 + // Create the actual defrag object and initialise it.
1.998 + iDefrag = new Defrag;
1.999 + if (!iDefrag)
1.1000 + Panic(EDefragAllocFailed);
1.1001 + iDefrag->Init3(iRamPageAllocator);
1.1002 + }
1.1003 +
1.1004 +void MmuBase::CreateKernelSection(TLinAddr aEnd, TInt aHwChunkAlign)
1.1005 + {
1.1006 + TLinAddr base=(TLinAddr)TheRomHeader().iKernelLimit;
1.1007 + iKernelSection=TLinearSection::New(base, aEnd);
1.1008 + __ASSERT_ALWAYS(iKernelSection!=NULL, Panic(ECreateKernelSectionFailed));
1.1009 + iHwChunkAllocator=THwChunkAddressAllocator::New(aHwChunkAlign, iKernelSection);
1.1010 + __ASSERT_ALWAYS(iHwChunkAllocator!=NULL, Panic(ECreateHwChunkAllocFailed));
1.1011 + }
1.1012 +
1.1013 +// Recover RAM drive contents after a reset
1.1014 +TInt MmuBase::RecoverRamDrive()
1.1015 + {
1.1016 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::RecoverRamDrive()"));
1.1017 + TLinAddr ptlin;
1.1018 + TLinAddr chunk = PP::RamDriveStartAddress;
1.1019 + TLinAddr end = chunk + (TLinAddr)PP::RamDriveRange;
1.1020 + TInt size = 0;
1.1021 + TInt limit = RoundToPageSize(TheSuperPage().iRamDriveSize);
1.1022 + for( ; chunk<end; chunk+=iChunkSize)
1.1023 + {
1.1024 + if (size==limit) // have reached end of ram drive
1.1025 + break;
1.1026 + TPhysAddr ptphys = 0;
1.1027 + TInt ptid = BootPageTableId(chunk, ptphys); // ret KErrNotFound if PDE not present, KErrUnknown if present but as yet unknown page table
1.1028 + __KTRACE_OPT(KMMU,Kern::Printf("Addr %08x: PTID=%d PTPHYS=%08x", chunk, ptid, ptphys));
1.1029 + if (ptid==KErrNotFound)
1.1030 + break; // no page table so stop here and clear to end of range
1.1031 + TPhysAddr ptpgphys = ptphys & ~iPageMask;
1.1032 + TInt r = iRamPageAllocator->MarkPageAllocated(ptpgphys, EPageMovable);
1.1033 + __KTRACE_OPT(KMMU,Kern::Printf("MPA: r=%d",r));
1.1034 + if (r==KErrArgument)
1.1035 + break; // page table address was invalid - stop here and clear to end of range
1.1036 + if (r==KErrNone)
1.1037 + {
1.1038 + // this page was currently unallocated
1.1039 + if (ptid>=0)
1.1040 + break; // ID has been allocated - bad news - bail here
1.1041 + ptid = iPageTableLinearAllocator->Alloc();
1.1042 + __ASSERT_ALWAYS(ptid>=0, Panic(ERecoverRamDriveAllocPTIDFailed));
1.1043 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgphys);
1.1044 + __ASSERT_ALWAYS(pi, Panic(ERecoverRamDriveBadPageTable));
1.1045 + pi->SetPageTable(ptid); // id = cluster number here
1.1046 + ptid <<= iPtClusterShift;
1.1047 + MapPageTable(ptid, ptpgphys, EFalse);
1.1048 + if (iPageTableAllocator)
1.1049 + iPageTableAllocator->Free(ptid, iPtClusterSize);
1.1050 + ptid |= ((ptphys>>iPageTableShift)&iPtClusterMask);
1.1051 + ptlin = PageTableLinAddr(ptid);
1.1052 + __KTRACE_OPT(KMMU,Kern::Printf("Page table ID %d lin %08x", ptid, ptlin));
1.1053 + if (iPageTableAllocator)
1.1054 + iPageTableAllocator->Alloc(ptid, 1);
1.1055 + }
1.1056 + else
1.1057 + {
1.1058 + // this page was already allocated
1.1059 + if (ptid<0)
1.1060 + break; // ID not allocated - bad news - bail here
1.1061 + ptlin = PageTableLinAddr(ptid);
1.1062 + __KTRACE_OPT(KMMU,Kern::Printf("Page table lin %08x", ptlin));
1.1063 + if (iPageTableAllocator)
1.1064 + iPageTableAllocator->Alloc(ptid, 1);
1.1065 + }
1.1066 + TInt pte_index;
1.1067 + TBool chunk_inc = 0;
1.1068 + TPte* page_table = (TPte*)ptlin;
1.1069 +	for (pte_index=0; pte_index<(iChunkSize>>iPageShift); ++pte_index)
1.1070 + {
1.1071 + if (size==limit) // have reached end of ram drive
1.1072 + break;
1.1073 + TPte pte = page_table[pte_index];
1.1074 + if (PteIsPresent(pte))
1.1075 + {
1.1076 + TPhysAddr pa=PtePhysAddr(pte, pte_index);
1.1077 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
1.1078 + if (!pi)
1.1079 + break;
1.1080 + TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageMovable);
1.1081 + __ASSERT_ALWAYS(r==KErrNone, Panic(ERecoverRamDriveBadPage));
1.1082 + size+=iPageSize;
1.1083 + chunk_inc = iChunkSize;
1.1084 + }
1.1085 + }
1.1086 +	if (pte_index < (iChunkSize>>iPageShift) )
1.1087 + {
1.1088 + // if we recovered pages in this page table, leave it in place
1.1089 + chunk += chunk_inc;
1.1090 +
1.1091 + // clear from here on
1.1092 + ClearPageTable(ptid, pte_index);
1.1093 + break;
1.1094 + }
1.1095 + }
1.1096 + if (chunk < end)
1.1097 + ClearRamDrive(chunk);
1.1098 + __KTRACE_OPT(KMMU,Kern::Printf("Recovered RAM drive size %08x",size));
1.1099 + if (size<TheSuperPage().iRamDriveSize)
1.1100 + {
1.1101 + __KTRACE_OPT(KMMU,Kern::Printf("Truncating RAM drive from %08x to %08x", TheSuperPage().iRamDriveSize, size));
1.1102 + TheSuperPage().iRamDriveSize=size;
1.1103 + }
1.1104 + return KErrNone;
1.1105 + }
1.1106 +
1.1107 +TInt MmuBase::AllocShadowPage(TLinAddr aRomAddr)
1.1108 + {
1.1109 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase:AllocShadowPage(%08x)", aRomAddr));
1.1110 + aRomAddr &= ~iPageMask;
1.1111 + TPhysAddr orig_phys = KPhysAddrInvalid;
1.1112 + if (aRomAddr>=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize))
1.1113 + orig_phys = LinearToPhysical(aRomAddr);
1.1114 + __KTRACE_OPT(KMMU,Kern::Printf("OrigPhys = %08x",orig_phys));
1.1115 + if (orig_phys == KPhysAddrInvalid)
1.1116 + {
1.1117 + __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
1.1118 + return KErrArgument;
1.1119 + }
1.1120 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(orig_phys);
1.1121 + if (pi && pi->Type()==SPageInfo::EShadow)
1.1122 + {
1.1123 + __KTRACE_OPT(KMMU,Kern::Printf("ROM address already shadowed"));
1.1124 + return KErrAlreadyExists;
1.1125 + }
1.1126 + TInt ptid = PageTableId(aRomAddr);
1.1127 + __KTRACE_OPT(KMMU, Kern::Printf("Shadow PTID %d", ptid));
1.1128 + TInt newptid = -1;
1.1129 + if (ptid<0)
1.1130 + {
1.1131 + newptid = AllocPageTable();
1.1132 + __KTRACE_OPT(KMMU, Kern::Printf("New shadow PTID %d", newptid));
1.1133 + if (newptid<0)
1.1134 + return KErrNoMemory;
1.1135 + ptid = newptid;
1.1136 + PtInfo(ptid).SetShadow( (aRomAddr-iRomLinearBase)>>iChunkShift );
1.1137 + InitShadowPageTable(ptid, aRomAddr, orig_phys);
1.1138 + }
1.1139 + TPhysAddr shadow_phys;
1.1140 +
1.1141 + if (AllocRamPages(&shadow_phys, 1, EPageFixed) != KErrNone)
1.1142 + {
1.1143 + __KTRACE_OPT(KMMU,Kern::Printf("Unable to allocate page"));
1.1144 + iAllocFailed=ETrue;
1.1145 + if (newptid>=0)
1.1146 + {
1.1147 + FreePageTable(newptid);
1.1148 + }
1.1149 + return KErrNoMemory;
1.1150 + }
1.1151 +#ifdef BTRACE_KERNEL_MEMORY
1.1152 + BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, 1<<KPageShift);
1.1153 + ++Epoc::KernelMiscPages;
1.1154 +#endif
1.1155 + InitShadowPage(shadow_phys, aRomAddr); // copy original ROM contents
1.1156 + NKern::LockSystem();
1.1157 + Pagify(ptid, aRomAddr);
1.1158 + MapRamPages(ptid, SPageInfo::EShadow, (TAny*)orig_phys, (aRomAddr-iRomLinearBase), &shadow_phys, 1, iShadowPtePerm);
1.1159 + NKern::UnlockSystem();
1.1160 + if (newptid>=0)
1.1161 + {
1.1162 + NKern::LockSystem();
1.1163 + AssignShadowPageTable(newptid, aRomAddr);
1.1164 + NKern::UnlockSystem();
1.1165 + }
1.1166 + FlushShadow(aRomAddr);
1.1167 + __KTRACE_OPT(KMMU,Kern::Printf("AllocShadowPage successful"));
1.1168 + return KErrNone;
1.1169 + }
1.1170 +
1.1171 +TInt MmuBase::FreeShadowPage(TLinAddr aRomAddr)
1.1172 + {
1.1173 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreeShadowPage(%08x)", aRomAddr));
1.1174 + aRomAddr &= ~iPageMask;
1.1175 + TPhysAddr shadow_phys = KPhysAddrInvalid;
1.1176 +	if (aRomAddr>=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize))
1.1177 + shadow_phys = LinearToPhysical(aRomAddr);
1.1178 + __KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys));
1.1179 + if (shadow_phys == KPhysAddrInvalid)
1.1180 + {
1.1181 + __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
1.1182 + return KErrArgument;
1.1183 + }
1.1184 + TInt ptid = PageTableId(aRomAddr);
1.1185 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys);
1.1186 + if (ptid<0 || !pi || pi->Type()!=SPageInfo::EShadow)
1.1187 + {
1.1188 + __KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address"));
1.1189 + return KErrGeneral;
1.1190 + }
1.1191 + TPhysAddr orig_phys = (TPhysAddr)pi->Owner();
1.1192 + DoUnmapShadowPage(ptid, aRomAddr, orig_phys);
1.1193 + SPageTableInfo& pti = PtInfo(ptid);
1.1194 + if (pti.Attribs()==SPageTableInfo::EShadow && --pti.iCount==0)
1.1195 + {
1.1196 + TInt r = UnassignShadowPageTable(aRomAddr, orig_phys);
1.1197 + if (r==KErrNone)
1.1198 + FreePageTable(ptid);
1.1199 + else
1.1200 + pti.SetGlobal(aRomAddr>>iChunkShift);
1.1201 + }
1.1202 +
1.1203 + FreePages(&shadow_phys, 1, EPageFixed);
1.1204 + __KTRACE_OPT(KMMU,Kern::Printf("FreeShadowPage successful"));
1.1205 +#ifdef BTRACE_KERNEL_MEMORY
1.1206 + BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscFree, 1<<KPageShift);
1.1207 + --Epoc::KernelMiscPages;
1.1208 +#endif
1.1209 + return KErrNone;
1.1210 + }
1.1211 +
1.1212 +TInt MmuBase::FreezeShadowPage(TLinAddr aRomAddr)
1.1213 + {
1.1214 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase:FreezeShadowPage(%08x)", aRomAddr));
1.1215 + aRomAddr &= ~iPageMask;
1.1216 + TPhysAddr shadow_phys = KPhysAddrInvalid;
1.1217 +	if (aRomAddr>=iRomLinearBase && aRomAddr<=(iRomLinearEnd-iPageSize))
1.1218 + shadow_phys = LinearToPhysical(aRomAddr);
1.1219 + __KTRACE_OPT(KMMU,Kern::Printf("ShadowPhys = %08x",shadow_phys));
1.1220 + if (shadow_phys == KPhysAddrInvalid)
1.1221 + {
1.1222 + __KTRACE_OPT(KMMU,Kern::Printf("Invalid ROM address"));
1.1223 + return KErrArgument;
1.1224 + }
1.1225 + TInt ptid = PageTableId(aRomAddr);
1.1226 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(shadow_phys);
1.1227 + if (ptid<0 || pi==0)
1.1228 + {
1.1229 + __KTRACE_OPT(KMMU,Kern::Printf("No shadow page at this address"));
1.1230 + return KErrGeneral;
1.1231 + }
1.1232 + DoFreezeShadowPage(ptid, aRomAddr);
1.1233 + __KTRACE_OPT(KMMU,Kern::Printf("FreezeShadowPage successful"));
1.1234 + return KErrNone;
1.1235 + }
1.1236 +
1.1237 +TInt MmuBase::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
1.1238 + {
1.1239 + memcpy ((TAny*)aDest, (const TAny*)aSrc, aLength);
1.1240 + return KErrNone;
1.1241 + }
1.1242 +
1.1243 +void M::BTracePrime(TUint aCategory)
1.1244 + {
1.1245 + (void)aCategory;
1.1246 +
1.1247 +#ifdef BTRACE_KERNEL_MEMORY
1.1248 + // Must check for -1 as that is the default value of aCategory for
1.1249 + // BTrace::Prime() which is intended to prime all categories that are
1.1250 + // currently enabled via a single invocation of BTrace::Prime().
1.1251 + if(aCategory==BTrace::EKernelMemory || (TInt)aCategory == -1)
1.1252 + {
1.1253 + NKern::ThreadEnterCS();
1.1254 + Mmu::Wait();
1.1255 + BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryInitialFree,TheSuperPage().iTotalRamSize);
1.1256 + BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryCurrentFree,Kern::FreeRamInBytes());
1.1257 + BTrace4(BTrace::EKernelMemory, BTrace::EKernelMemoryMiscAlloc, Epoc::KernelMiscPages<<KPageShift);
1.1258 + #ifdef __DEMAND_PAGING__
1.1259 + if (DemandPaging::ThePager)
1.1260 + BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,DemandPaging::ThePager->iMinimumPageCount << KPageShift);
1.1261 + #endif
1.1262 + BTrace8(BTrace::EKernelMemory,BTrace::EKernelMemoryDrvPhysAlloc, Epoc::DriverAllocdPhysRam, -1);
1.1263 + Mmu::Signal();
1.1264 + NKern::ThreadLeaveCS();
1.1265 + }
1.1266 +#endif
1.1267 +
1.1268 +#ifdef BTRACE_RAM_ALLOCATOR
1.1269 +	// Must check for -1 as that is the default value of aCategory for
1.1270 + // BTrace::Prime() which is intended to prime all categories that are
1.1271 + // currently enabled via a single invocation of BTrace::Prime().
1.1272 + if(aCategory==BTrace::ERamAllocator || (TInt)aCategory == -1)
1.1273 + {
1.1274 + NKern::ThreadEnterCS();
1.1275 + Mmu::Wait();
1.1276 + Mmu::Get().iRamPageAllocator->SendInitialBtraceLogs();
1.1277 + Mmu::Signal();
1.1278 + NKern::ThreadLeaveCS();
1.1279 + }
1.1280 +#endif
1.1281 + }
1.1282 +
1.1283 +
1.1284 +/******************************************************************************
1.1285 + * Code common to all virtual memory models
1.1286 + ******************************************************************************/
1.1287 +
1.1288 +void RHeapK::Mutate(TInt aOffset, TInt aMaxLength)
1.1289 +//
1.1290 +// Used by the kernel to mutate a fixed heap into a chunk heap.
1.1291 +//
1.1292 + {
1.1293 + iMinLength += aOffset;
1.1294 + iMaxLength = aMaxLength + aOffset;
1.1295 + iOffset = aOffset;
1.1296 + iChunkHandle = (TInt)K::HeapInfo.iChunk;
1.1297 + iPageSize = M::PageSizeInBytes();
1.1298 + iGrowBy = iPageSize;
1.1299 + iFlags = 0;
1.1300 + }
1.1301 +
1.1302 +TInt M::PageSizeInBytes()
1.1303 + {
1.1304 + return KPageSize;
1.1305 + }
1.1306 +
1.1307 +TInt MmuBase::FreeRamInBytes()
1.1308 + {
1.1309 + TInt free = iRamPageAllocator->FreeRamInBytes();
1.1310 + if(iRamCache)
1.1311 + free += iRamCache->NumberOfFreePages()<<iPageShift;
1.1312 + return free;
1.1313 + }
1.1314 +
1.1315 +/** Returns the amount of free RAM currently available.
1.1316 +
1.1317 +@return The number of bytes of free RAM currently available.
1.1318 +@pre any context
1.1319 + */
1.1320 +EXPORT_C TInt Kern::FreeRamInBytes()
1.1321 + {
1.1322 + return MmuBase::TheMmu->FreeRamInBytes();
1.1323 + }
1.1324 +
1.1325 +
1.1326 +/** Rounds up the argument to the size of an MMU page.
1.1327 +
1.1328 + To find out the size of a MMU page:
1.1329 + @code
1.1330 + size = Kern::RoundToPageSize(1);
1.1331 + @endcode
1.1332 +
1.1333 + @param aSize Value to round up
1.1334 + @pre any context
1.1335 + */
1.1336 +EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize)
1.1337 + {
1.1338 + return MmuBase::RoundToPageSize(aSize);
1.1339 + }
1.1340 +
1.1341 +
1.1342 +/** Rounds up the argument to the amount of memory mapped by an MMU page
1.1343 + directory entry.
1.1344 +
1.1345 + Chunks occupy one or more consecutive page directory entries (PDE) and
1.1346 + therefore the amount of linear and physical memory allocated to a chunk is
1.1347 + always a multiple of the amount of memory mapped by a page directory entry.
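+
+ To find out the amount of memory mapped by a single page directory entry:
+ @code
+ size = Kern::RoundToChunkSize(1);
+ @endcode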
1.1348 + */
1.1349 +EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize)
1.1350 + {
1.1351 + return MmuBase::RoundToChunkSize(aSize);
1.1352 + }
1.1353 +
1.1354 +
1.1355 +/**
1.1356 +Allows the variant to specify the details of the RAM zones. This should be invoked
1.1357 +by the variant in its implementation of the pure virtual function Asic::Init1().
1.1358 +
1.1359 +There are some limitations to how the RAM zones can be specified:
1.1360 +- Each RAM zone's address space must be distinct and not overlap with any
1.1361 +other RAM zone's address space
1.1362 +- Each RAM zone's address space must have a size that is a multiple of the
1.1363 +ASIC's MMU small page size and must be aligned to the ASIC's MMU small page size,
1.1364 +usually 4KB on ARM MMUs.
1.1365 +- When taken together all of the RAM zones must cover the whole of the physical RAM
1.1366 +address space as specified by the bootstrap in the SuperPage members iTotalRamSize
1.1367 +and iRamBootData.
1.1368 +- There can be no more than KMaxRamZones RAM zones specified by the base port
1.1369 +
1.1370 +Note that the verification of the RAM zone data is not performed here but by the RAM
1.1371 +allocator later in the boot-up sequence. This is because it is only possible to
1.1372 +verify the zone data once the physical RAM configuration has been read from
1.1373 +the super page. Any verification errors result in a "RAM-ALLOC" panic
1.1374 +faulting the kernel during initialisation.
1.1375 +
1.1376 +@param aZones Pointer to an array of SRamZone structs containing the details for all
1.1377 +the zones. The end of the array is specified by an element with an iSize of zero. The array must
1.1378 +remain in memory at least until the kernel has successfully booted.
1.1379 +
1.1380 +@param aCallback Pointer to a callback function that the kernel may invoke to request
1.1381 +one of the operations specified by TRamZoneOp.
1.1382 +
1.1383 +@return KErrNone if successful, otherwise one of the system wide error codes
1.1384 +
1.1385 +@see TRamZoneOp
1.1386 +@see SRamZone
1.1387 +@see TRamZoneCallback
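+
+A minimal sketch of how a variant's Asic::Init1() might describe two RAM zones (the
+addresses, sizes, IDs and helper name below are illustrative only, and no callback is
+registered in this sketch); the array is static so it remains in memory after boot:
+@code
+static SRamZone TheRamZones[3];          // two zones plus the iSize==0 terminator
+
+void InitRamZonesExample()               // hypothetical helper called from Asic::Init1()
+	{
+	TheRamZones[0].iBase = 0x80000000u;  // illustrative base addresses and sizes
+	TheRamZones[0].iSize = 0x04000000u;
+	TheRamZones[0].iId   = 0;
+	TheRamZones[1].iBase = 0x84000000u;
+	TheRamZones[1].iSize = 0x01000000u;
+	TheRamZones[1].iId   = 1;
+	// TheRamZones[2] is zero-initialised, so its iSize of 0 terminates the list
+	TInt r = Epoc::SetRamZoneConfig(TheRamZones, NULL);
+	__NK_ASSERT_ALWAYS(r==KErrNone);
+	}
+@endcode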
1.1388 +*/
1.1389 +EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
1.1390 + {
1.1391 + // Ensure this is only called once and only while we are initialising the kernel
1.1392 + if (!K::Initialising || MmuBase::RamZoneConfig != NULL)
1.1393 + {// fault kernel, won't return
1.1394 + K::Fault(K::EBadSetRamZoneConfig);
1.1395 + }
1.1396 +
1.1397 + if (NULL == aZones)
1.1398 + {
1.1399 + return KErrArgument;
1.1400 + }
1.1401 + MmuBase::RamZoneConfig=aZones;
1.1402 + MmuBase::RamZoneCallback=aCallback;
1.1403 + return KErrNone;
1.1404 + }
1.1405 +
1.1406 +
1.1407 +/**
1.1408 +Modify the specified RAM zone's flags.
1.1409 +
1.1410 +This allows the BSP or device driver to configure which type of pages, if any,
1.1411 +can be allocated into a RAM zone by the system.
1.1412 +
1.1413 +Note: updating a RAM zone's flags can result in
1.1414 + 1 - memory allocations failing despite there being enough free RAM in the system.
1.1415 + 2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone()
1.1416 + or TRamDefragRequest::DefragRam() never succeeding.
1.1417 +
1.1418 +The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc
1.1419 +are intended to be used with this method.
1.1420 +
1.1421 +@param aId The ID of the RAM zone to modify.
1.1422 +@param aClearMask The bit mask to clear, each flag of which must already be set on the RAM zone.
1.1423 +@param aSetMask The bit mask to set.
1.1424 +
1.1425 +@return KErrNone on success, KErrArgument if the RAM zone of aId is not found or if
1.1426 +aSetMask contains invalid flag bits.
1.1427 +
1.1428 +@see TRamDefragRequest::EmptyRamZone()
1.1429 +@see TRamDefragRequest::ClaimRamZone()
1.1430 +@see TRamDefragRequest::DefragRam()
1.1431 +
1.1432 +@see KRamZoneFlagDiscardOnly
1.1433 +@see KRamZoneFlagMovAndDisOnly
1.1434 +@see KRamZoneFlagNoAlloc
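+
+For example, to allow allocations again into a RAM zone that was configured with
+KRamZoneFlagNoAlloc (the zone ID used here is illustrative):
+@code
+TInt r = Epoc::ModifyRamZoneFlags(1, KRamZoneFlagNoAlloc, 0);
+@endcode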
1.1435 +*/
1.1436 +EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
1.1437 + {
1.1438 + MmuBase& m = *MmuBase::TheMmu;
1.1439 + MmuBase::Wait();
1.1440 +
1.1441 + TInt ret = m.ModifyRamZoneFlags(aId, aClearMask, aSetMask);
1.1442 +
1.1443 + MmuBase::Signal();
1.1444 + return ret;
1.1445 + }
1.1446 +
1.1447 +TInt MmuBase::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
1.1448 + {
1.1449 + return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask);
1.1450 + }
1.1451 +
1.1452 +
1.1453 +/**
1.1454 +Gets the current count of a particular RAM zone's pages by type.
1.1455 +
1.1456 +@param aId The ID of the RAM zone to enquire about
1.1457 +@param aPageData If successful, on return this contains the page count
1.1458 +
1.1459 +@return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or
1.1460 +one of the system wide error codes
1.1461 +
1.1462 +@pre Calling thread must be in a critical section.
1.1463 +@pre Interrupts must be enabled.
1.1464 +@pre Kernel must be unlocked.
1.1465 +@pre No fast mutex can be held.
1.1466 +@pre Call in a thread context.
1.1467 +
1.1468 +@see SRamZonePageCount
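+
+Example usage (the zone ID is illustrative):
+@code
+SRamZonePageCount pageCounts;
+TInt r = Epoc::GetRamZonePageCount(1, pageCounts);
+@endcode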
1.1469 +*/
1.1470 +EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
1.1471 + {
1.1472 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount");
1.1473 +
1.1474 + MmuBase& m = *MmuBase::TheMmu;
1.1475 + MmuBase::Wait(); // Gets RAM alloc mutex
1.1476 +
1.1477 + TInt r = m.GetRamZonePageCount(aId, aPageData);
1.1478 +
1.1479 + MmuBase::Signal();
1.1480 +
1.1481 + return r;
1.1482 + }
1.1483 +
1.1484 +TInt MmuBase::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
1.1485 + {
1.1486 + return iRamPageAllocator->GetZonePageCount(aId, aPageData);
1.1487 + }
1.1488 +
1.1489 +/**
1.1490 +Replace a page of the system's execute-in-place (XIP) ROM image with a page of
1.1491 +RAM having the same contents. This RAM can subsequently be written to in order
1.1492 +to apply patches to the XIP ROM or to insert software breakpoints for debugging
1.1493 +purposes.
1.1494 +Call Epoc::FreeShadowPage() when you wish to revert to the original ROM page.
1.1495 +
1.1496 +@param aRomAddr The virtual address of the ROM page to be replaced.
1.1497 +@return KErrNone if the operation completed successfully.
1.1498 + KErrArgument if the specified address is not a valid XIP ROM address.
1.1499 + KErrNoMemory if the operation failed due to insufficient free RAM.
1.1500 + KErrAlreadyExists if the XIP ROM page at the specified address has
1.1501 + already been shadowed by a RAM page.
1.1502 +
1.1503 +@pre Calling thread must be in a critical section.
1.1504 +@pre Interrupts must be enabled.
1.1505 +@pre Kernel must be unlocked.
1.1506 +@pre No fast mutex can be held.
1.1507 +@pre Call in a thread context.
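+
+A minimal sketch of patching a word of XIP ROM (the helper name is hypothetical and
+error handling is kept to a minimum):
+@code
+TInt PatchRomWord(TLinAddr aRomAddr, TUint32 aValue)
+	{
+	NKern::ThreadEnterCS();
+	TInt r = Epoc::AllocShadowPage(aRomAddr);  // shadow the whole ROM page containing aRomAddr
+	if (r==KErrNone || r==KErrAlreadyExists)
+		r = Epoc::CopyToShadowMemory(aRomAddr, (TLinAddr)&aValue, sizeof(aValue));
+	NKern::ThreadLeaveCS();
+	return r;                                  // Epoc::FreeShadowPage(aRomAddr) reverts the patch
+	}
+@endcode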
1.1508 +*/
1.1509 +EXPORT_C TInt Epoc::AllocShadowPage(TLinAddr aRomAddr)
1.1510 + {
1.1511 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocShadowPage");
1.1512 +
1.1513 + TInt r;
1.1514 + r=M::LockRegion(aRomAddr,1);
1.1515 + if(r!=KErrNone && r!=KErrNotFound)
1.1516 + return r;
1.1517 + MmuBase& m=*MmuBase::TheMmu;
1.1518 + MmuBase::Wait();
1.1519 + r=m.AllocShadowPage(aRomAddr);
1.1520 + MmuBase::Signal();
1.1521 + if(r!=KErrNone)
1.1522 + M::UnlockRegion(aRomAddr,1);
1.1523 + return r;
1.1524 + }
1.1525 +
1.1526 +/**
1.1527 +Copies data into shadow memory. Source data is presumed to be in Kernel memory.
1.1528 +
1.1529 +@param aDest Address to copy into.
1.1530 +@param aSrc Address of the data to copy from.
1.1531 +@param aLength Number of bytes to copy. A maximum of 32 bytes can be copied.
1.1532 +
1.1533 +@return KErrNone if the operation completed successfully.
1.1534 +        KErrArgument if any part of the destination region is not a shadow page or
1.1535 +        if aLength is greater than 32 bytes.
1.1536 +
1.1537 +@pre Calling thread must be in a critical section.
1.1538 +@pre Interrupts must be enabled.
1.1539 +@pre Kernel must be unlocked.
1.1540 +@pre No fast mutex can be held.
1.1541 +@pre Call in a thread context.
1.1542 +*/
1.1543 +EXPORT_C TInt Epoc::CopyToShadowMemory(TLinAddr aDest, TLinAddr aSrc, TUint32 aLength)
1.1544 + {
1.1545 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::CopyToShadowMemory");
1.1546 +
1.1547 + if (aLength>32)
1.1548 + return KErrArgument;
1.1549 + MmuBase& m=*MmuBase::TheMmu;
1.1550 + // This is a simple copy operation except on platforms with __CPU_MEMORY_TYPE_REMAPPING defined,
1.1551 + // where shadow page is read-only and it has to be remapped before it is written into.
1.1552 + return m.CopyToShadowMemory(aDest, aSrc, aLength);
1.1553 + }
1.1554 +/**
1.1555 +Revert an XIP ROM address which has previously been shadowed to the original
1.1556 +page of ROM.
1.1557 +
1.1558 +@param aRomAddr The virtual address of the ROM page to be reverted.
1.1559 +@return KErrNone if the operation completed successfully.
1.1560 + KErrArgument if the specified address is not a valid XIP ROM address.
1.1561 + KErrGeneral if the specified address has not previously been shadowed
1.1562 + using Epoc::AllocShadowPage().
1.1563 +
1.1564 +@pre Calling thread must be in a critical section.
1.1565 +@pre Interrupts must be enabled.
1.1566 +@pre Kernel must be unlocked.
1.1567 +@pre No fast mutex can be held.
1.1568 +@pre Call in a thread context.
1.1569 +*/
1.1570 +EXPORT_C TInt Epoc::FreeShadowPage(TLinAddr aRomAddr)
1.1571 + {
1.1572 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreeShadowPage");
1.1573 + MmuBase& m=*MmuBase::TheMmu;
1.1574 + MmuBase::Wait();
1.1575 + TInt r=m.FreeShadowPage(aRomAddr);
1.1576 + MmuBase::Signal();
1.1577 + if(r==KErrNone)
1.1578 + M::UnlockRegion(aRomAddr,1);
1.1579 + return r;
1.1580 + }
1.1581 +
1.1582 +
1.1583 +/**
1.1584 +Change the permissions on an XIP ROM address which has previously been shadowed
1.1585 +by a RAM page so that the RAM page may no longer be written to.
1.1586 +
1.1587 +Note: On the latest platforms (those that use the reduced set of access permissions:
1.1588 +arm11mpcore, arm1176, cortex) the shadow page is implemented with read-only permissions.
1.1589 +Calling this function is therefore not necessary, as the shadow page is already created 'frozen'.
1.1590 +
1.1591 +@param aRomAddr The virtual address of the shadow RAM page to be frozen.
1.1592 +@return KErrNone if the operation completed successfully.
1.1593 + KErrArgument if the specified address is not a valid XIP ROM address.
1.1594 + KErrGeneral if the specified address has not previously been shadowed
1.1595 + using Epoc::AllocShadowPage().
1.1596 +
1.1597 +@pre Calling thread must be in a critical section.
1.1598 +@pre Interrupts must be enabled.
1.1599 +@pre Kernel must be unlocked.
1.1600 +@pre No fast mutex can be held.
1.1601 +@pre Call in a thread context.
1.1602 +*/
1.1603 +EXPORT_C TInt Epoc::FreezeShadowPage(TLinAddr aRomAddr)
1.1604 + {
1.1605 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreezeShadowPage");
1.1606 + MmuBase& m=*MmuBase::TheMmu;
1.1607 + MmuBase::Wait();
1.1608 + TInt r=m.FreezeShadowPage(aRomAddr);
1.1609 + MmuBase::Signal();
1.1610 + return r;
1.1611 + }
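+
+// Illustrative shadow-page workflow (a minimal sketch; romAddr and patchData are
+// hypothetical, and the preconditions above apply): patch a word of XIP ROM and later
+// restore the original page.
+//
+//		TUint32 patchData = 0xE1A00000;						// value to write (hypothetical)
+//		TInt r = Epoc::AllocShadowPage(romAddr);			// shadow the ROM page with RAM
+//		if (r == KErrNone || r == KErrAlreadyExists)
+//			{
+//			r = Epoc::CopyToShadowMemory(romAddr, (TLinAddr)&patchData, sizeof(patchData));
+//			if (r == KErrNone)
+//				r = Epoc::FreezeShadowPage(romAddr);		// make the page read-only again (not needed on the latest platforms)
+//			}
+//		// ... when the patch is no longer needed:
+//		Epoc::FreeShadowPage(romAddr);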
1.1612 +
1.1613 +
1.1614 +/**
1.1615 +Allocate a block of physically contiguous RAM with a physical address aligned
1.1616 +to a specified power of 2 boundary.
1.1617 +When the RAM is no longer required it should be freed using
1.1618 +Epoc::FreePhysicalRam()
1.1619 +
1.1620 +@param aSize The size in bytes of the required block. The specified size
1.1621 + is rounded up to the page size, since only whole pages of
1.1622 + physical RAM can be allocated.
1.1623 +@param aPhysAddr Receives the physical address of the base of the block on
1.1624 + successful allocation.
1.1625 +@param aAlign Specifies the number of least significant bits of the
1.1626 + physical address which are required to be zero. If a value
1.1627 + less than log2(page size) is specified, page alignment is
1.1628 + assumed. Pass 0 for aAlign if there are no special alignment
1.1629 + constraints (other than page alignment).
1.1630 +@return KErrNone if the allocation was successful.
1.1631 + KErrNoMemory if a sufficiently large physically contiguous block of free
1.1632 + RAM with the specified alignment could not be found.
1.1633 +@pre Calling thread must be in a critical section.
1.1634 +@pre Interrupts must be enabled.
1.1635 +@pre Kernel must be unlocked.
1.1636 +@pre No fast mutex can be held.
1.1637 +@pre Call in a thread context.
1.1638 +@pre Can be used in a device driver.
1.1639 +*/
1.1640 +EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
1.1641 + {
1.1642 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam");
1.1643 + MmuBase& m=*MmuBase::TheMmu;
1.1644 + MmuBase::Wait();
1.1645 + TInt r=m.AllocPhysicalRam(aSize,aPhysAddr,aAlign);
1.1646 + if (r == KErrNone)
1.1647 + {
1.1648 + // For the sake of platform security we have to clear the memory. E.g. the driver
1.1649 + // could assign it to a chunk visible to user side.
1.1650 + m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1));
1.1651 +#ifdef BTRACE_KERNEL_MEMORY
1.1652 + TUint size = Kern::RoundToPageSize(aSize);
1.1653 + BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr);
1.1654 + Epoc::DriverAllocdPhysRam += size;
1.1655 +#endif
1.1656 + }
1.1657 + MmuBase::Signal();
1.1658 + return r;
1.1659 + }
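+
+// Illustrative usage (a minimal sketch; KMyBufSize is hypothetical): allocate a physically
+// contiguous, 1MB-aligned buffer and free it again. aAlign=20 requires the 20 least
+// significant bits of the physical address to be zero.
+//
+//		const TInt KMyBufSize = 16 * KPageSize;
+//		TPhysAddr physAddr;
+//		NKern::ThreadEnterCS();
+//		TInt r = Epoc::AllocPhysicalRam(KMyBufSize, physAddr, 20);
+//		if (r == KErrNone)
+//			{
+//			// ... map the block (e.g. with DPlatChunkHw::New()) and use it ...
+//			Epoc::FreePhysicalRam(physAddr, KMyBufSize);
+//			}
+//		NKern::ThreadLeaveCS();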
1.1660 +
1.1661 +/**
1.1662 +Allocate a block of physically contiguous RAM with a physical address aligned
1.1663 +to a specified power of 2 boundary from the specified zone.
1.1664 +When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1.1665 +
1.1666 +Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
1.1667 +to allocate regardless of whether the other flags are set for the specified RAM zones
1.1668 +or not.
1.1669 +
1.1672 +@param aZoneId The ID of the zone to attempt to allocate from.
1.1673 +@param aSize The size in bytes of the required block. The specified size
1.1674 + is rounded up to the page size, since only whole pages of
1.1675 + physical RAM can be allocated.
1.1676 +@param aPhysAddr Receives the physical address of the base of the block on
1.1677 + successful allocation.
1.1678 +@param aAlign Specifies the number of least significant bits of the
1.1679 + physical address which are required to be zero. If a value
1.1680 + less than log2(page size) is specified, page alignment is
1.1681 + assumed. Pass 0 for aAlign if there are no special alignment
1.1682 + constraints (other than page alignment).
1.1683 +@return KErrNone if the allocation was successful.
1.1684 + KErrNoMemory if a sufficiently large physically contiguous block of free
1.1685 + RAM with the specified alignment could not be found within the specified
1.1686 + zone.
1.1687 + KErrArgument if a RAM zone of the specified ID can't be found or if the
1.1688 + RAM zone has a total number of physical pages which is less than those
1.1689 + requested for the allocation.
1.1690 +
1.1691 +@pre Calling thread must be in a critical section.
1.1692 +@pre Interrupts must be enabled.
1.1693 +@pre Kernel must be unlocked.
1.1694 +@pre No fast mutex can be held.
1.1695 +@pre Call in a thread context.
1.1696 +@pre Can be used in a device driver.
1.1697 +*/
1.1698 +EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
1.1699 + {
1.1700 + return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign);
1.1701 + }
1.1702 +
1.1703 +
1.1704 +/**
1.1705 +Allocate a block of physically contiguous RAM with a physical address aligned
1.1706 +to a specified power of 2 boundary from the specified RAM zones.
1.1707 +When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1.1708 +
1.1709 +RAM will be allocated from the RAM zones in the order they are specified in the
1.1710 +aZoneIdList parameter. If a contiguous allocation may need to span RAM zones,
1.1711 +the RAM zones in aZoneIdList should be listed in ascending physical
1.1712 +address order.
1.1713 +
1.1714 +Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
1.1715 +to allocate regardless of whether the other flags are set for the specified RAM zones
1.1716 +or not.
1.1717 +
1.1720 +@param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to
1.1721 + attempt to allocate from.
1.1722 +@param aZoneIdCount The number of RAM zone IDs contained in aZoneIdList.
1.1723 +@param aSize The size in bytes of the required block. The specified size
1.1724 + is rounded up to the page size, since only whole pages of
1.1725 + physical RAM can be allocated.
1.1726 +@param aPhysAddr Receives the physical address of the base of the block on
1.1727 + successful allocation.
1.1728 +@param aAlign Specifies the number of least significant bits of the
1.1729 + physical address which are required to be zero. If a value
1.1730 + less than log2(page size) is specified, page alignment is
1.1731 + assumed. Pass 0 for aAlign if there are no special alignment
1.1732 + constraints (other than page alignment).
1.1733 +@return KErrNone if the allocation was successful.
1.1734 + KErrNoMemory if a sufficiently large physically contiguous block of free
1.1735 + RAM with the specified alignment could not be found within the specified
1.1736 + zone.
1.1737 + KErrArgument if a RAM zone of a specified ID can't be found or if the
1.1738 + RAM zones have a total number of physical pages which is less than those
1.1739 + requested for the allocation.
1.1740 +
1.1741 +@pre Calling thread must be in a critical section.
1.1742 +@pre Interrupts must be enabled.
1.1743 +@pre Kernel must be unlocked.
1.1744 +@pre No fast mutex can be held.
1.1745 +@pre Call in a thread context.
1.1746 +@pre Can be used in a device driver.
1.1747 +*/
1.1748 +EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
1.1749 + {
1.1750 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam");
1.1751 + MmuBase& m=*MmuBase::TheMmu;
1.1752 + MmuBase::Wait();
1.1753 + TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
1.1754 + if (r == KErrNone)
1.1755 + {
1.1756 + // For the sake of platform security we have to clear the memory. E.g. the driver
1.1757 + // could assign it to a chunk visible to user side.
1.1758 + m.ClearPages(Kern::RoundToPageSize(aSize)>>m.iPageShift, (TPhysAddr*)(aPhysAddr|1));
1.1759 +#ifdef BTRACE_KERNEL_MEMORY
1.1760 + TUint size = Kern::RoundToPageSize(aSize);
1.1761 + BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, aPhysAddr);
1.1762 + Epoc::DriverAllocdPhysRam += size;
1.1763 +#endif
1.1764 + }
1.1765 + MmuBase::Signal();
1.1766 + return r;
1.1767 + }
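+
+// Illustrative usage (a minimal sketch; the zone IDs are hypothetical): attempt a
+// contiguous allocation from two RAM zones listed in ascending physical address order.
+//
+//		TUint zoneIds[] = { 1, 2 };
+//		TPhysAddr physAddr;
+//		TInt r = Epoc::ZoneAllocPhysicalRam(zoneIds, 2, 64 * KPageSize, physAddr, 0);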
1.1768 +
1.1769 +
1.1770 +/**
1.1771 +Attempt to allocate discontiguous RAM pages.
1.1772 +
1.1773 +When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1.1774 +
1.1775 +@param aNumPages The number of discontiguous pages required to be allocated
1.1776 +@param aPageList This should be a pointer to a previously allocated array of
1.1777 +					aNumPages TPhysAddr elements. On a successful allocation it
1.1778 + will receive the physical addresses of each page allocated.
1.1779 +
1.1780 +@return KErrNone if the allocation was successful.
1.1781 + KErrNoMemory if the requested number of pages can't be allocated
1.1782 +
1.1783 +@pre Calling thread must be in a critical section.
1.1784 +@pre Interrupts must be enabled.
1.1785 +@pre Kernel must be unlocked.
1.1786 +@pre No fast mutex can be held.
1.1787 +@pre Call in a thread context.
1.1788 +@pre Can be used in a device driver.
1.1789 +*/
1.1790 +EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
1.1791 + {
1.1792 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam");
1.1793 + MmuBase& m = *MmuBase::TheMmu;
1.1794 + MmuBase::Wait();
1.1795 + TInt r = m.AllocPhysicalRam(aNumPages, aPageList);
1.1796 + if (r == KErrNone)
1.1797 + {
1.1798 + // For the sake of platform security we have to clear the memory. E.g. the driver
1.1799 + // could assign it to a chunk visible to user side.
1.1800 + m.ClearPages(aNumPages, aPageList);
1.1801 +
1.1802 +#ifdef BTRACE_KERNEL_MEMORY
1.1803 + if (BTrace::CheckFilter(BTrace::EKernelMemory))
1.1804 + {// Only loop round each page if EKernelMemory tracing is enabled
1.1805 + TPhysAddr* pAddr = aPageList;
1.1806 + TPhysAddr* pAddrEnd = aPageList + aNumPages;
1.1807 + while (pAddr < pAddrEnd)
1.1808 + {
1.1809 + BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
1.1810 + Epoc::DriverAllocdPhysRam += KPageSize;
1.1811 + }
1.1812 + }
1.1813 +#endif
1.1814 + }
1.1815 + MmuBase::Signal();
1.1816 + return r;
1.1817 + }
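+
+// Illustrative usage (a minimal sketch; KMyNumPages is hypothetical, and the calling
+// thread must be in a critical section as stated above): allocate eight discontiguous
+// pages and free them again with the page-list overload of Epoc::FreePhysicalRam().
+//
+//		const TInt KMyNumPages = 8;
+//		TPhysAddr pages[KMyNumPages];
+//		TInt r = Epoc::AllocPhysicalRam(KMyNumPages, pages);
+//		if (r == KErrNone)
+//			{
+//			// ... use the pages ...
+//			Epoc::FreePhysicalRam(KMyNumPages, pages);
+//			}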
1.1818 +
1.1819 +
1.1820 +/**
1.1821 +Attempt to allocate discontiguous RAM pages from the specified zone.
1.1822 +
1.1823 +Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
1.1824 +to allocate regardless of whether the other flags are set for the specified RAM zones
1.1825 +or not.
1.1826 +
1.1827 +When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1.1828 +
1.1829 +@param aZoneId The ID of the zone to attempt to allocate from.
1.1830 +@param aNumPages The number of discontiguous pages required to be allocated
1.1831 + from the specified zone.
1.1832 +@param aPageList This should be a pointer to a previously allocated array of
1.1833 +					aNumPages TPhysAddr elements. On a successful
1.1834 + allocation it will receive the physical addresses of each
1.1835 + page allocated.
1.1836 +@return KErrNone if the allocation was successful.
1.1837 + KErrNoMemory if the requested number of pages can't be allocated from the
1.1838 + specified zone.
1.1839 + KErrArgument if a RAM zone of the specified ID can't be found or if the
1.1840 + RAM zone has a total number of physical pages which is less than those
1.1841 + requested for the allocation.
1.1842 +
1.1843 +@pre Calling thread must be in a critical section.
1.1844 +@pre Interrupts must be enabled.
1.1845 +@pre Kernel must be unlocked.
1.1846 +@pre No fast mutex can be held.
1.1847 +@pre Call in a thread context.
1.1848 +@pre Can be used in a device driver.
1.1849 +*/
1.1850 +EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList)
1.1851 + {
1.1852 + return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList);
1.1853 + }
1.1854 +
1.1855 +
1.1856 +/**
1.1857 +Attempt to allocate discontiguous RAM pages from the specified RAM zones.
1.1858 +The RAM pages will be allocated from the RAM zones in the order that they are specified
1.1859 +in the aZoneIdList parameter; the RAM zone preferences will be ignored.
1.1860 +
1.1861 +Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
1.1862 +to allocate regardless of whether the other flags are set for the specified RAM zones
1.1863 +or not.
1.1864 +
1.1865 +When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
1.1866 +
1.1867 +@param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to
1.1868 + attempt to allocate from.
1.1869 +@param aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList.
1.1870 +@param aNumPages The number of discontiguous pages required to be allocated
1.1871 + from the specified zone.
1.1872 +@param aPageList This should be a pointer to a previously allocated array of
1.1873 +					aNumPages TPhysAddr elements. On a successful
1.1874 + allocation it will receive the physical addresses of each
1.1875 + page allocated.
1.1876 +@return KErrNone if the allocation was successful.
1.1877 + KErrNoMemory if the requested number of pages can't be allocated from the
1.1878 + specified zone.
1.1879 + KErrArgument if a RAM zone of a specified ID can't be found or if the
1.1880 + RAM zones have a total number of physical pages which is less than those
1.1881 + requested for the allocation.
1.1882 +
1.1883 +@pre Calling thread must be in a critical section.
1.1884 +@pre Interrupts must be enabled.
1.1885 +@pre Kernel must be unlocked.
1.1886 +@pre No fast mutex can be held.
1.1887 +@pre Call in a thread context.
1.1888 +@pre Can be used in a device driver.
1.1889 +*/
1.1890 +EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
1.1891 + {
1.1892 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam");
1.1893 + MmuBase& m = *MmuBase::TheMmu;
1.1894 + MmuBase::Wait();
1.1895 + TInt r = m.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList);
1.1896 + if (r == KErrNone)
1.1897 + {
1.1898 + // For the sake of platform security we have to clear the memory. E.g. the driver
1.1899 + // could assign it to a chunk visible to user side.
1.1900 + m.ClearPages(aNumPages, aPageList);
1.1901 +
1.1902 +#ifdef BTRACE_KERNEL_MEMORY
1.1903 + if (BTrace::CheckFilter(BTrace::EKernelMemory))
1.1904 + {// Only loop round each page if EKernelMemory tracing is enabled
1.1905 + TPhysAddr* pAddr = aPageList;
1.1906 + TPhysAddr* pAddrEnd = aPageList + aNumPages;
1.1907 + while (pAddr < pAddrEnd)
1.1908 + {
1.1909 + BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, KPageSize, *pAddr++);
1.1910 + Epoc::DriverAllocdPhysRam += KPageSize;
1.1911 + }
1.1912 + }
1.1913 +#endif
1.1914 + }
1.1915 + MmuBase::Signal();
1.1916 + return r;
1.1917 + }
1.1918 +
1.1919 +/**
1.1920 +Free a previously-allocated block of physically contiguous RAM.
1.1921 +
1.1922 +Specifying one of the following may cause the system to panic:
1.1923 +a) an invalid physical RAM address.
1.1924 +b) valid physical RAM addresses where some had not been previously allocated.
1.1925 +c) an address not aligned to a page boundary.
1.1926 +
1.1927 +@param aPhysAddr The physical address of the base of the block to be freed.
1.1928 + This must be the address returned by a previous call to
1.1929 + Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(),
1.1930 + Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone().
1.1931 +@param aSize The size in bytes of the block to be freed. The specified size
1.1932 +					is rounded up to the page size, since only whole pages of
1.1933 +					physical RAM can be freed.
1.1934 +@return KErrNone if the operation was successful.
1.1935 +
1.1938 +@pre Calling thread must be in a critical section.
1.1939 +@pre Interrupts must be enabled.
1.1940 +@pre Kernel must be unlocked.
1.1941 +@pre No fast mutex can be held.
1.1942 +@pre Call in a thread context.
1.1943 +@pre Can be used in a device driver.
1.1944 +*/
1.1945 +EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
1.1946 + {
1.1947 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
1.1948 + MmuBase& m=*MmuBase::TheMmu;
1.1949 + MmuBase::Wait();
1.1950 + TInt r=m.FreePhysicalRam(aPhysAddr,aSize);
1.1951 +#ifdef BTRACE_KERNEL_MEMORY
1.1952 + if (r == KErrNone)
1.1953 + {
1.1954 + TUint size = Kern::RoundToPageSize(aSize);
1.1955 + BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, size, aPhysAddr);
1.1956 + Epoc::DriverAllocdPhysRam -= size;
1.1957 + }
1.1958 +#endif
1.1959 + MmuBase::Signal();
1.1960 + return r;
1.1961 + }
1.1962 +
1.1963 +
1.1964 +/**
1.1965 +Free a number of physical RAM pages that were previously allocated using
1.1966 +Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam().
1.1967 +
1.1968 +Specifying one of the following may cause the system to panic:
1.1969 +a) an invalid physical RAM address.
1.1970 +b) valid physical RAM addresses where some had not been previously allocated.
1.1971 +c) an address not aligned to a page boundary.
1.1972 +
1.1973 +@param aNumPages The number of pages to be freed.
1.1974 +@param aPageList An array of aNumPages TPhysAddr elements, where each element
1.1975 +					contains the physical address of a page to be freed.
1.1976 + This must be the same set of addresses as those returned by a
1.1977 + previous call to Epoc::AllocPhysicalRam() or
1.1978 + Epoc::ZoneAllocPhysicalRam().
1.1979 +@return KErrNone if the operation was successful.
1.1980 +
1.1981 +@pre Calling thread must be in a critical section.
1.1982 +@pre Interrupts must be enabled.
1.1983 +@pre Kernel must be unlocked.
1.1984 +@pre No fast mutex can be held.
1.1985 +@pre Call in a thread context.
1.1986 +@pre Can be used in a device driver.
1.1987 +
1.1988 +*/
1.1989 +EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
1.1990 + {
1.1991 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
1.1992 + MmuBase& m=*MmuBase::TheMmu;
1.1993 + MmuBase::Wait();
1.1994 + TInt r=m.FreePhysicalRam(aNumPages, aPageList);
1.1995 +#ifdef BTRACE_KERNEL_MEMORY
1.1996 + if (r == KErrNone && BTrace::CheckFilter(BTrace::EKernelMemory))
1.1997 + {// Only loop round each page if EKernelMemory tracing is enabled
1.1998 + TPhysAddr* pAddr = aPageList;
1.1999 + TPhysAddr* pAddrEnd = aPageList + aNumPages;
1.2000 + while (pAddr < pAddrEnd)
1.2001 + {
1.2002 + BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysFree, KPageSize, *pAddr++);
1.2003 + Epoc::DriverAllocdPhysRam -= KPageSize;
1.2004 + }
1.2005 + }
1.2006 +#endif
1.2007 + MmuBase::Signal();
1.2008 + return r;
1.2009 + }
1.2010 +
1.2011 +
1.2012 +/**
1.2013 +Allocate a specific block of physically contiguous RAM, specified by physical
1.2014 +base address and size.
1.2015 +If and when the RAM is no longer required it should be freed using
1.2016 +Epoc::FreePhysicalRam()
1.2017 +
1.2018 +@param aPhysAddr The physical address of the base of the required block.
1.2019 +@param aSize The size in bytes of the required block. The specified size
1.2020 + is rounded up to the page size, since only whole pages of
1.2021 + physical RAM can be allocated.
1.2022 +@return KErrNone if the operation was successful.
1.2023 + KErrArgument if the range of physical addresses specified included some
1.2024 + which are not valid physical RAM addresses.
1.2025 + KErrInUse if the range of physical addresses specified are all valid
1.2026 + physical RAM addresses but some of them have already been
1.2027 + allocated for other purposes.
1.2028 +@pre Calling thread must be in a critical section.
1.2029 +@pre Interrupts must be enabled.
1.2030 +@pre Kernel must be unlocked.
1.2031 +@pre No fast mutex can be held.
1.2032 +@pre Call in a thread context.
1.2033 +@pre Can be used in a device driver.
1.2034 +*/
1.2035 +EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
1.2036 + {
1.2037 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam");
1.2038 + MmuBase& m=*MmuBase::TheMmu;
1.2039 + MmuBase::Wait();
1.2040 + TInt r=m.ClaimPhysicalRam(aPhysAddr,aSize);
1.2041 +#ifdef BTRACE_KERNEL_MEMORY
1.2042 + if(r==KErrNone)
1.2043 + {
1.2044 + TUint32 pa=aPhysAddr;
1.2045 + TUint32 size=aSize;
1.2046 + m.RoundUpRangeToPageSize(pa,size);
1.2047 + BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, size, pa);
1.2048 + Epoc::DriverAllocdPhysRam += size;
1.2049 + }
1.2050 +#endif
1.2051 + MmuBase::Signal();
1.2052 + return r;
1.2053 + }
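+
+// Illustrative usage (a minimal sketch; KMyReservedBase and KMyReservedSize are
+// hypothetical): claim a block of physical RAM reserved for a device, for example a
+// frame buffer set aside by the bootstrap.
+//
+//		TInt r = Epoc::ClaimPhysicalRam(KMyReservedBase, KMyReservedSize);
+//		if (r == KErrNone)
+//			{
+//			// the block is now owned by the driver until Epoc::FreePhysicalRam() is called
+//			}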
1.2054 +
1.2055 +
1.2056 +/**
1.2057 +Translate a virtual address to the corresponding physical address.
1.2058 +
1.2059 +@param aLinAddr The virtual address to be translated.
1.2060 +@return The physical address corresponding to the given virtual address, or
1.2061 + KPhysAddrInvalid if the specified virtual address is unmapped.
1.2062 +@pre Interrupts must be enabled.
1.2063 +@pre Kernel must be unlocked.
1.2064 +@pre Call in a thread context.
1.2065 +@pre Can be used in a device driver.
1.2066 +@pre Hold system lock if there is any possibility that the virtual address is
1.2067 + unmapped, may become unmapped, or may be remapped during the operation.
1.2068 + This will potentially be the case unless the virtual address refers to a
1.2069 + hardware chunk or shared chunk under the control of the driver calling this
1.2070 + function.
1.2071 +*/
1.2072 +EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr)
1.2073 + {
1.2074 +// This precondition is violated by various parts of the system under some conditions,
1.2075 +// e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by
1.2076 +// a higher-level RTOS for which these conditions are meaningless. Thus, it's been
1.2077 +// disabled for now.
1.2078 +// CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical");
1.2079 + MmuBase& m=*MmuBase::TheMmu;
1.2080 + TPhysAddr pa=m.LinearToPhysical(aLinAddr);
1.2081 + return pa;
1.2082 + }
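+
+// Illustrative usage (a minimal sketch; chunkBase is hypothetical): translate an address
+// inside a hardware or shared chunk owned by the calling driver, so the mapping cannot
+// change and the system lock is not needed.
+//
+//		TPhysAddr pa = Epoc::LinearToPhysical(chunkBase);
+//		if (pa == KPhysAddrInvalid)
+//			{
+//			// the address is not mapped
+//			}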
1.2083 +
1.2084 +
1.2085 +EXPORT_C TInt TInternalRamDrive::MaxSize()
1.2086 + {
1.2087 + return TheSuperPage().iRamDriveSize+Kern::FreeRamInBytes();
1.2088 + }
1.2089 +
1.2090 +
1.2091 +/******************************************************************************
1.2092 + * Address allocator
1.2093 + ******************************************************************************/
1.2094 +TLinearSection* TLinearSection::New(TLinAddr aBase, TLinAddr aEnd)
1.2095 + {
1.2096 + __KTRACE_OPT(KMMU,Kern::Printf("TLinearSection::New(%08x,%08x)", aBase, aEnd));
1.2097 + MmuBase& m=*MmuBase::TheMmu;
1.2098 + TUint npdes=(aEnd-aBase)>>m.iChunkShift;
1.2099 + TInt nmapw=(npdes+31)>>5;
1.2100 + TInt memsz=sizeof(TLinearSection)+(nmapw-1)*sizeof(TUint32);
1.2101 + TLinearSection* p=(TLinearSection*)Kern::Alloc(memsz);
1.2102 + if (p)
1.2103 + {
1.2104 + new(&p->iAllocator) TBitMapAllocator(npdes, ETrue);
1.2105 + p->iBase=aBase;
1.2106 + p->iEnd=aEnd;
1.2107 + }
1.2108 + __KTRACE_OPT(KMMU,Kern::Printf("TLinearSection at %08x", p));
1.2109 + return p;
1.2110 + }
1.2111 +
1.2112 +/******************************************************************************
1.2113 + * Address allocator for HW chunks
1.2114 + ******************************************************************************/
1.2115 +THwChunkPageTable::THwChunkPageTable(TInt aIndex, TInt aSize, TPde aPdePerm)
1.2116 + : THwChunkRegion(aIndex, 0, aPdePerm),
1.2117 + iAllocator(aSize, ETrue)
1.2118 + {
1.2119 + }
1.2120 +
1.2121 +THwChunkPageTable* THwChunkPageTable::New(TInt aIndex, TPde aPdePerm)
1.2122 + {
1.2123 + __KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable::New(%03x,%08x)",aIndex,aPdePerm));
1.2124 + MmuBase& m=*MmuBase::TheMmu;
1.2125 + TInt pdepages=m.iChunkSize>>m.iPageShift;
1.2126 + TInt nmapw=(pdepages+31)>>5;
1.2127 + TInt memsz=sizeof(THwChunkPageTable)+(nmapw-1)*sizeof(TUint32);
1.2128 + THwChunkPageTable* p=(THwChunkPageTable*)Kern::Alloc(memsz);
1.2129 + if (p)
1.2130 + new (p) THwChunkPageTable(aIndex, pdepages, aPdePerm);
1.2131 + __KTRACE_OPT(KMMU, Kern::Printf("THwChunkPageTable at %08x",p));
1.2132 + return p;
1.2133 + }
1.2134 +
1.2135 +THwChunkAddressAllocator::THwChunkAddressAllocator()
1.2136 + {
1.2137 + }
1.2138 +
1.2139 +THwChunkAddressAllocator* THwChunkAddressAllocator::New(TInt aAlign, TLinearSection* aSection)
1.2140 + {
1.2141 + __KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator::New(%d,%08x)",aAlign,aSection));
1.2142 + THwChunkAddressAllocator* p=new THwChunkAddressAllocator;
1.2143 + if (p)
1.2144 + {
1.2145 + p->iAlign=aAlign;
1.2146 + p->iSection=aSection;
1.2147 + }
1.2148 + __KTRACE_OPT(KMMU, Kern::Printf("THwChunkAddressAllocator at %08x",p));
1.2149 + return p;
1.2150 + }
1.2151 +
1.2152 +THwChunkRegion* THwChunkAddressAllocator::NewRegion(TInt aIndex, TInt aSize, TPde aPdePerm)
1.2153 + {
1.2154 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion(index=%x, size=%x, pde=%08x)",aIndex,aSize,aPdePerm));
1.2155 + THwChunkRegion* p=new THwChunkRegion(aIndex, aSize, aPdePerm);
1.2156 + if (p)
1.2157 + {
1.2158 + TInt r=InsertInOrder(p, Order);
1.2159 + __KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r));
1.2160 + if (r<0)
1.2161 + delete p, p=NULL;
1.2162 + }
1.2163 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewRegion ret %08x)",p));
1.2164 + return p;
1.2165 + }
1.2166 +
1.2167 +THwChunkPageTable* THwChunkAddressAllocator::NewPageTable(TInt aIndex, TPde aPdePerm, TInt aInitB, TInt aInitC)
1.2168 + {
1.2169 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable(index=%x, pde=%08x, iB=%d, iC=%d)",aIndex,aPdePerm,aInitB,aInitC));
1.2170 + THwChunkPageTable* p=THwChunkPageTable::New(aIndex, aPdePerm);
1.2171 + if (p)
1.2172 + {
1.2173 + TInt r=InsertInOrder(p, Order);
1.2174 + __KTRACE_OPT(KMMU, Kern::Printf("p=%08x, insert ret %d",p,r));
1.2175 + if (r<0)
1.2176 + delete p, p=NULL;
1.2177 + else
1.2178 + p->iAllocator.Alloc(aInitB, aInitC);
1.2179 + }
1.2180 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::NewPageTable ret %08x)",p));
1.2181 + return p;
1.2182 + }
1.2183 +
1.2184 +TLinAddr THwChunkAddressAllocator::SearchExisting(TInt aNumPages, TInt aPageAlign, TInt aPageOffset, TPde aPdePerm)
1.2185 + {
1.2186 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx np=%03x align=%d offset=%03x pdeperm=%08x",
1.2187 + aNumPages, aPageAlign, aPageOffset, aPdePerm));
1.2188 + TInt c=Count();
1.2189 + if (c==0)
1.2190 + return 0; // don't try to access [0] if array empty!
1.2191 + THwChunkPageTable** pp=(THwChunkPageTable**)&(*this)[0];
1.2192 + THwChunkPageTable** ppE=pp+c;
1.2193 + while(pp<ppE)
1.2194 + {
1.2195 + THwChunkPageTable* p=*pp++;
1.2196 + if (p->iRegionSize!=0 || p->iPdePerm!=aPdePerm)
1.2197 + continue; // if not page table or PDE permissions wrong, we can't use it
1.2198 + TInt r=p->iAllocator.AllocAligned(aNumPages, aPageAlign, -aPageOffset, EFalse);
1.2199 + __KTRACE_OPT(KMMU, Kern::Printf("r=%d", r));
1.2200 + if (r<0)
1.2201 + continue; // not enough space in this page table
1.2202 +
1.2203 + // got enough space in existing page table, so use it
1.2204 + p->iAllocator.Alloc(r, aNumPages);
1.2205 + MmuBase& m=*MmuBase::TheMmu;
1.2206 + TLinAddr a = iSection->iBase + (TLinAddr(p->iIndex)<<m.iChunkShift) + (r<<m.iPageShift);
1.2207 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx OK, returning %08x", a));
1.2208 + return a;
1.2209 + }
1.2210 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::SrchEx not found"));
1.2211 + return 0;
1.2212 + }
1.2213 +
1.2214 +TLinAddr THwChunkAddressAllocator::Alloc(TInt aSize, TInt aAlign, TInt aOffset, TPde aPdePerm)
1.2215 + {
1.2216 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc size=%08x align=%d offset=%08x pdeperm=%08x",
1.2217 + aSize, aAlign, aOffset, aPdePerm));
1.2218 + MmuBase& m=*MmuBase::TheMmu;
1.2219 + TInt npages=(aSize+m.iPageMask)>>m.iPageShift;
1.2220 + TInt align=Max(aAlign,iAlign);
1.2221 + if (align>m.iChunkShift)
1.2222 + return 0;
1.2223 + TInt aligns=1<<align;
1.2224 + TInt alignm=aligns-1;
1.2225 + TInt offset=(aOffset&alignm)>>m.iPageShift;
1.2226 + TInt pdepages=m.iChunkSize>>m.iPageShift;
1.2227 + TInt pdepageshift=m.iChunkShift-m.iPageShift;
1.2228 + MmuBase::WaitHwChunk();
1.2229 + if (npages<pdepages)
1.2230 + {
1.2231 + // for small regions, first try to share an existing page table
1.2232 + TLinAddr a=SearchExisting(npages, align-m.iPageShift, offset, aPdePerm);
1.2233 + if (a)
1.2234 + {
1.2235 + MmuBase::SignalHwChunk();
1.2236 + return a;
1.2237 + }
1.2238 + }
1.2239 +
1.2240 + // large region or no free space in existing page tables - allocate whole PDEs
1.2241 + TInt npdes=(npages+offset+pdepages-1)>>pdepageshift;
1.2242 + __KTRACE_OPT(KMMU, Kern::Printf("Allocate %d PDEs", npdes));
1.2243 + MmuBase::Wait();
1.2244 + TInt ix=iSection->iAllocator.AllocConsecutive(npdes, EFalse);
1.2245 + if (ix>=0)
1.2246 + iSection->iAllocator.Alloc(ix, npdes);
1.2247 + MmuBase::Signal();
1.2248 + TLinAddr a=0;
1.2249 + if (ix>=0)
1.2250 + a = iSection->iBase + (TLinAddr(ix)<<m.iChunkShift) + (TLinAddr(offset)<<m.iPageShift);
1.2251 +
1.2252 + // Create bitmaps for each page table and placeholders for section blocks.
1.2253 + // We only create a bitmap for the first and last PDE and then only if they are not
1.2254 + // fully occupied by this request
1.2255 + THwChunkPageTable* first=NULL;
1.2256 + THwChunkRegion* middle=NULL;
1.2257 + TInt remain=npages;
1.2258 + TInt nix=ix;
1.2259 + if (a && (offset || npages<pdepages))
1.2260 + {
1.2261 + // first PDE is bitmap
1.2262 + TInt first_count = Min(remain, pdepages-offset);
1.2263 + first=NewPageTable(nix, aPdePerm, offset, first_count);
1.2264 + ++nix;
1.2265 + remain -= first_count;
1.2266 + if (!first)
1.2267 + a=0;
1.2268 + }
1.2269 + if (a && remain>=pdepages)
1.2270 + {
1.2271 + // next need whole-PDE-block placeholder
1.2272 + TInt whole_pdes=remain>>pdepageshift;
1.2273 + middle=NewRegion(nix, whole_pdes, aPdePerm);
1.2274 + nix+=whole_pdes;
1.2275 + remain-=(whole_pdes<<pdepageshift);
1.2276 + if (!middle)
1.2277 + a=0;
1.2278 + }
1.2279 + if (a && remain)
1.2280 + {
1.2281 + // need final bitmap section
1.2282 + if (!NewPageTable(nix, aPdePerm, 0, remain))
1.2283 + a=0;
1.2284 + }
1.2285 + if (!a)
1.2286 + {
1.2287 + // alloc failed somewhere - free anything we did create
1.2288 + if (middle)
1.2289 + Discard(middle);
1.2290 + if (first)
1.2291 + Discard(first);
1.2292 + if (ix>=0)
1.2293 + {
1.2294 + MmuBase::Wait();
1.2295 + iSection->iAllocator.Free(ix, npdes);
1.2296 + MmuBase::Signal();
1.2297 + }
1.2298 + }
1.2299 + MmuBase::SignalHwChunk();
1.2300 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Alloc returns %08x", a));
1.2301 + return a;
1.2302 + }
1.2303 +
1.2304 +void THwChunkAddressAllocator::Discard(THwChunkRegion* aRegion)
1.2305 + {
1.2306 + // remove a region from the array and destroy it
1.2307 + TInt r=FindInOrder(aRegion, Order);
1.2308 + if (r>=0)
1.2309 + Remove(r);
1.2310 + Kern::Free(aRegion);
1.2311 + }
1.2312 +
1.2313 +TInt THwChunkAddressAllocator::Order(const THwChunkRegion& a1, const THwChunkRegion& a2)
1.2314 + {
1.2315 + // order two regions by address
1.2316 + return a1.iIndex-a2.iIndex;
1.2317 + }
1.2318 +
1.2319 +THwChunkRegion* THwChunkAddressAllocator::Free(TLinAddr aAddr, TInt aSize)
1.2320 + {
1.2321 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free addr=%08x size=%08x", aAddr, aSize));
1.2322 + __ASSERT_ALWAYS(aAddr>=iSection->iBase && (aAddr+aSize)<=iSection->iEnd,
1.2323 + MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid));
1.2324 + THwChunkRegion* list=NULL;
1.2325 + MmuBase& m=*MmuBase::TheMmu;
1.2326 + TInt ix=(aAddr - iSection->iBase)>>m.iChunkShift;
1.2327 + TInt remain=(aSize+m.iPageMask)>>m.iPageShift;
1.2328 + TInt pdepageshift=m.iChunkShift-m.iPageShift;
1.2329 + TInt offset=(aAddr&m.iChunkMask)>>m.iPageShift;
1.2330 + THwChunkRegion find(ix, 0, 0);
1.2331 + MmuBase::WaitHwChunk();
1.2332 + TInt r=FindInOrder(&find, Order);
1.2333 + __ASSERT_ALWAYS(r>=0, MmuBase::Panic(MmuBase::EFreeHwChunkAddrInvalid));
1.2334 + while (remain)
1.2335 + {
1.2336 + THwChunkPageTable* p=(THwChunkPageTable*)(*this)[r];
1.2337 + __ASSERT_ALWAYS(p->iIndex==ix, MmuBase::Panic(MmuBase::EFreeHwChunkIndexInvalid));
1.2338 + if (p->iRegionSize)
1.2339 + {
1.2340 + // multiple-whole-PDE region
1.2341 + TInt rsz=p->iRegionSize;
1.2342 + remain-=(rsz<<pdepageshift);
1.2343 + Remove(r); // r now indexes following array entry
1.2344 + ix+=rsz;
1.2345 + }
1.2346 + else
1.2347 + {
1.2348 + // bitmap region
1.2349 + TInt n=Min(remain, (1<<pdepageshift)-offset);
1.2350 + p->iAllocator.Free(offset, n);
1.2351 + remain-=n;
1.2352 + ++ix;
1.2353 + if (p->iAllocator.iAvail < p->iAllocator.iSize)
1.2354 + {
1.2355 + // bitmap still in use
1.2356 + offset=0;
1.2357 + ++r; // r indexes following array entry
1.2358 + continue;
1.2359 + }
1.2360 + Remove(r); // r now indexes following array entry
1.2361 + }
1.2362 + offset=0;
1.2363 + p->iNext=list;
1.2364 + list=p; // chain free region descriptors together
1.2365 + }
1.2366 + MmuBase::SignalHwChunk();
1.2367 + __KTRACE_OPT(KMMU, Kern::Printf("THwChAA::Free returns %08x", list));
1.2368 + return list;
1.2369 + }
1.2370 +
1.2371 +/********************************************
1.2372 + * Hardware chunk abstraction
1.2373 + ********************************************/
1.2374 +THwChunkAddressAllocator* MmuBase::MappingRegion(TUint)
1.2375 + {
1.2376 + return iHwChunkAllocator;
1.2377 + }
1.2378 +
1.2379 +TInt MmuBase::AllocateAllPageTables(TLinAddr aLinAddr, TInt aSize, TPde aPdePerm, TInt aMapShift, SPageTableInfo::TAttribs aAttrib)
1.2380 + {
1.2381 + __KTRACE_OPT(KMMU,Kern::Printf("AllocateAllPageTables lin=%08x, size=%x, pde=%08x, mapshift=%d attribs=%d",
1.2382 + aLinAddr, aSize, aPdePerm, aMapShift, aAttrib));
1.2383 + TInt offset=aLinAddr&iChunkMask;
1.2384 + TInt remain=aSize;
1.2385 + TLinAddr a=aLinAddr&~iChunkMask;
1.2386 + TInt newpts=0;
1.2387 + for (; remain>0; a+=iChunkSize)
1.2388 + {
1.2389 + // don't need page table if a whole PDE mapping is permitted here
1.2390 + if (aMapShift<iChunkShift || offset || remain<iChunkSize)
1.2391 + {
1.2392 + // need to check for a page table at a
1.2393 + TInt id=PageTableId(a);
1.2394 + if (id<0)
1.2395 + {
1.2396 + // no page table - must allocate one
1.2397 + id = AllocPageTable();
1.2398 + if (id<0)
1.2399 + break;
1.2400 + // got page table, assign it
1.2401 + // AssignPageTable(TInt aId, TInt aUsage, TAny* aObject, TLinAddr aAddr, TPde aPdePerm)
1.2402 + AssignPageTable(id, aAttrib, NULL, a, aPdePerm);
1.2403 + ++newpts;
1.2404 + }
1.2405 + }
1.2406 + remain -= (iChunkSize-offset);
1.2407 + offset=0;
1.2408 + }
1.2409 + if (remain<=0)
1.2410 + return KErrNone; // completed OK
1.2411 +
1.2412 + // ran out of memory somewhere - free page tables which were allocated
1.2413 + for (; newpts; --newpts)
1.2414 + {
1.2415 + a-=iChunkSize;
1.2416 + TInt id=UnassignPageTable(a);
1.2417 + FreePageTable(id);
1.2418 + }
1.2419 + return KErrNoMemory;
1.2420 + }
1.2421 +
1.2422 +
1.2423 +/**
1.2424 +Create a hardware chunk object mapping a specified block of physical addresses
1.2425 +with specified access permissions and cache policy.
1.2426 +
1.2427 +When the mapping is no longer required, close the chunk using chunk->Close(0);
1.2428 +Note that closing a chunk does not free any RAM pages which were mapped by the
1.2429 +chunk - these must be freed separately using Epoc::FreePhysicalRam().
1.2430 +
1.2431 +@param aChunk Upon successful completion this parameter receives a pointer to
1.2432 + the newly created chunk. Upon unsuccessful completion it is
1.2433 + written with a NULL pointer. The virtual address of the mapping
1.2434 + can subsequently be discovered using the LinearAddress()
1.2435 + function on the chunk.
1.2436 +@param aAddr The base address of the physical region to be mapped. This will
1.2437 + be rounded down to a multiple of the hardware page size before
1.2438 + being used.
1.2439 +@param aSize The size of the physical address region to be mapped. This will
1.2440 + be rounded up to a multiple of the hardware page size before
1.2441 + being used; the rounding is such that the entire range from
1.2442 + aAddr to aAddr+aSize-1 inclusive is mapped. For example if
1.2443 + aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an
1.2444 + 8KB range of physical addresses from 0xB0001000 to 0xB0002FFF
1.2445 + inclusive will be mapped.
1.2446 +@param aMapAttr Mapping attributes required for the mapping. This is formed
1.2447 + by ORing together values from the TMappingAttributes enumeration
1.2448 + to specify the access permissions and caching policy.
1.2449 +
1.2450 +@pre Calling thread must be in a critical section.
1.2451 +@pre Interrupts must be enabled.
1.2452 +@pre Kernel must be unlocked.
1.2453 +@pre No fast mutex can be held.
1.2454 +@pre Call in a thread context.
1.2455 +@pre Can be used in a device driver.
1.2456 +@see TMappingAttributes
1.2457 +*/
1.2458 +EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
1.2459 + {
1.2460 + if (aAddr == KPhysAddrInvalid)
1.2461 + return KErrNotSupported;
1.2462 + return DoNew(aChunk, aAddr, aSize, aMapAttr);
1.2463 + }
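+
+// Illustrative usage (a minimal sketch; KMyHwBase is hypothetical): map a peripheral
+// register block as non-cached, supervisor read/write, using attribute values from the
+// TMappingAttributes enumeration.
+//
+//		DPlatChunkHw* hwChunk = NULL;
+//		TInt r = DPlatChunkHw::New(hwChunk, KMyHwBase, 0x1000, EMapAttrSupRw | EMapAttrFullyBlocking);
+//		if (r == KErrNone)
+//			{
+//			volatile TUint32* regs = (volatile TUint32*)hwChunk->LinearAddress();
+//			// ... access the hardware registers ...
+//			hwChunk->Close(0);
+//			}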
1.2464 +
1.2465 +TInt DPlatChunkHw::DoNew(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
1.2466 + {
1.2467 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
1.2468 + __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr));
1.2469 + if (aSize<=0)
1.2470 + return KErrArgument;
1.2471 + MmuBase& m=*MmuBase::TheMmu;
1.2472 + aChunk=NULL;
1.2473 + TPhysAddr pa=aAddr!=KPhysAddrInvalid ? aAddr&~m.iPageMask : 0;
1.2474 + TInt size=((aAddr+aSize+m.iPageMask)&~m.iPageMask)-pa;
1.2475 + __KTRACE_OPT(KMMU,Kern::Printf("Rounded %08x+%x", pa, size));
1.2476 + DMemModelChunkHw* pC=new DMemModelChunkHw;
1.2477 + if (!pC)
1.2478 + return KErrNoMemory;
1.2479 + __KTRACE_OPT(KMMU,Kern::Printf("DMemModelChunkHw created at %08x",pC));
1.2480 + pC->iPhysAddr=aAddr;
1.2481 + pC->iSize=size;
1.2482 + TUint mapattr=aMapAttr;
1.2483 + TPde pdePerm=0;
1.2484 + TPte ptePerm=0;
1.2485 + TInt r=m.PdePtePermissions(mapattr, pdePerm, ptePerm);
1.2486 + if (r==KErrNone)
1.2487 + {
1.2488 + pC->iAllocator=m.MappingRegion(mapattr);
1.2489 + pC->iAttribs=mapattr; // save actual mapping attributes
1.2490 + r=pC->AllocateLinearAddress(pdePerm);
1.2491 + if (r>=0)
1.2492 + {
1.2493 + TInt map_shift=r;
1.2494 + MmuBase::Wait();
1.2495 + r=m.AllocateAllPageTables(pC->iLinAddr, size, pdePerm, map_shift, SPageTableInfo::EGlobal);
1.2496 + if (r==KErrNone && aAddr!=KPhysAddrInvalid)
1.2497 + m.Map(pC->iLinAddr, pa, size, pdePerm, ptePerm, map_shift);
1.2498 + MmuBase::Signal();
1.2499 + }
1.2500 + }
1.2501 + if (r==KErrNone)
1.2502 + aChunk=pC;
1.2503 + else
1.2504 + pC->Close(NULL);
1.2505 + return r;
1.2506 + }
1.2507 +
1.2508 +TInt DMemModelChunkHw::AllocateLinearAddress(TPde aPdePerm)
1.2509 + {
1.2510 + __KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::AllocateLinearAddress(%08x)", aPdePerm));
1.2511 + __KTRACE_OPT(KMMU, Kern::Printf("iAllocator=%08x iPhysAddr=%08x iSize=%08x", iAllocator, iPhysAddr, iSize));
1.2512 + MmuBase& m=*MmuBase::TheMmu;
1.2513 + TInt map_shift = (iPhysAddr<0xffffffffu) ? 30 : m.iPageShift;
1.2514 + for (; map_shift>=m.iPageShift; --map_shift)
1.2515 + {
1.2516 + TUint32 map_size = 1<<map_shift;
1.2517 + TUint32 map_mask = map_size-1;
1.2518 + if (!(m.iMapSizes & map_size))
1.2519 + continue; // map_size is not supported on this hardware
1.2520 + TPhysAddr base = (iPhysAddr+map_mask) &~ map_mask; // base rounded up
1.2521 + TPhysAddr end = (iPhysAddr+iSize)&~map_mask; // end rounded down
1.2522 + if ((base-end)<0x80000000u && map_shift>m.iPageShift)
1.2523 + continue; // region not big enough to use this mapping size
1.2524 + __KTRACE_OPT(KMMU, Kern::Printf("Try map size %08x", map_size));
1.2525 + iLinAddr=iAllocator->Alloc(iSize, map_shift, iPhysAddr, aPdePerm);
1.2526 + if (iLinAddr)
1.2527 + break; // done
1.2528 + }
1.2529 + TInt r=iLinAddr ? map_shift : KErrNoMemory;
1.2530 + __KTRACE_OPT(KMMU, Kern::Printf("iLinAddr=%08x, returning %d", iLinAddr, r));
1.2531 + return r;
1.2532 + }
1.2533 +
1.2534 +void DMemModelChunkHw::DeallocateLinearAddress()
1.2535 + {
1.2536 + __KTRACE_OPT(KMMU, Kern::Printf("DMemModelChunkHw::DeallocateLinearAddress %O", this));
1.2537 + MmuBase& m=*MmuBase::TheMmu;
1.2538 + MmuBase::WaitHwChunk();
1.2539 + THwChunkRegion* rgn=iAllocator->Free(iLinAddr, iSize);
1.2540 + iLinAddr=0;
1.2541 + MmuBase::SignalHwChunk();
1.2542 + TLinAddr base = iAllocator->iSection->iBase;
1.2543 + TBitMapAllocator& section_allocator = iAllocator->iSection->iAllocator;
1.2544 + while (rgn)
1.2545 + {
1.2546 + MmuBase::Wait();
1.2547 + if (rgn->iRegionSize)
1.2548 + {
1.2549 + // free address range
1.2550 + __KTRACE_OPT(KMMU, Kern::Printf("Freeing range %03x+%03x", rgn->iIndex, rgn->iRegionSize));
1.2551 + section_allocator.Free(rgn->iIndex, rgn->iRegionSize);
1.2552 +
1.2553 +			// Though this is a large region, it can still be made up of page tables (not sections).
1.2554 +			// Check each chunk and remove its page table if necessary.
1.2555 + TInt i = 0;
1.2556 + TLinAddr a = base + (TLinAddr(rgn->iIndex)<<m.iChunkShift);
1.2557 + for (; i<rgn->iRegionSize ; i++,a+=m.iChunkSize)
1.2558 + {
1.2559 + TInt id = m.UnassignPageTable(a);
1.2560 + if (id>=0)
1.2561 + m.FreePageTable(id);
1.2562 + }
1.2563 + }
1.2564 + else
1.2565 + {
1.2566 + // free address and page table if it exists
1.2567 + __KTRACE_OPT(KMMU, Kern::Printf("Freeing index %03x", rgn->iIndex));
1.2568 + section_allocator.Free(rgn->iIndex);
1.2569 + TLinAddr a = base + (TLinAddr(rgn->iIndex)<<m.iChunkShift);
1.2570 + TInt id = m.UnassignPageTable(a);
1.2571 + if (id>=0)
1.2572 + m.FreePageTable(id);
1.2573 + }
1.2574 + MmuBase::Signal();
1.2575 + THwChunkRegion* free=rgn;
1.2576 + rgn=rgn->iNext;
1.2577 + Kern::Free(free);
1.2578 + }
1.2579 + }
1.2580 +
1.2581 +
1.2582 +//
1.2583 +// RamCacheBase
1.2584 +//
1.2585 +
1.2586 +
1.2587 +RamCacheBase* RamCacheBase::TheRamCache = NULL;
1.2588 +
1.2589 +
1.2590 +RamCacheBase::RamCacheBase()
1.2591 + {
1.2592 + }
1.2593 +
1.2594 +
1.2595 +void RamCacheBase::Init2()
1.2596 + {
1.2597 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">RamCacheBase::Init2"));
1.2598 + iMmu = MmuBase::TheMmu;
1.2599 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<RamCacheBase::Init2"));
1.2600 + }
1.2601 +
1.2602 +
1.2603 +void RamCacheBase::ReturnToSystem(SPageInfo* aPageInfo)
1.2604 + {
1.2605 + __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
1.2606 + __ASSERT_SYSTEM_LOCK;
1.2607 + aPageInfo->SetUnused();
1.2608 + --iNumberOfFreePages;
1.2609 + __NK_ASSERT_DEBUG(iNumberOfFreePages>=0);
1.2610 + // Release system lock before using the RAM allocator.
1.2611 + NKern::UnlockSystem();
1.2612 + iMmu->iRamPageAllocator->FreeRamPage(aPageInfo->PhysAddr(), EPageDiscard);
1.2613 + NKern::LockSystem();
1.2614 + }
1.2615 +
1.2616 +
1.2617 +SPageInfo* RamCacheBase::GetPageFromSystem(TUint aBlockedZoneId, TBool aBlockRest)
1.2618 + {
1.2619 + __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
1.2620 + SPageInfo* pageInfo;
1.2621 + TPhysAddr pagePhys;
1.2622 + TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard, aBlockedZoneId, aBlockRest);
1.2623 + if(r==KErrNone)
1.2624 + {
1.2625 + NKern::LockSystem();
1.2626 + pageInfo = SPageInfo::FromPhysAddr(pagePhys);
1.2627 + pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead);
1.2628 + ++iNumberOfFreePages;
1.2629 + NKern::UnlockSystem();
1.2630 + }
1.2631 + else
1.2632 + pageInfo = NULL;
1.2633 + return pageInfo;
1.2634 + }
1.2635 +
1.2636 +
1.2637 +//
1.2638 +// RamCache
1.2639 +//
1.2640 +
1.2641 +
1.2642 +void RamCache::Init2()
1.2643 + {
1.2644 + __KTRACE_OPT(KBOOT,Kern::Printf(">RamCache::Init2"));
1.2645 + RamCacheBase::Init2();
1.2646 + __KTRACE_OPT(KBOOT,Kern::Printf("<RamCache::Init2"));
1.2647 + }
1.2648 +
1.2649 +
1.2650 +TInt RamCache::Init3()
1.2651 + {
1.2652 + return KErrNone;
1.2653 + }
1.2654 +
1.2655 +void RamCache::RemovePage(SPageInfo& aPageInfo)
1.2656 + {
1.2657 + __NK_ASSERT_DEBUG(aPageInfo.Type() == SPageInfo::EPagedCache);
1.2658 + __NK_ASSERT_DEBUG(aPageInfo.State() == SPageInfo::EStatePagedYoung);
1.2659 + aPageInfo.iLink.Deque();
1.2660 + aPageInfo.SetState(SPageInfo::EStatePagedDead);
1.2661 + }
1.2662 +
1.2663 +TBool RamCache::GetFreePages(TInt aNumPages)
1.2664 + {
1.2665 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages));
1.2666 + NKern::LockSystem();
1.2667 +
1.2668 + while(aNumPages>0 && NumberOfFreePages()>=aNumPages)
1.2669 + {
1.2670 + // steal a page from cache list and return it to the free pool...
1.2671 + SPageInfo* pageInfo = SPageInfo::FromLink(iPageList.First()->Deque());
1.2672 + pageInfo->SetState(SPageInfo::EStatePagedDead);
1.2673 + SetFree(pageInfo);
1.2674 + ReturnToSystem(pageInfo);
1.2675 + --aNumPages;
1.2676 + }
1.2677 +
1.2678 + NKern::UnlockSystem();
1.2679 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: <GetFreePages %d",!aNumPages));
1.2680 + return !aNumPages;
1.2681 + }
1.2682 +
1.2683 +
1.2684 +void RamCache::DonateRamCachePage(SPageInfo* aPageInfo)
1.2685 + {
1.2686 + SPageInfo::TType type = aPageInfo->Type();
1.2687 + if(type==SPageInfo::EChunk)
1.2688 + {
1.2689 +		//Must not donate a locked page. An example is DMA-transferred memory.
1.2690 + __NK_ASSERT_DEBUG(0 == aPageInfo->LockCount());
1.2691 +
1.2692 + aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung);
1.2693 + iPageList.Add(&aPageInfo->iLink);
1.2694 + ++iNumberOfFreePages;
1.2695 + // Update ram allocator counts as this page has changed its type
1.2696 + DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
1.2697 + iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard);
1.2698 +
1.2699 +#ifdef BTRACE_PAGING
1.2700 + BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkDonatePage, chunk, aPageInfo->Offset());
1.2701 +#endif
1.2702 + return;
1.2703 + }
1.2704 + // allow already donated pages...
1.2705 + __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
1.2706 + }
1.2707 +
1.2708 +
1.2709 +TBool RamCache::ReclaimRamCachePage(SPageInfo* aPageInfo)
1.2710 + {
1.2711 + SPageInfo::TType type = aPageInfo->Type();
1.2712 +// Kern::Printf("DemandPaging::ReclaimRamCachePage %x %d free=%d",aPageInfo,type,iNumberOfFreePages);
1.2713 +
1.2714 + if(type==SPageInfo::EChunk)
1.2715 + return ETrue; // page already reclaimed
1.2716 +
1.2717 + __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
1.2718 + __NK_ASSERT_DEBUG(aPageInfo->State()==SPageInfo::EStatePagedYoung);
1.2719 + // Update ram allocator counts as this page has changed its type
1.2720 + DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
1.2721 + iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
1.2722 + aPageInfo->iLink.Deque();
1.2723 + --iNumberOfFreePages;
1.2724 + aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal);
1.2725 +
1.2726 +#ifdef BTRACE_PAGING
1.2727 + BTraceContext8(BTrace::EPaging, BTrace::EPagingChunkReclaimPage, chunk, aPageInfo->Offset());
1.2728 +#endif
1.2729 + return ETrue;
1.2730 + }
1.2731 +
1.2732 +
1.2733 +/**
1.2734 +Discard the specified page.
1.2735 +Should only be called on a page if a previous call to IsPageDiscardable()
1.2736 +returned ETrue and the system lock hasn't been released between the calls.
1.2737 +
1.2738 +@param aPageInfo The page info of the page to be discarded
1.2739 +@param aBlockedZoneId Not used by this overload.
1.2740 +@param aBlockRest Not used by this overload.
1.2741 +@return ETrue if the page was successfully discarded
1.2742 +
1.2743 +@pre System lock held.
1.2744 +@post System lock held.
1.2745 +*/
1.2746 +TBool RamCache::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest)
1.2747 + {
1.2748 + __NK_ASSERT_DEBUG(iNumberOfFreePages > 0);
1.2749 + RemovePage(aPageInfo);
1.2750 + SetFree(&aPageInfo);
1.2751 + ReturnToSystem(&aPageInfo);
1.2752 + return ETrue;
1.2753 + }
1.2754 +
1.2755 +
1.2756 +/**
1.2757 +First stage in discarding a list of pages.
1.2758 +
1.2759 +Must ensure that the pages will still be discardable even if system lock is released.
1.2760 +To be used in conjunction with RamCacheBase::DoDiscardPages1().
1.2761 +
1.2762 +@param aPageList A NULL terminated list of the pages to be discarded
1.2763 +@return KErrNone on success.
1.2764 +
1.2765 +@pre System lock held
1.2766 +@post System lock held
1.2767 +*/
1.2768 +TInt RamCache::DoDiscardPages0(SPageInfo** aPageList)
1.2769 + {
1.2770 + __ASSERT_SYSTEM_LOCK;
1.2771 +
1.2772 + SPageInfo* pageInfo;
1.2773 + while((pageInfo = *aPageList++) != 0)
1.2774 + {
1.2775 + RemovePage(*pageInfo);
1.2776 + }
1.2777 + return KErrNone;
1.2778 + }
1.2779 +
1.2780 +
1.2781 +/**
1.2782 +Final stage in discarding a list of pages.
1.2783 +Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0().
1.2784 +This overload doesn't actually need to do anything.
1.2785 +
1.2786 +@param aPageList A NULL terminated list of the pages to be discarded
1.2787 +@return KErrNone on success.
1.2788 +
1.2789 +@pre System lock held
1.2790 +@post System lock held
1.2791 +*/
1.2792 +TInt RamCache::DoDiscardPages1(SPageInfo** aPageList)
1.2793 + {
1.2794 + __ASSERT_SYSTEM_LOCK;
1.2795 + SPageInfo* pageInfo;
1.2796 + while((pageInfo = *aPageList++) != 0)
1.2797 + {
1.2798 + SetFree(pageInfo);
1.2799 + ReturnToSystem(pageInfo);
1.2800 + }
1.2801 + return KErrNone;
1.2802 + }
1.2803 +
1.2804 +
1.2805 +/**
1.2806 +Check whether the specified page can be discarded by the RAM cache.
1.2807 +
1.2808 +@param aPageInfo The page info of the page being queried.
1.2809 +@return ETrue when the page can be discarded, EFalse otherwise.
1.2810 +@pre System lock held.
1.2811 +@post System lock held.
1.2812 +*/
1.2813 +TBool RamCache::IsPageDiscardable(SPageInfo& aPageInfo)
1.2814 + {
1.2815 + SPageInfo::TType type = aPageInfo.Type();
1.2816 + SPageInfo::TState state = aPageInfo.State();
1.2817 + return (type == SPageInfo::EPagedCache && state == SPageInfo::EStatePagedYoung);
1.2818 + }
1.2819 +
1.2820 +
1.2821 +/**
1.2822 +@return ETrue when the unmapped page should be freed, EFalse otherwise
1.2823 +*/
1.2824 +TBool RamCache::PageUnmapped(SPageInfo* aPageInfo)
1.2825 + {
1.2826 + SPageInfo::TType type = aPageInfo->Type();
1.2827 +// Kern::Printf("DemandPaging::PageUnmapped %x %d",aPageInfo,type);
1.2828 + if(type!=SPageInfo::EPagedCache)
1.2829 + return ETrue;
1.2830 + SPageInfo::TState state = aPageInfo->State();
1.2831 + if(state==SPageInfo::EStatePagedYoung)
1.2832 + {
1.2833 + // This page will be freed by DChunk::DoDecommit as it was originally
1.2834 + // allocated so update page counts in ram allocator
1.2835 + DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
1.2836 + iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
1.2837 + aPageInfo->iLink.Deque();
1.2838 + --iNumberOfFreePages;
1.2839 + }
1.2840 + return ETrue;
1.2841 + }
1.2842 +
1.2843 +
1.2844 +void RamCache::Panic(TFault aFault)
1.2845 + {
1.2846 + Kern::Fault("RamCache",aFault);
1.2847 + }
1.2848 +
1.2849 +/**
1.2850 +Flush all cache pages.
1.2851 +
1.2852 +@pre RAM allocator mutex held
1.2853 +@post RAM allocator mutex held
1.2854 +*/
1.2855 +void RamCache::FlushAll()
1.2856 + {
1.2857 + __ASSERT_MUTEX(MmuBase::RamAllocatorMutex);
1.2858 +#ifdef _DEBUG
1.2859 + // Should always succeed
1.2860 + __NK_ASSERT_DEBUG(GetFreePages(iNumberOfFreePages));
1.2861 +#else
1.2862 + GetFreePages(iNumberOfFreePages);
1.2863 +#endif
1.2864 + }
1.2865 +
1.2866 +
1.2867 +//
1.2868 +// Demand Paging
1.2869 +//
1.2870 +
1.2871 +#ifdef __DEMAND_PAGING__
1.2872 +
1.2873 +DemandPaging* DemandPaging::ThePager = 0;
1.2874 +TBool DemandPaging::PseudoRandInitialised = EFalse;
1.2875 +volatile TUint32 DemandPaging::PseudoRandSeed = 0;
1.2876 +
1.2877 +
1.2878 +void M::DemandPagingInit()
1.2879 + {
1.2880 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">M::DemandPagingInit"));
1.2881 + TInt r = RamCacheBase::TheRamCache->Init3();
1.2882 + if (r != KErrNone)
1.2883 + DemandPaging::Panic(DemandPaging::EInitialiseFailed);
1.2884 +
1.2885 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<M::DemandPagingInit"));
1.2886 + }
1.2887 +
1.2888 +
1.2889 +TInt M::DemandPagingFault(TAny* aExceptionInfo)
1.2890 + {
1.2891 + DemandPaging* pager = DemandPaging::ThePager;
1.2892 + if(pager)
1.2893 + return pager->Fault(aExceptionInfo);
1.2894 + return KErrAbort;
1.2895 + }
1.2896 +
1.2897 +#ifdef _DEBUG
1.2898 +extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength)
1.2899 + {
1.2900 + if(M::CheckPagingSafe(EFalse, aStartAddres, aLength))
1.2901 + return;
1.2902 + Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR);
1.2903 + __NK_ASSERT_ALWAYS(0);
1.2904 + }
1.2905 +
1.2906 +extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddress, TUint aLength)
1.2907 + {
1.2908 +	if(M::CheckPagingSafe(ETrue, aStartAddress, aLength))
1.2909 + return;
1.2910 + __KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR));
1.2911 + }
1.2912 +#endif
1.2913 +
1.2914 +
1.2915 +TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength)
1.2916 + {
1.2917 + DemandPaging* pager = DemandPaging::ThePager;
1.2918 + if(!pager || K::Initialising)
1.2919 + return ETrue;
1.2920 +
1.2921 + NThread* nt = NCurrentThread();
1.2922 + if(!nt)
1.2923 + return ETrue; // We've not booted properly yet!
1.2924 +
1.2925 + if (!pager->NeedsMutexOrderCheck(aStartAddr, aLength))
1.2926 + return ETrue;
1.2927 +
1.2928 + TBool dataPagingEnabled = EFalse; // data paging not supported on moving or multiple models
1.2929 +
1.2930 + DThread* thread = _LOFF(nt,DThread,iNThread);
1.2931 + NFastMutex* fm = NKern::HeldFastMutex();
1.2932 + if(fm)
1.2933 + {
1.2934 + if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock)
1.2935 + {
1.2936 + if (!aDataPaging)
1.2937 + {
1.2938 + __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held"));
1.2939 + return EFalse;
1.2940 + }
1.2941 + else
1.2942 + {
1.2943 + __KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held"));
1.2944 + return !dataPagingEnabled;
1.2945 + }
1.2946 + }
1.2947 + }
1.2948 +
1.2949 + DMutex* m = pager->CheckMutexOrder();
1.2950 + if (m)
1.2951 + {
1.2952 + if (!aDataPaging)
1.2953 + {
1.2954 + __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m));
1.2955 + return EFalse;
1.2956 + }
1.2957 + else
1.2958 + {
1.2959 + __KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O",m));
1.2960 + return !dataPagingEnabled;
1.2961 + }
1.2962 + }
1.2963 +
1.2964 + return ETrue;
1.2965 + }
1.2966 +
1.2967 +
1.2968 +TInt M::LockRegion(TLinAddr aStart,TInt aSize)
1.2969 + {
1.2970 + DemandPaging* pager = DemandPaging::ThePager;
1.2971 + if(pager)
1.2972 + return pager->LockRegion(aStart,aSize,NULL);
1.2973 + return KErrNone;
1.2974 + }
1.2975 +
1.2976 +
1.2977 +TInt M::UnlockRegion(TLinAddr aStart,TInt aSize)
1.2978 + {
1.2979 + DemandPaging* pager = DemandPaging::ThePager;
1.2980 + if(pager)
1.2981 + return pager->UnlockRegion(aStart,aSize,NULL);
1.2982 + return KErrNone;
1.2983 + }
1.2984 +
1.2985 +#else // !__DEMAND_PAGING__
1.2986 +
1.2987 +TInt M::LockRegion(TLinAddr /*aStart*/,TInt /*aSize*/)
1.2988 + {
1.2989 + return KErrNone;
1.2990 + }
1.2991 +
1.2992 +
1.2993 +TInt M::UnlockRegion(TLinAddr /*aStart*/,TInt /*aSize*/)
1.2994 + {
1.2995 + return KErrNone;
1.2996 + }
1.2997 +
1.2998 +#endif // __DEMAND_PAGING__
1.2999 +
1.3000 +
1.3001 +
1.3002 +
1.3003 +//
1.3004 +// DemandPaging
1.3005 +//
1.3006 +
1.3007 +#ifdef __DEMAND_PAGING__
1.3008 +
1.3009 +
1.3010 +const TUint16 KDefaultYoungOldRatio = 3;
1.3011 +const TUint KDefaultMinPages = 256;
1.3012 +const TUint KDefaultMaxPages = KMaxTUint >> KPageShift;
1.3013 +
1.3014 +/* Need at least 4 mapped pages to guarantee being able to execute all ARM instructions.
1.3015 +   (Worst case is a THUMB2 STM instruction with both the instruction and its data straddling page
1.3016 +   boundaries.)
1.3017 +*/
1.3018 +const TUint KMinYoungPages = 4;
1.3019 +const TUint KMinOldPages = 1;
1.3020 +
1.3021 +/* A minimum young/old ratio of 1 means that we need at least twice KMinYoungPages pages...
1.3022 +*/
1.3023 +const TUint KAbsoluteMinPageCount = 2*KMinYoungPages;
1.3024 +
1.3025 +__ASSERT_COMPILE(KMinOldPages<=KAbsoluteMinPageCount/2);
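+
+/* Worked example (illustrative only): with the defaults above, KDefaultYoungOldRatio==3 and
+   iMinimumPageCount==KDefaultMinPages==256, the sanity check in Init2() computes
+   iOldCount = 256/(1+3) = 64 and iYoungCount = 256-64 = 192, i.e. roughly three young pages
+   for every old page once the live list is populated.
+*/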
1.3026 +
1.3027 +class DMissingPagingDevice : public DPagingDevice
1.3028 + {
1.3029 + TInt Read(TThreadMessage* /*aReq*/,TLinAddr /*aBuffer*/,TUint /*aOffset*/,TUint /*aSize*/,TInt /*aDrvNumber*/)
1.3030 + { DemandPaging::Panic(DemandPaging::EDeviceMissing); return 0; }
1.3031 + };
1.3032 +
1.3033 +
1.3034 +TBool DemandPaging::RomPagingRequested()
1.3035 + {
1.3036 + return TheRomHeader().iPageableRomSize != 0;
1.3037 + }
1.3038 +
1.3039 +
1.3040 +TBool DemandPaging::CodePagingRequested()
1.3041 + {
1.3042 + return (TheSuperPage().KernelConfigFlags() & EKernelConfigCodePagingPolicyDefaultPaged) != EKernelConfigCodePagingPolicyNoPaging;
1.3043 + }
1.3044 +
1.3045 +
1.3046 +DemandPaging::DemandPaging()
1.3047 + {
1.3048 + }
1.3049 +
1.3050 +
1.3051 +void DemandPaging::Init2()
1.3052 + {
1.3053 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init2"));
1.3054 +
1.3055 + RamCacheBase::Init2();
1.3056 +
1.3057 + // initialise live list...
1.3058 + SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig;
1.3059 +
1.3060 + iMinimumPageCount = KDefaultMinPages;
1.3061 + if(config.iMinPages)
1.3062 + iMinimumPageCount = config.iMinPages;
1.3063 + if(iMinimumPageCount<KAbsoluteMinPageCount)
1.3064 + iMinimumPageCount = KAbsoluteMinPageCount;
1.3065 + iInitMinimumPageCount = iMinimumPageCount;
1.3066 +
1.3067 + iMaximumPageCount = KDefaultMaxPages;
1.3068 + if(config.iMaxPages)
1.3069 + iMaximumPageCount = config.iMaxPages;
1.3070 + iInitMaximumPageCount = iMaximumPageCount;
1.3071 +
1.3072 + iYoungOldRatio = KDefaultYoungOldRatio;
1.3073 + if(config.iYoungOldRatio)
1.3074 + iYoungOldRatio = config.iYoungOldRatio;
1.3075 + TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
1.3076 + if(iYoungOldRatio>ratioLimit)
1.3077 + iYoungOldRatio = ratioLimit;
1.3078 +
1.3079 + iMinimumPageLimit = (KMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio;
1.3080 + if(iMinimumPageLimit<KAbsoluteMinPageCount)
1.3081 + iMinimumPageLimit = KAbsoluteMinPageCount;
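+	// For example, with KMinYoungPages==4 and the default ratio of 3 the expression above gives
+	// (4*(1+3))/3 == 5, which is then raised to KAbsoluteMinPageCount (8).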
1.3082 +
1.3083 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InitialiseLiveList min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));
1.3084 +
1.3085 + if(iMaximumPageCount<iMinimumPageCount)
1.3086 + Panic(EInitialiseBadArgs);
1.3087 +
1.3088 + //
1.3089 +	// This routine doesn't acquire any mutexes because it should be called before the system
1.3090 +	// is fully up and running, i.e. before another thread can preempt this one.
1.3091 + //
1.3092 +
1.3093 + // Calculate page counts
1.3094 + iOldCount = iMinimumPageCount/(1+iYoungOldRatio);
1.3095 + if(iOldCount<KMinOldPages)
1.3096 + Panic(EInitialiseBadArgs);
1.3097 + iYoungCount = iMinimumPageCount-iOldCount;
1.3098 + if(iYoungCount<KMinYoungPages)
1.3099 +		Panic(EInitialiseBadArgs);	// Need at least KMinYoungPages pages mapped to cover the worst-case instruction (see KMinYoungPages above)
1.3100 + iNumberOfFreePages = 0;
1.3101 +
1.3102 + // Allocate RAM pages and put them all on the old list
1.3103 + iYoungCount = 0;
1.3104 + iOldCount = 0;
1.3105 + for(TUint i=0; i<iMinimumPageCount; i++)
1.3106 + {
1.3107 + // Allocate a single page
1.3108 + TPhysAddr pagePhys;
1.3109 + TInt r = iMmu->iRamPageAllocator->AllocRamPages(&pagePhys,1, EPageDiscard);
1.3110 + if(r!=0)
1.3111 + Panic(EInitialiseFailed);
1.3112 + AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
1.3113 + }
1.3114 +
1.3115 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<DemandPaging::Init2"));
1.3116 + }
1.3117 +
1.3118 +
1.3119 +TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2);
1.3120 +
1.3121 +TInt DemandPaging::Init3()
1.3122 + {
1.3123 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::Init3"));
1.3124 + TInt r;
1.3125 +
1.3126 + // construct iBufferChunk
1.3127 + iDeviceBufferSize = 2*KPageSize;
1.3128 + TChunkCreateInfo info;
1.3129 + info.iType = TChunkCreateInfo::ESharedKernelMultiple;
1.3130 + info.iMaxSize = iDeviceBufferSize*KMaxPagingDevices;
1.3131 + info.iMapAttr = EMapAttrCachedMax;
1.3132 + info.iOwnsMemory = ETrue;
1.3133 + TUint32 mapAttr;
1.3134 + r = Kern::ChunkCreate(info,iDeviceBuffersChunk,iDeviceBuffers,mapAttr);
1.3135 + if(r!=KErrNone)
1.3136 + return r;
1.3137 +
1.3138 + // Install 'null' paging devices which panic if used...
1.3139 + DMissingPagingDevice* missingPagingDevice = new DMissingPagingDevice;
1.3140 + for(TInt i=0; i<KMaxPagingDevices; i++)
1.3141 + {
1.3142 + iPagingDevices[i].iInstalled = EFalse;
1.3143 + iPagingDevices[i].iDevice = missingPagingDevice;
1.3144 + }
1.3145 +
1.3146 + // Initialise ROM info...
1.3147 + const TRomHeader& romHeader = TheRomHeader();
1.3148 + iRomLinearBase = (TLinAddr)&romHeader;
1.3149 + iRomSize = iMmu->RoundToPageSize(romHeader.iUncompressedSize);
1.3150 + if(romHeader.iRomPageIndex)
1.3151 + iRomPageIndex = (SRomPageInfo*)((TInt)&romHeader+romHeader.iRomPageIndex);
1.3152 +
1.3153 + TLinAddr pagedStart = romHeader.iPageableRomSize ? (TLinAddr)&romHeader+romHeader.iPageableRomStart : 0;
1.3154 + if(pagedStart)
1.3155 + {
1.3156 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("ROM=%x+%x PagedStart=%x",iRomLinearBase,iRomSize,pagedStart));
1.3157 + __NK_ASSERT_ALWAYS(TUint(pagedStart-iRomLinearBase)<TUint(iRomSize));
1.3158 + iRomPagedLinearBase = pagedStart;
1.3159 + iRomPagedSize = iRomLinearBase+iRomSize-pagedStart;
1.3160 +		__KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::Init3, ROM Paged start(0x%x), size(0x%x)",iRomPagedLinearBase,iRomPagedSize));
1.3161 +
1.3162 +#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
1.3163 + // Get physical addresses of ROM pages
1.3164 + iOriginalRomPageCount = iMmu->RoundToPageSize(iRomSize)>>KPageShift;
1.3165 + iOriginalRomPages = new TPhysAddr[iOriginalRomPageCount];
1.3166 + __NK_ASSERT_ALWAYS(iOriginalRomPages);
1.3167 + TPhysAddr romPhysAddress;
1.3168 + iMmu->LinearToPhysical(iRomLinearBase,iRomSize,romPhysAddress,iOriginalRomPages);
1.3169 +#endif
1.3170 + }
1.3171 +
1.3172 + r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
1.3173 + __NK_ASSERT_ALWAYS(r==KErrNone);
1.3174 +
1.3175 +#ifdef __DEMAND_PAGING_BENCHMARKS__
1.3176 + for (TInt i = 0 ; i < EMaxPagingBm ; ++i)
1.3177 + ResetBenchmarkData((TPagingBenchmark)i);
1.3178 +#endif
1.3179 +
1.3180 + // Initialisation now complete
1.3181 + ThePager = this;
1.3182 + return KErrNone;
1.3183 + }
1.3184 +
1.3185 +
1.3186 +DemandPaging::~DemandPaging()
1.3187 + {
1.3188 +#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
1.3189 + delete[] iOriginalRomPages;
1.3190 +#endif
1.3191 + for (TUint i = 0 ; i < iPagingRequestCount ; ++i)
1.3192 + delete iPagingRequests[i];
1.3193 + }
1.3194 +
1.3195 +
1.3196 +TInt DemandPaging::InstallPagingDevice(DPagingDevice* aDevice)
1.3197 + {
1.3198 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf(">DemandPaging::InstallPagingDevice name='%s' type=%d",aDevice->iName,aDevice->iType));
1.3199 +
1.3200 + if(aDevice->iReadUnitShift>KPageShift)
1.3201 + Panic(EInvalidPagingDevice);
1.3202 +
1.3203 + TInt i;
1.3204 + TInt r = KErrNone;
1.3205 + TBool createRequestObjects = EFalse;
1.3206 +
1.3207 + if ((aDevice->iType & DPagingDevice::ERom) && RomPagingRequested())
1.3208 + {
1.3209 + r = DoInstallPagingDevice(aDevice, 0);
1.3210 + if (r != KErrNone)
1.3211 + goto done;
1.3212 + K::MemModelAttributes|=EMemModelAttrRomPaging;
1.3213 + createRequestObjects = ETrue;
1.3214 + }
1.3215 +
1.3216 + if ((aDevice->iType & DPagingDevice::ECode) && CodePagingRequested())
1.3217 + {
1.3218 + for (i = 0 ; i < KMaxLocalDrives ; ++i)
1.3219 + {
1.3220 + if (aDevice->iDrivesSupported & (1<<i))
1.3221 + {
1.3222 + r = DoInstallPagingDevice(aDevice, i + 1);
1.3223 + if (r != KErrNone)
1.3224 + goto done;
1.3225 + }
1.3226 + }
1.3227 + K::MemModelAttributes|=EMemModelAttrCodePaging;
1.3228 + createRequestObjects = ETrue;
1.3229 + }
1.3230 +
1.3231 + if (createRequestObjects)
1.3232 + {
1.3233 + for (i = 0 ; i < KPagingRequestsPerDevice ; ++i)
1.3234 + {
1.3235 + r = CreateRequestObject();
1.3236 + if (r != KErrNone)
1.3237 + goto done;
1.3238 + }
1.3239 + }
1.3240 +
1.3241 +done:
1.3242 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("<DemandPaging::InstallPagingDevice returns %d",r));
1.3243 + return r;
1.3244 + }
1.3245 +
1.3246 +TInt DemandPaging::DoInstallPagingDevice(DPagingDevice* aDevice, TInt aId)
1.3247 + {
1.3248 + NKern::LockSystem();
1.3249 + SPagingDevice* device = &iPagingDevices[aId];
1.3250 + if(device->iInstalled)
1.3251 + {
1.3252 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("**** Attempt to install more than one ROM paging device !!!!!!!! ****"));
1.3253 + //Panic(EDeviceAlreadyExists);
1.3254 + NKern::UnlockSystem();
1.3255 + return KErrNone;
1.3256 + }
1.3257 +
1.3258 + aDevice->iDeviceId = aId;
1.3259 + device->iDevice = aDevice;
1.3260 + device->iInstalled = ETrue;
1.3261 + NKern::UnlockSystem();
1.3262 +
1.3263 + __KTRACE_OPT2(KPAGING,KBOOT,Kern::Printf("DemandPaging::InstallPagingDevice id=%d, device=%08x",aId,device));
1.3264 +
1.3265 + return KErrNone;
1.3266 + }
1.3267 +
1.3268 +DemandPaging::DPagingRequest::~DPagingRequest()
1.3269 + {
1.3270 + if (iMutex)
1.3271 + iMutex->Close(NULL);
1.3272 + }
1.3273 +
1.3274 +TInt DemandPaging::CreateRequestObject()
1.3275 + {
1.3276 + _LIT(KLitPagingRequest,"PagingRequest-");
1.3277 +
1.3278 + TInt index;
1.3279 + TInt id = (TInt)__e32_atomic_add_ord32(&iNextPagingRequestCount, 1);
1.3280 + TLinAddr offset = id * iDeviceBufferSize;
1.3281 + TUint32 physAddr = 0;
1.3282 + TInt r = Kern::ChunkCommitContiguous(iDeviceBuffersChunk,offset,iDeviceBufferSize, physAddr);
1.3283 + if(r != KErrNone)
1.3284 + return r;
1.3285 +
1.3286 + DPagingRequest* req = new DPagingRequest();
1.3287 + if (!req)
1.3288 + return KErrNoMemory;
1.3289 +
1.3290 + req->iBuffer = iDeviceBuffers + offset;
1.3291 + AllocLoadAddress(*req, id);
1.3292 +
1.3293 + TBuf<16> mutexName(KLitPagingRequest);
1.3294 + mutexName.AppendNum(id);
1.3295 + r = K::MutexCreate(req->iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
1.3296 + if (r!=KErrNone)
1.3297 + goto done;
1.3298 +
1.3299 + // Ensure there are enough young pages to cope with new request object
1.3300 + r = ResizeLiveList(iMinimumPageCount, iMaximumPageCount);
1.3301 + if (r!=KErrNone)
1.3302 + goto done;
1.3303 +
1.3304 + NKern::LockSystem();
1.3305 + index = iPagingRequestCount++;
1.3306 + __NK_ASSERT_ALWAYS(index < KMaxPagingRequests);
1.3307 + iPagingRequests[index] = req;
1.3308 + iFreeRequestPool.AddHead(req);
1.3309 + NKern::UnlockSystem();
1.3310 +
1.3311 +done:
1.3312 + if (r != KErrNone)
1.3313 + delete req;
1.3314 +
1.3315 + return r;
1.3316 + }
1.3317 +
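+/**
+Get a paging request object to perform a page-in with. A free object is used if one is
+available; otherwise a random in-use object is chosen and its mutex is waited on, so several
+clients may end up serialising on the same request object.
+
+@pre System lock held.
+@post System lock held.
+*/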
1.3318 +DemandPaging::DPagingRequest* DemandPaging::AcquireRequestObject()
1.3319 + {
1.3320 + __ASSERT_SYSTEM_LOCK;
1.3321 + __NK_ASSERT_DEBUG(iPagingRequestCount > 0);
1.3322 +
1.3323 + DPagingRequest* req = NULL;
1.3324 +
1.3325 +	// The system lock is used to serialise access to our data structures, as we have to hold it
1.3326 +	// anyway when we wait on the mutex.
1.3327 +
1.3328 + req = (DPagingRequest*)iFreeRequestPool.GetFirst();
1.3329 + if (req != NULL)
1.3330 + __NK_ASSERT_DEBUG(req->iUsageCount == 0);
1.3331 + else
1.3332 + {
1.3333 + // Pick a random request object to wait on
1.3334 + TUint index = (FastPseudoRand() * TUint64(iPagingRequestCount)) >> 32;
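+		// Scaling trick: multiplying a 32-bit pseudo-random value by N and keeping the top 32 bits
+		// of the 64-bit product gives an approximately uniform index in the range [0, N).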
1.3335 + __NK_ASSERT_DEBUG(index < iPagingRequestCount);
1.3336 + req = iPagingRequests[index];
1.3337 + __NK_ASSERT_DEBUG(req->iUsageCount > 0);
1.3338 + }
1.3339 +
1.3340 +#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
1.3341 + ++iWaitingCount;
1.3342 + if (iWaitingCount > iMaxWaitingCount)
1.3343 + iMaxWaitingCount = iWaitingCount;
1.3344 +#endif
1.3345 +
1.3346 + ++req->iUsageCount;
1.3347 + TInt r = req->iMutex->Wait();
1.3348 + __NK_ASSERT_ALWAYS(r == KErrNone);
1.3349 +
1.3350 +#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
1.3351 + --iWaitingCount;
1.3352 + ++iPagingCount;
1.3353 + if (iPagingCount > iMaxPagingCount)
1.3354 + iMaxPagingCount = iPagingCount;
1.3355 +#endif
1.3356 +
1.3357 + return req;
1.3358 + }
1.3359 +
1.3360 +void DemandPaging::ReleaseRequestObject(DPagingRequest* aReq)
1.3361 + {
1.3362 + __ASSERT_SYSTEM_LOCK;
1.3363 +
1.3364 +#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
1.3365 + --iPagingCount;
1.3366 +#endif
1.3367 +
1.3368 + // If there are no threads waiting on the mutex then return it to the free pool
1.3369 + __NK_ASSERT_DEBUG(aReq->iUsageCount > 0);
1.3370 + if (--aReq->iUsageCount == 0)
1.3371 + iFreeRequestPool.AddHead(aReq);
1.3372 +
1.3373 + aReq->iMutex->Signal();
1.3374 + NKern::LockSystem();
1.3375 + }
1.3376 +
1.3377 +TInt DemandPaging::ReadRomPage(const DPagingRequest* aReq, TLinAddr aRomAddress)
1.3378 + {
1.3379 + START_PAGING_BENCHMARK;
1.3380 +
1.3381 + TInt pageSize = KPageSize;
1.3382 + TInt dataOffset = aRomAddress-iRomLinearBase;
1.3383 + TInt pageNumber = dataOffset>>KPageShift;
1.3384 + TInt readUnitShift = RomPagingDevice().iDevice->iReadUnitShift;
1.3385 + TInt r;
1.3386 + if(!iRomPageIndex)
1.3387 + {
1.3388 + // ROM not broken into pages, so just read it in directly
1.3389 + START_PAGING_BENCHMARK;
1.3390 + r = RomPagingDevice().iDevice->Read(const_cast<TThreadMessage*>(&aReq->iMessage),aReq->iLoadAddr,dataOffset>>readUnitShift,pageSize>>readUnitShift,-1/*token for ROM paging*/);
1.3391 + END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
1.3392 + }
1.3393 + else
1.3394 + {
1.3395 + // Work out where data for page is located
1.3396 + SRomPageInfo* romPageInfo = iRomPageIndex+pageNumber;
1.3397 + dataOffset = romPageInfo->iDataStart;
1.3398 + TInt dataSize = romPageInfo->iDataSize;
1.3399 + if(!dataSize)
1.3400 + {
1.3401 + // empty page, fill it with 0xff...
1.3402 + memset((void*)aReq->iLoadAddr,-1,pageSize);
1.3403 + r = KErrNone;
1.3404 + }
1.3405 + else
1.3406 + {
1.3407 + __NK_ASSERT_ALWAYS(romPageInfo->iPagingAttributes&SRomPageInfo::EPageable);
1.3408 +
1.3409 + // Read data for page...
1.3410 + TThreadMessage* msg= const_cast<TThreadMessage*>(&aReq->iMessage);
1.3411 + TLinAddr buffer = aReq->iBuffer;
1.3412 + TUint readStart = dataOffset>>readUnitShift;
1.3413 + TUint readSize = ((dataOffset+dataSize-1)>>readUnitShift)-readStart+1;
1.3414 + __NK_ASSERT_DEBUG((readSize<<readUnitShift)<=iDeviceBufferSize);
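+			// For example (illustrative figures only): with readUnitShift==9 (512-byte read units),
+			// dataOffset==0x1234 and dataSize==0x300 give readStart==9 and readSize==2, i.e. two whole
+			// read units are fetched and the page's data then starts 52 bytes into the buffer.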
1.3415 + START_PAGING_BENCHMARK;
1.3416 + r = RomPagingDevice().iDevice->Read(msg,buffer,readStart,readSize,-1/*token for ROM paging*/);
1.3417 + END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
1.3418 + if(r==KErrNone)
1.3419 + {
1.3420 + // Decompress data...
1.3421 + TLinAddr data = buffer+dataOffset-(readStart<<readUnitShift);
1.3422 + r = Decompress(romPageInfo->iCompressionType,aReq->iLoadAddr,data,dataSize);
1.3423 + if(r>=0)
1.3424 + {
1.3425 + __NK_ASSERT_ALWAYS(r==pageSize);
1.3426 + r = KErrNone;
1.3427 + }
1.3428 + }
1.3429 + }
1.3430 + }
1.3431 +
1.3432 + END_PAGING_BENCHMARK(this, EPagingBmReadRomPage);
1.3433 + return r;
1.3434 + }
1.3435 +
1.3436 +TInt ReadFunc(TAny* aArg1, TAny* aArg2, TLinAddr aBuffer, TInt aBlockNumber, TInt aBlockCount)
1.3437 + {
1.3438 + START_PAGING_BENCHMARK;
1.3439 + TInt drive = (TInt)aArg1;
1.3440 + TThreadMessage* msg= (TThreadMessage*)aArg2;
1.3441 + DemandPaging::SPagingDevice& device = DemandPaging::ThePager->CodePagingDevice(drive);
1.3442 + TInt r = device.iDevice->Read(msg, aBuffer, aBlockNumber, aBlockCount, drive);
1.3443 + END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmReadMedia);
1.3444 + return r;
1.3445 + }
1.3446 +
1.3447 +TInt DemandPaging::ReadCodePage(const DPagingRequest* aReq, DMmuCodeSegMemory* aCodeSegMemory, TLinAddr aCodeAddress)
1.3448 + {
1.3449 + __KTRACE_OPT(KPAGING,Kern::Printf("ReadCodePage buffer = %08x, csm == %08x, addr == %08x", aReq->iLoadAddr, aCodeSegMemory, aCodeAddress));
1.3450 +
1.3451 + START_PAGING_BENCHMARK;
1.3452 +
1.3453 + // Get the paging device for this drive
1.3454 + SPagingDevice& device = CodePagingDevice(aCodeSegMemory->iCodeLocalDrive);
1.3455 +
1.3456 + // Work out which bit of the file to read
1.3457 + SRamCodeInfo& ri = aCodeSegMemory->iRamInfo;
1.3458 + TInt codeOffset = aCodeAddress - ri.iCodeRunAddr;
1.3459 + TInt pageNumber = codeOffset >> KPageShift;
1.3460 + TBool compressed = aCodeSegMemory->iCompressionType != SRomPageInfo::ENoCompression;
1.3461 + TInt dataOffset, dataSize;
1.3462 + if (compressed)
1.3463 + {
1.3464 + dataOffset = aCodeSegMemory->iCodePageOffsets[pageNumber];
1.3465 + dataSize = aCodeSegMemory->iCodePageOffsets[pageNumber + 1] - dataOffset;
1.3466 + __KTRACE_OPT(KPAGING,Kern::Printf(" compressed, file offset == %x, size == %d", dataOffset, dataSize));
1.3467 + }
1.3468 + else
1.3469 + {
1.3470 + dataOffset = codeOffset + aCodeSegMemory->iCodeStartInFile;
1.3471 + dataSize = Min(KPageSize, aCodeSegMemory->iBlockMap.DataLength() - dataOffset);
1.3472 + __NK_ASSERT_DEBUG(dataSize >= 0);
1.3473 + __KTRACE_OPT(KPAGING,Kern::Printf(" uncompressed, file offset == %x, size == %d", dataOffset, dataSize));
1.3474 + }
1.3475 +
1.3476 + TInt bufferStart = aCodeSegMemory->iBlockMap.Read(aReq->iBuffer,
1.3477 + dataOffset,
1.3478 + dataSize,
1.3479 + device.iDevice->iReadUnitShift,
1.3480 + ReadFunc,
1.3481 + (TAny*)aCodeSegMemory->iCodeLocalDrive,
1.3482 + (TAny*)&aReq->iMessage);
1.3483 +
1.3484 +
1.3485 + TInt r = KErrNone;
1.3486 + if(bufferStart<0)
1.3487 + {
1.3488 + r = bufferStart; // return error
1.3489 + __NK_ASSERT_DEBUG(0);
1.3490 + }
1.3491 + else
1.3492 + {
1.3493 + TLinAddr data = aReq->iBuffer + bufferStart;
1.3494 + if (compressed)
1.3495 + {
1.3496 +			r = Decompress(aCodeSegMemory->iCompressionType, aReq->iLoadAddr, data, dataSize); // assign to the outer 'r' so any decompression error propagates
1.3497 + if(r>=0)
1.3498 + {
1.3499 + dataSize = Min(KPageSize, ri.iCodeSize - codeOffset);
1.3500 + if(r!=dataSize)
1.3501 + {
1.3502 + __NK_ASSERT_DEBUG(0);
1.3503 + r = KErrCorrupt;
1.3504 + }
1.3505 + else
1.3506 + r = KErrNone;
1.3507 + }
1.3508 + else
1.3509 + {
1.3510 + __NK_ASSERT_DEBUG(0);
1.3511 + }
1.3512 + }
1.3513 + else
1.3514 + {
1.3515 + #ifdef BTRACE_PAGING_VERBOSE
1.3516 + BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,SRomPageInfo::ENoCompression);
1.3517 + #endif
1.3518 + memcpy((TAny*)aReq->iLoadAddr, (TAny*)data, dataSize);
1.3519 + #ifdef BTRACE_PAGING_VERBOSE
1.3520 + BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd);
1.3521 + #endif
1.3522 + }
1.3523 + }
1.3524 +
1.3525 + if(r==KErrNone)
1.3526 + if (dataSize < KPageSize)
1.3527 +			memset((TAny*)(aReq->iLoadAddr + dataSize), 0x03, KPageSize - dataSize);	// pad the rest of the page with 0x03
1.3528 +
1.3529 + END_PAGING_BENCHMARK(this, EPagingBmReadCodePage);
1.3530 +
1.3531 +	return r;
1.3532 + }
1.3533 +
1.3534 +
1.3535 +#include "decompress.h"
1.3536 +
1.3537 +
1.3538 +TInt DemandPaging::Decompress(TInt aCompressionType,TLinAddr aDst,TLinAddr aSrc,TUint aSrcSize)
1.3539 + {
1.3540 +#ifdef BTRACE_PAGING_VERBOSE
1.3541 + BTraceContext4(BTrace::EPaging,BTrace::EPagingDecompressStart,aCompressionType);
1.3542 +#endif
1.3543 + TInt r;
1.3544 + switch(aCompressionType)
1.3545 + {
1.3546 + case SRomPageInfo::ENoCompression:
1.3547 + memcpy((void*)aDst,(void*)aSrc,aSrcSize);
1.3548 + r = aSrcSize;
1.3549 + break;
1.3550 +
1.3551 + case SRomPageInfo::EBytePair:
1.3552 + {
1.3553 + START_PAGING_BENCHMARK;
1.3554 + TUint8* srcNext=0;
1.3555 + r=BytePairDecompress((TUint8*)aDst,KPageSize,(TUint8*)aSrc,aSrcSize,srcNext);
1.3556 + if (r == KErrNone)
1.3557 + __NK_ASSERT_ALWAYS((TLinAddr)srcNext == aSrc + aSrcSize);
1.3558 + END_PAGING_BENCHMARK(this, EPagingBmDecompress);
1.3559 + }
1.3560 + break;
1.3561 +
1.3562 + default:
1.3563 + r = KErrNotSupported;
1.3564 + break;
1.3565 + }
1.3566 +#ifdef BTRACE_PAGING_VERBOSE
1.3567 + BTraceContext0(BTrace::EPaging,BTrace::EPagingDecompressEnd);
1.3568 +#endif
1.3569 + return r;
1.3570 + }
1.3571 +
1.3572 +
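+/**
+Keep the young and old lists in approximately the configured iYoungOldRatio by demoting the
+least recently used young page to the head of the old list when the old list has become too
+small. Does nothing if there are already enough old pages.
+
+@pre System lock held.
+@post System lock held.
+*/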
1.3573 +void DemandPaging::BalanceAges()
1.3574 + {
1.3575 + if(iOldCount*iYoungOldRatio>=iYoungCount)
1.3576 + return; // We have enough old pages
1.3577 +
1.3578 + // make one young page into an old page...
1.3579 +
1.3580 + __NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
1.3581 + __NK_ASSERT_DEBUG(iYoungCount);
1.3582 + SDblQueLink* link = iYoungList.Last()->Deque();
1.3583 + --iYoungCount;
1.3584 +
1.3585 + SPageInfo* pageInfo = SPageInfo::FromLink(link);
1.3586 + pageInfo->SetState(SPageInfo::EStatePagedOld);
1.3587 +
1.3588 + iOldList.AddHead(link);
1.3589 + ++iOldCount;
1.3590 +
1.3591 + SetOld(pageInfo);
1.3592 +
1.3593 +#ifdef BTRACE_PAGING_VERBOSE
1.3594 + BTraceContext4(BTrace::EPaging,BTrace::EPagingAged,pageInfo->PhysAddr());
1.3595 +#endif
1.3596 + }
1.3597 +
1.3598 +
1.3599 +void DemandPaging::AddAsYoungest(SPageInfo* aPageInfo)
1.3600 + {
1.3601 +#ifdef _DEBUG
1.3602 + SPageInfo::TType type = aPageInfo->Type();
1.3603 + __NK_ASSERT_DEBUG(type==SPageInfo::EPagedROM || type==SPageInfo::EPagedCode || type==SPageInfo::EPagedData || type==SPageInfo::EPagedCache);
1.3604 +#endif
1.3605 + aPageInfo->SetState(SPageInfo::EStatePagedYoung);
1.3606 + iYoungList.AddHead(&aPageInfo->iLink);
1.3607 + ++iYoungCount;
1.3608 + }
1.3609 +
1.3610 +
1.3611 +void DemandPaging::AddAsFreePage(SPageInfo* aPageInfo)
1.3612 + {
1.3613 +#ifdef BTRACE_PAGING
1.3614 + TPhysAddr phys = aPageInfo->PhysAddr();
1.3615 + BTraceContext4(BTrace::EPaging,BTrace::EPagingPageInFree,phys);
1.3616 +#endif
1.3617 + aPageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedOld);
1.3618 + iOldList.Add(&aPageInfo->iLink);
1.3619 + ++iOldCount;
1.3620 + }
1.3621 +
1.3622 +
1.3623 +void DemandPaging::RemovePage(SPageInfo* aPageInfo)
1.3624 + {
1.3625 + switch(aPageInfo->State())
1.3626 + {
1.3627 + case SPageInfo::EStatePagedYoung:
1.3628 + __NK_ASSERT_DEBUG(iYoungCount);
1.3629 + aPageInfo->iLink.Deque();
1.3630 + --iYoungCount;
1.3631 + break;
1.3632 +
1.3633 + case SPageInfo::EStatePagedOld:
1.3634 + __NK_ASSERT_DEBUG(iOldCount);
1.3635 + aPageInfo->iLink.Deque();
1.3636 + --iOldCount;
1.3637 + break;
1.3638 +
1.3639 + case SPageInfo::EStatePagedLocked:
1.3640 + break;
1.3641 +
1.3642 + default:
1.3643 + __NK_ASSERT_DEBUG(0);
1.3644 + }
1.3645 + aPageInfo->SetState(SPageInfo::EStatePagedDead);
1.3646 + }
1.3647 +
1.3648 +
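+/**
+Remove the least recently used page from the live list (preferring the old list), mark it
+free and rebalance the young/old lists. The returned page no longer belongs to the live list
+and can be returned to the system or reused by the caller.
+*/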
1.3649 +SPageInfo* DemandPaging::GetOldestPage()
1.3650 + {
1.3651 + // remove oldest from list...
1.3652 + SDblQueLink* link;
1.3653 + if(iOldCount)
1.3654 + {
1.3655 + __NK_ASSERT_DEBUG(!iOldList.IsEmpty());
1.3656 + link = iOldList.Last()->Deque();
1.3657 + --iOldCount;
1.3658 + }
1.3659 + else
1.3660 + {
1.3661 + __NK_ASSERT_DEBUG(iYoungCount);
1.3662 + __NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
1.3663 + link = iYoungList.Last()->Deque();
1.3664 + --iYoungCount;
1.3665 + }
1.3666 + SPageInfo* pageInfo = SPageInfo::FromLink(link);
1.3667 + pageInfo->SetState(SPageInfo::EStatePagedDead);
1.3668 +
1.3669 + // put page in a free state...
1.3670 + SetFree(pageInfo);
1.3671 + pageInfo->Change(SPageInfo::EPagedFree,SPageInfo::EStatePagedDead);
1.3672 +
1.3673 + // keep live list balanced...
1.3674 + BalanceAges();
1.3675 +
1.3676 + return pageInfo;
1.3677 + }
1.3678 +
1.3679 +
1.3680 +TBool DemandPaging::GetFreePages(TInt aNumPages)
1.3681 + {
1.3682 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: >GetFreePages %d",aNumPages));
1.3683 + NKern::LockSystem();
1.3684 +
1.3685 + while(aNumPages>0 && NumberOfFreePages()>=aNumPages)
1.3686 + {
1.3687 + // steal a page from live page list and return it to the free pool...
1.3688 + ReturnToSystem(GetOldestPage());
1.3689 + --aNumPages;
1.3690 + }
1.3691 +
1.3692 + NKern::UnlockSystem();
1.3693 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: <GetFreePages %d",!aNumPages));
1.3694 + return !aNumPages;
1.3695 + }
1.3696 +
1.3697 +
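+/**
+Donate a chunk (EChunk) page to the paging cache: the page is retyped as EPagedCache, added
+to the young list and counted as a free page, and the live list is trimmed back to
+iMaximumPageCount if necessary. Pages that have already been donated are left alone.
+*/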
1.3698 +void DemandPaging::DonateRamCachePage(SPageInfo* aPageInfo)
1.3699 + {
1.3700 + __NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
1.3701 + SPageInfo::TType type = aPageInfo->Type();
1.3702 + if(type==SPageInfo::EChunk)
1.3703 + {
1.3704 +		// Must not donate a locked page. An example is DMA-transferred memory.
1.3705 + __NK_ASSERT_DEBUG(0 == aPageInfo->LockCount());
1.3706 +
1.3707 + aPageInfo->Change(SPageInfo::EPagedCache,SPageInfo::EStatePagedYoung);
1.3708 +
1.3709 + // Update ram allocator counts as this page has changed its type
1.3710 + DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
1.3711 + iMmu->iRamPageAllocator->ChangePageType(aPageInfo, chunk->GetPageType(), EPageDiscard);
1.3712 +
1.3713 + AddAsYoungest(aPageInfo);
1.3714 + ++iNumberOfFreePages;
1.3715 + if (iMinimumPageCount + iNumberOfFreePages > iMaximumPageCount)
1.3716 + ReturnToSystem(GetOldestPage());
1.3717 + BalanceAges();
1.3718 + return;
1.3719 + }
1.3720 + // allow already donated pages...
1.3721 + __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
1.3722 + }
1.3723 +
1.3724 +
1.3725 +TBool DemandPaging::ReclaimRamCachePage(SPageInfo* aPageInfo)
1.3726 + {
1.3727 + SPageInfo::TType type = aPageInfo->Type();
1.3728 + if(type==SPageInfo::EChunk)
1.3729 + return ETrue; // page already reclaimed
1.3730 +
1.3731 + __NK_ASSERT_DEBUG(type==SPageInfo::EPagedCache);
1.3732 +
1.3733 + if(!iNumberOfFreePages)
1.3734 + return EFalse;
1.3735 + --iNumberOfFreePages;
1.3736 +
1.3737 + RemovePage(aPageInfo);
1.3738 + aPageInfo->Change(SPageInfo::EChunk,SPageInfo::EStateNormal);
1.3739 +
1.3740 + // Update ram allocator counts as this page has changed its type
1.3741 + DMemModelChunk* chunk = (DMemModelChunk*)aPageInfo->Owner();
1.3742 + iMmu->iRamPageAllocator->ChangePageType(aPageInfo, EPageDiscard, chunk->GetPageType());
1.3743 + return ETrue;
1.3744 + }
1.3745 +
1.3746 +
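+/**
+Obtain a page to page data into. The oldest free page on the live list is preferred; failing
+that a page is requested from the system (if the cache is below iMaximumPageCount), and as a
+last resort the oldest live page is stolen. The system lock may be temporarily released.
+*/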
1.3747 +SPageInfo* DemandPaging::AllocateNewPage()
1.3748 + {
1.3749 + __ASSERT_SYSTEM_LOCK
1.3750 + SPageInfo* pageInfo;
1.3751 +
1.3752 + NKern::UnlockSystem();
1.3753 + MmuBase::Wait();
1.3754 + NKern::LockSystem();
1.3755 +
1.3756 + // Try getting a free page from our active page list
1.3757 + if(iOldCount)
1.3758 + {
1.3759 + pageInfo = SPageInfo::FromLink(iOldList.Last());
1.3760 + if(pageInfo->Type()==SPageInfo::EPagedFree)
1.3761 + {
1.3762 + pageInfo = GetOldestPage();
1.3763 + goto done;
1.3764 + }
1.3765 + }
1.3766 +
1.3767 + // Try getting a free page from the system pool
1.3768 + if(iMinimumPageCount+iNumberOfFreePages<iMaximumPageCount)
1.3769 + {
1.3770 + NKern::UnlockSystem();
1.3771 + pageInfo = GetPageFromSystem();
1.3772 + NKern::LockSystem();
1.3773 + if(pageInfo)
1.3774 + goto done;
1.3775 + }
1.3776 +
1.3777 + // As a last resort, steal one from our list of active pages
1.3778 + pageInfo = GetOldestPage();
1.3779 +
1.3780 +done:
1.3781 + NKern::UnlockSystem();
1.3782 + MmuBase::Signal();
1.3783 + NKern::LockSystem();
1.3784 + return pageInfo;
1.3785 + }
1.3786 +
1.3787 +
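+/**
+Mark a live page as recently used: an old page is promoted to the head of the young list and
+the lists are rebalanced, a young page is simply moved to the head of the young list, and a
+locked page is left untouched.
+*/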
1.3788 +void DemandPaging::Rejuvenate(SPageInfo* aPageInfo)
1.3789 + {
1.3790 + SPageInfo::TState state = aPageInfo->State();
1.3791 + if(state==SPageInfo::EStatePagedOld)
1.3792 + {
1.3793 + // move page from old list to head of young list...
1.3794 + __NK_ASSERT_DEBUG(iOldCount);
1.3795 + aPageInfo->iLink.Deque();
1.3796 + --iOldCount;
1.3797 + AddAsYoungest(aPageInfo);
1.3798 + BalanceAges();
1.3799 + }
1.3800 + else if(state==SPageInfo::EStatePagedYoung)
1.3801 + {
1.3802 + // page was already young, move it to the start of the list (make it the youngest)
1.3803 + aPageInfo->iLink.Deque();
1.3804 + iYoungList.AddHead(&aPageInfo->iLink);
1.3805 + }
1.3806 + else
1.3807 + {
1.3808 + // leave locked pages alone
1.3809 + __NK_ASSERT_DEBUG(state==SPageInfo::EStatePagedLocked);
1.3810 + }
1.3811 + }
1.3812 +
1.3813 +
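+/**
+Decide how a paging fault taken by (or on behalf of) a realtime thread should be handled.
+The offending thread is reported via K::IllegalFunctionForRealtimeThread; if it is only in
+'warning' mode the fault is allowed to be paged, otherwise KErrAbort is returned and the
+access is treated as bad.
+*/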
1.3814 +TInt DemandPaging::CheckRealtimeThreadFault(DThread* aThread, TAny* aContext)
1.3815 + {
1.3816 + TInt r = KErrNone;
1.3817 + DThread* client = aThread->iIpcClient;
1.3818 +
1.3819 +	// If iIpcClient is set then we are accessing the address space of a remote thread. If we are
1.3820 +	// in an IPC trap, this will contain information about the local and remote addresses being accessed.
1.3821 +	// If it is not set then we assume that any fault must be the fault of a bad remote address.
1.3822 + TIpcExcTrap* ipcTrap = (TIpcExcTrap*)aThread->iExcTrap;
1.3823 + if (ipcTrap && !ipcTrap->IsTIpcExcTrap())
1.3824 + ipcTrap = 0;
1.3825 + if (client && (!ipcTrap || ipcTrap->ExcLocation(aThread, aContext) == TIpcExcTrap::EExcRemote))
1.3826 + {
1.3827 + // Kill client thread...
1.3828 + NKern::UnlockSystem();
1.3829 + if(K::IllegalFunctionForRealtimeThread(client,"Access to Paged Memory (by other thread)"))
1.3830 + {
1.3831 + // Treat memory access as bad...
1.3832 + r = KErrAbort;
1.3833 + }
1.3834 + // else thread is in 'warning only' state so allow paging
1.3835 + }
1.3836 + else
1.3837 + {
1.3838 + // Kill current thread...
1.3839 + NKern::UnlockSystem();
1.3840 + if(K::IllegalFunctionForRealtimeThread(NULL,"Access to Paged Memory"))
1.3841 + {
1.3842 +			// If the current thread is in a critical section, then the above kill will be deferred
1.3843 +			// and we will continue executing. We handle this by returning an error,
1.3844 +			// which means that the thread will take an exception (which hopefully is XTRAPed!)
1.3845 + r = KErrAbort;
1.3846 + }
1.3847 + // else thread is in 'warning only' state so allow paging
1.3848 + }
1.3849 +
1.3850 + NKern::LockSystem();
1.3851 + return r;
1.3852 + }
1.3853 +
1.3854 +
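+/**
+Change the minimum and maximum number of pages in the live list. A maximum of zero restores
+the values configured at boot. The minimum is clamped so that there are always enough young
+pages for the worst-case instruction plus any reserved pages and in-flight paging requests;
+pages are then taken from, or returned to, the system as required.
+*/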
1.3855 +TInt DemandPaging::ResizeLiveList(TUint aMinimumPageCount,TUint aMaximumPageCount)
1.3856 + {
1.3857 + if(!aMaximumPageCount)
1.3858 + {
1.3859 + aMinimumPageCount = iInitMinimumPageCount;
1.3860 + aMaximumPageCount = iInitMaximumPageCount;
1.3861 + }
1.3862 +
1.3863 + // Min must not be greater than max...
1.3864 + if(aMinimumPageCount>aMaximumPageCount)
1.3865 + return KErrArgument;
1.3866 +
1.3867 + NKern::ThreadEnterCS();
1.3868 + MmuBase::Wait();
1.3869 +
1.3870 + NKern::LockSystem();
1.3871 +
1.3872 + // Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
1.3873 + iMinimumPageLimit = ((KMinYoungPages + iNextPagingRequestCount) * (1 + iYoungOldRatio)) / iYoungOldRatio;
1.3874 + if(iMinimumPageLimit<KAbsoluteMinPageCount)
1.3875 + iMinimumPageLimit = KAbsoluteMinPageCount;
1.3876 + if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount)
1.3877 + aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
1.3878 + if(aMaximumPageCount<aMinimumPageCount)
1.3879 + aMaximumPageCount=aMinimumPageCount;
1.3880 +
1.3881 + // Increase iMaximumPageCount?
1.3882 + TInt extra = aMaximumPageCount-iMaximumPageCount;
1.3883 + if(extra>0)
1.3884 + iMaximumPageCount += extra;
1.3885 +
1.3886 + // Reduce iMinimumPageCount?
1.3887 + TInt spare = iMinimumPageCount-aMinimumPageCount;
1.3888 + if(spare>0)
1.3889 + {
1.3890 + iMinimumPageCount -= spare;
1.3891 + iNumberOfFreePages += spare;
1.3892 + }
1.3893 +
1.3894 + // Increase iMinimumPageCount?
1.3895 + TInt r=KErrNone;
1.3896 + while(aMinimumPageCount>iMinimumPageCount)
1.3897 + {
1.3898 + if(iNumberOfFreePages==0) // Need more pages?
1.3899 + {
1.3900 + // get a page from the system
1.3901 + NKern::UnlockSystem();
1.3902 + SPageInfo* pageInfo = GetPageFromSystem();
1.3903 + NKern::LockSystem();
1.3904 + if(!pageInfo)
1.3905 + {
1.3906 + r=KErrNoMemory;
1.3907 + break;
1.3908 + }
1.3909 + AddAsFreePage(pageInfo);
1.3910 + }
1.3911 + ++iMinimumPageCount;
1.3912 + --iNumberOfFreePages;
1.3913 + NKern::FlashSystem();
1.3914 + }
1.3915 +
1.3916 + // Reduce iMaximumPageCount?
1.3917 + while(iMaximumPageCount>aMaximumPageCount)
1.3918 + {
1.3919 + if (iMinimumPageCount+iNumberOfFreePages==iMaximumPageCount) // Need to free pages?
1.3920 + {
1.3921 + ReturnToSystem(GetOldestPage());
1.3922 + }
1.3923 + --iMaximumPageCount;
1.3924 + NKern::FlashSystem();
1.3925 + }
1.3926 +
1.3927 +#ifdef BTRACE_KERNEL_MEMORY
1.3928 + BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,ThePager->iMinimumPageCount << KPageShift);
1.3929 +#endif
1.3930 +
1.3931 + __NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
1.3932 +
1.3933 + NKern::UnlockSystem();
1.3934 +
1.3935 + MmuBase::Signal();
1.3936 + NKern::ThreadLeaveCS();
1.3937 +
1.3938 + return r;
1.3939 + }
1.3940 +
1.3941 +
1.3942 +TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
1.3943 + {
1.3944 + DemandPaging* pager = DemandPaging::ThePager;
1.3945 + switch(aFunction)
1.3946 + {
1.3947 + case EVMHalFlushCache:
1.3948 + if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalFlushCache)")))
1.3949 + K::UnlockedPlatformSecurityPanic();
1.3950 + pager->FlushAll();
1.3951 + return KErrNone;
1.3952 +
1.3953 + case EVMHalSetCacheSize:
1.3954 + {
1.3955 + if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetCacheSize)")))
1.3956 + K::UnlockedPlatformSecurityPanic();
1.3957 + TUint min = (TUint)a1>>KPageShift;
1.3958 + if((TUint)a1&KPageMask)
1.3959 + ++min;
1.3960 + TUint max = (TUint)a2>>KPageShift;
1.3961 + if((TUint)a2&KPageMask)
1.3962 + ++max;
1.3963 + return pager->ResizeLiveList(min,max);
1.3964 + }
1.3965 +
1.3966 + case EVMHalGetCacheSize:
1.3967 + {
1.3968 + SVMCacheInfo info;
1.3969 + NKern::LockSystem(); // lock system to ensure consistent set of values are read...
1.3970 + info.iMinSize = pager->iMinimumPageCount<<KPageShift;
1.3971 + info.iMaxSize = pager->iMaximumPageCount<<KPageShift;
1.3972 + info.iCurrentSize = (pager->iMinimumPageCount+pager->iNumberOfFreePages)<<KPageShift;
1.3973 + info.iMaxFreeSize = pager->iNumberOfFreePages<<KPageShift;
1.3974 + NKern::UnlockSystem();
1.3975 + kumemput32(a1,&info,sizeof(info));
1.3976 + }
1.3977 + return KErrNone;
1.3978 +
1.3979 + case EVMHalGetEventInfo:
1.3980 + {
1.3981 + SVMEventInfo info;
1.3982 + NKern::LockSystem(); // lock system to ensure consistent set of values are read...
1.3983 + info = pager->iEventInfo;
1.3984 + NKern::UnlockSystem();
1.3985 + Kern::InfoCopy(*(TDes8*)a1,(TUint8*)&info,sizeof(info));
1.3986 + }
1.3987 + return KErrNone;
1.3988 +
1.3989 + case EVMHalResetEventInfo:
1.3990 + NKern::LockSystem();
1.3991 + memclr(&pager->iEventInfo, sizeof(pager->iEventInfo));
1.3992 + NKern::UnlockSystem();
1.3993 + return KErrNone;
1.3994 +
1.3995 +#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
1.3996 + case EVMHalGetOriginalRomPages:
1.3997 + *(TPhysAddr**)a1 = pager->iOriginalRomPages;
1.3998 + *(TInt*)a2 = pager->iOriginalRomPageCount;
1.3999 + return KErrNone;
1.4000 +#endif
1.4001 +
1.4002 + case EVMPageState:
1.4003 + return pager->PageState((TLinAddr)a1);
1.4004 +
1.4005 +#ifdef __CONCURRENT_PAGING_INSTRUMENTATION__
1.4006 + case EVMHalGetConcurrencyInfo:
1.4007 + {
1.4008 + NKern::LockSystem();
1.4009 + SPagingConcurrencyInfo info = { pager->iMaxWaitingCount, pager->iMaxPagingCount };
1.4010 + NKern::UnlockSystem();
1.4011 + kumemput32(a1,&info,sizeof(info));
1.4012 + }
1.4013 + return KErrNone;
1.4014 +
1.4015 + case EVMHalResetConcurrencyInfo:
1.4016 + NKern::LockSystem();
1.4017 + pager->iMaxWaitingCount = 0;
1.4018 + pager->iMaxPagingCount = 0;
1.4019 + NKern::UnlockSystem();
1.4020 + return KErrNone;
1.4021 +#endif
1.4022 +
1.4023 +#ifdef __DEMAND_PAGING_BENCHMARKS__
1.4024 + case EVMHalGetPagingBenchmark:
1.4025 + {
1.4026 + TUint index = (TInt) a1;
1.4027 + if (index >= EMaxPagingBm)
1.4028 + return KErrNotFound;
1.4029 + NKern::LockSystem();
1.4030 + SPagingBenchmarkInfo info = pager->iBenchmarkInfo[index];
1.4031 + NKern::UnlockSystem();
1.4032 + kumemput32(a2,&info,sizeof(info));
1.4033 + }
1.4034 + return KErrNone;
1.4035 +
1.4036 + case EVMHalResetPagingBenchmark:
1.4037 + {
1.4038 + TUint index = (TInt) a1;
1.4039 + if (index >= EMaxPagingBm)
1.4040 + return KErrNotFound;
1.4041 + NKern::LockSystem();
1.4042 + pager->ResetBenchmarkData((TPagingBenchmark)index);
1.4043 + NKern::UnlockSystem();
1.4044 + }
1.4045 + return KErrNone;
1.4046 +#endif
1.4047 +
1.4048 + default:
1.4049 + return KErrNotSupported;
1.4050 + }
1.4051 + }
1.4052 +
1.4053 +void DemandPaging::Panic(TFault aFault)
1.4054 + {
1.4055 + Kern::Fault("DEMAND-PAGING",aFault);
1.4056 + }
1.4057 +
1.4058 +
1.4059 +DMutex* DemandPaging::CheckMutexOrder()
1.4060 + {
1.4061 +#ifdef _DEBUG
1.4062 + SDblQue& ml = TheCurrentThread->iMutexList;
1.4063 + if(ml.IsEmpty())
1.4064 + return NULL;
1.4065 + DMutex* mm = _LOFF(ml.First(), DMutex, iOrderLink);
1.4066 + if (KMutexOrdPageIn >= mm->iOrder)
1.4067 + return mm;
1.4068 +#endif
1.4069 + return NULL;
1.4070 + }
1.4071 +
1.4072 +
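+/**
+Grow the live list by one page which is then accounted as reserved (used by LockRegion and
+ReserveAlloc). Returns EFalse if a page could not be obtained from the system when one was
+needed. The system lock may be temporarily released.
+*/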
1.4073 +TBool DemandPaging::ReservePage()
1.4074 + {
1.4075 + __ASSERT_SYSTEM_LOCK;
1.4076 + __ASSERT_CRITICAL;
1.4077 +
1.4078 + NKern::UnlockSystem();
1.4079 + MmuBase::Wait();
1.4080 + NKern::LockSystem();
1.4081 +
1.4082 + __NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount);
1.4083 + while (iMinimumPageCount == iMinimumPageLimit + iReservePageCount &&
1.4084 + iNumberOfFreePages == 0)
1.4085 + {
1.4086 + NKern::UnlockSystem();
1.4087 + SPageInfo* pageInfo = GetPageFromSystem();
1.4088 + if(!pageInfo)
1.4089 + {
1.4090 + MmuBase::Signal();
1.4091 + NKern::LockSystem();
1.4092 + return EFalse;
1.4093 + }
1.4094 + NKern::LockSystem();
1.4095 + AddAsFreePage(pageInfo);
1.4096 + }
1.4097 + if (iMinimumPageCount == iMinimumPageLimit + iReservePageCount)
1.4098 + {
1.4099 + ++iMinimumPageCount;
1.4100 + --iNumberOfFreePages;
1.4101 + if (iMinimumPageCount > iMaximumPageCount)
1.4102 + iMaximumPageCount = iMinimumPageCount;
1.4103 + }
1.4104 + ++iReservePageCount;
1.4105 + __NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit + iReservePageCount);
1.4106 + __NK_ASSERT_DEBUG(iMinimumPageCount + iNumberOfFreePages <= iMaximumPageCount);
1.4107 +
1.4108 + NKern::UnlockSystem();
1.4109 + MmuBase::Signal();
1.4110 + NKern::LockSystem();
1.4111 + return ETrue;
1.4112 + }
1.4113 +
1.4114 +
1.4115 +TInt DemandPaging::LockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess)
1.4116 + {
1.4117 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion(%08x,%x)",aStart,aSize));
1.4118 + NKern::ThreadEnterCS();
1.4119 +
1.4120 + // calculate the number of pages required to lock aSize bytes
1.4121 + TUint32 mask=KPageMask;
1.4122 + TUint32 offset=aStart&mask;
1.4123 + TInt numPages = (aSize+offset+mask)>>KPageShift;
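+	// Example (illustrative only, assuming 4kB pages): aStart==0x80000ffc and aSize==8 give
+	// offset==0xffc and numPages==(8+0xffc+0xfff)>>12==2, since the eight bytes straddle a
+	// page boundary.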
1.4124 +
1.4125 + // Lock pages...
1.4126 + TInt r=KErrNone;
1.4127 + TLinAddr page = aStart;
1.4128 +
1.4129 + NKern::LockSystem();
1.4130 + while(--numPages>=0)
1.4131 + {
1.4132 + if (!ReservePage())
1.4133 + break;
1.4134 + TPhysAddr phys;
1.4135 + r = LockPage(page,aProcess,phys);
1.4136 + NKern::FlashSystem();
1.4137 + if(r!=KErrNone)
1.4138 + break;
1.4139 + page += KPageSize;
1.4140 + }
1.4141 +
1.4142 + NKern::UnlockSystem();
1.4143 +
1.4144 + // If error, unlock whatever we managed to lock...
1.4145 + if(r!=KErrNone)
1.4146 + {
1.4147 + while((page-=KPageSize)>=aStart)
1.4148 + {
1.4149 + NKern::LockSystem();
1.4150 +			UnlockPage(page,aProcess,KPhysAddrInvalid);	// unlock the page we actually locked, not the first one
1.4151 + --iReservePageCount;
1.4152 + NKern::UnlockSystem();
1.4153 + }
1.4154 + }
1.4155 +
1.4156 + NKern::ThreadLeaveCS();
1.4157 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockRegion returns %d",r));
1.4158 + return r;
1.4159 + }
1.4160 +
1.4161 +
1.4162 +TInt DemandPaging::UnlockRegion(TLinAddr aStart,TInt aSize,DProcess* aProcess)
1.4163 + {
1.4164 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockRegion(%08x,%x)",aStart,aSize));
1.4165 + TUint32 mask=KPageMask;
1.4166 + TUint32 offset=aStart&mask;
1.4167 + TInt numPages = (aSize+offset+mask)>>KPageShift;
1.4168 + NKern::LockSystem();
1.4169 + __NK_ASSERT_DEBUG(iReservePageCount >= (TUint)numPages);
1.4170 + while(--numPages>=0)
1.4171 + {
1.4172 + UnlockPage(aStart,aProcess,KPhysAddrInvalid);
1.4173 + --iReservePageCount;
1.4174 + NKern::FlashSystem();
1.4175 + aStart += KPageSize;
1.4176 + }
1.4177 + NKern::UnlockSystem();
1.4178 + return KErrNone;
1.4179 + }
1.4180 +
1.4181 +
1.4182 +void DemandPaging::FlushAll()
1.4183 + {
1.4184 + NKern::ThreadEnterCS();
1.4185 + MmuBase::Wait();
1.4186 + // look at all RAM pages in the system, and unmap all those used for paging
1.4187 + const TUint32* piMap = (TUint32*)KPageInfoMap;
1.4188 + const TUint32* piMapEnd = piMap+(KNumPageInfoPages>>5);
1.4189 + SPageInfo* pi = (SPageInfo*)KPageInfoLinearBase;
1.4190 + NKern::LockSystem();
1.4191 + do
1.4192 + {
1.4193 + SPageInfo* piNext = pi+(KPageInfosPerPage<<5);
1.4194 + for(TUint32 piFlags=*piMap++; piFlags; piFlags>>=1)
1.4195 + {
1.4196 + if(!(piFlags&1))
1.4197 + {
1.4198 + pi += KPageInfosPerPage;
1.4199 + continue;
1.4200 + }
1.4201 + SPageInfo* piEnd = pi+KPageInfosPerPage;
1.4202 + do
1.4203 + {
1.4204 + SPageInfo::TState state = pi->State();
1.4205 + if(state==SPageInfo::EStatePagedYoung || state==SPageInfo::EStatePagedOld)
1.4206 + {
1.4207 + RemovePage(pi);
1.4208 + SetFree(pi);
1.4209 + AddAsFreePage(pi);
1.4210 + NKern::FlashSystem();
1.4211 + }
1.4212 + ++pi;
1.4213 + const TUint KFlashCount = 64; // flash every 64 page infos (must be a power-of-2)
1.4214 + __ASSERT_COMPILE((TUint)KPageInfosPerPage >= KFlashCount);
1.4215 + if(((TUint)pi&((KFlashCount-1)<<KPageInfoShift))==0)
1.4216 + NKern::FlashSystem();
1.4217 + }
1.4218 + while(pi<piEnd);
1.4219 + }
1.4220 + pi = piNext;
1.4221 + }
1.4222 + while(piMap<piMapEnd);
1.4223 + NKern::UnlockSystem();
1.4224 +
1.4225 + // reduce live page list to a minimum
1.4226 + while(GetFreePages(1)) {};
1.4227 +
1.4228 + MmuBase::Signal();
1.4229 + NKern::ThreadLeaveCS();
1.4230 + }
1.4231 +
1.4232 +
1.4233 +TInt DemandPaging::LockPage(TLinAddr aPage, DProcess *aProcess, TPhysAddr& aPhysAddr)
1.4234 + {
1.4235 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: LockPage() %08x",aPage));
1.4236 + __ASSERT_SYSTEM_LOCK
1.4237 +
1.4238 + aPhysAddr = KPhysAddrInvalid;
1.4239 +
1.4240 + TInt r = EnsurePagePresent(aPage,aProcess);
1.4241 + if (r != KErrNone)
1.4242 + return KErrArgument; // page doesn't exist
1.4243 +
1.4244 + // get info about page to be locked...
1.4245 + TPhysAddr phys = LinearToPhysical(aPage,aProcess);
1.4246 +retry:
1.4247 + __NK_ASSERT_DEBUG(phys!=KPhysAddrInvalid);
1.4248 +
1.4249 + SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys);
1.4250 + if(!pageInfo)
1.4251 + return KErrNotFound;
1.4252 +
1.4253 + // lock it...
1.4254 + SPageInfo::TType type = pageInfo->Type();
1.4255 + if(type==SPageInfo::EShadow)
1.4256 + {
1.4257 + // get the page which is being shadowed and lock that
1.4258 + phys = (TPhysAddr)pageInfo->Owner();
1.4259 + goto retry;
1.4260 + }
1.4261 +
1.4262 + switch(pageInfo->State())
1.4263 + {
1.4264 + case SPageInfo::EStatePagedLocked:
1.4265 + // already locked, so just increment lock count...
1.4266 + ++pageInfo->PagedLock();
1.4267 + break;
1.4268 +
1.4269 + case SPageInfo::EStatePagedYoung:
1.4270 + {
1.4271 + if(type!=SPageInfo::EPagedROM && type !=SPageInfo::EPagedCode)
1.4272 + {
1.4273 + // not implemented yet
1.4274 + __NK_ASSERT_ALWAYS(0);
1.4275 + }
1.4276 +
1.4277 + // remove page to be locked from live list...
1.4278 + RemovePage(pageInfo);
1.4279 +
1.4280 + // change to locked state...
1.4281 + pageInfo->SetState(SPageInfo::EStatePagedLocked);
1.4282 + pageInfo->PagedLock() = 1; // Start with lock count of one
1.4283 +
1.4284 + // open reference on memory...
1.4285 + if(type==SPageInfo::EPagedCode)
1.4286 + {
1.4287 + DMemModelCodeSegMemory* codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner();
1.4288 + if(codeSegMemory->Open()!=KErrNone)
1.4289 + {
1.4290 + __NK_ASSERT_DEBUG(0);
1.4291 + }
1.4292 + }
1.4293 + }
1.4294 +
1.4295 + break;
1.4296 +
1.4297 + case SPageInfo::EStatePagedOld:
1.4298 + // can't happen because we forced the page to be accessible earlier
1.4299 + __NK_ASSERT_ALWAYS(0);
1.4300 + return KErrCorrupt;
1.4301 +
1.4302 + default:
1.4303 + return KErrNotFound;
1.4304 + }
1.4305 +
1.4306 + aPhysAddr = phys;
1.4307 +
1.4308 +#ifdef BTRACE_PAGING
1.4309 + BTraceContext8(BTrace::EPaging,BTrace::EPagingPageLock,phys,pageInfo->PagedLock());
1.4310 +#endif
1.4311 + return KErrNone;
1.4312 + }
1.4313 +
1.4314 +
1.4315 +TInt DemandPaging::UnlockPage(TLinAddr aPage, DProcess* aProcess, TPhysAddr aPhysAddr)
1.4316 + {
1.4317 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: UnlockPage() %08x",aPage));
1.4318 + __ASSERT_SYSTEM_LOCK;
1.4319 + __ASSERT_CRITICAL;
1.4320 +
1.4321 + // Get info about page to be unlocked
1.4322 + TPhysAddr phys = LinearToPhysical(aPage,aProcess);
1.4323 + if(phys==KPhysAddrInvalid)
1.4324 + {
1.4325 + phys = aPhysAddr;
1.4326 + if(phys==KPhysAddrInvalid)
1.4327 + return KErrNotFound;
1.4328 + }
1.4329 +retry:
1.4330 + SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(phys);
1.4331 + if(!pageInfo)
1.4332 + return KErrNotFound;
1.4333 +
1.4334 + SPageInfo::TType type = pageInfo->Type();
1.4335 + if(type==SPageInfo::EShadow)
1.4336 + {
1.4337 + // Get the page which is being shadowed and unlock that
1.4338 + phys = (TPhysAddr)pageInfo->Owner();
1.4339 + goto retry;
1.4340 + }
1.4341 +
1.4342 + __NK_ASSERT_DEBUG(phys==aPhysAddr || aPhysAddr==KPhysAddrInvalid);
1.4343 +
1.4344 + // Unlock it...
1.4345 + switch(pageInfo->State())
1.4346 + {
1.4347 + case SPageInfo::EStatePagedLocked:
1.4348 +#ifdef BTRACE_PAGING
1.4349 + BTraceContext8(BTrace::EPaging,BTrace::EPagingPageUnlock,phys,pageInfo->PagedLock());
1.4350 +#endif
1.4351 + if(!(--pageInfo->PagedLock()))
1.4352 + {
1.4353 + // get pointer to memory...
1.4354 + DMemModelCodeSegMemory* codeSegMemory = 0;
1.4355 + if(type==SPageInfo::EPagedCode)
1.4356 + codeSegMemory = (DMemModelCodeSegMemory*)pageInfo->Owner();
1.4357 +
1.4358 + // put page back on live list...
1.4359 + AddAsYoungest(pageInfo);
1.4360 + BalanceAges();
1.4361 +
1.4362 + // close reference on memory...
1.4363 + if(codeSegMemory)
1.4364 + {
1.4365 + NKern::UnlockSystem();
1.4366 + codeSegMemory->Close();
1.4367 + NKern::LockSystem();
1.4368 + }
1.4369 + }
1.4370 + break;
1.4371 +
1.4372 + default:
1.4373 + return KErrNotFound;
1.4374 + }
1.4375 +
1.4376 + return KErrNone;
1.4377 + }
1.4378 +
1.4379 +
1.4380 +
1.4381 +TInt DemandPaging::ReserveAlloc(TInt aSize, DDemandPagingLock& aLock)
1.4382 + {
1.4383 + __NK_ASSERT_DEBUG(aLock.iPages == NULL);
1.4384 +
1.4385 + // calculate the number of pages required to lock aSize bytes
1.4386 + TInt numPages = ((aSize-1+KPageMask)>>KPageShift)+1;
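+	// This is the worst-case number of pages that aSize bytes can span: the start address of the
+	// region is not known yet, so allow for it beginning at the last byte of a page.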
1.4387 +
1.4388 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: ReserveAlloc() pages %d",numPages));
1.4389 +
1.4390 + NKern::ThreadEnterCS();
1.4391 +
1.4392 + aLock.iPages = (TPhysAddr*)Kern::Alloc(numPages*sizeof(TPhysAddr));
1.4393 + if(!aLock.iPages)
1.4394 + {
1.4395 + NKern::ThreadLeaveCS();
1.4396 + return KErrNoMemory;
1.4397 + }
1.4398 +
1.4399 + MmuBase::Wait();
1.4400 + NKern::LockSystem();
1.4401 +
1.4402 + // reserve pages, adding more if necessary
1.4403 + while (aLock.iReservedPageCount < numPages)
1.4404 + {
1.4405 + if (!ReservePage())
1.4406 + break;
1.4407 + ++aLock.iReservedPageCount;
1.4408 + }
1.4409 +
1.4410 + NKern::UnlockSystem();
1.4411 + MmuBase::Signal();
1.4412 +
1.4413 + TBool enoughPages = aLock.iReservedPageCount == numPages;
1.4414 + if(!enoughPages)
1.4415 + ReserveFree(aLock);
1.4416 +
1.4417 + NKern::ThreadLeaveCS();
1.4418 + return enoughPages ? KErrNone : KErrNoMemory;
1.4419 + }
1.4420 +
1.4421 +
1.4422 +
1.4423 +void DemandPaging::ReserveFree(DDemandPagingLock& aLock)
1.4424 + {
1.4425 + NKern::ThreadEnterCS();
1.4426 +
1.4427 + // make sure pages aren't still locked
1.4428 + ReserveUnlock(aLock);
1.4429 +
1.4430 + NKern::LockSystem();
1.4431 + __NK_ASSERT_DEBUG(iReservePageCount >= (TUint)aLock.iReservedPageCount);
1.4432 + iReservePageCount -= aLock.iReservedPageCount;
1.4433 + aLock.iReservedPageCount = 0;
1.4434 + NKern::UnlockSystem();
1.4435 +
1.4436 + // free page array...
1.4437 + Kern::Free(aLock.iPages);
1.4438 + aLock.iPages = 0;
1.4439 +
1.4440 + NKern::ThreadLeaveCS();
1.4441 + }
1.4442 +
1.4443 +
1.4444 +
1.4445 +TBool DemandPaging::ReserveLock(DThread* aThread, TLinAddr aStart,TInt aSize, DDemandPagingLock& aLock)
1.4446 + {
1.4447 + if(aLock.iLockedPageCount)
1.4448 + Panic(ELockTwice);
1.4449 +
1.4450 + // calculate the number of pages that need to be locked...
1.4451 + TUint32 mask=KPageMask;
1.4452 + TUint32 offset=aStart&mask;
1.4453 + TInt numPages = (aSize+offset+mask)>>KPageShift;
1.4454 + if(numPages>aLock.iReservedPageCount)
1.4455 + Panic(ELockTooBig);
1.4456 +
1.4457 + NKern::LockSystem();
1.4458 +
1.4459 + // lock the pages
1.4460 + TBool locked = EFalse; // becomes true if any pages were locked
1.4461 + DProcess* process = aThread->iOwningProcess;
1.4462 + TLinAddr page=aStart;
1.4463 + TInt count=numPages;
1.4464 + TPhysAddr* physPages = aLock.iPages;
1.4465 + while(--count>=0)
1.4466 + {
1.4467 + if(LockPage(page,process,*physPages)==KErrNone)
1.4468 + locked = ETrue;
1.4469 + NKern::FlashSystem();
1.4470 + page += KPageSize;
1.4471 + ++physPages;
1.4472 + }
1.4473 +
1.4474 + // if any pages were locked, save the lock info...
1.4475 + if(locked)
1.4476 + {
1.4477 + if(aLock.iLockedPageCount)
1.4478 + Panic(ELockTwice);
1.4479 + aLock.iLockedStart = aStart;
1.4480 + aLock.iLockedPageCount = numPages;
1.4481 + aLock.iProcess = process;
1.4482 + aLock.iProcess->Open();
1.4483 + }
1.4484 +
1.4485 + NKern::UnlockSystem();
1.4486 + return locked;
1.4487 + }
1.4488 +
1.4489 +
1.4490 +
1.4491 +void DemandPaging::ReserveUnlock(DDemandPagingLock& aLock)
1.4492 + {
1.4493 + NKern::ThreadEnterCS();
1.4494 +
1.4495 + DProcess* process = NULL;
1.4496 + NKern::LockSystem();
1.4497 + TInt numPages = aLock.iLockedPageCount;
1.4498 + TLinAddr page = aLock.iLockedStart;
1.4499 + TPhysAddr* physPages = aLock.iPages;
1.4500 + while(--numPages>=0)
1.4501 + {
1.4502 + UnlockPage(page, aLock.iProcess,*physPages);
1.4503 + NKern::FlashSystem();
1.4504 + page += KPageSize;
1.4505 + ++physPages;
1.4506 + }
1.4507 + process = aLock.iProcess;
1.4508 + aLock.iProcess = NULL;
1.4509 + aLock.iLockedPageCount = 0;
1.4510 + NKern::UnlockSystem();
1.4511 + if (process)
1.4512 + process->Close(NULL);
1.4513 +
1.4514 + NKern::ThreadLeaveCS();
1.4515 + }
1.4516 +
1.4517 +/**
1.4518 +Check whether the specified page can be discarded by the RAM cache.
1.4519 +
1.4520 +@param aPageInfo The page info of the page being queried.
1.4521 +@return ETrue when the page can be discarded, EFalse otherwise.
1.4522 +@pre System lock held.
1.4523 +@post System lock held.
1.4524 +*/
1.4525 +TBool DemandPaging::IsPageDiscardable(SPageInfo& aPageInfo)
1.4526 + {
1.4527 + // on live list?
1.4528 + SPageInfo::TState state = aPageInfo.State();
1.4529 + return (state == SPageInfo::EStatePagedYoung || state == SPageInfo::EStatePagedOld);
1.4530 + }
1.4531 +
1.4532 +
1.4533 +/**
1.4534 +Discard the specified page.
1.4535 +Should only be called on a page if a previous call to IsPageDiscardable()
1.4536 +returned ETrue and the system lock hasn't been released between the calls.
1.4537 +
1.4538 +@param aPageInfo The page info of the page to be discarded
1.4539 +@param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into.
1.4540 +@param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached
1.4541 +in preference ordering. EFalse otherwise.
1.4542 +@return ETrue if the page could be discarded, EFalse otherwise.
1.4543 +
1.4544 +@pre System lock held.
1.4545 +@post System lock held.
1.4546 +*/
1.4547 +TBool DemandPaging::DoDiscardPage(SPageInfo& aPageInfo, TUint aBlockedZoneId, TBool aBlockRest)
1.4548 + {
1.4549 + __ASSERT_SYSTEM_LOCK;
1.4550 + // Ensure that we don't reduce the cache beyond its minimum.
1.4551 + if (iNumberOfFreePages == 0)
1.4552 + {
1.4553 + NKern::UnlockSystem();
1.4554 + SPageInfo* newPage = GetPageFromSystem(aBlockedZoneId, aBlockRest);
1.4555 + NKern::LockSystem();
1.4556 + if (newPage == NULL)
1.4557 + {// couldn't allocate a new page
1.4558 + return EFalse;
1.4559 + }
1.4560 + if (IsPageDiscardable(aPageInfo))
1.4561 + {// page can still be discarded so use new page
1.4562 + // and discard old one
1.4563 + AddAsFreePage(newPage);
1.4564 + RemovePage(&aPageInfo);
1.4565 + SetFree(&aPageInfo);
1.4566 + ReturnToSystem(&aPageInfo);
1.4567 + BalanceAges();
1.4568 + return ETrue;
1.4569 + }
1.4570 + else
1.4571 + {// page no longer discardable so no longer require new page
1.4572 + ReturnToSystem(newPage);
1.4573 + return EFalse;
1.4574 + }
1.4575 + }
1.4576 +
1.4577 + // Discard the page
1.4578 + RemovePage(&aPageInfo);
1.4579 + SetFree(&aPageInfo);
1.4580 + ReturnToSystem(&aPageInfo);
1.4581 + BalanceAges();
1.4582 +
1.4583 + return ETrue;
1.4584 + }
1.4585 +
1.4586 +
1.4587 +/**
1.4588 +First stage in discarding a list of pages.
1.4589 +
1.4590 +Must ensure that the pages will still be discardable even if system lock is released.
1.4591 +To be used in conjunction with RamCacheBase::DoDiscardPages1().
1.4592 +
1.4593 +@param aPageList A NULL terminated list of the pages to be discarded
1.4594 +@return KErrNone on success.
1.4595 +
1.4596 +@pre System lock held
1.4597 +@post System lock held
1.4598 +*/
1.4599 +TInt DemandPaging::DoDiscardPages0(SPageInfo** aPageList)
1.4600 + {
1.4601 + __ASSERT_SYSTEM_LOCK;
1.4602 +
1.4603 + SPageInfo* pageInfo;
1.4604 + while((pageInfo = *aPageList++) != 0)
1.4605 + {
1.4606 + RemovePage(pageInfo);
1.4607 + }
1.4608 + return KErrNone;
1.4609 + }
1.4610 +
1.4611 +
1.4612 +/**
1.4613 +Final stage in discarding a list of pages.
1.4614 +Finish discarding the pages previously removed by RamCacheBase::DoDiscardPages0().
1.4615 +
1.4616 +@param aPageList A NULL terminated list of the pages to be discarded
1.4617 +@return KErrNone on success.
1.4618 +
1.4619 +@pre System lock held
1.4620 +@post System lock held
1.4621 +*/
1.4622 +TInt DemandPaging::DoDiscardPages1(SPageInfo** aPageList)
1.4623 + {
1.4624 + __ASSERT_SYSTEM_LOCK;
1.4625 +
1.4626 + SPageInfo* pageInfo;
1.4627 + while((pageInfo = *aPageList++)!=0)
1.4628 + {
1.4629 + SetFree(pageInfo);
1.4630 + ReturnToSystem(pageInfo);
1.4631 + BalanceAges();
1.4632 + }
1.4633 + return KErrNone;
1.4634 + }
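+
+// Illustrative sketch (hypothetical caller, not in this file): discarding a batch of
+// pages with the two-stage protocol above.  The array is NULL-terminated and the
+// system lock is held, as required by the @pre conditions; 'pager', 'list' and
+// KMaxBatch are placeholder names.
+//
+//     SPageInfo* list[KMaxBatch + 1];
+//     // ... fill list[] with discardable pages, terminate it with NULL ...
+//     NKern::LockSystem();
+//     pager->DoDiscardPages0(list);   // stage 0: unlink the pages from the live list
+//     pager->DoDiscardPages1(list);   // stage 1: free them and return them to the system
+//     NKern::UnlockSystem();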
1.4635 +
1.4636 +
1.4637 +TBool DemandPaging::MayBePaged(TLinAddr aStartAddr, TUint aLength)
1.4638 + {
1.4639 + TLinAddr endAddr = aStartAddr + aLength;
1.4640 + TBool rangeTouchesPagedRom =
1.4641 + TUint(aStartAddr - iRomPagedLinearBase) < iRomSize ||
1.4642 + TUint(endAddr - iRomPagedLinearBase) < iRomSize;
1.4643 + TBool rangeTouchesCodeArea =
1.4644 + TUint(aStartAddr - iCodeLinearBase) < iCodeSize ||
1.4645 + TUint(endAddr - iCodeLinearBase) < iCodeSize;
1.4646 + return rangeTouchesPagedRom || rangeTouchesCodeArea;
1.4647 + }
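+
+// Note on the range tests above: casting (addr - base) to TUint folds the two signed
+// checks 'base <= addr && addr < base + size' into one unsigned comparison.  For
+// example (illustrative values only), with iRomPagedLinearBase == 0x80000000 and
+// iRomSize == 0x00100000, an address of 0x7FFFF000 wraps to 0xFFFFF000 after the
+// subtraction and so correctly fails the '< iRomSize' test.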
1.4648 +
1.4649 +
1.4650 +#ifdef __DEMAND_PAGING_BENCHMARKS__
1.4651 +
1.4652 +void DemandPaging::ResetBenchmarkData(TPagingBenchmark aBm)
1.4653 + {
1.4654 + SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
1.4655 + info.iCount = 0;
1.4656 + info.iTotalTime = 0;
1.4657 + info.iMaxTime = 0;
1.4658 + info.iMinTime = KMaxTInt;
1.4659 + }
1.4660 +
1.4661 +void DemandPaging::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
1.4662 + {
1.4663 + SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
1.4664 + ++info.iCount;
1.4665 +#if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
1.4666 + TInt64 elapsed = aEndTime - aStartTime;
1.4667 +#else
1.4668 + TInt64 elapsed = aStartTime - aEndTime;
1.4669 +#endif
1.4670 + info.iTotalTime += elapsed;
1.4671 + if (elapsed > info.iMaxTime)
1.4672 + info.iMaxTime = elapsed;
1.4673 + if (elapsed < info.iMinTime)
1.4674 + info.iMinTime = elapsed;
1.4675 + }
1.4676 +
1.4677 +#endif
1.4678 +
1.4679 +
1.4680 +//
1.4681 +// DDemandPagingLock
1.4682 +//
1.4683 +
1.4684 +EXPORT_C DDemandPagingLock::DDemandPagingLock()
1.4685 + : iThePager(DemandPaging::ThePager), iReservedPageCount(0), iLockedPageCount(0), iPages(0)
1.4686 + {
1.4687 + }
1.4688 +
1.4689 +
1.4690 +EXPORT_C TInt DDemandPagingLock::Alloc(TInt aSize)
1.4691 + {
1.4692 + if (iThePager)
1.4693 + return iThePager->ReserveAlloc(aSize,*this);
1.4694 + else
1.4695 + return KErrNone;
1.4696 + }
1.4697 +
1.4698 +
1.4699 +EXPORT_C void DDemandPagingLock::DoUnlock()
1.4700 + {
1.4701 + if (iThePager)
1.4702 + iThePager->ReserveUnlock(*this);
1.4703 + }
1.4704 +
1.4705 +
1.4706 +EXPORT_C void DDemandPagingLock::Free()
1.4707 + {
1.4708 + if (iThePager)
1.4709 + iThePager->ReserveFree(*this);
1.4710 + }
1.4711 +
1.4712 +
1.4713 +EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
1.4714 + {
1.4715 + if (DemandPaging::ThePager)
1.4716 + return DemandPaging::ThePager->InstallPagingDevice(aDevice);
1.4717 + else
1.4718 + return KErrNotSupported;
1.4719 + }
1.4720 +
1.4721 +
1.4722 +#else // !__DEMAND_PAGING__
1.4723 +
1.4724 +EXPORT_C DDemandPagingLock::DDemandPagingLock()
1.4725 + : iLockedPageCount(0)
1.4726 + {
1.4727 + }
1.4728 +
1.4729 +EXPORT_C TInt DDemandPagingLock::Alloc(TInt /*aSize*/)
1.4730 + {
1.4731 + return KErrNone;
1.4732 + }
1.4733 +
1.4734 +EXPORT_C TBool DDemandPagingLock::Lock(DThread* /*aThread*/, TLinAddr /*aStart*/, TInt /*aSize*/)
1.4735 + {
1.4736 + return EFalse;
1.4737 + }
1.4738 +
1.4739 +EXPORT_C void DDemandPagingLock::DoUnlock()
1.4740 + {
1.4741 + }
1.4742 +
1.4743 +EXPORT_C void DDemandPagingLock::Free()
1.4744 + {
1.4745 + }
1.4746 +
1.4747 +EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
1.4748 + {
1.4749 + return KErrNotSupported;
1.4750 + }
1.4751 +
1.4752 +#endif // __DEMAND_PAGING__
1.4753 +
1.4754 +
1.4755 +DMmuCodeSegMemory::DMmuCodeSegMemory(DEpocCodeSeg* aCodeSeg)
1.4756 + : DEpocCodeSegMemory(aCodeSeg), iCodeAllocBase(KMinTInt)
1.4757 + {
1.4758 + }
1.4759 +
1.4760 +//#define __DUMP_BLOCKMAP_INFO
1.4761 +DMmuCodeSegMemory::~DMmuCodeSegMemory()
1.4762 + {
1.4763 +#ifdef __DEMAND_PAGING__
1.4764 + Kern::Free(iCodeRelocTable);
1.4765 + Kern::Free(iCodePageOffsets);
1.4766 + Kern::Free(iDataSectionMemory);
1.4767 +#endif
1.4768 + }
1.4769 +
1.4770 +#ifdef __DEMAND_PAGING__
1.4771 +
1.4772 +/**
1.4773 +Read and process the block map and related data.
1.4774 +*/
1.4775 +TInt DMmuCodeSegMemory::ReadBlockMap(const TCodeSegCreateInfo& aInfo)
1.4776 + {
1.4777 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading block map for %C", iCodeSeg));
1.4778 +
1.4779 + if (aInfo.iCodeBlockMapEntriesSize <= 0)
1.4780 + return KErrArgument; // no block map provided
1.4781 +
1.4782 + // Get compression data
1.4783 + switch (aInfo.iCompressionType)
1.4784 + {
1.4785 + case KFormatNotCompressed:
1.4786 + iCompressionType = SRomPageInfo::ENoCompression;
1.4787 + break;
1.4788 +
1.4789 + case KUidCompressionBytePair:
1.4790 + {
1.4791 + iCompressionType = SRomPageInfo::EBytePair;
1.4792 + if (!aInfo.iCodePageOffsets)
1.4793 + return KErrArgument;
1.4794 + TInt size = sizeof(TInt32) * (iPageCount + 1);
1.4795 + iCodePageOffsets = (TInt32*)Kern::Alloc(size);
1.4796 + if (!iCodePageOffsets)
1.4797 + return KErrNoMemory;
1.4798 + kumemget32(iCodePageOffsets, aInfo.iCodePageOffsets, size);
1.4799 +
1.4800 +#ifdef __DUMP_BLOCKMAP_INFO
1.4801 + Kern::Printf("CodePageOffsets:");
1.4802 + for (TInt i = 0 ; i < iPageCount + 1 ; ++i)
1.4803 + Kern::Printf(" %08x", iCodePageOffsets[i]);
1.4804 +#endif
1.4805 +
1.4806 + TInt last = 0;
1.4807 + for (TInt j = 0 ; j < iPageCount + 1 ; ++j)
1.4808 + {
1.4809 + if (iCodePageOffsets[j] < last ||
1.4810 + iCodePageOffsets[j] > (aInfo.iCodeLengthInFile + aInfo.iCodeStartInFile))
1.4811 + {
1.4812 + __NK_ASSERT_DEBUG(0);
1.4813 + return KErrCorrupt;
1.4814 + }
1.4815 + last = iCodePageOffsets[j];
1.4816 + }
1.4817 + }
1.4818 + break;
1.4819 +
1.4820 + default:
1.4821 + return KErrNotSupported;
1.4822 + }
1.4823 +
1.4824 + // Copy block map data itself...
1.4825 +
1.4826 +#ifdef __DUMP_BLOCKMAP_INFO
1.4827 + Kern::Printf("Original block map");
1.4828 + Kern::Printf(" block granularity: %d", aInfo.iCodeBlockMapCommon.iBlockGranularity);
1.4829 + Kern::Printf(" block start offset: %x", aInfo.iCodeBlockMapCommon.iBlockStartOffset);
1.4830 + Kern::Printf(" start block address: %016lx", aInfo.iCodeBlockMapCommon.iStartBlockAddress);
1.4831 + Kern::Printf(" local drive number: %d", aInfo.iCodeBlockMapCommon.iLocalDriveNumber);
1.4832 + Kern::Printf(" entry size: %d", aInfo.iCodeBlockMapEntriesSize);
1.4833 +#endif
1.4834 +
1.4835 + // Find relevant paging device
1.4836 + iCodeLocalDrive = aInfo.iCodeBlockMapCommon.iLocalDriveNumber;
1.4837 + if (TUint(iCodeLocalDrive) >= (TUint)KMaxLocalDrives)
1.4838 + {
1.4839 + __KTRACE_OPT(KPAGING,Kern::Printf("Bad local drive number"));
1.4840 + return KErrArgument;
1.4841 + }
1.4842 + DemandPaging* pager = DemandPaging::ThePager;
1.4843 +
1.4844 + if (!pager->CodePagingDevice(iCodeLocalDrive).iInstalled)
1.4845 + {
1.4846 + __KTRACE_OPT(KPAGING,Kern::Printf("No paging device installed for drive"));
1.4847 + return KErrNotSupported;
1.4848 + }
1.4849 + DPagingDevice* device = pager->CodePagingDevice(iCodeLocalDrive).iDevice;
1.4850 +
1.4851 + // Set code start offset
1.4852 + iCodeStartInFile = aInfo.iCodeStartInFile;
1.4853 + if (iCodeStartInFile < 0)
1.4854 + {
1.4855 + __KTRACE_OPT(KPAGING,Kern::Printf("Bad code start offset"));
1.4856 + return KErrArgument;
1.4857 + }
1.4858 +
1.4859 + // Allocate buffer for block map and copy from user-side
1.4860 + TBlockMapEntryBase* buffer = (TBlockMapEntryBase*)Kern::Alloc(aInfo.iCodeBlockMapEntriesSize);
1.4861 + if (!buffer)
1.4862 + return KErrNoMemory;
1.4863 + kumemget32(buffer, aInfo.iCodeBlockMapEntries, aInfo.iCodeBlockMapEntriesSize);
1.4864 +
1.4865 +#ifdef __DUMP_BLOCKMAP_INFO
1.4866 + Kern::Printf(" entries:");
1.4867 + for (TInt k = 0 ; k < aInfo.iCodeBlockMapEntriesSize / sizeof(TBlockMapEntryBase) ; ++k)
1.4868 + Kern::Printf(" %d: %d blocks at %08x", k, buffer[k].iNumberOfBlocks, buffer[k].iStartBlock);
1.4869 +#endif
1.4870 +
1.4871 + // Initialise block map
1.4872 + TInt r = iBlockMap.Initialise(aInfo.iCodeBlockMapCommon,
1.4873 + buffer,
1.4874 + aInfo.iCodeBlockMapEntriesSize,
1.4875 + device->iReadUnitShift,
1.4876 + iCodeStartInFile + aInfo.iCodeLengthInFile);
1.4877 + if (r != KErrNone)
1.4878 + {
1.4879 + Kern::Free(buffer);
1.4880 + return r;
1.4881 + }
1.4882 +
1.4883 +#if defined(__DUMP_BLOCKMAP_INFO) && defined(_DEBUG)
1.4884 + iBlockMap.Dump();
1.4885 +#endif
1.4886 +
1.4887 + return KErrNone;
1.4888 + }
1.4889 +
1.4890 +/**
1.4891 +Read code relocation table and import fixup table from user side.
1.4892 +*/
1.4893 +TInt DMmuCodeSegMemory::ReadFixupTables(const TCodeSegCreateInfo& aInfo)
1.4894 + {
1.4895 + __KTRACE_OPT(KPAGING,Kern::Printf("DP: Reading fixup tables for %C", iCodeSeg));
1.4896 +
1.4897 + iCodeRelocTableSize = aInfo.iCodeRelocTableSize;
1.4898 + iImportFixupTableSize = aInfo.iImportFixupTableSize;
1.4899 + iCodeDelta = aInfo.iCodeDelta;
1.4900 + iDataDelta = aInfo.iDataDelta;
1.4901 +
1.4902 + // round sizes to four-byte boundaries...
1.4903 + TInt relocSize = (iCodeRelocTableSize + 3) & ~3;
1.4904 + TInt fixupSize = (iImportFixupTableSize + 3) & ~3;
1.4905 +
1.4906 + // copy relocs and fixups...
1.4907 + iCodeRelocTable = (TUint8*)Kern::Alloc(relocSize+fixupSize);
1.4908 + if (!iCodeRelocTable)
1.4909 + return KErrNoMemory;
1.4910 + iImportFixupTable = iCodeRelocTable + relocSize;
1.4911 + kumemget32(iCodeRelocTable, aInfo.iCodeRelocTable, relocSize);
1.4912 + kumemget32(iImportFixupTable, aInfo.iImportFixupTable, fixupSize);
1.4913 +
1.4914 + return KErrNone;
1.4915 + }
1.4916 +
1.4917 +#endif
1.4918 +
1.4919 +
1.4920 +TInt DMmuCodeSegMemory::Create(TCodeSegCreateInfo& aInfo)
1.4921 + {
1.4922 + TInt r = KErrNone;
1.4923 + if (!aInfo.iUseCodePaging)
1.4924 + iPageCount=(iRamInfo.iCodeSize+iRamInfo.iDataSize+KPageMask)>>KPageShift;
1.4925 + else
1.4926 + {
1.4927 +#ifdef __DEMAND_PAGING__
1.4928 + iDataSectionMemory = Kern::Alloc(iRamInfo.iDataSize);
1.4929 + if (!iDataSectionMemory)
1.4930 + return KErrNoMemory;
1.4931 +
1.4932 + iPageCount=(iRamInfo.iCodeSize+KPageMask)>>KPageShift;
1.4933 + iDataPageCount=(iRamInfo.iDataSize+KPageMask)>>KPageShift;
1.4934 +
1.4935 + r = ReadBlockMap(aInfo);
1.4936 + if (r != KErrNone)
1.4937 + return r;
1.4938 +
1.4939 + iIsDemandPaged = ETrue;
1.4940 + iCodeSeg->iAttr |= ECodeSegAttCodePaged;
1.4941 +#endif
1.4942 + }
1.4943 +
1.4944 + iCodeSeg->iSize = (iPageCount+iDataPageCount)<<KPageShift;
1.4945 + return r;
1.4946 + }
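+
+// Worked example of the rounding above (illustrative figures, assuming a 4KB page):
+// a paged code segment with iRamInfo.iCodeSize == 0x5800 and iRamInfo.iDataSize ==
+// 0x0C00 gets iPageCount == 6 and iDataPageCount == 1, so iCodeSeg->iSize is set to
+// 7 pages, i.e. 0x7000 bytes.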
1.4947 +
1.4948 +
1.4949 +TInt DMmuCodeSegMemory::Loaded(TCodeSegCreateInfo& aInfo)
1.4950 + {
1.4951 +#ifdef __DEMAND_PAGING__
1.4952 + if(iIsDemandPaged)
1.4953 + {
1.4954 + TInt r = ReadFixupTables(aInfo);
1.4955 + if (r != KErrNone)
1.4956 + return r;
1.4957 + }
1.4958 + TAny* dataSection = iDataSectionMemory;
1.4959 + if(dataSection)
1.4960 + {
1.4961 + UNLOCK_USER_MEMORY();
1.4962 + memcpy(dataSection,(TAny*)iRamInfo.iDataLoadAddr,iRamInfo.iDataSize);
1.4963 + LOCK_USER_MEMORY();
1.4964 + iRamInfo.iDataLoadAddr = (TLinAddr)dataSection;
1.4965 + }
1.4966 +#endif
1.4967 + return KErrNone;
1.4968 + }
1.4969 +
1.4970 +
1.4971 +void DMmuCodeSegMemory::ApplyCodeFixups(TUint32* aBuffer, TLinAddr aDestAddress)
1.4972 + {
1.4973 + __NK_ASSERT_DEBUG(iRamInfo.iCodeRunAddr==iRamInfo.iCodeLoadAddr); // code doesn't work if this isn't true
1.4974 +
1.4975 + START_PAGING_BENCHMARK;
1.4976 +
1.4977 + TUint offset = aDestAddress - iRamInfo.iCodeRunAddr;
1.4978 + __ASSERT_ALWAYS(offset < (TUint)(iRamInfo.iCodeSize + iRamInfo.iDataSize), K::Fault(K::ECodeSegBadFixupAddress));
1.4979 +
1.4980 + // Index tables are only valid for pages containing code
1.4981 + if (offset >= (TUint)iRamInfo.iCodeSize)
1.4982 + return;
1.4983 +
1.4984 + UNLOCK_USER_MEMORY();
1.4985 +
1.4986 + TInt page = offset >> KPageShift;
1.4987 +
1.4988 + // Relocate code
1.4989 +
1.4990 + if (iCodeRelocTableSize > 0)
1.4991 + {
1.4992 + TUint32* codeRelocTable32 = (TUint32*)iCodeRelocTable;
1.4993 + TUint startOffset = codeRelocTable32[page];
1.4994 + TUint endOffset = codeRelocTable32[page + 1];
1.4995 +
1.4996 + __KTRACE_OPT(KPAGING, Kern::Printf("Performing code relocation: start == %x, end == %x", startOffset, endOffset));
1.4997 + __ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iCodeRelocTableSize,
1.4998 + K::Fault(K::ECodeSegBadFixupTables));
1.4999 +
1.5000 + TUint8* codeRelocTable8 = (TUint8*)codeRelocTable32;
1.5001 + const TUint16* ptr = (const TUint16*)(codeRelocTable8 + startOffset);
1.5002 + const TUint16* end = (const TUint16*)(codeRelocTable8 + endOffset);
1.5003 +
1.5004 + const TUint32 codeDelta = iCodeDelta;
1.5005 + const TUint32 dataDelta = iDataDelta;
1.5006 +
1.5007 + while (ptr < end)
1.5008 + {
1.5009 + TUint16 entry = *ptr++;
1.5010 +
1.5011 + // address of word to fix up is sum of page start and 12-bit offset
1.5012 + TUint32* addr = (TUint32*)((TUint8*)aBuffer + (entry & 0x0fff));
1.5013 +
1.5014 + TUint32 word = *addr;
1.5015 +#ifdef _DEBUG
1.5016 + TInt type = entry & 0xf000;
1.5017 + __NK_ASSERT_DEBUG(type == KTextRelocType || type == KDataRelocType);
1.5018 +#endif
1.5019 + if (entry < KDataRelocType /* => type == KTextRelocType */)
1.5020 + word += codeDelta;
1.5021 + else
1.5022 + word += dataDelta;
1.5023 + *addr = word;
1.5024 + }
1.5025 + }
1.5026 +
1.5027 + // Fixup imports
1.5028 +
1.5029 + if (iImportFixupTableSize > 0)
1.5030 + {
1.5031 + TUint32* importFixupTable32 = (TUint32*)iImportFixupTable;
1.5032 + TUint startOffset = importFixupTable32[page];
1.5033 + TUint endOffset = importFixupTable32[page + 1];
1.5034 +
1.5035 + __KTRACE_OPT(KPAGING, Kern::Printf("Performing import fixup: start == %x, end == %x", startOffset, endOffset));
1.5036 + __ASSERT_ALWAYS(startOffset <= endOffset && endOffset <= (TUint)iImportFixupTableSize,
1.5037 + K::Fault(K::ECodeSegBadFixupTables));
1.5038 +
1.5039 + TUint8* importFixupTable8 = (TUint8*)importFixupTable32;
1.5040 + const TUint16* ptr = (const TUint16*)(importFixupTable8 + startOffset);
1.5041 + const TUint16* end = (const TUint16*)(importFixupTable8 + endOffset);
1.5042 +
1.5043 + while (ptr < end)
1.5044 + {
1.5045 + TUint16 offset = *ptr++;
1.5046 +
1.5047 + // get word to write into that address
1.5048 + // (don't read as a single TUint32 because it may not be word-aligned)
1.5049 + TUint32 wordLow = *ptr++;
1.5050 + TUint32 wordHigh = *ptr++;
1.5051 + TUint32 word = (wordHigh << 16) | wordLow;
1.5052 +
1.5053 + __KTRACE_OPT(KPAGING, Kern::Printf("DP: Fixup %08x=%08x", iRamInfo.iCodeRunAddr+(page<<KPageShift)+offset, word));
1.5054 + *(TUint32*)((TLinAddr)aBuffer+offset) = word;
1.5055 + }
1.5056 + }
1.5057 +
1.5058 + LOCK_USER_MEMORY();
1.5059 +
1.5060 + END_PAGING_BENCHMARK(DemandPaging::ThePager, EPagingBmFixupCodePage);
1.5061 + }
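+
+// Illustrative example of the relocation entries handled above (values are made up):
+// each 16-bit entry packs a 4-bit type in the top bits and a 12-bit offset of the
+// word within the page in the low bits.  An entry with offset 0x008 whose value
+// compares below KDataRelocType adds codeDelta to the word at (TUint8*)aBuffer + 0x008;
+// any other entry adds dataDelta instead.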
1.5062 +
1.5063 +
1.5064 +TInt DMmuCodeSegMemory::ApplyCodeFixupsOnLoad(TUint32* aBuffer, TLinAddr aDestAddress)
1.5065 + {
1.5066 +#ifdef __DEMAND_PAGING__
1.5067 + TInt r=DemandPaging::ThePager->LockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess());
1.5068 + if(r!=KErrNone)
1.5069 + return r;
1.5070 +#endif
1.5071 + ApplyCodeFixups(aBuffer,aDestAddress);
1.5072 + UNLOCK_USER_MEMORY();
1.5073 + CacheMaintenance::CodeChanged((TLinAddr)aBuffer, KPageSize);
1.5074 + LOCK_USER_MEMORY();
1.5075 +#ifdef __DEMAND_PAGING__
1.5076 + DemandPaging::ThePager->UnlockRegion((TLinAddr)aBuffer,KPageSize,&Kern::CurrentProcess());
1.5077 +#endif
1.5078 + return KErrNone;
1.5079 + }
1.5080 +
1.5081 +
1.5082 +#ifdef __DEMAND_PAGING__
1.5083 +
1.5084 +TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
1.5085 + {
1.5086 + aPinObject = (TVirtualPinObject*) new DDemandPagingLock;
1.5087 + return aPinObject != NULL ? KErrNone : KErrNoMemory;
1.5088 + }
1.5089 +
1.5090 +TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
1.5091 + {
1.5092 + if (!DemandPaging::ThePager)
1.5093 + return KErrNone;
1.5094 +
1.5095 + if (!DemandPaging::ThePager->MayBePaged(aStart, aSize))
1.5096 + return KErrNone;
1.5097 +
1.5098 + DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
1.5099 + TInt r = lock->Alloc(aSize);
1.5100 + if (r != KErrNone)
1.5101 + return r;
1.5102 + lock->Lock(aThread, aStart, aSize);
1.5103 + return KErrNone;
1.5104 + }
1.5105 +
1.5106 +TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
1.5107 + {
1.5108 + aPinObject = 0;
1.5109 +
1.5110 + if (!DemandPaging::ThePager)
1.5111 + return KErrNone;
1.5112 + if (!DemandPaging::ThePager->MayBePaged(aStart, aSize))
1.5113 + return KErrNone;
1.5114 +
1.5115 + TInt r = CreateVirtualPinObject(aPinObject);
1.5116 + if (r != KErrNone)
1.5117 + return r;
1.5118 +
1.5119 + DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
1.5120 + r = lock->Alloc(aSize);
1.5121 + if (r != KErrNone)
1.5122 + return r;
1.5123 + lock->Lock(TheCurrentThread, aStart, aSize);
1.5124 + return KErrNone;
1.5125 + }
1.5126 +
1.5127 +void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
1.5128 + {
1.5129 + DDemandPagingLock* lock = (DDemandPagingLock*)aPinObject;
1.5130 + if (lock)
1.5131 + lock->Free();
1.5132 + }
1.5133 +
1.5134 +void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
1.5135 + {
1.5136 + DDemandPagingLock* lock = (DDemandPagingLock*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
1.5137 + if (lock)
1.5138 + lock->AsyncDelete();
1.5139 + }
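+
+// Illustrative lifecycle of the virtual pinning interface above (hypothetical caller;
+// device drivers normally reach these through kernel-side wrapper APIs rather than by
+// calling M:: directly).  'addr', 'size' and 'thread' are placeholders.
+//
+//     TVirtualPinObject* pin;
+//     if (M::CreateVirtualPinObject(pin) == KErrNone)
+//         {
+//         if (M::PinVirtualMemory(pin, addr, size, thread) == KErrNone)
+//             {
+//             // ... access the memory without risking paging faults ...
+//             M::UnpinVirtualMemory(pin);
+//             }
+//         M::DestroyVirtualPinObject(pin);
+//         }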
1.5140 +
1.5141 +#else
1.5142 +
1.5143 +class TVirtualPinObject
1.5144 + {
1.5145 + };
1.5146 +
1.5147 +TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
1.5148 + {
1.5149 + aPinObject = new TVirtualPinObject;
1.5150 + return aPinObject != NULL ? KErrNone : KErrNoMemory;
1.5151 + }
1.5152 +
1.5153 +TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr, TUint, DThread*)
1.5154 + {
1.5155 + __ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
1.5156 + (void)aPinObject;
1.5157 + return KErrNone;
1.5158 + }
1.5159 +
1.5160 +TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr, TUint)
1.5161 + {
1.5162 + aPinObject = 0;
1.5163 + return KErrNone;
1.5164 + }
1.5165 +
1.5166 +void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
1.5167 + {
1.5168 + __ASSERT_DEBUG(aPinObject, K::Fault(K::EVirtualPinObjectBad));
1.5169 + (void)aPinObject;
1.5170 + }
1.5171 +
1.5172 +void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
1.5173 + {
1.5174 + TVirtualPinObject* object = (TVirtualPinObject*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
1.5175 + if (object)
1.5176 + Kern::AsyncFree(object);
1.5177 + }
1.5178 +
1.5179 +#endif
1.5180 +
1.5181 +TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
1.5182 + {
1.5183 + return KErrNotSupported;
1.5184 + }
1.5185 +
1.5186 +TInt M::PinPhysicalMemory(TPhysicalPinObject*, TLinAddr, TUint, TBool, TUint32&, TUint32*, TUint32&, TUint&, DThread*)
1.5187 + {
1.5188 + K::Fault(K::EPhysicalPinObjectBad);
1.5189 + return KErrNone;
1.5190 + }
1.5191 +
1.5192 +void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
1.5193 + {
1.5194 + K::Fault(K::EPhysicalPinObjectBad);
1.5195 + }
1.5196 +
1.5197 +void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
1.5198 + {
1.5199 + K::Fault(K::EPhysicalPinObjectBad);
1.5200 + }
1.5201 +
1.5202 +
1.5203 +//
1.5204 +// Kernel map and pin (Not supported on the moving or multiple memory models).
1.5205 +//
1.5206 +
1.5207 +TInt M::CreateKernelMapObject(TKernelMapObject*&, TUint)
1.5208 + {
1.5209 + return KErrNotSupported;
1.5210 + }
1.5211 +
1.5212 +
1.5213 +TInt M::MapAndPinMemory(TKernelMapObject*, DThread*, TLinAddr, TUint, TUint, TLinAddr&, TPhysAddr*)
1.5214 + {
1.5215 + return KErrNotSupported;
1.5216 + }
1.5217 +
1.5218 +
1.5219 +void M::UnmapAndUnpinMemory(TKernelMapObject*)
1.5220 + {
1.5221 + }
1.5222 +
1.5223 +
1.5224 +void M::DestroyKernelMapObject(TKernelMapObject*&)
1.5225 + {
1.5226 + }
1.5227 +
1.5228 +
1.5229 +// Misc DPagingDevice methods
1.5230 +
1.5231 +EXPORT_C void DPagingDevice::NotifyIdle()
1.5232 + {
1.5233 + // Not used on this memory model
1.5234 + }
1.5235 +
1.5236 +EXPORT_C void DPagingDevice::NotifyBusy()
1.5237 + {
1.5238 + // Not used on this memory model
1.5239 + }
1.5240 +
1.5241 +EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* , TUint , TUint , TUint , TUint32 )
1.5242 + {
1.5243 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
1.5244 + return KErrNotSupported;
1.5245 + }
1.5246 +
1.5247 +EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* , TUint , TUint , TUint , TUint32 )
1.5248 + {
1.5249 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
1.5250 + return KErrNotSupported;
1.5251 + }
1.5252 +EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* , TUint , TUint , TUint , TUint32 )
1.5253 + {
1.5254 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
1.5255 + return KErrNotSupported;
1.5256 + }
1.5257 +
1.5258 +//
1.5259 +// Page moving methods
1.5260 +//
1.5261 +
1.5262 +/*
1.5263 + * Move a page from aOld to aNew safely, updating any references to the page
1.5264 + * stored elsewhere (such as page table entries). The destination page must
1.5265 + * already be allocated. If the move is successful, the source page will be
1.5266 + * freed and returned to the allocator.
1.5267 + *
1.5268 + * @pre RAM alloc mutex must be held.
1.5269 + * @pre Calling thread must be in a critical section.
1.5270 + * @pre Interrupts must be enabled.
1.5271 + * @pre Kernel must be unlocked.
1.5272 + * @pre No fast mutex can be held.
1.5273 + * @pre Call in a thread context.
1.5274 + */
1.5275 +TInt MmuBase::MovePage(TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
1.5276 + {
1.5277 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Defrag::DoMovePage");
1.5278 + __ASSERT_WITH_MESSAGE_MUTEX(MmuBase::RamAllocatorMutex, "Ram allocator mutex must be held", "Defrag::DoMovePage");
1.5279 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() old=%08x",aOld));
1.5280 + TInt r = KErrNotSupported;
1.5281 +#if defined(__CPU_X86) && defined(__MEMMODEL_MULTIPLE__)
1.5282 + return r;
1.5283 +#endif
1.5284 + aNew = KPhysAddrInvalid;
1.5285 + NKern::LockSystem();
1.5286 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aOld);
1.5287 + if (!pi)
1.5288 + {
1.5289 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page has no PageInfo"));
1.5290 + r = KErrArgument;
1.5291 + goto fail;
1.5292 + }
1.5293 + if (pi->LockCount())
1.5294 + {
1.5295 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: page is locked"));
1.5296 + goto fail;
1.5297 + }
1.5298 +
1.5299 + switch(pi->Type())
1.5300 + {
1.5301 + case SPageInfo::EUnused:
1.5302 + // Nothing to do - we allow this, though, in case the caller wasn't
1.5303 + // actually checking the free bitmap.
1.5304 + r = KErrNotFound;
1.5305 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage(): page unused"));
1.5306 + break;
1.5307 +
1.5308 + case SPageInfo::EChunk:
1.5309 + {
1.5310 + // It's a chunk - we need to investigate what it's used for.
1.5311 + DChunk* chunk = (DChunk*)pi->Owner();
1.5312 + TInt offset = pi->Offset()<<KPageShift;
1.5313 +
1.5314 + switch(chunk->iChunkType)
1.5315 + {
1.5316 + case EKernelData:
1.5317 + case EKernelMessage:
1.5318 + // The kernel data/bss/heap chunk pages are not moved as DMA may be accessing them.
1.5319 + __KTRACE_OPT(KMMU, Kern::Printf("MmuBase::MovePage() fails: kernel data"));
1.5320 + goto fail;
1.5321 +
1.5322 + case EKernelStack:
1.5323 + // The kernel thread stack chunk.
1.5324 + r = MoveKernelStackPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
1.5325 + __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: k stack r%d",r));
1.5326 + __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
1.5327 + goto released;
1.5328 +
1.5329 + case EKernelCode:
1.5330 + case EDll:
1.5331 + // The kernel code chunk, or a global user code chunk.
1.5332 + r = MoveCodeChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
1.5333 + __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: code chk r%d",r));
1.5334 + __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
1.5335 + goto released;
1.5336 +
1.5337 + case ERamDrive:
1.5338 + case EUserData:
1.5339 + case EDllData:
1.5340 + case EUserSelfModCode:
1.5341 + // A data chunk of some description.
1.5342 + r = MoveDataChunkPage(chunk, offset, aOld, aNew, aBlockZoneId, aBlockRest);
1.5343 + __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: data chk r%d",r));
1.5344 + __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
1.5345 + goto released;
1.5346 +
1.5347 + case ESharedKernelSingle:
1.5348 + case ESharedKernelMultiple:
1.5349 + case ESharedIo:
1.5350 + case ESharedKernelMirror:
1.5351 + // These chunk types cannot be moved
1.5352 + r = KErrNotSupported;
1.5353 + __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: shared r%d",r));
1.5354 + break;
1.5355 +
1.5356 + case EUserCode:
1.5357 + default:
1.5358 + // Unknown page type, or EUserCode.
1.5359 + // EUserCode is not used in the moving model, and on the multiple model
1.5360 + // it never owns any pages, so it should never be found via SPageInfo.
1.5361 + __KTRACE_OPT(KMMU,Kern::Printf("Defrag::DoMovePage fails: unknown chunk type %d",chunk->iChunkType));
1.5362 + Panic(EDefragUnknownChunkType);
1.5363 + }
1.5364 + }
1.5365 + break;
1.5366 +
1.5367 + case SPageInfo::ECodeSegMemory:
1.5368 + // It's a code segment memory section (multiple model only)
1.5369 + r = MoveCodeSegMemoryPage((DMemModelCodeSegMemory*)pi->Owner(), pi->Offset()<<KPageShift, aOld, aNew, aBlockZoneId, aBlockRest);
1.5370 + __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: codeseg r%d",r));
1.5371 + __NK_ASSERT_DEBUG(NKern::HeldFastMutex()==0);
1.5372 + goto released;
1.5373 +
1.5374 + case SPageInfo::EPagedROM:
1.5375 + case SPageInfo::EPagedCode:
1.5376 + case SPageInfo::EPagedData:
1.5377 + case SPageInfo::EPagedCache:
1.5378 + case SPageInfo::EPagedFree:
1.5379 + {// Demand-paged or RAM cache page, so attempt to discard it. Added for testing purposes only;
1.5380 + // in normal use ClearDiscardableFromZone() will already have removed RAM cache pages.
1.5381 + r = KErrInUse;
1.5382 + MmuBase& mmu = *MmuBase::TheMmu;
1.5383 + RamCacheBase& ramCache = *(mmu.iRamCache);
1.5384 + if (ramCache.IsPageDiscardable(*pi))
1.5385 + {
1.5386 + if (ramCache.DoDiscardPage(*pi, KRamZoneInvalidId, EFalse))
1.5387 + {// Successfully discarded the page.
1.5388 + r = KErrNone;
1.5389 + }
1.5390 + }
1.5391 + __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: paged r%d",r));
1.5392 + goto fail; // Goto fail to release the system lock.
1.5393 + }
1.5394 +
1.5395 +
1.5396 + case SPageInfo::EPageTable:
1.5397 + case SPageInfo::EPageDir:
1.5398 + case SPageInfo::EPtInfo:
1.5399 + case SPageInfo::EInvalid:
1.5400 + case SPageInfo::EFixed:
1.5401 + case SPageInfo::EShadow:
1.5402 + // These page types cannot be moved (or don't need to be moved)
1.5403 + r = KErrNotSupported;
1.5404 + __KTRACE_OPT(KMMU,if (r!=KErrNone) Kern::Printf("MmuBase::MovePage() fails: PT etc r%d",r));
1.5405 + break;
1.5406 +
1.5407 + default:
1.5408 + // Unknown page type
1.5409 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() fails: unknown page type %d",pi->Type()));
1.5410 + Panic(EDefragUnknownPageType);
1.5411 + }
1.5412 +
1.5413 +fail:
1.5414 + NKern::UnlockSystem();
1.5415 +released:
1.5416 + __KTRACE_OPT(KMMU,Kern::Printf("MmuBase::MovePage() returns %d",r));
1.5417 + return r;
1.5418 + }
1.5419 +
1.5420 +
1.5421 +TInt MmuBase::DiscardPage(TPhysAddr aAddr, TUint aBlockZoneId, TBool aBlockRest)
1.5422 + {
1.5423 + TInt r = KErrInUse;
1.5424 + NKern::LockSystem();
1.5425 + SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
1.5426 + if (pageInfo != NULL)
1.5427 + {// Allocatable page at this address so is it a discardable one?
1.5428 + if (iRamCache->IsPageDiscardable(*pageInfo))
1.5429 + {
1.5430 + // Discard this page and return it to the ram allocator
1.5431 + if (!iRamCache->DoDiscardPage(*pageInfo, aBlockZoneId, aBlockRest))
1.5432 + {// Couldn't discard the page.
1.5433 + if (aBlockRest)
1.5434 + {
1.5435 + __KTRACE_OPT(KMMU, Kern::Printf("ClearDiscardableFromZone: page discard fail addr %x", aAddr));
1.5436 + NKern::UnlockSystem();
1.5437 + return KErrNoMemory;
1.5438 + }
1.5439 + }
1.5440 + else
1.5441 + {// Page discarded successfully.
1.5442 + r = KErrNone;
1.5443 + }
1.5444 + }
1.5445 + }
1.5446 + NKern::UnlockSystem();
1.5447 + return r;
1.5448 + }
1.5449 +
1.5450 +TUint MmuBase::NumberOfFreeDpPages()
1.5451 + {
1.5452 + TUint free = 0;
1.5453 + if(iRamCache)
1.5454 + {
1.5455 + free = iRamCache->NumberOfFreePages();
1.5456 + }
1.5457 + return free;
1.5458 + }
1.5459 +
1.5460 +
1.5461 +EXPORT_C TInt Epoc::MovePhysicalPage(TPhysAddr aOld, TPhysAddr& aNew, TRamDefragPageToMove aPageToMove)
1.5462 + {
1.5463 + CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::MovePhysicalPage");
1.5464 + __KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() old=%08x pageToMove=%d",aOld,aPageToMove));
1.5465 +
1.5466 + switch(aPageToMove)
1.5467 + {
1.5468 + case ERamDefragPage_Physical:
1.5469 + break;
1.5470 + default:
1.5471 + return KErrNotSupported;
1.5472 + }
1.5473 +
1.5474 + MmuBase::Wait();
1.5475 + TInt r=M::MovePage(aOld,aNew,KRamZoneInvalidId,EFalse);
1.5476 + if (r!=KErrNone)
1.5477 + aNew = KPhysAddrInvalid;
1.5478 + MmuBase::Signal();
1.5479 + __KTRACE_OPT(KMMU,Kern::Printf("Epoc::MovePhysicalPage() returns %d",r));
1.5480 + return r;
1.5481 + }
1.5482 +
1.5483 +
1.5484 +TInt M::RamDefragFault(TAny* aExceptionInfo)
1.5485 + {
1.5486 + // If the mmu has been initialised then let it try processing the fault.
1.5487 + if(MmuBase::TheMmu)
1.5488 + return MmuBase::TheMmu->RamDefragFault(aExceptionInfo);
1.5489 + return KErrAbort;
1.5490 + }
1.5491 +
1.5492 +
1.5493 +void M::RamZoneClaimed(SZone* aZone)
1.5494 + {
1.5495 + // Lock each page. OK to traverse SPageInfo array as we know no unknown
1.5496 + // pages are in the zone.
1.5497 + SPageInfo* pageInfo = SPageInfo::FromPhysAddr(aZone->iPhysBase);
1.5498 + SPageInfo* pageInfoEnd = pageInfo + aZone->iPhysPages;
1.5499 + for (; pageInfo < pageInfoEnd; ++pageInfo)
1.5500 + {
1.5501 + NKern::LockSystem();
1.5502 + __NK_ASSERT_DEBUG(pageInfo->Type()==SPageInfo::EUnused);
1.5503 + pageInfo->Lock();
1.5504 + NKern::UnlockSystem();
1.5505 + }
1.5506 + // For the sake of platform security we have to clear the memory, e.g. the driver
1.5507 + // could assign it to a chunk visible to the user side. Set the LSB so ClearPages()
1.5508 + // knows this is a contiguous memory region.
1.5509 + Mmu::Get().ClearPages(aZone->iPhysPages, (TPhysAddr*)(aZone->iPhysBase|1));
1.5510 + }