1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mpager.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,2593 @@
1.4 +// Copyright (c) 2005-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +//
1.18 +
1.19 +#include "memmodel.h"
1.20 +#include "mm.h"
1.21 +#include "mmu.h"
1.22 +
1.23 +#include "mpager.h"
1.24 +#include "mrom.h"
1.25 +#include "mobject.h"
1.26 +#include "mmapping.h"
1.27 +#include "maddressspace.h"
1.28 +#include "mmanager.h"
1.29 +#include "mptalloc.h"
1.30 +#include "mpagearray.h"
1.31 +#include "mswap.h"
1.32 +#include "mthrash.h"
1.33 +#include "cache_maintenance.inl"
1.34 +
1.35 +
1.36 +const TUint16 KDefaultYoungOldRatio = 3;
1.37 +const TUint16 KDefaultMinPages = 256;
1.38 +#ifdef _USE_OLDEST_LISTS
1.39 +const TUint16 KDefaultOldOldestRatio = 3;
1.40 +#endif
1.41 +
1.42 +const TUint KMinOldPages = 1;
1.43 +
1.44 +/* On a 32 bit system without PAE we can't have more than 2^(32-KPageShift) pages.
1.45 + * Subtract 1 so the count doesn't overflow when converted to bytes.
1.46 + */
1.47 +const TUint KAbsoluteMaxPageCount = (1u<<(32-KPageShift))-1u;
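+
+// Editor's sketch (not in the original file): with the usual 4KB pages
+// (KPageShift == 12 is an assumption here), the limit works out as:
+//
+//   const unsigned KPageShift = 12;                              // assumed
+//   const unsigned KMaxPages  = (1u << (32 - KPageShift)) - 1u;  // 0xFFFFF
+//
+// KMaxPages << KPageShift == 0xFFFFF000, which still fits in 32 bits; without
+// the "-1" the byte count would be 0x100000000 and overflow a TUint32.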
1.48 +
1.49 +
1.50 +
1.51 +DPager ThePager;
1.52 +
1.53 +
1.54 +DPager::DPager()
1.55 + : iMinimumPageCount(0), iMaximumPageCount(0), iYoungOldRatio(0),
1.56 + iYoungCount(0),iOldCount(0),iNumberOfFreePages(0)
1.57 + {
1.58 + }
1.59 +
1.60 +
1.61 +void DPager::Init2()
1.62 + {
1.63 + TRACEB(("DPager::Init2()"));
1.64 +
1.65 +#if defined(__CPU_ARM)
1.66 +
1.67 +/** Minimum number of young pages the demand paging live list may have.
1.68 + Need at least 4 mapped pages to guarantee to be able to execute all ARM instructions,
1.69 + plus enough pages for 4 page tables to map those pages, plus enough pages for the
1.70 + page table info structures of those page tables.
1.71 + (Worst case is a Thumb-2 STM instruction with both instruction and data straddling chunk
1.72 + boundaries.)
1.73 +*/
1.74 + iMinYoungPages = 4 // pages
1.75 + +(4+KPtClusterSize-1)/KPtClusterSize // page table pages
1.76 + +(4+KPageTableInfosPerPage-1)/KPageTableInfosPerPage; // page table info pages
1.77 +
1.78 +#elif defined(__CPU_X86)
1.79 +
1.80 +/* Need at least 6 mapped pages to guarantee to be able to execute all X86 instructions,
1.81 + plus enough pages for 6 page tables to map those pages, plus enough pages for the
1.82 + page table info structures of those page tables.
1.83 + (Worst case is (?) a MOV [X],[Y] instruction with instruction, 'X' and 'Y' all
1.84 + straddling chunk boundaries.)
1.85 +*/
1.86 + iMinYoungPages = 6 // pages
1.87 + +(6+KPtClusterSize-1)/KPtClusterSize // page table pages
1.88 + +(6+KPageTableInfosPerPage-1)/KPageTableInfosPerPage; // page table info pages
1.89 +
1.90 +#else
1.91 +#error Unknown CPU
1.92 +#endif
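+
+// Editor's sketch of the round-up division idiom used above, with a
+// hypothetical constant (the real values come from the memory model headers):
+//
+//   const unsigned KPtClusterSize = 4;  // assumed value
+//   unsigned PageTablePagesFor(unsigned aPageTables)
+//       { return (aPageTables + KPtClusterSize - 1) / KPtClusterSize; }
+//
+// e.g. PageTablePagesFor(4) == 1 and PageTablePagesFor(5) == 2, so mapping the
+// minimum working set only costs a page or two of page tables.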
1.93 +
1.94 +#ifdef __SMP__
1.95 + // Adjust min page count so that all CPUs are guaranteed to make progress.
1.96 + // NOTE: Can't use NKern::NumberOfCpus here because we haven't finished booting yet and will
1.97 + // always have only one CPU running at this point...
1.98 +
1.99 + // TODO: Before we can enable this the base test configuration needs
1.100 + // updating to have a sufficient minimum page size...
1.101 + //
1.102 + // iMinYoungPages *= KMaxCpus;
1.103 +#endif
1.104 +
1.105 + // A minimum young/old ratio of 1 means that we need at least twice iMinYoungPages pages...
1.106 + iAbsoluteMinPageCount = 2*iMinYoungPages;
1.107 +
1.108 + __NK_ASSERT_DEBUG(KMinOldPages<=iAbsoluteMinPageCount/2);
1.109 +
1.110 + // initialise live list...
1.111 + TUint minimumPageCount = 0;
1.112 + TUint maximumPageCount = 0;
1.113 +
1.114 + SDemandPagingConfig config = TheRomHeader().iDemandPagingConfig;
1.115 +
1.116 + iMinimumPageCount = KDefaultMinPages;
1.117 + if(minimumPageCount)
1.118 + iMinimumPageCount = minimumPageCount;
1.119 + if(config.iMinPages)
1.120 + iMinimumPageCount = config.iMinPages;
1.121 + if(iMinimumPageCount<iAbsoluteMinPageCount)
1.122 + iMinimumPageCount = iAbsoluteMinPageCount;
1.123 + iInitMinimumPageCount = iMinimumPageCount;
1.124 +
1.125 + iMaximumPageCount = KMaxTInt;
1.126 + if(maximumPageCount)
1.127 + iMaximumPageCount = maximumPageCount;
1.128 + if(config.iMaxPages)
1.129 + iMaximumPageCount = config.iMaxPages;
1.130 + if (iMaximumPageCount > KAbsoluteMaxPageCount)
1.131 + iMaximumPageCount = KAbsoluteMaxPageCount;
1.132 + iInitMaximumPageCount = iMaximumPageCount;
1.133 +
1.134 + iYoungOldRatio = KDefaultYoungOldRatio;
1.135 + if(config.iYoungOldRatio)
1.136 + iYoungOldRatio = config.iYoungOldRatio;
1.137 + TInt ratioLimit = (iMinimumPageCount-KMinOldPages)/KMinOldPages;
1.138 + if(iYoungOldRatio>ratioLimit)
1.139 + iYoungOldRatio = ratioLimit;
1.140 +
1.141 +#ifdef _USE_OLDEST_LISTS
1.142 + iOldOldestRatio = KDefaultOldOldestRatio;
1.143 + if(config.iSpare[2])
1.144 + iOldOldestRatio = config.iSpare[2];
1.145 +#endif
1.146 +
1.147 + iMinimumPageLimit = (iMinYoungPages * (1 + iYoungOldRatio)) / iYoungOldRatio;
1.148 + if(iMinimumPageLimit<iAbsoluteMinPageCount)
1.149 + iMinimumPageLimit = iAbsoluteMinPageCount;
1.150 +
1.151 + TRACEB(("DPager::Init2() live list min=%d max=%d ratio=%d",iMinimumPageCount,iMaximumPageCount,iYoungOldRatio));
1.152 +
1.153 + if(iMaximumPageCount<iMinimumPageCount)
1.154 + __NK_ASSERT_ALWAYS(0);
1.155 +
1.156 + //
1.157 + // This routine doesn't acquire any mutexes because it should be called before the system
1.158 + // is fully up and running. I.e. called before another thread can preempt this.
1.159 + //
1.160 +
1.161 + // Calculate page counts
1.162 + TUint minOldAndOldest = iMinimumPageCount / (1 + iYoungOldRatio);
1.163 + if(minOldAndOldest < KMinOldPages)
1.164 + __NK_ASSERT_ALWAYS(0);
1.165 + if (iMinimumPageCount < minOldAndOldest)
1.166 + __NK_ASSERT_ALWAYS(0);
1.167 + TUint minYoung = iMinimumPageCount - minOldAndOldest;
1.168 + if(minYoung < iMinYoungPages)
1.169 + __NK_ASSERT_ALWAYS(0); // Need at least iMinYoungPages pages mapped to execute worst case CPU instruction
1.170 +#ifdef _USE_OLDEST_LISTS
1.171 + // There should always be enough old pages to allow the oldest lists ratio.
1.172 + TUint oldestCount = minOldAndOldest / (1 + iOldOldestRatio);
1.173 + if (!oldestCount)
1.174 + __NK_ASSERT_ALWAYS(0);
1.175 +#endif
1.176 + iNumberOfFreePages = 0;
1.177 + iNumberOfDirtyPages = 0;
1.178 +
1.179 + // Allocate RAM pages and put them all on the old list
1.180 + RamAllocLock::Lock();
1.181 + iYoungCount = 0;
1.182 + iOldCount = 0;
1.183 +#ifdef _USE_OLDEST_LISTS
1.184 + iOldestCleanCount = 0;
1.185 + iOldestDirtyCount = 0;
1.186 +#endif
1.187 + Mmu& m = TheMmu;
1.188 + for(TUint i=0; i<iMinimumPageCount; i++)
1.189 + {
1.190 + // Allocate a single page
1.191 + TPhysAddr pagePhys;
1.192 + TInt r = m.AllocRam(&pagePhys, 1,
1.193 + (Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe|Mmu::EAllocNoPagerReclaim),
1.194 + EPageDiscard);
1.195 + if(r!=KErrNone)
1.196 + __NK_ASSERT_ALWAYS(0);
1.197 + MmuLock::Lock();
1.198 + AddAsFreePage(SPageInfo::FromPhysAddr(pagePhys));
1.199 + MmuLock::Unlock();
1.200 + }
1.201 + RamAllocLock::Unlock();
1.202 +
1.203 +#ifdef _USE_OLDEST_LISTS
1.204 + TRACEB(("DPager::Init2() end with young=%d old=%d oldClean=%d oldDirty=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iOldestCleanCount,iOldestDirtyCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
1.205 +#else
1.206 + TRACEB(("DPager::Init2() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
1.207 +#endif
1.208 + }
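+
+// Editor's note: a worked example of the sizing arithmetic above, using the
+// defaults (KDefaultMinPages == 256, KDefaultYoungOldRatio == 3):
+//
+//   minOldAndOldest = 256 / (1 + 3) = 64    // old (and oldest) pages
+//   minYoung        = 256 - 64      = 192   // young pages
+//
+// i.e. the young list is kept roughly three times the size of the old
+// list(s), and Init2() asserts that minYoung >= iMinYoungPages.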
1.209 +
1.210 +
1.211 +#ifdef _DEBUG
1.212 +TBool DPager::CheckLists()
1.213 + {
1.214 +#if 0
1.215 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.216 + SDblQueLink* head = &iOldList.iA;
1.217 + TInt n = iOldCount;
1.218 + SDblQueLink* link = head;
1.219 + while(n--)
1.220 + {
1.221 + link = link->iNext;
1.222 + if(link==head)
1.223 + return false;
1.224 + }
1.225 + link = link->iNext;
1.226 + if(link!=head)
1.227 + return false;
1.228 +
1.229 + head = &iYoungList.iA;
1.230 + n = iYoungCount;
1.231 + link = head;
1.232 + while(n--)
1.233 + {
1.234 + link = link->iNext;
1.235 + if(link==head)
1.236 + return false;
1.237 + }
1.238 + link = link->iNext;
1.239 + if(link!=head)
1.240 + return false;
1.241 +
1.242 +// TRACEP(("DP: y=%d o=%d f=%d",iYoungCount,iOldCount,iNumberOfFreePages));
1.243 +#endif
1.244 +// TraceCounts();
1.245 + return true;
1.246 + }
1.247 +
1.248 +void DPager::TraceCounts()
1.249 + {
1.250 + TRACEP(("DP: y=%d o=%d f=%d min=%d max=%d ml=%d res=%d",
1.251 + iYoungCount,iOldCount,iNumberOfFreePages,iMinimumPageCount,
1.252 + iMaximumPageCount,iMinimumPageLimit,iReservePageCount));
1.253 + }
1.254 +
1.255 +#endif
1.256 +
1.257 +
1.258 +TBool DPager::HaveTooManyPages()
1.259 + {
1.260 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.261 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.262 + return iMinimumPageCount+iNumberOfFreePages > iMaximumPageCount;
1.263 + }
1.264 +
1.265 +
1.266 +TBool DPager::HaveMaximumPages()
1.267 + {
1.268 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.269 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.270 + return iMinimumPageCount+iNumberOfFreePages >= iMaximumPageCount;
1.271 + }
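+
+// Editor's note: both predicates above rely on the live list holding
+// iMinimumPageCount + iNumberOfFreePages pages in total. The same checks with
+// plain integers (illustrative only):
+//
+//   bool TooMany(unsigned aMin, unsigned aFree, unsigned aMax)
+//       { return aMin + aFree > aMax; }   // HaveTooManyPages()
+//   bool AtMax(unsigned aMin, unsigned aFree, unsigned aMax)
+//       { return aMin + aFree >= aMax; }  // HaveMaximumPages()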
1.272 +
1.273 +
1.274 +void DPager::AddAsYoungestPage(SPageInfo* aPageInfo)
1.275 + {
1.276 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.277 + __NK_ASSERT_DEBUG(CheckLists());
1.278 + __NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
1.279 +
1.280 + aPageInfo->SetPagedState(SPageInfo::EPagedYoung);
1.281 + iYoungList.AddHead(&aPageInfo->iLink);
1.282 + ++iYoungCount;
1.283 + }
1.284 +
1.285 +
1.286 +void DPager::AddAsFreePage(SPageInfo* aPageInfo)
1.287 + {
1.288 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.289 + __NK_ASSERT_DEBUG(CheckLists());
1.290 +
1.291 + __NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
1.292 + TheMmu.PageFreed(aPageInfo);
1.293 + __NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EUnpaged);
1.294 +
1.295 + // add as oldest page...
1.296 +#ifdef _USE_OLDEST_LISTS
1.297 + aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
1.298 + iOldestCleanList.Add(&aPageInfo->iLink);
1.299 + ++iOldestCleanCount;
1.300 +#else
1.301 + aPageInfo->SetPagedState(SPageInfo::EPagedOld);
1.302 + iOldList.Add(&aPageInfo->iLink);
1.303 + ++iOldCount;
1.304 +#endif
1.305 +
1.306 + Event(EEventPageInFree,aPageInfo);
1.307 + }
1.308 +
1.309 +
1.310 +TInt DPager::PageFreed(SPageInfo* aPageInfo)
1.311 + {
1.312 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.313 + __NK_ASSERT_DEBUG(CheckLists());
1.314 +
1.315 + switch(aPageInfo->PagedState())
1.316 + {
1.317 + case SPageInfo::EUnpaged:
1.318 + return KErrNotFound;
1.319 +
1.320 + case SPageInfo::EPagedYoung:
1.321 + __NK_ASSERT_DEBUG(iYoungCount);
1.322 + aPageInfo->iLink.Deque();
1.323 + --iYoungCount;
1.324 + break;
1.325 +
1.326 + case SPageInfo::EPagedOld:
1.327 + __NK_ASSERT_DEBUG(iOldCount);
1.328 + aPageInfo->iLink.Deque();
1.329 + --iOldCount;
1.330 + break;
1.331 +
1.332 +#ifdef _USE_OLDEST_LISTS
1.333 + case SPageInfo::EPagedOldestClean:
1.334 + __NK_ASSERT_DEBUG(iOldestCleanCount);
1.335 + aPageInfo->iLink.Deque();
1.336 + --iOldestCleanCount;
1.337 + break;
1.338 +
1.339 + case SPageInfo::EPagedOldestDirty:
1.340 + __NK_ASSERT_DEBUG(iOldestDirtyCount);
1.341 + aPageInfo->iLink.Deque();
1.342 + --iOldestDirtyCount;
1.343 + break;
1.344 +#endif
1.345 +
1.346 + case SPageInfo::EPagedPinned:
1.347 + // this can occur if a pinned mapping is being unmapped when memory is decommitted.
1.348 +	// the decommit will have succeeded because the mapping no longer vetoes this,
1.349 + // however the unpinning hasn't yet got around to changing the page state.
1.350 + // When the state change happens the page will be put back on the live list so
1.351 + // we don't have to do anything now...
1.352 + return KErrNone;
1.353 +
1.354 + case SPageInfo::EPagedPinnedMoved:
1.355 + // This page was pinned when it was moved but it has not been returned
1.356 + // to the free pool yet so make sure it is...
1.357 + aPageInfo->SetPagedState(SPageInfo::EUnpaged); // Must be unpaged before returned to free pool.
1.358 + return KErrNotFound;
1.359 +
1.360 + default:
1.361 + __NK_ASSERT_DEBUG(0);
1.362 + return KErrNotFound;
1.363 + }
1.364 +
1.365 + // Update the dirty page count as required...
1.366 + if (aPageInfo->IsDirty())
1.367 + SetClean(*aPageInfo);
1.368 +
1.369 + // add as oldest page...
1.370 +#ifdef _USE_OLDEST_LISTS
1.371 + aPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
1.372 + iOldestCleanList.Add(&aPageInfo->iLink);
1.373 + ++iOldestCleanCount;
1.374 +#else
1.375 + aPageInfo->SetPagedState(SPageInfo::EPagedOld);
1.376 + iOldList.Add(&aPageInfo->iLink);
1.377 + ++iOldCount;
1.378 +#endif
1.379 +
1.380 + return KErrNone;
1.381 + }
1.382 +
1.383 +
1.384 +extern TBool IsPageTableUnpagedRemoveAllowed(SPageInfo* aPageInfo);
1.385 +
1.386 +void DPager::RemovePage(SPageInfo* aPageInfo)
1.387 + {
1.388 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.389 + __NK_ASSERT_DEBUG(CheckLists());
1.390 +
1.391 + switch(aPageInfo->PagedState())
1.392 + {
1.393 + case SPageInfo::EPagedYoung:
1.394 + __NK_ASSERT_DEBUG(iYoungCount);
1.395 + aPageInfo->iLink.Deque();
1.396 + --iYoungCount;
1.397 + break;
1.398 +
1.399 + case SPageInfo::EPagedOld:
1.400 + __NK_ASSERT_DEBUG(iOldCount);
1.401 + aPageInfo->iLink.Deque();
1.402 + --iOldCount;
1.403 + break;
1.404 +
1.405 +#ifdef _USE_OLDEST_LISTS
1.406 + case SPageInfo::EPagedOldestClean:
1.407 + __NK_ASSERT_DEBUG(iOldestCleanCount);
1.408 + aPageInfo->iLink.Deque();
1.409 + --iOldestCleanCount;
1.410 + break;
1.411 +
1.412 + case SPageInfo::EPagedOldestDirty:
1.413 + __NK_ASSERT_DEBUG(iOldestDirtyCount);
1.414 + aPageInfo->iLink.Deque();
1.415 + --iOldestDirtyCount;
1.416 + break;
1.417 +#endif
1.418 +
1.419 + case SPageInfo::EPagedPinned:
1.420 + __NK_ASSERT_DEBUG(0);
1.421 + case SPageInfo::EUnpaged:
1.422 +#ifdef _DEBUG
1.423 + if (!IsPageTableUnpagedRemoveAllowed(aPageInfo))
1.424 + __NK_ASSERT_DEBUG(0);
1.425 + break;
1.426 +#endif
1.427 + default:
1.428 + __NK_ASSERT_DEBUG(0);
1.429 + return;
1.430 + }
1.431 +
1.432 + aPageInfo->SetPagedState(SPageInfo::EUnpaged);
1.433 + }
1.434 +
1.435 +
1.436 +void DPager::ReplacePage(SPageInfo& aOldPageInfo, SPageInfo& aNewPageInfo)
1.437 + {
1.438 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.439 + __NK_ASSERT_DEBUG(CheckLists());
1.440 +
1.441 + __NK_ASSERT_DEBUG(aOldPageInfo.PagedState() == aNewPageInfo.PagedState());
1.442 + switch(aOldPageInfo.PagedState())
1.443 + {
1.444 + case SPageInfo::EPagedYoung:
1.445 + case SPageInfo::EPagedOld:
1.446 + case SPageInfo::EPagedOldestClean:
1.447 + case SPageInfo::EPagedOldestDirty:
1.448 +		{// Update the list links to point to the new page.
1.449 + __NK_ASSERT_DEBUG(iYoungCount);
1.450 + SDblQueLink* prevLink = aOldPageInfo.iLink.iPrev;
1.451 +#ifdef _DEBUG
1.452 + SDblQueLink* nextLink = aOldPageInfo.iLink.iNext;
1.453 + __NK_ASSERT_DEBUG(prevLink == aOldPageInfo.iLink.iPrev);
1.454 + __NK_ASSERT_DEBUG(prevLink->iNext == &aOldPageInfo.iLink);
1.455 + __NK_ASSERT_DEBUG(nextLink == aOldPageInfo.iLink.iNext);
1.456 + __NK_ASSERT_DEBUG(nextLink->iPrev == &aOldPageInfo.iLink);
1.457 +#endif
1.458 + aOldPageInfo.iLink.Deque();
1.459 + aNewPageInfo.iLink.InsertAfter(prevLink);
1.460 + aOldPageInfo.SetPagedState(SPageInfo::EUnpaged);
1.461 +#ifdef _DEBUG
1.462 + __NK_ASSERT_DEBUG(prevLink == aNewPageInfo.iLink.iPrev);
1.463 + __NK_ASSERT_DEBUG(prevLink->iNext == &aNewPageInfo.iLink);
1.464 + __NK_ASSERT_DEBUG(nextLink == aNewPageInfo.iLink.iNext);
1.465 + __NK_ASSERT_DEBUG(nextLink->iPrev == &aNewPageInfo.iLink);
1.466 +#endif
1.467 + }
1.468 + break;
1.469 + case SPageInfo::EPagedPinned:
1.470 + // Mark the page as 'pinned moved' so that when the page moving invokes
1.471 + // Mmu::FreeRam() it returns this page to the free pool.
1.472 + aOldPageInfo.ClearPinCount();
1.473 + aOldPageInfo.SetPagedState(SPageInfo::EPagedPinnedMoved);
1.474 + break;
1.475 + case SPageInfo::EPagedPinnedMoved:
1.476 + // Shouldn't happen as the ram alloc mutex will be held for the
1.477 +		// entire time the page's paged state is EPagedPinnedMoved.
1.478 + case SPageInfo::EUnpaged:
1.479 + // Shouldn't happen as we only move pinned memory and unpinning will
1.480 + // atomically add the page to the live list and it can't be removed
1.481 + // from the live list without the ram alloc mutex.
1.482 + __NK_ASSERT_DEBUG(0);
1.483 + break;
1.484 + }
1.485 + }
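+
+// Editor's sketch of the splice performed above for pages on the live list:
+// the old page's link is dequeued and the new page's link is inserted at the
+// same position, so the replacement keeps the old page's age. With a minimal
+// doubly-linked node (illustrative, not the kernel's SDblQueLink):
+//
+//   struct Link { Link* iNext; Link* iPrev; };
+//   void Splice(Link& aOld, Link& aNew)
+//       {
+//       Link* prev = aOld.iPrev;          // remember the neighbour
+//       aOld.iPrev->iNext = aOld.iNext;   // deque aOld...
+//       aOld.iNext->iPrev = aOld.iPrev;
+//       aNew.iNext = prev->iNext;         // ...insert aNew after prev
+//       aNew.iPrev = prev;
+//       prev->iNext->iPrev = &aNew;
+//       prev->iNext = &aNew;
+//       }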
1.486 +
1.487 +
1.488 +SPageInfo* DPager::StealOldestPage()
1.489 + {
1.490 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.491 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.492 +
1.493 + for(;;)
1.494 + {
1.495 + // find oldest page in list...
1.496 + SDblQueLink* link;
1.497 +#ifdef _USE_OLDEST_LISTS
1.498 + if (iOldestCleanCount)
1.499 + {
1.500 + __NK_ASSERT_DEBUG(!iOldestCleanList.IsEmpty());
1.501 + link = iOldestCleanList.Last();
1.502 + }
1.503 + else if (iOldestDirtyCount)
1.504 + {
1.505 + __NK_ASSERT_DEBUG(!iOldestDirtyList.IsEmpty());
1.506 + link = iOldestDirtyList.Last();
1.507 + }
1.508 + else if (iOldCount)
1.509 +#else
1.510 + if (iOldCount)
1.511 +#endif
1.512 + {
1.513 + __NK_ASSERT_DEBUG(!iOldList.IsEmpty());
1.514 + link = iOldList.Last();
1.515 + }
1.516 + else
1.517 + {
1.518 + __NK_ASSERT_DEBUG(iYoungCount);
1.519 + __NK_ASSERT_ALWAYS(!iYoungList.IsEmpty());
1.520 + link = iYoungList.Last();
1.521 + }
1.522 + SPageInfo* pageInfo = SPageInfo::FromLink(link);
1.523 +
1.524 + // steal it from owning object...
1.525 + TInt r = StealPage(pageInfo);
1.526 +
1.527 + BalanceAges();
1.528 +
1.529 + if(r==KErrNone)
1.530 + return pageInfo; // done
1.531 +
1.532 + // loop back and try again
1.533 + }
1.534 + }
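+
+// Editor's note: the selection order above is the eviction priority, cheapest
+// victims first:
+//
+//   1. oldest clean pages  (no write-back needed; _USE_OLDEST_LISTS only)
+//   2. oldest dirty pages  (_USE_OLDEST_LISTS only)
+//   3. old pages
+//   4. young pages         (last resort)
+//
+// StealPage() may fail (e.g. the page was touched concurrently), in which
+// case the loop simply retries with whatever page is now oldest.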
1.535 +
1.536 +
1.537 +TInt DPager::RestrictPage(SPageInfo* aPageInfo, TRestrictPagesType aRestriction)
1.538 + {
1.539 + TRACE(("DPager::RestrictPage(0x%08x,%d)",aPageInfo,aRestriction));
1.540 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.541 +
1.542 + TInt r;
1.543 + if(aPageInfo->Type()==SPageInfo::EUnused)
1.544 + {
1.545 + // page was unused, so nothing to do...
1.546 + r = KErrNone;
1.547 + }
1.548 + else
1.549 + {
1.550 + // get memory object which owns the page...
1.551 + __NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EManaged);
1.552 + DMemoryObject* memory = aPageInfo->Owner();
1.553 + memory->Open();
1.554 +
1.555 + // try restricting access to page...
1.556 + r = memory->iManager->RestrictPage(memory,aPageInfo,aRestriction);
1.557 + __NK_ASSERT_DEBUG(r!=KErrNotSupported);
1.558 +
1.559 + // close memory object...
1.560 + MmuLock::Unlock();
1.561 + memory->AsyncClose();
1.562 + MmuLock::Lock();
1.563 + }
1.564 +
1.565 + TRACE(("DPager::RestrictPage returns %d",r));
1.566 + return r;
1.567 + }
1.568 +
1.569 +
1.570 +TInt DPager::StealPage(SPageInfo* aPageInfo)
1.571 + {
1.572 + TRACE(("DPager::StealPage(0x%08x)",aPageInfo));
1.573 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.574 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.575 +
1.576 + __UNLOCK_GUARD_START(MmuLock);
1.577 + RemovePage(aPageInfo);
1.578 +
1.579 + TInt r;
1.580 + if(aPageInfo->Type()==SPageInfo::EUnused)
1.581 + {
1.582 + // page was unused, so nothing to do...
1.583 + r = KErrNone;
1.584 + __UNLOCK_GUARD_END(MmuLock);
1.585 + MmuLock::Unlock();
1.586 + }
1.587 + else
1.588 + {
1.589 + // get memory object which owns the page...
1.590 + __NK_ASSERT_DEBUG(aPageInfo->Type()==SPageInfo::EManaged);
1.591 + DMemoryObject* memory = aPageInfo->Owner();
1.592 + memory->Open();
1.593 +
1.594 + // try and steal page from memory object...
1.595 + __UNLOCK_GUARD_END(MmuLock); // StealPage must be called without releasing the MmuLock
1.596 + r = memory->iManager->StealPage(memory,aPageInfo);
1.597 + __NK_ASSERT_DEBUG(r!=KErrNotSupported);
1.598 +
1.599 + // close memory object...
1.600 + MmuLock::Unlock();
1.601 + memory->AsyncClose();
1.602 + }
1.603 +
1.604 + MmuLock::Lock();
1.605 +
1.606 + if(r==KErrNone)
1.607 + Event(EEventPageOut,aPageInfo);
1.608 +
1.609 + TRACE(("DPager::StealPage returns %d",r));
1.610 + return r;
1.611 + }
1.612 +
1.613 +
1.614 +TInt DPager::DiscardPage(SPageInfo* aOldPageInfo, TUint aBlockZoneId, TBool aBlockRest)
1.615 + {
1.616 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.617 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.618 +
1.619 + TInt r;
1.620 + // If the page is pinned or if the page is dirty and a general defrag is being
1.621 + // performed then don't attempt to steal it.
1.622 + if (aOldPageInfo->Type() != SPageInfo::EUnused &&
1.623 + (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
1.624 + (aBlockRest && aOldPageInfo->IsDirty())))
1.625 + {// The page is pinned or is dirty and this is a general defrag so move the page.
1.626 + DMemoryObject* memory = aOldPageInfo->Owner();
1.627 + // Page must be managed if it is pinned or dirty.
1.628 + __NK_ASSERT_DEBUG(aOldPageInfo->Type()==SPageInfo::EManaged);
1.629 + __NK_ASSERT_DEBUG(memory);
1.630 + MmuLock::Unlock();
1.631 + TPhysAddr newAddr;
1.632 + return memory->iManager->MovePage(memory, aOldPageInfo, newAddr, aBlockZoneId, aBlockRest);
1.633 + }
1.634 +
1.635 + if (!iNumberOfFreePages)
1.636 + {
1.637 + // Allocate a new page for the live list as it has reached its minimum size.
1.638 + MmuLock::Unlock();
1.639 + SPageInfo* newPageInfo = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe),
1.640 + aBlockZoneId, aBlockRest);
1.641 + if (!newPageInfo)
1.642 + return KErrNoMemory;
1.643 +
1.644 + // Re-acquire the mmulock and re-check that the page is not pinned or dirty.
1.645 + MmuLock::Lock();
1.646 + if (aOldPageInfo->Type() != SPageInfo::EUnused &&
1.647 + (aOldPageInfo->PagedState() == SPageInfo::EPagedPinned ||
1.648 + (aBlockRest && aOldPageInfo->IsDirty())))
1.649 +			{// Page is now pinned or dirty so give up as it is in use.
1.650 + ReturnPageToSystem(*newPageInfo);
1.651 + MmuLock::Unlock();
1.652 + return KErrInUse;
1.653 + }
1.654 +
1.655 + // Attempt to steal the page
1.656 + r = StealPage(aOldPageInfo);
1.657 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.658 +
1.659 + if (r == KErrCompletion)
1.660 + {// This was a page table that has been freed but added to the
1.661 + // live list as a free page. Remove from live list and continue.
1.662 + __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
1.663 + RemovePage(aOldPageInfo);
1.664 + r = KErrNone;
1.665 + }
1.666 +
1.667 + if (r == KErrNone)
1.668 + {// Add the new page to the live list as discarding the old page
1.669 + // will reduce the live list below the minimum.
1.670 + AddAsFreePage(newPageInfo);
1.671 + // We've successfully discarded the page so return it to the free pool.
1.672 + ReturnPageToSystem(*aOldPageInfo);
1.673 + BalanceAges();
1.674 + }
1.675 + else
1.676 + {
1.677 + // New page not required so just return it to the system. This is safe as
1.678 +			// iNumberOfFreePages will have this page counted, but as it is not on the live list
1.679 +			// no one else can touch it.
1.680 + ReturnPageToSystem(*newPageInfo);
1.681 + }
1.682 + }
1.683 + else
1.684 + {
1.685 + // Attempt to steal the page
1.686 + r = StealPage(aOldPageInfo);
1.687 +
1.688 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.689 +
1.690 + if (r == KErrCompletion)
1.691 + {// This was a page table that has been freed but added to the
1.692 + // live list as a free page. Remove from live list.
1.693 + __NK_ASSERT_DEBUG(!aOldPageInfo->IsDirty());
1.694 + RemovePage(aOldPageInfo);
1.695 + r = KErrNone;
1.696 + }
1.697 +
1.698 + if (r == KErrNone)
1.699 + {// We've successfully discarded the page so return it to the free pool.
1.700 + ReturnPageToSystem(*aOldPageInfo);
1.701 + BalanceAges();
1.702 + }
1.703 + }
1.704 + MmuLock::Unlock();
1.705 + return r;
1.706 + }
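+
+// Editor's note: DiscardPage() shows the "allocate the replacement before
+// stealing" pattern: when iNumberOfFreePages is zero the live list is at its
+// minimum, so a new page is fetched first and the old one is only stolen once
+// the replacement is secured. A condensed sketch (names as in the function
+// above, error paths elided):
+//
+//   newPage = GetPageFromSystem(...);      // may fail -> KErrNoMemory
+//   // MmuLock was released: re-check the old page is still stealable
+//   r = StealPage(aOldPageInfo);
+//   if (r == KErrNone)
+//       AddAsFreePage(newPage);            // keeps the list at minimum size
+//   else
+//       ReturnPageToSystem(*newPage);      // replacement not needed after all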
1.707 +
1.708 +
1.709 +TBool DPager::TryGrowLiveList()
1.710 + {
1.711 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.712 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.713 +
1.714 + MmuLock::Unlock();
1.715 + SPageInfo* sparePage = GetPageFromSystem((Mmu::TRamAllocFlags)(EMemAttNormalCached|Mmu::EAllocNoWipe));
1.716 + MmuLock::Lock();
1.717 +
1.718 + if(!sparePage)
1.719 + return false;
1.720 +
1.721 + // add page to live list...
1.722 + AddAsFreePage(sparePage);
1.723 + return true;
1.724 + }
1.725 +
1.726 +
1.727 +SPageInfo* DPager::GetPageFromSystem(Mmu::TRamAllocFlags aAllocFlags, TUint aBlockZoneId, TBool aBlockRest)
1.728 + {
1.729 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.730 +
1.731 + TPhysAddr pagePhys;
1.732 + TInt r = TheMmu.AllocRam(&pagePhys, 1,
1.733 + (Mmu::TRamAllocFlags)(aAllocFlags|Mmu::EAllocNoPagerReclaim),
1.734 + EPageDiscard, aBlockZoneId, aBlockRest);
1.735 + if(r!=KErrNone)
1.736 + return NULL;
1.737 +
1.738 + MmuLock::Lock();
1.739 + ++iNumberOfFreePages;
1.740 + MmuLock::Unlock();
1.741 +
1.742 + return SPageInfo::FromPhysAddr(pagePhys);
1.743 + }
1.744 +
1.745 +
1.746 +void DPager::ReturnPageToSystem()
1.747 + {
1.748 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.749 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.750 +
1.751 + ReturnPageToSystem(*StealOldestPage());
1.752 + }
1.753 +
1.754 +
1.755 +void DPager::ReturnPageToSystem(SPageInfo& aPageInfo)
1.756 + {
1.757 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.758 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.759 +
1.760 + __NK_ASSERT_DEBUG(iNumberOfFreePages>0);
1.761 + --iNumberOfFreePages;
1.762 +
1.763 + MmuLock::Unlock();
1.764 +
1.765 + TPhysAddr pagePhys = aPageInfo.PhysAddr();
1.766 + TheMmu.FreeRam(&pagePhys, 1, EPageDiscard);
1.767 +
1.768 + MmuLock::Lock();
1.769 + }
1.770 +
1.771 +
1.772 +SPageInfo* DPager::PageInAllocPage(Mmu::TRamAllocFlags aAllocFlags)
1.773 + {
1.774 + SPageInfo* pageInfo;
1.775 + TPhysAddr pagePhys;
1.776 +
1.777 + RamAllocLock::Lock();
1.778 + MmuLock::Lock();
1.779 +
1.780 + // try getting a free page from our live list...
1.781 +#ifdef _USE_OLDEST_LISTS
1.782 + if (iOldestCleanCount)
1.783 + {
1.784 + pageInfo = SPageInfo::FromLink(iOldestCleanList.Last());
1.785 + if(pageInfo->Type()==SPageInfo::EUnused)
1.786 + goto get_oldest;
1.787 + }
1.788 +#else
1.789 + if(iOldCount)
1.790 + {
1.791 + pageInfo = SPageInfo::FromLink(iOldList.Last());
1.792 + if(pageInfo->Type()==SPageInfo::EUnused)
1.793 + goto get_oldest;
1.794 + }
1.795 +#endif
1.796 +
1.797 + // try getting a free page from the system pool...
1.798 + if(!HaveMaximumPages())
1.799 + {
1.800 + MmuLock::Unlock();
1.801 + pageInfo = GetPageFromSystem(aAllocFlags);
1.802 + if(pageInfo)
1.803 + goto done;
1.804 + MmuLock::Lock();
1.805 + }
1.806 +
1.807 + // as a last resort, steal a page from the live list...
1.808 +get_oldest:
1.809 +#ifdef _USE_OLDEST_LISTS
1.810 + __NK_ASSERT_ALWAYS(iOldestCleanCount|iOldestDirtyCount|iOldCount|iYoungCount);
1.811 +#else
1.812 + __NK_ASSERT_ALWAYS(iOldCount|iYoungCount);
1.813 +#endif
1.814 + pageInfo = StealOldestPage();
1.815 + MmuLock::Unlock();
1.816 +
1.817 + // make page state same as a freshly allocated page...
1.818 + pagePhys = pageInfo->PhysAddr();
1.819 + TheMmu.PagesAllocated(&pagePhys,1,aAllocFlags);
1.820 +
1.821 +done:
1.822 + RamAllocLock::Unlock();
1.823 + return pageInfo;
1.824 + }
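+
+// Editor's note: the fallback order in PageInAllocPage() is:
+//
+//   1. an unused page already sitting at the tail of the oldest (clean) list;
+//   2. a fresh page from the system pool, unless HaveMaximumPages();
+//   3. as a last resort, steal the oldest page on the live list.
+//
+// Only case 2 grows the paging cache; the other two recycle pages the pager
+// already owns.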
1.825 +
1.826 +
1.827 +TBool DPager::GetFreePages(TInt aNumPages)
1.828 + {
1.829 + TRACE(("DPager::GetFreePages(%d)",aNumPages));
1.830 +
1.831 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.832 +
1.833 + MmuLock::Lock();
1.834 + while(aNumPages>0 && (TInt)NumberOfFreePages()>=aNumPages)
1.835 + {
1.836 + ReturnPageToSystem();
1.837 + --aNumPages;
1.838 + }
1.839 + MmuLock::Unlock();
1.840 +
1.841 + TRACE(("DPager::GetFreePages returns %d",!aNumPages));
1.842 + return !aNumPages;
1.843 + }
1.844 +
1.845 +
1.846 +void DPager::DonatePages(TUint aCount, TPhysAddr* aPages)
1.847 + {
1.848 + TRACE(("DPager::DonatePages(%d,?)",aCount));
1.849 + __ASSERT_CRITICAL;
1.850 + RamAllocLock::Lock();
1.851 + MmuLock::Lock();
1.852 +
1.853 + TPhysAddr* end = aPages+aCount;
1.854 + while(aPages<end)
1.855 + {
1.856 + TPhysAddr pagePhys = *aPages++;
1.857 + if(RPageArray::State(pagePhys)!=RPageArray::ECommitted)
1.858 + continue; // page is not present
1.859 +
1.860 +#ifdef _DEBUG
1.861 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys&~KPageMask);
1.862 + __NK_ASSERT_DEBUG(pi);
1.863 +#else
1.864 + SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
1.865 +#endif
1.866 + switch(pi->PagedState())
1.867 + {
1.868 + case SPageInfo::EUnpaged:
1.869 + // Change the type of this page to discardable and
1.870 + // then add it to live list.
1.871 + // Only the DDiscardableMemoryManager should be invoking this and
1.872 + // its pages will be movable before they are donated.
1.873 + __NK_ASSERT_DEBUG(pi->Owner()->iManager->PageType() == EPageMovable);
1.874 + TheMmu.ChangePageType(pi, EPageMovable, EPageDiscard);
1.875 + break;
1.876 +
1.877 + case SPageInfo::EPagedYoung:
1.878 + case SPageInfo::EPagedOld:
1.879 +#ifdef _USE_OLDEST_LISTS
1.880 + case SPageInfo::EPagedOldestDirty:
1.881 + case SPageInfo::EPagedOldestClean:
1.882 +#endif
1.883 +			continue; // discard has already been allowed
1.884 +
1.885 + case SPageInfo::EPagedPinned:
1.886 + __NK_ASSERT_DEBUG(0);
1.887 + default:
1.888 + __NK_ASSERT_DEBUG(0);
1.889 + continue;
1.890 + }
1.891 +
1.892 + // put page on live list...
1.893 + AddAsYoungestPage(pi);
1.894 + ++iNumberOfFreePages;
1.895 +
1.896 + Event(EEventPageDonate,pi);
1.897 +
1.898 + // re-balance live list...
1.899 + RemoveExcessPages();
1.900 + BalanceAges();
1.901 + }
1.902 +
1.903 + MmuLock::Unlock();
1.904 + RamAllocLock::Unlock();
1.905 + }
1.906 +
1.907 +
1.908 +TInt DPager::ReclaimPages(TUint aCount, TPhysAddr* aPages)
1.909 + {
1.910 + TRACE(("DPager::ReclaimPages(%d,?)",aCount));
1.911 + __ASSERT_CRITICAL;
1.912 + RamAllocLock::Lock();
1.913 + MmuLock::Lock();
1.914 +
1.915 + TInt r = KErrNone;
1.916 + TPhysAddr* end = aPages+aCount;
1.917 + while(aPages<end)
1.918 + {
1.919 + TPhysAddr pagePhys = *aPages++;
1.920 + TBool changeType = EFalse;
1.921 +
1.922 + if(RPageArray::State(pagePhys)!=RPageArray::ECommitted)
1.923 + {
1.924 + r = KErrNotFound; // too late, page has gone
1.925 + continue;
1.926 + }
1.927 +
1.928 +#ifdef _DEBUG
1.929 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys&~KPageMask);
1.930 + __NK_ASSERT_DEBUG(pi);
1.931 +#else
1.932 + SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
1.933 +#endif
1.934 + switch(pi->PagedState())
1.935 + {
1.936 + case SPageInfo::EUnpaged:
1.937 +			continue; // discard has already been disallowed
1.938 +
1.939 + case SPageInfo::EPagedYoung:
1.940 + case SPageInfo::EPagedOld:
1.941 +#ifdef _USE_OLDEST_LISTS
1.942 + case SPageInfo::EPagedOldestClean:
1.943 + case SPageInfo::EPagedOldestDirty:
1.944 +#endif
1.945 + changeType = ETrue;
1.946 + break; // remove from live list
1.947 +
1.948 + case SPageInfo::EPagedPinned:
1.949 + __NK_ASSERT_DEBUG(0);
1.950 + default:
1.951 + __NK_ASSERT_DEBUG(0);
1.952 + break;
1.953 + }
1.954 +
1.955 + // check paging list has enough pages before we remove one...
1.956 + if(iNumberOfFreePages<1)
1.957 + {
1.958 + // need more pages so get a page from the system...
1.959 + if(!TryGrowLiveList())
1.960 + {
1.961 + // out of memory...
1.962 + r = KErrNoMemory;
1.963 + break;
1.964 + }
1.965 + // retry the page reclaim...
1.966 + --aPages;
1.967 + continue;
1.968 + }
1.969 +
1.970 + if (changeType)
1.971 +			{// Change the type of this page to movable, but wait until any retries
1.972 +			// have been attempted, as we can't change a page's type twice.
1.973 + // Only the DDiscardableMemoryManager should be invoking this and
1.974 + // its pages should be movable once they are reclaimed.
1.975 + __NK_ASSERT_DEBUG(pi->Owner()->iManager->PageType() == EPageMovable);
1.976 + TheMmu.ChangePageType(pi, EPageDiscard, EPageMovable);
1.977 + }
1.978 +
1.979 + // remove page from paging list...
1.980 + __NK_ASSERT_DEBUG(iNumberOfFreePages>0);
1.981 + --iNumberOfFreePages;
1.982 + RemovePage(pi);
1.983 +
1.984 + Event(EEventPageReclaim,pi);
1.985 +
1.986 + // re-balance live list...
1.987 + BalanceAges();
1.988 + }
1.989 +
1.990 + // we may have added a spare free page to the live list without removing one,
1.991 + // this could cause us to have too many pages, so deal with this...
1.992 + RemoveExcessPages();
1.993 +
1.994 + MmuLock::Unlock();
1.995 + RamAllocLock::Unlock();
1.996 + return r;
1.997 + }
1.998 +
1.999 +
1.1000 +TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2);
1.1001 +
1.1002 +void DPager::Init3()
1.1003 + {
1.1004 + TRACEB(("DPager::Init3()"));
1.1005 + TheRomMemoryManager->Init3();
1.1006 + TheDataPagedMemoryManager->Init3();
1.1007 + TheCodePagedMemoryManager->Init3();
1.1008 + TInt r = Kern::AddHalEntry(EHalGroupVM, VMHalFunction, 0);
1.1009 + __NK_ASSERT_ALWAYS(r==KErrNone);
1.1010 + }
1.1011 +
1.1012 +
1.1013 +void DPager::Fault(TFault aFault)
1.1014 + {
1.1015 + Kern::Fault("DPager",aFault);
1.1016 + }
1.1017 +
1.1018 +
1.1019 +void DPager::BalanceAges()
1.1020 + {
1.1021 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1022 + TBool restrictPage = EFalse;
1.1023 + SPageInfo* pageInfo = NULL;
1.1024 +#ifdef _USE_OLDEST_LISTS
1.1025 + TUint oldestCount = iOldestCleanCount + iOldestDirtyCount;
1.1026 + if((iOldCount + oldestCount) * iYoungOldRatio < iYoungCount)
1.1027 +#else
1.1028 + if (iOldCount * iYoungOldRatio < iYoungCount)
1.1029 +#endif
1.1030 + {
1.1031 + // Need more old pages so make one young page into an old page...
1.1032 + __NK_ASSERT_DEBUG(!iYoungList.IsEmpty());
1.1033 + __NK_ASSERT_DEBUG(iYoungCount);
1.1034 + SDblQueLink* link = iYoungList.Last()->Deque();
1.1035 + --iYoungCount;
1.1036 +
1.1037 + pageInfo = SPageInfo::FromLink(link);
1.1038 + pageInfo->SetPagedState(SPageInfo::EPagedOld);
1.1039 +
1.1040 + iOldList.AddHead(link);
1.1041 + ++iOldCount;
1.1042 +
1.1043 + Event(EEventPageAged,pageInfo);
1.1044 + // Delay restricting the page until it is safe to release the MmuLock.
1.1045 + restrictPage = ETrue;
1.1046 + }
1.1047 +
1.1048 +#ifdef _USE_OLDEST_LISTS
1.1049 + // Check we have enough oldest pages.
1.1050 + if (oldestCount * iOldOldestRatio < iOldCount)
1.1051 + {
1.1052 + __NK_ASSERT_DEBUG(!iOldList.IsEmpty());
1.1053 + __NK_ASSERT_DEBUG(iOldCount);
1.1054 + SDblQueLink* link = iOldList.Last()->Deque();
1.1055 + --iOldCount;
1.1056 +
1.1057 + SPageInfo* oldestPageInfo = SPageInfo::FromLink(link);
1.1058 + if (oldestPageInfo->IsDirty())
1.1059 + {
1.1060 + oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestDirty);
1.1061 + iOldestDirtyList.AddHead(link);
1.1062 + ++iOldestDirtyCount;
1.1063 + Event(EEventPageAgedDirty,oldestPageInfo);
1.1064 + }
1.1065 + else
1.1066 + {
1.1067 + oldestPageInfo->SetPagedState(SPageInfo::EPagedOldestClean);
1.1068 + iOldestCleanList.AddHead(link);
1.1069 + ++iOldestCleanCount;
1.1070 + Event(EEventPageAgedClean,oldestPageInfo);
1.1071 + }
1.1072 + }
1.1073 +#endif
1.1074 + if (restrictPage)
1.1075 + {
1.1076 + // Make the recently aged old page inaccessible. This is done last as it
1.1077 + // will release the MmuLock and therefore the page counts may otherwise change.
1.1078 + RestrictPage(pageInfo,ERestrictPagesNoAccessForOldPage);
1.1079 + }
1.1080 + }
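+
+// Editor's note: a worked example of the rebalancing condition, with the
+// default ratios (young/old == 3, old/oldest == 3):
+//
+//   young=196, old=40, oldest=20  ->  (40 + 20) * 3 == 180 < 196
+//
+// so one young page is aged onto the old list; 20 * 3 == 60 is not below the
+// old count, so the oldest lists are left alone. Ageing a page also restricts
+// it to no-access, so a later touch faults and rejuvenates it.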
1.1081 +
1.1082 +
1.1083 +void DPager::RemoveExcessPages()
1.1084 + {
1.1085 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.1086 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1087 + while(HaveTooManyPages())
1.1088 + ReturnPageToSystem();
1.1089 + }
1.1090 +
1.1091 +
1.1092 +void DPager::RejuvenatePageTable(TPte* aPt)
1.1093 + {
1.1094 + SPageInfo* pi = SPageInfo::FromPhysAddr(Mmu::PageTablePhysAddr(aPt));
1.1095 +
1.1096 + SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPt);
1.1097 + if(!pti->IsDemandPaged())
1.1098 + {
1.1099 + __NK_ASSERT_DEBUG(pi->PagedState()==SPageInfo::EUnpaged);
1.1100 + return;
1.1101 + }
1.1102 +
1.1103 + TRACE2(("DP: %O Rejuvenate PT 0x%08x 0x%08x",TheCurrentThread,pi->PhysAddr(),aPt));
1.1104 + switch(pi->PagedState())
1.1105 + {
1.1106 + case SPageInfo::EPagedYoung:
1.1107 + case SPageInfo::EPagedOld:
1.1108 +#ifdef _USE_OLDEST_LISTS
1.1109 + case SPageInfo::EPagedOldestClean:
1.1110 + case SPageInfo::EPagedOldestDirty:
1.1111 +#endif
1.1112 + RemovePage(pi);
1.1113 + AddAsYoungestPage(pi);
1.1114 + BalanceAges();
1.1115 + break;
1.1116 +
1.1117 + case SPageInfo::EUnpaged:
1.1118 + AddAsYoungestPage(pi);
1.1119 + BalanceAges();
1.1120 + break;
1.1121 +
1.1122 + case SPageInfo::EPagedPinned:
1.1123 + break;
1.1124 +
1.1125 + default:
1.1126 + __NK_ASSERT_DEBUG(0);
1.1127 + break;
1.1128 + }
1.1129 + }
1.1130 +
1.1131 +TInt DPager::PteAndInfoFromLinAddr( TInt aOsAsid, TLinAddr aAddress, DMemoryMappingBase* aMapping,
1.1132 + TUint aMapInstanceCount, TPte*& aPte, SPageInfo*& aPageInfo)
1.1133 + {
1.1134 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1135 +
1.1136 + // Verify the mapping is still mapped and has not been reused.
1.1137 + if (aMapInstanceCount != aMapping->MapInstanceCount() || aMapping->BeingDetached())
1.1138 + return KErrAbort;
1.1139 +
1.1140 + aPte = Mmu::SafePtePtrFromLinAddr(aAddress,aOsAsid);
1.1141 + if(!aPte)
1.1142 + return KErrNotFound;
1.1143 +
1.1144 + TPte pte = *aPte;
1.1145 + if(pte==KPteUnallocatedEntry)
1.1146 + return KErrNotFound;
1.1147 +
1.1148 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pte & ~KPageMask);
1.1149 + if(!pi)
1.1150 + return KErrNotFound;
1.1151 + aPageInfo = pi;
1.1152 +
1.1153 + return KErrNone;
1.1154 + }
1.1155 +
1.1156 +TInt DPager::TryRejuvenate( TInt aOsAsid, TLinAddr aAddress, TUint aAccessPermissions, TLinAddr aPc,
1.1157 + DMemoryMappingBase* aMapping, TUint aMapInstanceCount, DThread* aThread,
1.1158 + TAny* aExceptionInfo)
1.1159 + {
1.1160 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1161 +
1.1162 + SPageInfo* pi;
1.1163 + TPte* pPte;
1.1164 + TPte pte;
1.1165 + TInt r = PteAndInfoFromLinAddr(aOsAsid, aAddress, aMapping, aMapInstanceCount, pPte, pi);
1.1166 + if (r != KErrNone)
1.1167 + {
1.1168 + if (aThread->IsRealtime())
1.1169 +		{// This thread is real time so it shouldn't be accessing paged-out paged memory
1.1170 + // unless there is a paging trap.
1.1171 + MmuLock::Unlock();
1.1172 + // Ensure that we abort when the thread is not allowed to access paged out pages.
1.1173 + if (CheckRealtimeThreadFault(aThread, aExceptionInfo) != KErrNone)
1.1174 + r = KErrAbort;
1.1175 + MmuLock::Lock();
1.1176 + }
1.1177 + return r;
1.1178 + }
1.1179 + pte = *pPte;
1.1180 + SPageInfo::TType type = pi->Type();
1.1181 + SPageInfo::TPagedState state = pi->PagedState();
1.1182 +
1.1183 + if (aThread->IsRealtime() &&
1.1184 + state != SPageInfo::EPagedPinned &&
1.1185 + state != SPageInfo::EPagedPinnedMoved)
1.1186 + {// This thread is real time so it shouldn't be accessing unpinned paged memory
1.1187 + // unless there is a paging trap.
1.1188 + MmuLock::Unlock();
1.1189 + r = CheckRealtimeThreadFault(aThread, aExceptionInfo);
1.1190 + MmuLock::Lock();
1.1191 + if (r != KErrNone)
1.1192 + return r;
1.1193 +		// We had to release the MmuLock so we have to reverify the status of the page and mappings.
1.1194 + r = PteAndInfoFromLinAddr(aOsAsid, aAddress, aMapping, aMapInstanceCount, pPte, pi);
1.1195 + if (r != KErrNone)
1.1196 + return r;
1.1197 + pte = *pPte;
1.1198 + type = pi->Type();
1.1199 + state = pi->PagedState();
1.1200 + }
1.1201 +
1.1202 + if (type != SPageInfo::EManaged)
1.1203 + return KErrNotFound;
1.1204 +
1.1205 + if(state==SPageInfo::EUnpaged)
1.1206 + return KErrNotFound;
1.1207 +
1.1208 + DMemoryObject* memory = pi->Owner();
1.1209 + TUint index = pi->Index();
1.1210 +
1.1211 + TPhysAddr page = memory->iPages.Page(index);
1.1212 + if(!RPageArray::IsPresent(page))
1.1213 + return KErrNotFound;
1.1214 +
1.1215 + TPhysAddr physAddr = pi->PhysAddr();
1.1216 + if ((page^physAddr) >= (TPhysAddr)KPageSize)
1.1217 + {// Page array entry should contain same physical address as PTE unless the
1.1218 +		// page has been or is being moved and this mapping accessed the page.
1.1219 + // Get the page info for the page that we should be using.
1.1220 + physAddr = page & ~KPageMask;
1.1221 + pi = SPageInfo::SafeFromPhysAddr(physAddr);
1.1222 + if(!pi)
1.1223 + return KErrNotFound;
1.1224 +
1.1225 + type = pi->Type();
1.1226 + if (type!=SPageInfo::EManaged)
1.1227 + return KErrNotFound;
1.1228 +
1.1229 + state = pi->PagedState();
1.1230 + if(state==SPageInfo::EUnpaged)
1.1231 + return KErrNotFound;
1.1232 +
1.1233 + memory = pi->Owner();
1.1234 + index = pi->Index();
1.1235 +
1.1236 + // Update pte to point to the correct physical address for this memory object's page.
1.1237 + pte = (pte & KPageMask) | physAddr;
1.1238 + }
1.1239 +
1.1240 + if(aAccessPermissions&EReadWrite)
1.1241 + {// The mapping that took the fault permits writes and is still attached
1.1242 + // to the memory object therefore the object can't be read only.
1.1243 + __NK_ASSERT_DEBUG(!memory->IsReadOnly());
1.1244 + SetWritable(*pi);
1.1245 + }
1.1246 +
1.1247 + pte = Mmu::MakePteAccessible(pte,aAccessPermissions&EReadWrite);
1.1248 + TRACE2(("!PTE %x=%x",pPte,pte));
1.1249 + *pPte = pte;
1.1250 + CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1.1251 + InvalidateTLBForPage((aAddress&~KPageMask)|aOsAsid);
1.1252 +
1.1253 + Event(EEventPageRejuvenate,pi,aPc,aAddress,aAccessPermissions);
1.1254 +
1.1255 + TBool balance = false;
1.1256 +#ifdef _USE_OLDEST_LISTS
1.1257 + if( state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
1.1258 + state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
1.1259 +#else
1.1260 + if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
1.1261 +#endif
1.1262 + {
1.1263 + RemovePage(pi);
1.1264 + AddAsYoungestPage(pi);
1.1265 + // delay BalanceAges because we don't want to release MmuLock until after
1.1266 + // RejuvenatePageTable has chance to look at the page table page...
1.1267 + balance = true;
1.1268 + }
1.1269 + else
1.1270 + {// Clear the modifier so that if this page is being moved then this
1.1271 + // access is detected. For non-pinned pages the modifier is cleared
1.1272 + // by RemovePage().
1.1273 + __NK_ASSERT_DEBUG(state==SPageInfo::EPagedPinned);
1.1274 + pi->SetModifier(0);
1.1275 + }
1.1276 +
1.1277 + RejuvenatePageTable(pPte);
1.1278 +
1.1279 + if(balance)
1.1280 + BalanceAges();
1.1281 +
1.1282 + return KErrNone;
1.1283 + }
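+
+// Editor's sketch of the PTE fix-up idiom used above when the page array and
+// the PTE disagree because the page was moved: the low bits of a small-page
+// PTE hold permission/attribute flags and the high bits the physical frame,
+// so
+//
+//   pte = (pte & KPageMask) | physAddr;   // keep the flags, swap the frame
+//
+// assuming, as this file does, that physAddr is page aligned and KPageMask
+// covers exactly the non-address bits.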
1.1284 +
1.1285 +
1.1286 +TInt DPager::PageInAllocPages(TPhysAddr* aPages, TUint aCount, Mmu::TRamAllocFlags aAllocFlags)
1.1287 + {
1.1288 + TUint n = 0;
1.1289 + while(n<aCount)
1.1290 + {
1.1291 + SPageInfo* pi = PageInAllocPage(aAllocFlags);
1.1292 + if(!pi)
1.1293 + goto fail;
1.1294 + aPages[n++] = pi->PhysAddr();
1.1295 + }
1.1296 + return KErrNone;
1.1297 +fail:
1.1298 + PageInFreePages(aPages,n);
1.1299 + return KErrNoMemory;
1.1300 + }
1.1301 +
1.1302 +
1.1303 +void DPager::PageInFreePages(TPhysAddr* aPages, TUint aCount)
1.1304 + {
1.1305 + while(aCount--)
1.1306 + {
1.1307 + MmuLock::Lock();
1.1308 + SPageInfo* pi = SPageInfo::FromPhysAddr(aPages[aCount]);
1.1309 + switch(pi->PagedState())
1.1310 + {
1.1311 + case SPageInfo::EPagedYoung:
1.1312 + case SPageInfo::EPagedOld:
1.1313 +#ifdef _USE_OLDEST_LISTS
1.1314 + case SPageInfo::EPagedOldestClean:
1.1315 + case SPageInfo::EPagedOldestDirty:
1.1316 +#endif
1.1317 + RemovePage(pi);
1.1318 + // fall through...
1.1319 + case SPageInfo::EUnpaged:
1.1320 + AddAsFreePage(pi);
1.1321 + break;
1.1322 +
1.1323 + case SPageInfo::EPagedPinned:
1.1324 + __NK_ASSERT_DEBUG(0);
1.1325 + break;
1.1326 + default:
1.1327 + __NK_ASSERT_DEBUG(0);
1.1328 + break;
1.1329 + }
1.1330 + MmuLock::Unlock();
1.1331 + }
1.1332 + }
1.1333 +
1.1334 +
1.1335 +void DPager::PagedInUnneeded(SPageInfo* aPageInfo)
1.1336 + {
1.1337 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1338 + Event(EEventPageInUnneeded,aPageInfo);
1.1339 + AddAsFreePage(aPageInfo);
1.1340 + }
1.1341 +
1.1342 +
1.1343 +void DPager::PagedIn(SPageInfo* aPageInfo)
1.1344 + {
1.1345 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1346 + switch(aPageInfo->PagedState())
1.1347 + {
1.1348 + case SPageInfo::EPagedYoung:
1.1349 + case SPageInfo::EPagedOld:
1.1350 +#ifdef _USE_OLDEST_LISTS
1.1351 + case SPageInfo::EPagedOldestClean:
1.1352 + case SPageInfo::EPagedOldestDirty:
1.1353 +#endif
1.1354 + RemovePage(aPageInfo);
1.1355 + AddAsYoungestPage(aPageInfo);
1.1356 + BalanceAges();
1.1357 + break;
1.1358 +
1.1359 + case SPageInfo::EUnpaged:
1.1360 + AddAsYoungestPage(aPageInfo);
1.1361 + BalanceAges();
1.1362 + break;
1.1363 +
1.1364 + case SPageInfo::EPagedPinned:
1.1365 + // Clear the modifier so that if this page is being moved then this
1.1366 + // access is detected. For non-pinned pages the modifier is cleared by RemovePage().
1.1367 + aPageInfo->SetModifier(0);
1.1368 + break;
1.1369 +
1.1370 + default:
1.1371 + __NK_ASSERT_DEBUG(0);
1.1372 + break;
1.1373 + }
1.1374 + }
1.1375 +
1.1376 +
1.1377 +void DPager::PagedInPinned(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
1.1378 + {
1.1379 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1380 + Pin(aPageInfo,aPinArgs);
1.1381 + }
1.1382 +
1.1383 +
1.1384 +void DPager::Pin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
1.1385 + {
1.1386 + __ASSERT_CRITICAL;
1.1387 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1388 + __NK_ASSERT_DEBUG(aPinArgs.HaveSufficientPages(1));
1.1389 +
1.1390 + aPageInfo->IncPinCount();
1.1391 + Event(EEventPagePin,aPageInfo);
1.1392 +
1.1393 + // remove page from live list...
1.1394 + switch(aPageInfo->PagedState())
1.1395 + {
1.1396 + case SPageInfo::EPagedYoung:
1.1397 + __NK_ASSERT_DEBUG(iYoungCount);
1.1398 + aPageInfo->iLink.Deque();
1.1399 + --iYoungCount;
1.1400 + __NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
1.1401 + break;
1.1402 +
1.1403 + case SPageInfo::EPagedOld:
1.1404 + __NK_ASSERT_DEBUG(iOldCount);
1.1405 + aPageInfo->iLink.Deque();
1.1406 + --iOldCount;
1.1407 + __NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
1.1408 + break;
1.1409 +
1.1410 +#ifdef _USE_OLDEST_LISTS
1.1411 + case SPageInfo::EPagedOldestClean:
1.1412 + __NK_ASSERT_DEBUG(iOldestCleanCount);
1.1413 + aPageInfo->iLink.Deque();
1.1414 + --iOldestCleanCount;
1.1415 + __NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
1.1416 + break;
1.1417 +
1.1418 + case SPageInfo::EPagedOldestDirty:
1.1419 + __NK_ASSERT_DEBUG(iOldestDirtyCount);
1.1420 + aPageInfo->iLink.Deque();
1.1421 + --iOldestDirtyCount;
1.1422 + __NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
1.1423 + break;
1.1424 +#endif
1.1425 +
1.1426 + case SPageInfo::EPagedPinned:
1.1427 + // nothing more to do...
1.1428 + __NK_ASSERT_DEBUG(aPageInfo->PinCount()>1);
1.1429 + return;
1.1430 +
1.1431 + case SPageInfo::EUnpaged:
1.1432 + __NK_ASSERT_DEBUG(aPageInfo->PinCount()==1);
1.1433 +		TRACE2(("DPager::Pin page was unpaged"));
1.1434 + // This could be a page in the process of being stolen.
1.1435 +		// Could also be a page for storing page table infos, which aren't necessarily
1.1436 + // on the live list.
1.1437 + break;
1.1438 +
1.1439 + default:
1.1440 + __NK_ASSERT_DEBUG(0);
1.1441 + return;
1.1442 + }
1.1443 +
1.1444 + // page has now been removed from the live list and is pinned...
1.1445 + aPageInfo->SetPagedState(SPageInfo::EPagedPinned);
1.1446 +
1.1447 + if(aPinArgs.iReplacementPages==TPinArgs::EUseReserveForPinReplacementPages)
1.1448 + {
1.1449 +		// pinned page counts as coming from the reserve pool...
1.1450 + aPageInfo->SetPinnedReserve();
1.1451 + }
1.1452 + else
1.1453 + {
1.1454 + // we used up a replacement page...
1.1455 + --aPinArgs.iReplacementPages;
1.1456 + }
1.1457 +
1.1458 + BalanceAges();
1.1459 + }
1.1460 +
1.1461 +
1.1462 +void DPager::Unpin(SPageInfo* aPageInfo, TPinArgs& aPinArgs)
1.1463 + {
1.1464 + __ASSERT_CRITICAL;
1.1465 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1466 + __NK_ASSERT_DEBUG(aPageInfo->PagedState()==SPageInfo::EPagedPinned);
1.1467 + __NK_ASSERT_DEBUG(aPageInfo->PinCount()>0);
1.1468 +
1.1469 + TUint pinCount = aPageInfo->DecPinCount();
1.1470 + Event(EEventPageUnpin,aPageInfo);
1.1471 +
1.1472 + if(pinCount)
1.1473 + return;
1.1474 +
1.1475 + aPageInfo->SetPagedState(SPageInfo::EUnpaged);
1.1476 +
1.1477 + if(!aPageInfo->ClearPinnedReserve())
1.1478 + {
1.1479 +		// was not a pinned reserve page, so we now have a spare replacement page,
1.1480 + // which can be used again or freed later ...
1.1481 + __NK_ASSERT_DEBUG(aPinArgs.iReplacementPages!=TPinArgs::EUseReserveForPinReplacementPages);
1.1482 + ++aPinArgs.iReplacementPages;
1.1483 + }
1.1484 +
1.1485 + AddAsYoungestPage(aPageInfo);
1.1486 + BalanceAges();
1.1487 + }
1.1488 +
1.1489 +
1.1490 +TInt TPinArgs::AllocReplacementPages(TUint aNumPages)
1.1491 + {
1.1492 + if(iUseReserve)
1.1493 + {
1.1494 + __NK_ASSERT_DEBUG(iReplacementPages==0 || iReplacementPages==EUseReserveForPinReplacementPages);
1.1495 + iReplacementPages = EUseReserveForPinReplacementPages;
1.1496 + }
1.1497 + else
1.1498 + {
1.1499 + if(aNumPages>iReplacementPages)
1.1500 + {
1.1501 + if(!ThePager.AllocPinReplacementPages(aNumPages-iReplacementPages))
1.1502 + return KErrNoMemory;
1.1503 + iReplacementPages = aNumPages;
1.1504 + }
1.1505 + }
1.1506 + return KErrNone;
1.1507 + }
1.1508 +
1.1509 +
1.1510 +void TPinArgs::FreeReplacementPages()
1.1511 + {
1.1512 + if(iReplacementPages!=0 && iReplacementPages!=EUseReserveForPinReplacementPages)
1.1513 + ThePager.FreePinReplacementPages(iReplacementPages);
1.1514 + iReplacementPages = 0;
1.1515 + }
1.1516 +
1.1517 +
1.1518 +TBool DPager::AllocPinReplacementPages(TUint aNumPages)
1.1519 + {
1.1520 + TRACE2(("DPager::AllocPinReplacementPages(0x%x)",aNumPages));
1.1521 + __ASSERT_CRITICAL;
1.1522 + RamAllocLock::Lock();
1.1523 + MmuLock::Lock();
1.1524 +
1.1525 + TBool ok = false;
1.1526 + do
1.1527 + {
1.1528 + if(iNumberOfFreePages>=aNumPages)
1.1529 + {
1.1530 + iNumberOfFreePages -= aNumPages;
1.1531 + ok = true;
1.1532 + break;
1.1533 + }
1.1534 + }
1.1535 + while(TryGrowLiveList());
1.1536 +
1.1537 + MmuLock::Unlock();
1.1538 + RamAllocLock::Unlock();
1.1539 + return ok;
1.1540 + }
1.1541 +
1.1542 +
1.1543 +void DPager::FreePinReplacementPages(TUint aNumPages)
1.1544 + {
1.1545 + TRACE2(("DPager::FreePinReplacementPage(0x%x)",aNumPages));
1.1546 + __ASSERT_CRITICAL;
1.1547 +
1.1548 + RamAllocLock::Lock();
1.1549 + MmuLock::Lock();
1.1550 +
1.1551 + iNumberOfFreePages += aNumPages;
1.1552 + RemoveExcessPages();
1.1553 +
1.1554 + MmuLock::Unlock();
1.1555 + RamAllocLock::Unlock();
1.1556 + }
1.1557 +
1.1558 +
1.1559 +TBool DPager::ReservePage()
1.1560 + {
1.1561 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.1562 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1563 + __ASSERT_CRITICAL;
1.1564 + __NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit+iReservePageCount);
1.1565 + while(iMinimumPageCount==iMinimumPageLimit+iReservePageCount && iNumberOfFreePages==0)
1.1566 + {
1.1567 + if(!TryGrowLiveList())
1.1568 + return false;
1.1569 + }
1.1570 + if(iMinimumPageCount==iMinimumPageLimit+iReservePageCount)
1.1571 + {
1.1572 + ++iMinimumPageCount;
1.1573 + --iNumberOfFreePages;
1.1574 + if(iMinimumPageCount>iMaximumPageCount)
1.1575 + iMaximumPageCount = iMinimumPageCount;
1.1576 + }
1.1577 + ++iReservePageCount;
1.1578 + __NK_ASSERT_DEBUG(iMinimumPageCount >= iMinimumPageLimit+iReservePageCount);
1.1579 + __NK_ASSERT_DEBUG(iMinimumPageCount+iNumberOfFreePages <= iMaximumPageCount);
1.1580 + return ETrue;
1.1581 + }
1.1582 +
1.1583 +
1.1584 +TBool DPager::ReservePages(TUint aRequiredCount, TUint& aCount)
1.1585 + {
1.1586 + __ASSERT_CRITICAL;
1.1587 +
1.1588 + RamAllocLock::Lock();
1.1589 + MmuLock::Lock();
1.1590 + while(aCount<aRequiredCount)
1.1591 + {
1.1592 + if(!ReservePage())
1.1593 + break;
1.1594 + ++aCount;
1.1595 + MmuLock::Flash();
1.1596 + }
1.1597 + TBool enoughPages = aCount==aRequiredCount;
1.1598 + MmuLock::Unlock();
1.1599 + RamAllocLock::Unlock();
1.1600 +
1.1601 + if(!enoughPages)
1.1602 + UnreservePages(aCount);
1.1603 +
1.1604 + return enoughPages;
1.1605 + }
1.1606 +
1.1607 +
1.1608 +void DPager::UnreservePages(TUint& aCount)
1.1609 + {
1.1610 + MmuLock::Lock();
1.1611 + iReservePageCount -= aCount;
1.1612 + aCount = 0;
1.1613 + MmuLock::Unlock();
1.1614 + }
1.1615 +
1.1616 +
1.1617 +TInt DPager::CheckRealtimeThreadFault(DThread* aThread, TAny* aExceptionInfo)
1.1618 + {
1.1619 + // realtime threads shouldn't take paging faults...
1.1620 + DThread* client = aThread->iIpcClient;
1.1621 +
1.1622 + // If iIpcClient is set then we are accessing the address space of a remote thread. If we are
1.1623 +	// in an IPC trap, this will contain information about the local and remote addresses being accessed.
1.1624 +	// If this is not set then we assume that any fault must be the fault of a bad remote address.
1.1625 + TIpcExcTrap* ipcTrap = (TIpcExcTrap*)aThread->iExcTrap;
1.1626 + if (ipcTrap && !ipcTrap->IsTIpcExcTrap())
1.1627 + ipcTrap = 0;
1.1628 + if (client && (!ipcTrap || ipcTrap->ExcLocation(aThread, aExceptionInfo) == TIpcExcTrap::EExcRemote))
1.1629 + {
1.1630 + // kill client thread...
1.1631 + if(K::IllegalFunctionForRealtimeThread(client,"Access to Paged Memory (by other thread)"))
1.1632 + {
1.1633 + // treat memory access as bad...
1.1634 + return KErrAbort;
1.1635 + }
1.1636 + // else thread is in 'warning only' state so allow paging...
1.1637 + }
1.1638 + else
1.1639 + {
1.1640 + // kill current thread...
1.1641 + if(K::IllegalFunctionForRealtimeThread(NULL,"Access to Paged Memory"))
1.1642 + {
1.1643 + // if current thread is in critical section, then the above kill will be deferred
1.1644 + // and we will continue executing. We will handle this by returning an error
1.1645 + // which means that the thread will take an exception (which hopefully is XTRAPed!)
1.1646 + return KErrAbort;
1.1647 + }
1.1648 + // else thread is in 'warning only' state so allow paging...
1.1649 + }
1.1650 + return KErrNone;
1.1651 + }
1.1652 +
1.1653 +
1.1654 +TInt DPager::HandlePageFault( TLinAddr aPc, TLinAddr aFaultAddress, TUint aFaultAsid, TUint aFaultIndex,
1.1655 + TUint aAccessPermissions, DMemoryObject* aMemory, DMemoryMapping* aMapping,
1.1656 + TUint aMapInstanceCount, DThread* aThread, TAny* aExceptionInfo)
1.1657 + {
1.1658 + MmuLock::Lock();
1.1659 + TInt r = TryRejuvenate( aFaultAsid, aFaultAddress, aAccessPermissions, aPc, aMapping, aMapInstanceCount,
1.1660 + aThread, aExceptionInfo);
1.1661 + if(r == KErrNone || r == KErrAbort)
1.1662 + {
1.1663 + MmuLock::Unlock();
1.1664 + }
1.1665 + else
1.1666 + {
1.1667 + // rejuvenate failed, call memory manager to page in memory...
1.1668 + Event(EEventPageInStart, 0, aPc, aFaultAddress, aAccessPermissions);
1.1669 + MmuLock::Unlock();
1.1670 + TheThrashMonitor.NotifyStartPaging();
1.1671 +
1.1672 + DMemoryManager* manager = aMemory->iManager;
1.1673 + r = manager->HandleFault(aMemory, aFaultIndex, aMapping, aMapInstanceCount, aAccessPermissions);
1.1674 +
1.1675 + TheThrashMonitor.NotifyEndPaging();
1.1676 + }
1.1677 + return r;
1.1678 + }
1.1679 +
1.1680 +
1.1681 +TInt DPager::ResizeLiveList()
1.1682 + {
1.1683 + MmuLock::Lock();
1.1684 + TUint min = iMinimumPageCount;
1.1685 + TUint max = iMaximumPageCount;
1.1686 + MmuLock::Unlock();
1.1687 + return ResizeLiveList(min,max);
1.1688 + }
1.1689 +
1.1690 +
1.1691 +TInt DPager::ResizeLiveList(TUint aMinimumPageCount, TUint aMaximumPageCount)
1.1692 + {
1.1693 + TRACE(("DPager::ResizeLiveList(%d,%d) current young=%d old=%d min=%d free=%d max=%d",aMinimumPageCount,aMaximumPageCount,iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
1.1694 + if(!aMaximumPageCount)
1.1695 + {
1.1696 + aMinimumPageCount = iInitMinimumPageCount;
1.1697 + aMaximumPageCount = iInitMaximumPageCount;
1.1698 + }
1.1699 + if (aMaximumPageCount > KAbsoluteMaxPageCount)
1.1700 + aMaximumPageCount = KAbsoluteMaxPageCount;
1.1701 +
1.1702 + // Min must not be greater than max...
1.1703 + if(aMinimumPageCount>aMaximumPageCount)
1.1704 + return KErrArgument;
1.1705 +
1.1706 + NKern::ThreadEnterCS();
1.1707 + RamAllocLock::Lock();
1.1708 +
1.1709 + MmuLock::Lock();
1.1710 +
1.1711 + // Make sure aMinimumPageCount is not less than absolute minimum we can cope with...
1.1712 + iMinimumPageLimit = iMinYoungPages * (1 + iYoungOldRatio) / iYoungOldRatio
1.1713 + + DPageReadRequest::ReservedPagesRequired();
1.1714 + if(iMinimumPageLimit<iAbsoluteMinPageCount)
1.1715 + iMinimumPageLimit = iAbsoluteMinPageCount;
1.1716 + if(aMinimumPageCount<iMinimumPageLimit+iReservePageCount)
1.1717 + aMinimumPageCount = iMinimumPageLimit+iReservePageCount;
1.1718 + if(aMaximumPageCount<aMinimumPageCount)
1.1719 + aMaximumPageCount=aMinimumPageCount;
1.1720 +
1.1721 + // Increase iMaximumPageCount?
1.1722 + TInt extra = aMaximumPageCount-iMaximumPageCount;
1.1723 + if(extra>0)
1.1724 + iMaximumPageCount += extra;
1.1725 +
1.1726 + // Reduce iMinimumPageCount?
1.1727 + TInt spare = iMinimumPageCount-aMinimumPageCount;
1.1728 + if(spare>0)
1.1729 + {
1.1730 + iMinimumPageCount -= spare;
1.1731 + iNumberOfFreePages += spare;
1.1732 + }
1.1733 +
1.1734 + // Increase iMinimumPageCount?
1.1735 + TInt r=KErrNone;
1.1736 + while(iMinimumPageCount<aMinimumPageCount)
1.1737 + {
1.1738 + TUint newMin = aMinimumPageCount;
1.1739 + TUint maxMin = iMinimumPageCount+iNumberOfFreePages;
1.1740 + if(newMin>maxMin)
1.1741 + newMin = maxMin;
1.1742 +
1.1743 + TUint delta = newMin-iMinimumPageCount;
1.1744 + if(delta)
1.1745 + {
1.1746 + iMinimumPageCount = newMin;
1.1747 + iNumberOfFreePages -= delta;
1.1748 + continue;
1.1749 + }
1.1750 +
1.1751 + if(!TryGrowLiveList())
1.1752 + {
1.1753 + r=KErrNoMemory;
1.1754 + break;
1.1755 + }
1.1756 + }
1.1757 +
1.1758 + // Reduce iMaximumPageCount?
1.1759 + while(iMaximumPageCount>aMaximumPageCount)
1.1760 + {
1.1761 + TUint newMax = aMaximumPageCount;
1.1762 + TUint minMax = iMinimumPageCount+iNumberOfFreePages;
1.1763 + if(newMax<minMax)
1.1764 + newMax = minMax;
1.1765 +
1.1766 + TUint delta = iMaximumPageCount-newMax;
1.1767 + if(delta)
1.1768 + {
1.1769 + iMaximumPageCount = newMax;
1.1770 + continue;
1.1771 + }
1.1772 +
1.1773 + ReturnPageToSystem();
1.1774 + }
1.1775 +
1.1776 + TRACE(("DPager::ResizeLiveList end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
1.1777 +
1.1778 +#ifdef BTRACE_KERNEL_MEMORY
1.1779 + BTrace4(BTrace::EKernelMemory,BTrace::EKernelMemoryDemandPagingCache,iMinimumPageCount << KPageShift);
1.1780 +#endif
1.1781 +
1.1782 + MmuLock::Unlock();
1.1783 +
1.1784 + RamAllocLock::Unlock();
1.1785 + NKern::ThreadLeaveCS();
1.1786 +
1.1787 + return r;
1.1788 + }
1.1789 +
1.1790 +
1.1791 +void DPager::FlushAll()
1.1792 + {
1.1793 + NKern::ThreadEnterCS();
1.1794 + RamAllocLock::Lock();
1.1795 +
1.1796 + TRACE(("DPager::FlushAll() live list young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
1.1797 +
1.1798 + // look at all RAM pages in the system, and unmap all those used for paging
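+	// Each set bit in KPageInfoMap indicates that one page's worth of SPageInfo
+	// structures is present, so each 32-bit word of the map covers
+	// 32*KPageInfosPerPage page infos.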
1.1799 + const TUint32* piMap = (TUint32*)KPageInfoMap;
1.1800 + const TUint32* piMapEnd = piMap+(KNumPageInfoPages>>5);
1.1801 + SPageInfo* pi = (SPageInfo*)KPageInfoLinearBase;
1.1802 + MmuLock::Lock();
1.1803 + do
1.1804 + {
1.1805 + SPageInfo* piNext = pi+(KPageInfosPerPage<<5);
1.1806 + for(TUint32 piFlags=*piMap++; piFlags; piFlags>>=1)
1.1807 + {
1.1808 + if(!(piFlags&1))
1.1809 + {
1.1810 + pi += KPageInfosPerPage;
1.1811 + continue;
1.1812 + }
1.1813 + SPageInfo* piEnd = pi+KPageInfosPerPage;
1.1814 + do
1.1815 + {
1.1816 + SPageInfo::TPagedState state = pi->PagedState();
1.1817 +#ifdef _USE_OLDEST_LISTS
1.1818 + if (state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld ||
1.1819 + state==SPageInfo::EPagedOldestClean || state==SPageInfo::EPagedOldestDirty)
1.1820 +#else
1.1821 + if(state==SPageInfo::EPagedYoung || state==SPageInfo::EPagedOld)
1.1822 +#endif
1.1823 + {
1.1824 + if (pi->Type() != SPageInfo::EUnused)
1.1825 + {
1.1826 + TInt r = StealPage(pi);
1.1827 + if(r==KErrNone)
1.1828 + AddAsFreePage(pi);
1.1829 + MmuLock::Flash();
1.1830 + }
1.1831 + }
1.1832 + ++pi;
1.1833 + if(((TUint)pi&(0xf<<KPageInfoShift))==0)
1.1834 + MmuLock::Flash(); // every 16 page infos
1.1835 + }
1.1836 + while(pi<piEnd);
1.1837 + }
1.1838 + pi = piNext;
1.1839 + }
1.1840 + while(piMap<piMapEnd);
1.1841 + MmuLock::Unlock();
1.1842 +
1.1843 + // reduce live page list to a minimum
1.1844 + while(GetFreePages(1)) {};
1.1845 +
1.1846 + TRACE(("DPager::FlushAll() end with young=%d old=%d min=%d free=%d max=%d",iYoungCount,iOldCount,iMinimumPageCount,iNumberOfFreePages,iMaximumPageCount));
1.1847 +
1.1848 + RamAllocLock::Unlock();
1.1849 + NKern::ThreadLeaveCS();
1.1850 + }
1.1851 +
1.1852 +
1.1853 +void DPager::GetLiveListInfo(SVMCacheInfo& aInfo)
1.1854 + {
1.1855 + MmuLock::Lock(); // ensure consistent set of values are read...
1.1856 + aInfo.iMinSize = iMinimumPageCount<<KPageShift;
1.1857 + aInfo.iMaxSize = iMaximumPageCount<<KPageShift;
1.1858 + aInfo.iCurrentSize = (iMinimumPageCount+iNumberOfFreePages)<<KPageShift;
1.1859 + aInfo.iMaxFreeSize = iNumberOfFreePages<<KPageShift;
1.1860 + MmuLock::Unlock();
1.1861 + }
1.1862 +
1.1863 +
1.1864 +void DPager::GetEventInfo(SVMEventInfo& aInfoOut)
1.1865 + {
1.1866 + MmuLock::Lock(); // ensure consistent set of values are read...
1.1867 + aInfoOut = iEventInfo;
1.1868 + MmuLock::Unlock();
1.1869 + }
1.1870 +
1.1871 +
1.1872 +void DPager::ResetEventInfo()
1.1873 + {
1.1874 + MmuLock::Lock();
1.1875 + memclr(&iEventInfo, sizeof(iEventInfo));
1.1876 + MmuLock::Unlock();
1.1877 + }
1.1878 +
1.1879 +
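+// Return a bitmask describing the state of the page mapped at aAddr in the
+// current process: the EPageState* flags, together with the page's SPageInfo
+// type in the low byte and its paged state in the byte above that. Used by
+// the EVMPageState HAL function below.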
1.1880 +TInt TestPageState(TLinAddr aAddr)
1.1881 + {
1.1882 + DMemModelProcess* process = (DMemModelProcess*)TheCurrentThread->iOwningProcess;
1.1883 +	// Get the OS ASID of the current thread's process; being the current process, there is no need to open a reference on it.
1.1884 + TInt osAsid = process->OsAsid();
1.1885 + TPte* ptePtr = 0;
1.1886 + TPte pte = 0;
1.1887 + TInt r = 0;
1.1888 + SPageInfo* pageInfo = NULL;
1.1889 +
1.1890 + NKern::ThreadEnterCS();
1.1891 +
1.1892 + TUint offsetInMapping;
1.1893 + TUint mapInstanceCount;
1.1894 + DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, aAddr, 1, offsetInMapping, mapInstanceCount);
1.1895 +
1.1896 + MmuLock::Lock();
1.1897 +
1.1898 + if(mapping)
1.1899 + {
1.1900 + DMemoryObject* memory = mapping->Memory();
1.1901 + if(mapInstanceCount == mapping->MapInstanceCount() && memory)
1.1902 + {
1.1903 + DMemoryManager* manager = memory->iManager;
1.1904 + if(manager==TheCodePagedMemoryManager)
1.1905 + r |= EPageStateInRamCode|EPageStatePaged;
1.1906 + }
1.1907 + }
1.1908 +
1.1909 + ptePtr = Mmu::SafePtePtrFromLinAddr(aAddr,osAsid);
1.1910 + if (!ptePtr)
1.1911 + goto done;
1.1912 + pte = *ptePtr;
1.1913 + if (pte == KPteUnallocatedEntry)
1.1914 + goto done;
1.1915 + r |= EPageStatePtePresent;
1.1916 + if (pte!=Mmu::MakePteInaccessible(pte,0))
1.1917 + r |= EPageStatePteValid;
1.1918 +
1.1919 + pageInfo = SPageInfo::SafeFromPhysAddr(pte&~KPageMask);
1.1920 + if(pageInfo)
1.1921 + {
1.1922 + r |= pageInfo->Type();
1.1923 + r |= pageInfo->PagedState()<<8;
1.1924 + }
1.1925 +done:
1.1926 + MmuLock::Unlock();
1.1927 + if(mapping)
1.1928 + mapping->Close();
1.1929 + NKern::ThreadLeaveCS();
1.1930 + return r;
1.1931 + }
1.1932 +
1.1933 +
1.1934 +
1.1935 +TInt VMHalFunction(TAny*, TInt aFunction, TAny* a1, TAny* a2)
1.1936 + {
1.1937 + switch(aFunction)
1.1938 + {
1.1939 + case EVMHalFlushCache:
1.1940 + if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalFlushCache)")))
1.1941 + K::UnlockedPlatformSecurityPanic();
1.1942 + ThePager.FlushAll();
1.1943 + return KErrNone;
1.1944 +
1.1945 + case EVMHalSetCacheSize:
1.1946 + {
1.1947 + if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetCacheSize)")))
1.1948 + K::UnlockedPlatformSecurityPanic();
1.1949 + TUint min = TUint(a1)>>KPageShift;
1.1950 + if(TUint(a1)&KPageMask)
1.1951 + ++min;
1.1952 + TUint max = TUint(a2)>>KPageShift;
1.1953 + if(TUint(a2)&KPageMask)
1.1954 + ++max;
1.1955 + return ThePager.ResizeLiveList(min,max);
1.1956 + }
1.1957 +
1.1958 + case EVMHalGetCacheSize:
1.1959 + {
1.1960 + SVMCacheInfo info;
1.1961 + ThePager.GetLiveListInfo(info);
1.1962 + kumemput32(a1,&info,sizeof(info));
1.1963 + }
1.1964 + return KErrNone;
1.1965 +
1.1966 + case EVMHalGetEventInfo:
1.1967 + {
1.1968 + SVMEventInfo info;
1.1969 + ThePager.GetEventInfo(info);
1.1970 + Kern::InfoCopy(*(TDes8*)a1,(TUint8*)&info,sizeof(info));
1.1971 + }
1.1972 + return KErrNone;
1.1973 +
1.1974 + case EVMHalResetEventInfo:
1.1975 + ThePager.ResetEventInfo();
1.1976 + return KErrNone;
1.1977 +
1.1978 +#ifdef __SUPPORT_DEMAND_PAGING_EMULATION__
1.1979 + case EVMHalGetOriginalRomPages:
1.1980 + RomOriginalPages(*((TPhysAddr**)a1), *((TUint*)a2));
1.1981 + return KErrNone;
1.1982 +#endif
1.1983 +
1.1984 + case EVMPageState:
1.1985 + return TestPageState((TLinAddr)a1);
1.1986 +
1.1987 + case EVMHalGetSwapInfo:
1.1988 + {
1.1989 + if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
1.1990 + return KErrNotSupported;
1.1991 + SVMSwapInfo info;
1.1992 + GetSwapInfo(info);
1.1993 + kumemput32(a1,&info,sizeof(info));
1.1994 + }
1.1995 + return KErrNone;
1.1996 +
1.1997 + case EVMHalGetThrashLevel:
1.1998 + return TheThrashMonitor.ThrashLevel();
1.1999 +
1.2000 + case EVMHalSetSwapThresholds:
1.2001 + {
1.2002 + if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetSwapThresholds)")))
1.2003 + K::UnlockedPlatformSecurityPanic();
1.2004 + if ((K::MemModelAttributes & EMemModelAttrDataPaging) == 0)
1.2005 + return KErrNotSupported;
1.2006 + SVMSwapThresholds thresholds;
1.2007 + kumemget32(&thresholds,a1,sizeof(thresholds));
1.2008 + return SetSwapThresholds(thresholds);
1.2009 + }
1.2010 +
1.2011 + case EVMHalSetThrashThresholds:
1.2012 + if(!TheCurrentThread->HasCapability(ECapabilityWriteDeviceData,__PLATSEC_DIAGNOSTIC_STRING("Checked by VMHalFunction(EVMHalSetThrashThresholds)")))
1.2013 + K::UnlockedPlatformSecurityPanic();
1.2014 + return TheThrashMonitor.SetThresholds((TUint)a1, (TUint)a2);
1.2015 +
1.2016 +#ifdef __DEMAND_PAGING_BENCHMARKS__
1.2017 + case EVMHalGetPagingBenchmark:
1.2018 + {
1.2019 +		TUint index = (TUint)a1;
1.2020 + if (index >= EMaxPagingBm)
1.2021 + return KErrNotFound;
1.2022 + NKern::LockSystem();
1.2023 + SPagingBenchmarkInfo info = ThePager.iBenchmarkInfo[index];
1.2024 + NKern::UnlockSystem();
1.2025 + kumemput32(a2,&info,sizeof(info));
1.2026 + }
1.2027 + return KErrNone;
1.2028 +
1.2029 + case EVMHalResetPagingBenchmark:
1.2030 + {
1.2031 +		TUint index = (TUint)a1;
1.2032 + if (index >= EMaxPagingBm)
1.2033 + return KErrNotFound;
1.2034 + NKern::LockSystem();
1.2035 + ThePager.ResetBenchmarkData((TPagingBenchmark)index);
1.2036 + NKern::UnlockSystem();
1.2037 + }
1.2038 + return KErrNone;
1.2039 +#endif
1.2040 +
1.2041 + default:
1.2042 + return KErrNotSupported;
1.2043 + }
1.2044 + }
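+
+// These functions are reached from user side through the VM HAL group. A
+// sketch of a typical call, assuming the EHalGroupVM dispatch used by the
+// test code (the mutating functions also require WriteDeviceData):
+//
+//   #include <u32hal.h>
+//   // request a paging cache of between 1MB and 4MB; byte values are
+//   // rounded up to whole pages by the handler above...
+//   TInt r = UserSvr::HalFunction(EHalGroupVM, EVMHalSetCacheSize,
+//                                 (TAny*)0x100000, (TAny*)0x400000);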
1.2045 +
1.2046 +
1.2047 +#ifdef __DEMAND_PAGING_BENCHMARKS__
1.2048 +
1.2049 +void DPager::ResetBenchmarkData(TPagingBenchmark aBm)
1.2050 + {
1.2051 + SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
1.2052 + info.iCount = 0;
1.2053 + info.iTotalTime = 0;
1.2054 + info.iMaxTime = 0;
1.2055 + info.iMinTime = KMaxTInt;
1.2056 + }
1.2057 +
1.2058 +void DPager::RecordBenchmarkData(TPagingBenchmark aBm, TUint32 aStartTime, TUint32 aEndTime)
1.2059 + {
1.2060 + SPagingBenchmarkInfo& info = iBenchmarkInfo[aBm];
1.2061 + ++info.iCount;
1.2062 +#if !defined(HIGH_RES_TIMER) || defined(HIGH_RES_TIMER_COUNTS_UP)
1.2063 + TInt64 elapsed = aEndTime - aStartTime;
1.2064 +#else
1.2065 + TInt64 elapsed = aStartTime - aEndTime;
1.2066 +#endif
1.2067 + info.iTotalTime += elapsed;
1.2068 + if (elapsed > info.iMaxTime)
1.2069 + info.iMaxTime = elapsed;
1.2070 + if (elapsed < info.iMinTime)
1.2071 + info.iMinTime = elapsed;
1.2072 + }
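+
+// The counters accumulated above are read back with EVMHalGetPagingBenchmark.
+// A sketch, assuming the index names defined by the TPagingBenchmark
+// enumeration:
+//
+//   SPagingBenchmarkInfo info;
+//   UserSvr::HalFunction(EHalGroupVM, EVMHalGetPagingBenchmark,
+//                        (TAny*)EPagingBmReadRomPage, &info);
+//   TInt64 avgTicks = info.iCount ? info.iTotalTime/info.iCount : 0;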
1.2073 +
1.2074 +#endif //__DEMAND_PAGING_BENCHMARKS__
1.2075 +
1.2076 +
1.2077 +//
1.2078 +// Paging request management...
1.2079 +//
1.2080 +
1.2081 +//
1.2082 +// DPagingRequest
1.2083 +//
1.2084 +
1.2085 +DPagingRequest::DPagingRequest(DPagingRequestPool::TGroup& aPoolGroup)
1.2086 + : iPoolGroup(aPoolGroup), iUseRegionMemory(0), iUseRegionIndex(0), iUseRegionCount(0)
1.2087 + {
1.2088 + }
1.2089 +
1.2090 +
1.2091 +FORCE_INLINE void DPagingRequest::SetUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
1.2092 + {
1.2093 + __ASSERT_SYSTEM_LOCK;
1.2094 + iUseRegionMemory = aMemory;
1.2095 + iUseRegionIndex = aIndex;
1.2096 + iUseRegionCount = aCount;
1.2097 + }
1.2098 +
1.2099 +
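+// Check that aIndex lies within this request's in-use region and that the
+// remainder of that region, from aIndex to its end, fits within aCount pages.
+// The unsigned subtraction TUint(aIndex-iUseRegionIndex) doubles as a lower
+// bound check: it wraps to a huge value when aIndex precedes iUseRegionIndex.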
1.2100 +TBool DPagingRequest::CheckUse(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
1.2101 + {
1.2102 + return aMemory==iUseRegionMemory
1.2103 + && TUint(aIndex-iUseRegionIndex) < iUseRegionCount
1.2104 + && TUint(iUseRegionCount-TUint(aIndex-iUseRegionIndex)) <= aCount;
1.2105 + }
1.2106 +
1.2107 +
1.2108 +void DPagingRequest::Release()
1.2109 + {
1.2110 + NKern::LockSystem();
1.2111 + SetUse(0,0,0);
1.2112 + Signal();
1.2113 + }
1.2114 +
1.2115 +
1.2116 +void DPagingRequest::Wait()
1.2117 + {
1.2118 + __ASSERT_SYSTEM_LOCK;
1.2119 + ++iUsageCount;
1.2120 + TInt r = iMutex->Wait();
1.2121 + __NK_ASSERT_ALWAYS(r == KErrNone);
1.2122 + }
1.2123 +
1.2124 +
1.2125 +void DPagingRequest::Signal()
1.2126 + {
1.2127 + __ASSERT_SYSTEM_LOCK;
1.2128 + iPoolGroup.Signal(this);
1.2129 + }
1.2130 +
1.2131 +
1.2132 +FORCE_INLINE TBool DPagingRequest::IsCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
1.2133 + {
1.2134 + __ASSERT_SYSTEM_LOCK;
1.2135 + DMemoryObject* memory = iUseRegionMemory;
1.2136 + TUint index = iUseRegionIndex;
1.2137 + TUint count = iUseRegionCount;
1.2138 + // note, this comparison would fail if either region includes page number KMaxTUint,
1.2139 + // but it isn't possible to create a memory object which is > KMaxTUint pages...
1.2140 + return memory == aMemory && index+count > aIndex && index < aIndex+aCount;
1.2141 + }
1.2142 +
1.2143 +
1.2144 +TLinAddr DPagingRequest::MapPages(TUint aColour, TUint aCount, TPhysAddr* aPages)
1.2145 + {
1.2146 + __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
1.2147 + return iTempMapping.Map(aPages,aCount,aColour);
1.2148 + }
1.2149 +
1.2150 +
1.2151 +void DPagingRequest::UnmapPages(TBool aIMBRequired)
1.2152 + {
1.2153 + __NK_ASSERT_DEBUG(iMutex->iCleanup.iThread == &Kern::CurrentThread());
1.2154 + iTempMapping.Unmap(aIMBRequired);
1.2155 + }
1.2156 +
1.2157 +
1.2158 +//
1.2159 +// DPageReadRequest
1.2160 +//
1.2161 +
1.2162 +TInt DPageReadRequest::iAllocNext = 0;
1.2163 +
1.2164 +TInt DPageReadRequest::Construct()
1.2165 + {
1.2166 + // allocate id and mutex...
1.2167 + TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
1.2168 + _LIT(KLitPagingRequest,"PageReadRequest-");
1.2169 + TBuf<sizeof("PageReadRequest-")+10> mutexName(KLitPagingRequest);
1.2170 + mutexName.AppendNum(id);
1.2171 + TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageIn);
1.2172 + if(r!=KErrNone)
1.2173 + return r;
1.2174 +
1.2175 + // allocate space for mapping pages whilst they're being loaded...
1.2176 + iTempMapping.Alloc(EMaxPages);
1.2177 +
1.2178 + // create memory buffer...
1.2179 + TUint bufferSize = EMaxPages+1;
1.2180 + DMemoryObject* bufferMemory;
1.2181 + r = MM::MemoryNew(bufferMemory,EMemoryObjectUnpaged,bufferSize,EMemoryCreateNoWipe);
1.2182 + if(r!=KErrNone)
1.2183 + return r;
1.2184 + MM::MemorySetLock(bufferMemory,iMutex);
1.2185 + TPhysAddr physAddr;
1.2186 + r = MM::MemoryAllocContiguous(bufferMemory,0,bufferSize,0,physAddr);
1.2187 + (void)physAddr;
1.2188 + if(r!=KErrNone)
1.2189 + return r;
1.2190 + DMemoryMapping* bufferMapping;
1.2191 + r = MM::MappingNew(bufferMapping,bufferMemory,ESupervisorReadWrite,KKernelOsAsid);
1.2192 + if(r!=KErrNone)
1.2193 + return r;
1.2194 + iBuffer = MM::MappingBase(bufferMapping);
1.2195 +
1.2196 + // ensure there are enough young pages to cope with new request object...
1.2197 + r = ThePager.ResizeLiveList();
1.2198 + if(r!=KErrNone)
1.2199 + return r;
1.2200 +
1.2201 + return r;
1.2202 + }
1.2203 +
1.2204 +
1.2205 +//
1.2206 +// DPageWriteRequest
1.2207 +//
1.2208 +
1.2209 +TInt DPageWriteRequest::iAllocNext = 0;
1.2210 +
1.2211 +TInt DPageWriteRequest::Construct()
1.2212 + {
1.2213 + // allocate id and mutex...
1.2214 + TUint id = (TUint)__e32_atomic_add_ord32(&iAllocNext, 1);
1.2215 + _LIT(KLitPagingRequest,"PageWriteRequest-");
1.2216 + TBuf<sizeof("PageWriteRequest-")+10> mutexName(KLitPagingRequest);
1.2217 + mutexName.AppendNum(id);
1.2218 + TInt r = K::MutexCreate(iMutex, mutexName, NULL, EFalse, KMutexOrdPageOut);
1.2219 + if(r!=KErrNone)
1.2220 + return r;
1.2221 +
1.2222 +	// allocate space for mapping pages whilst they're being written out...
1.2223 + iTempMapping.Alloc(EMaxPages);
1.2224 +
1.2225 + return r;
1.2226 + }
1.2227 +
1.2228 +
1.2229 +//
1.2230 +// DPagingRequestPool
1.2231 +//
1.2232 +
1.2233 +DPagingRequestPool::DPagingRequestPool(TUint aNumPageReadRequest,TUint aNumPageWriteRequest)
1.2234 + : iPageReadRequests(aNumPageReadRequest), iPageWriteRequests(aNumPageWriteRequest)
1.2235 + {
1.2236 + TUint i;
1.2237 +
1.2238 + for(i=0; i<aNumPageReadRequest; ++i)
1.2239 + {
1.2240 + DPageReadRequest* req = new DPageReadRequest(iPageReadRequests);
1.2241 + __NK_ASSERT_ALWAYS(req);
1.2242 + TInt r = req->Construct();
1.2243 + __NK_ASSERT_ALWAYS(r==KErrNone);
1.2244 + iPageReadRequests.iRequests[i] = req;
1.2245 + iPageReadRequests.iFreeList.Add(req);
1.2246 + }
1.2247 +
1.2248 + for(i=0; i<aNumPageWriteRequest; ++i)
1.2249 + {
1.2250 + DPageWriteRequest* req = new DPageWriteRequest(iPageWriteRequests);
1.2251 + __NK_ASSERT_ALWAYS(req);
1.2252 + TInt r = req->Construct();
1.2253 + __NK_ASSERT_ALWAYS(r==KErrNone);
1.2254 + iPageWriteRequests.iRequests[i] = req;
1.2255 + iPageWriteRequests.iFreeList.Add(req);
1.2256 + }
1.2257 + }
1.2258 +
1.2259 +
1.2260 +DPagingRequestPool::~DPagingRequestPool()
1.2261 + {
1.2262 + __NK_ASSERT_ALWAYS(0); // deletion not implemented
1.2263 + }
1.2264 +
1.2265 +
1.2266 +DPageReadRequest* DPagingRequestPool::AcquirePageReadRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
1.2267 + {
1.2268 + NKern::LockSystem();
1.2269 +
1.2270 + DPagingRequest* req;
1.2271 +
1.2272 + // if we collide with page write operation...
1.2273 + req = iPageWriteRequests.FindCollision(aMemory,aIndex,aCount);
1.2274 + if(req)
1.2275 + {
1.2276 + // wait until write completes...
1.2277 + req->Wait();
1.2278 + req->Signal();
1.2279 + return 0; // caller expected to retry if needed
1.2280 + }
1.2281 +
1.2282 + // get a request object to use...
1.2283 + req = iPageReadRequests.GetRequest(aMemory,aIndex,aCount);
1.2284 +
1.2285 + // check no new requests collide with us...
1.2286 + if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount)
1.2287 + || iPageReadRequests.FindCollision(aMemory,aIndex,aCount))
1.2288 + {
1.2289 + // another operation is colliding with this region, give up and retry...
1.2290 + req->Signal();
1.2291 + return 0; // caller expected to retry if needed
1.2292 + }
1.2293 +
1.2294 + // we have a request object which we can use...
1.2295 + req->SetUse(aMemory,aIndex,aCount);
1.2296 +
1.2297 + NKern::UnlockSystem();
1.2298 + return (DPageReadRequest*)req;
1.2299 + }
1.2300 +
1.2301 +
1.2302 +DPageWriteRequest* DPagingRequestPool::AcquirePageWriteRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
1.2303 + {
1.2304 + NKern::LockSystem();
1.2305 +
1.2306 + DPagingRequest* req;
1.2307 +
1.2308 + for(;;)
1.2309 + {
1.2310 + // get a request object to use...
1.2311 + req = iPageWriteRequests.GetRequest(aMemory,aIndex,aCount);
1.2312 +
1.2313 + if(iPageWriteRequests.FindCollision(aMemory,aIndex,aCount))
1.2314 + {
1.2315 + // another write operation is colliding with this region, give up and retry...
1.2316 + req->Signal();
1.2317 + // Reacquire the system lock as Signal() above will release it.
1.2318 + NKern::LockSystem();
1.2319 + continue;
1.2320 + }
1.2321 +
1.2322 + break;
1.2323 + }
1.2324 +
1.2325 + // we have a request object which we can use...
1.2326 + req->SetUse(aMemory,aIndex,aCount);
1.2327 +
1.2328 + NKern::UnlockSystem();
1.2329 + return (DPageWriteRequest*)req;
1.2330 + }
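+
+// Note the asymmetry with AcquirePageReadRequest(): a writer loops until it
+// owns a request object with no colliding write, whereas a reader that
+// collides gives up and returns 0, leaving its caller to retry.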
1.2331 +
1.2332 +
1.2333 +DPagingRequestPool::TGroup::TGroup(TUint aNumRequests)
1.2334 + {
1.2335 + iNumRequests = aNumRequests;
1.2336 + iRequests = new DPagingRequest*[aNumRequests];
1.2337 + __NK_ASSERT_ALWAYS(iRequests);
1.2338 + }
1.2339 +
1.2340 +
1.2341 +DPagingRequest* DPagingRequestPool::TGroup::FindCollision(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
1.2342 + {
1.2343 + __ASSERT_SYSTEM_LOCK;
1.2344 + DPagingRequest** ptr = iRequests;
1.2345 + DPagingRequest** ptrEnd = ptr+iNumRequests;
1.2346 + while(ptr<ptrEnd)
1.2347 + {
1.2348 + DPagingRequest* req = *ptr++;
1.2349 + if(req->IsCollision(aMemory,aIndex,aCount))
1.2350 + return req;
1.2351 + }
1.2352 + return 0;
1.2353 + }
1.2354 +
1.2355 +
1.2356 +static TUint32 RandomSeed = 33333;
1.2357 +
1.2358 +DPagingRequest* DPagingRequestPool::TGroup::GetRequest(DMemoryObject* aMemory, TUint aIndex, TUint aCount)
1.2359 + {
1.2360 + __NK_ASSERT_DEBUG(iNumRequests > 0);
1.2361 +
1.2362 + // try using an existing request which collides with this region...
1.2363 + DPagingRequest* req = FindCollision(aMemory,aIndex,aCount);
1.2364 + if(!req)
1.2365 + {
1.2366 + // use a free request...
1.2367 + req = (DPagingRequest*)iFreeList.GetFirst();
1.2368 + if(req)
1.2369 + {
1.2370 + // free requests aren't being used...
1.2371 + __NK_ASSERT_DEBUG(req->iUsageCount == 0);
1.2372 + }
1.2373 + else
1.2374 + {
1.2375 + // pick a random request...
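+			// (RandomSeed is stepped with the well-known 69069 linear
+			// congruential generator; the 64-bit multiply-and-shift below
+			// maps the 32-bit value onto [0,iNumRequests) without a division.)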
1.2376 + RandomSeed = RandomSeed*69069+1; // next 'random' number
1.2377 + TUint index = (TUint64(RandomSeed) * TUint64(iNumRequests)) >> 32;
1.2378 + req = iRequests[index];
1.2379 + __NK_ASSERT_DEBUG(req->iUsageCount > 0); // we only pick random when none are free
1.2380 + }
1.2381 + }
1.2382 +
1.2383 + // wait for chosen request object...
1.2384 + req->Wait();
1.2385 +
1.2386 + return req;
1.2387 + }
1.2388 +
1.2389 +
1.2390 +void DPagingRequestPool::TGroup::Signal(DPagingRequest* aRequest)
1.2391 + {
1.2392 + // if there are no threads waiting on the mutex then return it to the free pool...
1.2393 + __NK_ASSERT_DEBUG(aRequest->iUsageCount > 0);
1.2394 + if (--aRequest->iUsageCount==0)
1.2395 + iFreeList.AddHead(aRequest);
1.2396 +
1.2397 + aRequest->iMutex->Signal();
1.2398 + }
1.2399 +
1.2400 +
1.2401 +/**
1.2402 +Register the specified paging device with the kernel.
1.2403 +
1.2404 +@param aDevice A pointer to the paging device to install
1.2405 +
1.2406 +@return KErrNone on success, otherwise one of the other system-wide error codes.
1.2407 +*/
1.2408 +EXPORT_C TInt Kern::InstallPagingDevice(DPagingDevice* aDevice)
1.2409 + {
1.2410 + TRACEB(("Kern::InstallPagingDevice(0x%08x) name='%s' type=%d",aDevice,aDevice->iName,aDevice->iType));
1.2411 +
1.2412 + __NK_ASSERT_ALWAYS(aDevice->iReadUnitShift <= KPageShift);
1.2413 +
1.2414 + TInt r = KErrNotSupported; // Will return this if unsupported device type is installed
1.2415 +
1.2416 + // create the pools of page out and page in requests...
1.2417 + const TInt writeReqs = (aDevice->iType & DPagingDevice::EData) ? KPagingRequestsPerDevice : 0;
1.2418 + aDevice->iRequestPool = new DPagingRequestPool(KPagingRequestsPerDevice,writeReqs);
1.2419 + if(!aDevice->iRequestPool)
1.2420 + {
1.2421 + r = KErrNoMemory;
1.2422 + goto exit;
1.2423 + }
1.2424 +
1.2425 + if(aDevice->iType & DPagingDevice::ERom)
1.2426 + {
1.2427 + r = TheRomMemoryManager->InstallPagingDevice(aDevice);
1.2428 + if(r!=KErrNone)
1.2429 + goto exit;
1.2430 + }
1.2431 +
1.2432 + if(aDevice->iType & DPagingDevice::ECode)
1.2433 + {
1.2434 + r = TheCodePagedMemoryManager->InstallPagingDevice(aDevice);
1.2435 + if(r!=KErrNone)
1.2436 + goto exit;
1.2437 + }
1.2438 +
1.2439 + if(aDevice->iType & DPagingDevice::EData)
1.2440 + {
1.2441 + r = TheDataPagedMemoryManager->InstallPagingDevice(aDevice);
1.2442 + if(r!=KErrNone)
1.2443 + goto exit;
1.2444 + }
1.2445 +
1.2446 + if (K::MemModelAttributes & (EMemModelAttrRomPaging | EMemModelAttrCodePaging | EMemModelAttrDataPaging))
1.2447 + TheThrashMonitor.Start();
1.2448 +
1.2449 +exit:
1.2450 + TRACEB(("Kern::InstallPagingDevice returns %d",r));
1.2451 + return r;
1.2452 + }
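+
+// A media driver typically installs its device along these lines (a sketch
+// only; DMyPagingDevice and its Read()/Write() implementations belong to the
+// driver):
+//
+//   DPagingDevice* dev = new DMyPagingDevice;
+//   dev->iType = DPagingDevice::ERom | DPagingDevice::ECode;
+//   dev->iReadUnitShift = 9;      // media readable in 512-byte units
+//   dev->iName = "MyMediaDriver";
+//   TInt r = Kern::InstallPagingDevice(dev);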
1.2453 +
1.2454 +
1.2455 +
1.2456 +//
1.2457 +// DDemandPagingLock
1.2458 +//
1.2459 +
1.2460 +EXPORT_C DDemandPagingLock::DDemandPagingLock()
1.2461 + : iReservedPageCount(0), iLockedPageCount(0), iPinMapping(0)
1.2462 + {
1.2463 + }
1.2464 +
1.2465 +
1.2466 +EXPORT_C TInt DDemandPagingLock::Alloc(TInt aSize)
1.2467 + {
1.2468 + TRACEP(("DDemandPagingLock[0x%08x]::Alloc(0x%x)",this,aSize));
1.2469 + iMaxPageCount = ((aSize-1+KPageMask)>>KPageShift)+1;
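+	// (the worst case: aSize bytes starting at the last byte of a page span
+	// this many pages)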
1.2470 +
1.2471 + TInt r = KErrNoMemory;
1.2472 +
1.2473 + NKern::ThreadEnterCS();
1.2474 +
1.2475 + TUint maxPt = DVirtualPinMapping::MaxPageTables(iMaxPageCount);
1.2476 +	// Note, we must reserve a whole page for each page table, even though page
1.2477 +	// tables are smaller than a page, because pinning can remove the page from the live list...
1.2478 + TUint reserve = iMaxPageCount+maxPt*KNumPagesToPinOnePageTable;
1.2479 + if(ThePager.ReservePages(reserve,(TUint&)iReservedPageCount))
1.2480 + {
1.2481 + iPinMapping = DVirtualPinMapping::New(iMaxPageCount);
1.2482 + if(iPinMapping)
1.2483 + r = KErrNone;
1.2484 + else
1.2485 + ThePager.UnreservePages((TUint&)iReservedPageCount);
1.2486 + }
1.2487 +
1.2488 + NKern::ThreadLeaveCS();
1.2489 + TRACEP(("DDemandPagingLock[0x%08x]::Alloc returns %d, iMaxPageCount=%d, iReservedPageCount=%d",this,r,iMaxPageCount,iReservedPageCount));
1.2490 + return r;
1.2491 + }
1.2492 +
1.2493 +
1.2494 +EXPORT_C void DDemandPagingLock::Free()
1.2495 + {
1.2496 +	TRACEP(("DDemandPagingLock[0x%08x]::Free()",this));
1.2497 + Unlock();
1.2498 + NKern::ThreadEnterCS();
1.2499 + DVirtualPinMapping* pinMapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&iPinMapping, 0);
1.2500 + if (pinMapping)
1.2501 + pinMapping->Close();
1.2502 + NKern::ThreadLeaveCS();
1.2503 + ThePager.UnreservePages((TUint&)iReservedPageCount);
1.2504 + }
1.2505 +
1.2506 +
1.2507 +EXPORT_C TInt DDemandPagingLock::Lock(DThread* aThread, TLinAddr aStart, TInt aSize)
1.2508 + {
1.2509 +// TRACEP(("DDemandPagingLock[0x%08x]::Lock(0x%08x,0x%08x,0x%08x)",this,aThread,aStart,aSize));
1.2510 + if(iLockedPageCount)
1.2511 + __NK_ASSERT_ALWAYS(0); // lock already used
1.2512 +
1.2513 + // calculate the number of pages that need to be locked...
1.2514 + TUint mask=KPageMask;
1.2515 + TUint offset=aStart&mask;
1.2516 + TInt numPages = (aSize+offset+mask)>>KPageShift;
1.2517 + if(numPages>iMaxPageCount)
1.2518 + __NK_ASSERT_ALWAYS(0);
1.2519 +
1.2520 + NKern::ThreadEnterCS();
1.2521 +
1.2522 + // find mapping which covers the specified region...
1.2523 + TUint offsetInMapping;
1.2524 + TUint mapInstanceCount;
1.2525 + DMemoryMapping* mapping = MM::FindMappingInThread((DMemModelThread*)aThread, aStart, aSize, offsetInMapping, mapInstanceCount);
1.2526 + if(!mapping)
1.2527 + {
1.2528 + NKern::ThreadLeaveCS();
1.2529 + return KErrBadDescriptor;
1.2530 + }
1.2531 +
1.2532 + MmuLock::Lock();
1.2533 + DMemoryObject* memory = mapping->Memory();
1.2534 + if(mapInstanceCount != mapping->MapInstanceCount() || !memory)
1.2535 + {// Mapping has been reused or no memory.
1.2536 + MmuLock::Unlock();
1.2537 + mapping->Close();
1.2538 + NKern::ThreadLeaveCS();
1.2539 + return KErrBadDescriptor;
1.2540 + }
1.2541 +
1.2542 + if(!memory->IsDemandPaged())
1.2543 + {
1.2544 + // memory not demand paged, so we have nothing to do...
1.2545 + MmuLock::Unlock();
1.2546 + mapping->Close();
1.2547 + NKern::ThreadLeaveCS();
1.2548 + return KErrNone;
1.2549 + }
1.2550 +
1.2551 + // Open a reference on the memory so it doesn't get deleted.
1.2552 + memory->Open();
1.2553 + MmuLock::Unlock();
1.2554 +
1.2555 + // pin memory...
1.2556 + TUint index = (offsetInMapping>>KPageShift)+mapping->iStartIndex;
1.2557 + TUint count = ((offsetInMapping&KPageMask)+aSize+KPageMask)>>KPageShift;
1.2558 + TInt r = ((DVirtualPinMapping*)iPinMapping)->Pin( memory,index,count,mapping->Permissions(),
1.2559 + mapping, mapInstanceCount);
1.2560 +
1.2561 + if(r==KErrNotFound)
1.2562 + {
1.2563 + // some memory wasn't present, so treat this as an error...
1.2564 + memory->Close();
1.2565 + mapping->Close();
1.2566 + NKern::ThreadLeaveCS();
1.2567 + return KErrBadDescriptor;
1.2568 + }
1.2569 +
1.2570 + // we can't fail to pin otherwise...
1.2571 + __NK_ASSERT_DEBUG(r!=KErrNoMemory); // separate OOM assert to aid debugging
1.2572 + __NK_ASSERT_ALWAYS(r==KErrNone);
1.2573 +
1.2574 + // indicate that we have actually pinned...
1.2575 + __NK_ASSERT_DEBUG(iLockedPageCount==0);
1.2576 + iLockedPageCount = count;
1.2577 +
1.2578 + // cleanup...
1.2579 + memory->Close();
1.2580 + mapping->Close();
1.2581 + NKern::ThreadLeaveCS();
1.2582 +
1.2583 + return 1;
1.2584 + }
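+
+// A device driver typically uses the lock as follows (a minimal sketch; the
+// thread, address and size come from the client request being serviced):
+//
+//   DDemandPagingLock iLock;
+//   ...
+//   TInt r = iLock.Alloc(aMaxTransferSize);   // once, at channel creation
+//   ...
+//   r = iLock.Lock(aClientThread, aBufferAddr, aBufferSize);
+//   if(r>0)
+//       { /* paged memory is now pinned; safe to access with locks held */ }
+//   else if(r==KErrNone)
+//       { /* memory was not demand paged; nothing needed pinning */ }
+//   iLock.Unlock();                           // after the transfer completes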
1.2585 +
1.2586 +
1.2587 +EXPORT_C void DDemandPagingLock::DoUnlock()
1.2588 + {
1.2589 + NKern::ThreadEnterCS();
1.2590 + ((DVirtualPinMapping*)iPinMapping)->Unpin();
1.2591 + __NK_ASSERT_DEBUG(iLockedPageCount);
1.2592 + iLockedPageCount = 0;
1.2593 + NKern::ThreadLeaveCS();
1.2594 + }
1.2595 +
1.2596 +