os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mpagearray.cpp
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//

#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"

#include "mpagearray.h"
#include "mslaballoc.h"


static RStaticSlabAllocator<RPageArray::TSegment,KPageArraySegmentBase,KPageArraySegmentEnd> PageSegmentAllocator;


//
// RPageArray::TSegment
//

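/**
Allocate a new segment from the slab allocator and initialise it:
lock count one, alloc count zero, and every page entry set to EEmptyEntry.

@return The new segment, or the null pointer if allocation failed.
*/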
RPageArray::TSegment* RPageArray::TSegment::New()
	{
	__NK_ASSERT_DEBUG(!MmuLock::IsHeld());

	// allocate segment...
	TSegment* s = PageSegmentAllocator.Alloc();
	if(!s)
		return s;

	// initialise segment...
	s->iCounts = 1; // lock count = 1, alloc count = 0
	TPhysAddr* p = s->iPages;
	TPhysAddr* pEnd = p+KPageArraySegmentSize;
	TPhysAddr nullPage = EEmptyEntry;
	do *p++ = nullPage;
	while(p<pEnd);

	return s;
	}


RPageArray::TSegment* RPageArray::TSegment::Delete(TSegment* aSegment)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aSegment->iCounts==0);
#ifdef _DEBUG
	TPhysAddr* p = aSegment->iPages;
	TPhysAddr* pEnd = p+KPageArraySegmentSize;
	do
		{
		TPhysAddr a = *p++;
		if(IsPresent(a))
			{
			Kern::Printf("TSegment Delete with allocated pages! [%d]=0x%08x",p-aSegment->iPages-1,a);
			__NK_ASSERT_DEBUG(0);
			}
		}
	while(p<pEnd);
#endif
	PageSegmentAllocator.Free(aSegment);
	return 0;
	}

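/**
Increment this segment's lock count.

@param aCount Amount to add to the lock count.
*/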
FORCE_INLINE void RPageArray::TSegment::Lock(TUint aCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__e32_atomic_add_ord32(&iCounts, (TUint32)aCount);
	__NK_ASSERT_DEBUG((iCounts&KPageArraySegmentLockCountMask));
	}


/**
@return True if segment still exists, false if segment was deleted.
*/
TBool RPageArray::TSegment::Unlock(TSegment*& aSegment, TUint aCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());

	TSegment* s = aSegment;
	__NK_ASSERT_DEBUG(s);

	TUint oldCounts = (TUint)__e32_atomic_add_ord32(&s->iCounts, (TUint32)-(TInt)aCount);
	__NK_ASSERT_DEBUG(oldCounts&KPageArraySegmentLockCountMask); // lock count must have been non-zero before decrementing

#ifdef _DEBUG
	if((oldCounts&KPageArraySegmentLockCountMask)==aCount)
		{
		// check alloc count is consistent...
		TUint allocCount = s->iCounts>>KPageArraySegmentAllocCountShift;
		__NK_ASSERT_DEBUG(allocCount<=KPageArraySegmentSize);
		TUint realAllocCount = 0;
		TPhysAddr* p = s->iPages;
		TPhysAddr* pEnd = p+KPageArraySegmentSize;
		do
			{
			if(IsPresent(*p++))
				++realAllocCount;
			}
		while(p<pEnd);
		if(realAllocCount!=allocCount)
			{
			Kern::Printf("TSegment::Unlock alloc count mismatch %u!=%u",realAllocCount,allocCount);
			__NK_ASSERT_DEBUG(0);
			}
		}
#endif

	if(oldCounts>aCount)
		return oldCounts; // return 'true' to indicate segment still exists

	// delete segment...
	aSegment = 0;
	return (TBool)Delete(s); // returns 'false'
	}

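/**
Adjust this segment's allocation count, i.e. the number of present page
entries it contains. The segment must already be locked, as asserted in
debug builds.

@param aDelta Amount to add to the allocation count (may be negative).
*/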
FORCE_INLINE void RPageArray::TSegment::AdjustAllocCount(TInt aDelta)
	{
	__NK_ASSERT_DEBUG((iCounts&KPageArraySegmentLockCountMask));
	__e32_atomic_add_ord32(&iCounts, TUint32(aDelta)<<KPageArraySegmentAllocCountShift);
	}


#ifdef _DEBUG
void RPageArray::TSegment::Dump()
	{
	TUint allocCount = iCounts>>KPageArraySegmentAllocCountShift;
	TUint lockCount = iCounts&KPageArraySegmentLockCountMask;
	Kern::Printf("RPageArray::TSegment[0x%08x]::Dump() allocCount=%d lockCount=%d",this,allocCount,lockCount);
	for(TUint i=0; i<KPageArraySegmentSize; i+=4)
		Kern::Printf("  %08x %08x %08x %08x",iPages[i+0],iPages[i+1],iPages[i+2],iPages[i+3]);
	}
#endif


//
// RPageArray::TIter
//

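/**
Get a pointer to the page entries at this iterator's current position.
The iterator is not advanced.

@param aStart    Receives the address of the first page entry.
@param aMaxCount Maximum number of page entries required.

@return The number of page entries available at aStart, limited by aMaxCount,
        the end of the iterated region, and the end of the current segment;
        zero if the iterator has reached the end of its region.
*/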
TUint RPageArray::TIter::Pages(TPhysAddr*& aStart, TUint aMaxCount)
	{
	// MmuLock *may* be needed, depending on whether the segments have been locked

	TUint index = iIndex;
	TUint size = iEndIndex-index;
	if(!size)
		return 0;

	TUint offset = index&KPageArraySegmentMask;
	aStart = iSegments[index>>KPageArraySegmentShift]->iPages+offset;

	TUint n = KPageArraySegmentSize-offset;
	if(n>aMaxCount)
		n = aMaxCount;
	if(n>size)
		n = size;
	return n;
	}

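/**
Find the next run of empty (not-present) entries in this iterator's range
and advance the iterator past them.

@param aPageList An iterator which is set to the run of empty entries found.

@return The number of empty entries found; zero if none remain.
*/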
TUint RPageArray::TIter::AddFind(TIter& aPageList)
	{
	TRACE2(("RPageArray::TIter::AddFind range 0x%x..0x%x",iIndex,iEndIndex));

	TUint index = iIndex;
	TUint endIndex = iEndIndex;
	if(index==endIndex)
		{
nothing_found:
		aPageList.iIndex = endIndex;
		aPageList.iEndIndex = endIndex;
		TRACE2(("RPageArray::TIter::AddFind returns 0x%x+0x%x",iEndIndex,0));
		return 0;
		}

	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
	TPhysAddr* p;
	TUint limit;

	MmuLock::Lock();

	// scan for empty entries...
	do
		{
		// get segment...
		p = (*pS++)->iPages+(index&KPageArraySegmentMask);
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		limit = (nextIndex<endIndex) ? nextIndex : endIndex;
		// scan segment...
		do
			{
			TPhysAddr page = *p;
			if(!IsPresent(page))
				goto find_start;
#ifdef _DEBUG
			if(State(page)!=ECommitted)
				{
				Kern::Printf("RPageArray::TIter::AddFind found unexpected page: %x",page);
				__NK_ASSERT_DEBUG(0);
				// *p = (page&~(EStateMask|EVetoed))|ECommitted; // mark page as allocated again
				}
#endif
			++p;
			}
		while(++index<limit);

		MmuLock::Flash();
		}
	while(index<endIndex);

	MmuLock::Unlock();
	goto nothing_found;

find_start:
	TUint startIndex = index;
	// scan for end of empty region...
	for(;;)
		{
		// scan segment...
		do
			{
			if(IsPresent(*p++))
				goto find_end;
			}
		while(++index<limit);
		// check for end...
		if(index>=endIndex)
			break;
		MmuLock::Flash();
		// get next segment...
		p = (*pS++)->iPages;
		TUint nextIndex = index+KPageArraySegmentSize;
		limit = (nextIndex<endIndex) ? nextIndex : endIndex;
		}

find_end:
	MmuLock::Unlock();

	aPageList.iSegments = iSegments;
	aPageList.iIndex = startIndex;
	aPageList.iEndIndex = index;

	iIndex = index;
	TUint n = index-startIndex;
	TRACE2(("RPageArray::TIter::AddFind returns 0x%x+0x%x",startIndex,n));
	return n;
	}

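/**
Add pages to the array at this iterator's current position, advancing it by
aCount. Each entry is set to the corresponding physical address from aPages
with state ECommitted; the entries must be ones which AddFind found empty.

@param aCount The number of pages to add.
@param aPages The physical address of each page, which must be page aligned.
*/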
void RPageArray::TIter::Add(TUint aCount, TPhysAddr* aPages)
	{
	// MmuLock NOT required because...
	// 1. AddStart has ensured all segments are allocated and locked (so they can't be deleted)
	// 2. AddFind returns an unallocated region. This can only be changed by Adding pages
	//    and we only allow one thread to do this at a time (i.e. the thread calling this function.)

	TRACE2(("RPageArray::TIter::Add 0x%x+0x%x",iIndex,aCount));
	__NK_ASSERT_DEBUG(aCount);

	TUint index = iIndex;
	TUint endIndex = index+aCount;
	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
	do
		{
		// get segment...
		TSegment* s = *pS++;
		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

		// add pages to segment...
		s->AdjustAllocCount(limit-index);
		do
			{
			__NK_ASSERT_DEBUG((*aPages&KPageMask)==0);
			__NK_ASSERT_DEBUG(!IsPresent(*p)); // AddFind only found not-present entries
			*p++ = *aPages++|ECommitted;
			}
		while(++index<limit);
		}
	while(index<endIndex);

	iIndex = index;
	}


void RPageArray::TIter::AddContiguous(TUint aCount, TPhysAddr aPhysAddr)
	{
	// MmuLock NOT required because...
	// 1. AddStart has ensured all segments are allocated and locked (so they can't be deleted)
	// 2. AddFind returns an unallocated region. This can only be changed by Adding pages
	//    and we only allow one thread to do this at a time (i.e. the thread calling this function.)

	TRACE2(("RPageArray::TIter::AddContiguous 0x%x+0x%x",iIndex,aCount));
	__NK_ASSERT_DEBUG(aCount);
	__NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);

	TUint index = iIndex;
	TUint endIndex = index+aCount;
	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

	do
		{
		// get segment...
		TSegment* s = *pS++;
		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

		// add pages to segment...
		s->AdjustAllocCount(limit-index);
		do
			{
			__NK_ASSERT_DEBUG(!IsPresent(*p)); // AddFind only found not-present entries
			*p++ = aPhysAddr|ECommitted;
			aPhysAddr += KPageSize;
			}
		while(++index<limit);
		}
	while(index<endIndex);

	iIndex = index;
	}

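/**
Update the current segment's allocation count after the caller has changed
aChanged of the next aCount page entries itself, then skip the iterator past
all aCount entries. The entries must all lie within a single segment, as
asserted in debug builds.
*/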
void RPageArray::TIter::Added(TUint aCount, TUint aChanged)
	{
	__NK_ASSERT_DEBUG(aCount);
	__NK_ASSERT_DEBUG(aChanged<=aCount);
	TUint index = iIndex;
	__NK_ASSERT_DEBUG((index>>KPageArraySegmentShift)==((index+aCount-1)>>KPageArraySegmentShift));
	TSegment* s = iSegments[index>>KPageArraySegmentShift];
	__NK_ASSERT_DEBUG(s);
	__NK_ASSERT_DEBUG(s->iCounts&KPageArraySegmentLockCountMask);
	s->AdjustAllocCount(aChanged);
	Skip(aCount);
	}

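/**
Find the next run of present entries in this iterator's range. The iterator
is repositioned at the start of the run, and each segment containing found
entries has its lock count incremented so it can't be deleted; FindRelease
releases the locks again.

@param aPageList An iterator which is set to the run of entries found.

@return The number of entries found; zero if none remain.
*/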
TUint RPageArray::TIter::Find(TIter& aPageList)
	{
	TRACE2(("RPageArray::TIter::Find range 0x%x..0x%x",iIndex,iEndIndex));

	MmuLock::Lock();
	TUint index = iIndex;
	TUint endIndex = iEndIndex;
	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
	TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;

	// search for first page...
	while(index<endIndex)
		{
		TSegment* s = *pS;
		if(!s)
			index = nextIndex;
		else
			{
			// search segment...
			TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
			TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
			do
				{
				if(RPageArray::IsPresent(*p++))
					goto start_done;
				}
			while(++index<limit);
			}
		// next segment...
		MmuLock::Flash();
		++pS;
		nextIndex = index+KPageArraySegmentSize;
		}
start_done:
	// we can't flash or release the MmuLock until we've Locked the segment we found!
	iIndex = index;

	// search for range of allocated pages...
	while(index<endIndex)
		{
		// check first entry...
		TSegment* s = *pS;
		if(!s)
			break;
		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
		if(!RPageArray::IsPresent(*p++))
			break;

		// segment has pages, lock it...
		s->Lock();

		// scan rest of entries...
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
		while(++index<limit)
			if(!RPageArray::IsPresent(*p++))
				goto done;

		// next segment...
		MmuLock::Flash();
		++pS;
		}
done:
	MmuLock::Unlock();

	aPageList.iSegments = iSegments;
	aPageList.iIndex = iIndex;
	aPageList.iEndIndex = index;
	TUint n = index-iIndex;
	TRACE2(("RPageArray::TIter::Find returns 0x%x+0x%x",iIndex,n));
	return n;
	}


void RPageArray::TIter::FindRelease(TUint aCount)
	{
	TUint index = iIndex;
	Skip(aCount);
	RPageArray::Release(iSegments,index,aCount);
	}

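/**
Find the next run of entries in this iterator's range whose state is
EDecommitting or higher, moving each one to the EDecommitting state and
locking the segments which contain them. The iterator is repositioned at
the start of the run.

@param aPageList An iterator which is set to the run of entries found.

@return The number of entries found; zero if none remain.
*/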
TUint RPageArray::TIter::RemoveFind(TIter& aPageList)
	{
	TRACE2(("RPageArray::TIter::RemoveFind range 0x%x..0x%x",iIndex,iEndIndex));

	MmuLock::Lock();

	TUint index = iIndex;
	TUint endIndex = iEndIndex;
	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
	TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;

	// search for first page...
	while(index<endIndex)
		{
		TSegment* s = *pS;
		if(!s)
			index = nextIndex;
		else
			{
			// search segment...
			TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
			TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
			do
				{
				if(State(*p++)>=EDecommitting)
					goto start_done;
				}
			while(++index<limit);
			}

		// next segment...
		MmuLock::Flash();
		++pS;
		nextIndex = index+KPageArraySegmentSize;
		}
start_done:
	// we can't flash or release the MmuLock until we've Locked the segment we found!
	iIndex = index;

	// search for range of allocated pages, marking them EDecommitting...
	while(index<endIndex)
		{
		// check first entry...
		TSegment* s = *pS;
		if(!s)
			break;
		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
		TPhysAddr page = *p++;
		if(State(page)<EDecommitting)
			break;

		p[-1] = (page&~EStateMask)|EDecommitting;

		// segment has pages, lock it...
		s->Lock();

		// scan rest of entries...
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
		while(++index<limit)
			{
			TPhysAddr page = *p++;
			if(State(page)<EDecommitting)
				goto done;
			p[-1] = (page&~EStateMask)|EDecommitting;
			}

		// next segment...
		MmuLock::Flash();
		++pS;
		}
done:
	MmuLock::Unlock();

	aPageList.iSegments = iSegments;
	aPageList.iIndex = iIndex;
	aPageList.iEndIndex = index;
	TUint n = index-iIndex;
	TRACE2(("RPageArray::TIter::RemoveFind returns 0x%x+0x%x",iIndex,n));
	return n;
	}

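/**
Remove pages which are in the EDecommitting or EDecommitted state from the
array, setting their entries to EEmptyEntry and storing their physical
addresses in aPages. Entries with the EUnmapVetoed flag set are not removed;
they are returned to the EDecommitted state with the veto flag cleared.

@param aMaxCount The maximum number of pages to remove.
@param aPages    Array to receive the physical addresses of the removed pages.

@return The number of pages stored in aPages.
*/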
TUint RPageArray::TIter::Remove(TUint aMaxCount, TPhysAddr* aPages)
	{
	TRACE2(("RPageArray::TIter::Remove 0x%x..0x%x max=0x%x",iIndex,iEndIndex,aMaxCount));

	__NK_ASSERT_DEBUG(aMaxCount);

	TUint count = 0;
	TUint index = iIndex;
	TUint endIndex = iEndIndex;
	if(index==endIndex)
		return 0;

	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

	MmuLock::Lock();

	do
		{
		// get segment...
		TSegment* s = *pS++;
		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

		// remove pages from segment...
		do
			{
			TPhysAddr page = *p++;
			__NK_ASSERT_DEBUG(State(page)!=EStealing); // can't be stealing as that only happens with the RamAllocLock held, which we should already hold if freeing demand paged pages
			if(State(page)==EDecommitting || State(page)==EDecommitted)
				{
				// remove a page...
				if(page&EUnmapVetoed)
					{
					p[-1] = (page&~(EUnmapVetoed|EStateMask))|EDecommitted; // change to EDecommitted state
					}
				else
					{
					p[-1] = EEmptyEntry;
					s->AdjustAllocCount(-1);
					TPhysAddr pagePhys = page&~KPageMask;
					aPages[count++] = pagePhys;
					TRACE2(("RPageArray::TIter::Remove index=0x%x returns 0x%08x",index,pagePhys));
					if(count>=aMaxCount)
						{
						++index;
						goto done;
						}
					}
				// check not removing managed pages without the RamAllocLock...
				__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()
					|| SPageInfo::FromPhysAddr(page)->Type()!=SPageInfo::EManaged);
				}
			}
		while(++index<limit);

		MmuLock::Flash();
		}
	while(index<endIndex);

done:
	MmuLock::Unlock();
	iIndex = index;
	return count;
	}

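/**
Veto any pending unmap of the pages in this iterator's range by setting the
EUnmapVetoed flag on every present entry whose target state is decommitted.
*/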
void RPageArray::TIter::VetoUnmap()
	{
	TUint index = iIndex;
	TUint endIndex = iEndIndex;
	if(index==endIndex)
		return;

	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

	MmuLock::Lock();

	do
		{
		// get segment...
		TSegment* s = *pS++;
		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

		// veto pages in segment...
		do
			{
			TPhysAddr page = *p++;
			TRACE2(("RPageArray::TIter::VetoUnmap() yes/no=%d page=0x%08x",IsPresent(page) && TargetStateIsDecommitted(page),page));
			if(IsPresent(page) && TargetStateIsDecommitted(page))
				p[-1] = page|EUnmapVetoed;
			}
		while(++index<limit);

		MmuLock::Flash();
		}
	while(index<endIndex);

	MmuLock::Unlock();
	}


void RPageArray::TIter::VetoRestrict(TBool aPageMoving)
	{
	TUint index = iIndex;
	TUint endIndex = iEndIndex;
	if(index==endIndex)
		return;

	RPageArray::TState operation = aPageMoving ? RPageArray::EMoving : RPageArray::ERestrictingNA;

	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

	MmuLock::Lock();

	do
		{
		// get segment...
		TSegment* s = *pS++;
		TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

		// veto pages in segment...
		do
			{
			TPhysAddr page = *p++;
			TRACE2(("RPageArray::TIter::VetoRestrict() yes/no=%d page=0x%08x",State(page)==operation,page));
			if(State(page)==operation)
				{
				// to veto a 'restrict page' operation, we put the page back into the committed state...
				p[-1] = (page&~EStateMask)|ECommitted;
				}
			}
		while(++index<limit);

		MmuLock::Flash();
		}
	while(index<endIndex);

	MmuLock::Unlock();
	}


FORCE_INLINE void RPageArray::TIter::Set(RPageArray::TSegment** aSegments, TUint aIndex, TUint aEndIndex)
	{
	iSegments = aSegments;
	iIndex = aIndex;
	iEndIndex = aEndIndex;
	}


//
// RPageArray
//

void RPageArray::Init2A()
	{
	TInt r = PageSegmentAllocator.Construct();
	__NK_ASSERT_ALWAYS(r==KErrNone);
	}


void RPageArray::Init2B(DMutex* aLock)
	{
	// construct memory object for slabs...
	DMemoryObject* memory;
	TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
	TMemoryAttributes memAttr = EMemoryAttributeStandard;
	TInt r = MM::InitFixedKernelMemory(memory, KPageArraySegmentBase, KPageArraySegmentEnd, KPageSize, EMemoryObjectUnpaged, EMemoryCreateNoWipe, memAttr, mapFlags);
	__NK_ASSERT_ALWAYS(r==KErrNone);
	MM::MemorySetLock(memory,aLock);
	PageSegmentAllocator.SetMemory(memory,1);
	}


RPageArray::RPageArray()
	{
	__NK_ASSERT_DEBUG(!iSegments);
	}


TInt RPageArray::Construct(TUint aMaxPages, TBool aPreallocateMemory)
	{
	iNumSegments = (aMaxPages+KPageArraySegmentMask)>>KPageArraySegmentShift;
	iSegments = (TSegment**)Kern::AllocZ(iNumSegments*sizeof(TSegment*));
	if(!iSegments)
		return KErrNoMemory;

	if(!aPreallocateMemory)
		return KErrNone;

	return PreallocateMemory();
	}


TInt RPageArray::PreallocateMemory()
	{
	MmuLock::Lock();

	__NK_ASSERT_DEBUG(!iPreallocatedMemory);
	iPreallocatedMemory = true;

	TSegment** pS = iSegments;
	TSegment** pGEnd = pS+iNumSegments;
	do
		{
		if(!GetOrAllocateSegment(pS,1))
			{
			iNumSegments = pS-iSegments; // truncate to amount successfully allocated
			MmuLock::Unlock();
			return KErrNoMemory;
			}
		}
	while(++pS<pGEnd);

	MmuLock::Unlock();
	return KErrNone;
	}


RPageArray::~RPageArray()
	{
	TSegment** pS = iSegments;
	if(pS)
		{
		TSegment** pGEnd = pS+iNumSegments;
		if(!iPreallocatedMemory)
			{
			// check all segments have already been deleted...
			while(pS<pGEnd)
				{
#ifdef _DEBUG
				if(*pS)
					(*pS)->Dump();
#endif
				__NK_ASSERT_DEBUG(!*pS);
				++pS;
				}
			}
		else
			{
			MmuLock::Lock();
			while(pS<pGEnd)
				{
				__NK_ASSERT_DEBUG(*pS);
				TSegment::Unlock(*pS);
#ifdef _DEBUG
				if(*pS)
					(*pS)->Dump();
#endif
				__NK_ASSERT_DEBUG(!*pS);
				TRACE2(("RPageArray::~RPageArray delete segment=%d",pS-iSegments));
				++pS;
				if(pS<pGEnd)
					MmuLock::Flash();
				}
			MmuLock::Unlock();
			}

		Kern::Free(iSegments);
		}
	}

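/**
Get the segment at aSegmentEntry, allocating a new one if none is present,
and add aLockCount to its lock count. (A newly allocated segment starts with
a lock count of one, so only aLockCount-1 is added in that case.) The MmuLock
is released and regained if a new segment has to be allocated.

@return The segment, or the null pointer if one couldn't be allocated.
*/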
RPageArray::TSegment* RPageArray::GetOrAllocateSegment(TSegment** aSegmentEntry, TUint aLockCount)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aLockCount);

	for(;;)
		{
		TSegment* s = *aSegmentEntry;
		if(s)
			{
			s->Lock(aLockCount);
			return s;
			}

		// no segment, so allocate one...
		MmuLock::Unlock();
		s = TSegment::New();
		MmuLock::Lock();
		if(!s)
			return s;

		// if someone else allocated one...
		if(*aSegmentEntry)
			{
			// free the one we created...
			TSegment::Unlock(s);
			// and retry...
			continue;
			}

		// use new segment...
		TRACE2(("RPageArray::GetOrAllocateSegment new segment=%d",aSegmentEntry-iSegments));
		*aSegmentEntry = s;
		if(--aLockCount)
			s->Lock(aLockCount);
		return s;
		}
	}

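/**
Ensure that segments exist for the aCount page entries starting at aIndex,
locking each segment once for every entry it covers. If a segment can't be
allocated, any locks already taken by this call are released again.
The locks are removed with Free.

@return KErrNone if successful, otherwise KErrNoMemory.
*/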
TInt RPageArray::Alloc(TUint aIndex, TUint aCount)
	{
	TRACE2(("RPageArray::Alloc(0x%x,0x%x)",aIndex,aCount));
	__NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
	__NK_ASSERT_DEBUG(aIndex+aCount>=aIndex);

	MmuLock::Lock();

	TUint index = aIndex;
	TUint endIndex = aIndex+aCount;
	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
	while(index<endIndex)
		{
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
		TUint lockCount = limit-index;
		index = limit;
		TSegment* s = GetOrAllocateSegment(pS++,lockCount);
		if(!s)
			goto no_memory;
		}

	MmuLock::Unlock();
	return KErrNone;

no_memory:
	MmuLock::Unlock();

	// free what we actually allocated...
	endIndex = index&~KPageArraySegmentMask;
	Free(aIndex,endIndex-aIndex);

	return KErrNoMemory;
	}


void RPageArray::Free(TUint aIndex, TUint aCount)
	{
	TRACE2(("RPageArray::Free(0x%x,0x%x)",aIndex,aCount));
	__NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
	__NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

	MmuLock::Lock();

	TUint index = aIndex;
	TUint endIndex = aIndex+aCount;
	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
	while(index<endIndex)
		{
		__NK_ASSERT_DEBUG(*pS);
		TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
		TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
		TSegment::Unlock(*pS,limit-index);
		index = limit;
		++pS;
		}

	MmuLock::Unlock();
	}

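/**
Prepare to add pages to the region of aCount entries starting at aIndex:
allocate any missing segments, lock every segment in the region once, and
set aIter to iterate over the region. AddEnd releases the locks once the
add operation is complete.

@param aAllowExisting True if the region may already contain committed pages;
                      if false, KErrAlreadyExists is returned when any entry
                      in the region is present.

@return KErrNone if successful, KErrNoMemory if a segment couldn't be
        allocated, or KErrAlreadyExists as described above. On failure,
        any segments locked by this call are released again.
*/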
TInt RPageArray::AddStart(TUint aIndex, TUint aCount, TIter& aIter, TBool aAllowExisting)
	{
	TRACE2(("RPageArray::AddStart(0x%x,0x%x,?,%d)",aIndex,aCount,aAllowExisting));
	__NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
	__NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

	aIter.Set(iSegments,aIndex,aIndex+aCount);

	MmuLock::Lock();

	TInt r;
	TUint index = aIndex;
	TUint endIndex = aIndex+aCount;
	TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
	while(index<endIndex)
		{
		TSegment* s = *pS;
		if(!s)
			{
			// no segment, so allocate one...
			MmuLock::Unlock();
			s = TSegment::New();
			MmuLock::Lock();
			if(!s)
				goto no_memory;

			// if someone else allocated one...
			if(*pS)
				{
				// free the one we created...
				TSegment::Unlock(s);
				// and retry...
				continue;
				}

			// use new segment...
			TRACE2(("RPageArray::AddStart new segment=%d",pS-iSegments));
			*pS = s;

			// move on...
			index = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
			}
		else
			{
			TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
			if(aAllowExisting)
				{
				// just move on to next segment...
				index = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
				}
			else
				{
				// check page entries are empty...
				TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
				TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
				do
					{
					if(IsPresent(*p++))
						goto already_exists;
					}
				while(++index<limit);
				}
			// lock segment so that it doesn't go away...
			s->Lock();

			if(index<endIndex)
				MmuLock::Flash();
			}
		++pS;
		}

	// done...
	MmuLock::Unlock();
	return KErrNone;

no_memory:
	r = KErrNoMemory;
	goto error;
already_exists:
	r = KErrAlreadyExists;
error:
	MmuLock::Unlock();

	// unlock any segments that we locked...
	endIndex = index&~KPageArraySegmentMask;
	if(endIndex>aIndex)
		Release(iSegments,aIndex,endIndex-aIndex);

	// return error...
	return r;
	}


void RPageArray::AddEnd(TUint aIndex, TUint aCount)
	{
	Release(iSegments,aIndex,aCount);
	}


void RPageArray::FindStart(TUint aIndex, TUint aCount, TIter& aIter)
	{
	TRACE2(("RPageArray::FindStart(0x%x,0x%x,?)",aIndex,aCount));
	__NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
	__NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

	aIter.Set(iSegments,aIndex,aIndex+aCount);
	}

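/**
Unlock each segment covering the aCount page entries which start at aIndex,
deleting any segment whose count reaches zero.
*/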
void RPageArray::Release(TSegment** aSegments, TUint aIndex, TUint aCount)
	{
	__NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

	MmuLock::Lock();

	TSegment** pS = aSegments+(aIndex>>KPageArraySegmentShift);
	TSegment** pGLast = aSegments+((aIndex+aCount-1)>>KPageArraySegmentShift);
	__NK_ASSERT_DEBUG(pS<=pGLast);
	TUint flash = 0;
	do
		{
		MmuLock::Flash(flash,KMaxPagesInOneGo);
		if(TSegment::Unlock(*pS)==0)
			{
			TRACE2(("RPageArray::Release delete segment=%d",pS-aSegments));
			}
		++pS;
		}
	while(pS<=pGLast);

	MmuLock::Unlock();
	}


TPhysAddr* RPageArray::AddPageStart(TUint aIndex, TIter& aPageList)
	{
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	MmuLock::Lock();
	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = GetOrAllocateSegment(pS,1);
	MmuLock::Unlock();

	if(!s)
		return 0;

	aPageList.Set(iSegments,aIndex,aIndex+1);

	return s->iPages+(aIndex&KPageArraySegmentMask);
	}


TPhysAddr* RPageArray::RemovePageStart(TUint aIndex, TIter& aPageList)
	{
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	MmuLock::Lock();

	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	if(!s)
		{
		MmuLock::Unlock();
		return 0;
		}

	TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
	TPhysAddr page = *p;
	if(State(page)<EDecommitting)
		{
		MmuLock::Unlock();
		return 0;
		}

	*p = (page&~EStateMask)|EDecommitting;

	s->Lock();

	MmuLock::Unlock();

	aPageList.Set(iSegments,aIndex,aIndex+1);

	return p;
	}

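/**
Remove the page at the array entry aPageEntry if it is in the EDecommitting
or EDecommitted state. A vetoed entry is not removed; it is returned to the
EDecommitted state with the veto flag cleared.

@return The physical address of the removed page, or KPhysAddrInvalid if no
        page was removed.
*/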
TPhysAddr RPageArray::RemovePage(TPhysAddr* aPageEntry)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	TPhysAddr page = *aPageEntry;
	__NK_ASSERT_DEBUG(State(page)!=EStealing); // can't be stealing as that only happens with the RamAllocLock held, which we should already hold if freeing demand paged pages
	if(State(page)==EDecommitting || State(page)==EDecommitted)
		{
		// remove a page...
		if(page&EUnmapVetoed)
			{
			*aPageEntry = (page&~(EUnmapVetoed|EStateMask))|EDecommitted; // change to EDecommitted state
			}
		else
			{
			*aPageEntry = EEmptyEntry;
			return page&~KPageMask;
			}
		// check not removing managed pages without the RamAllocLock...
		__NK_ASSERT_DEBUG(RamAllocLock::IsHeld()
			|| SPageInfo::FromPhysAddr(page)->Type()!=SPageInfo::EManaged);
		}
	return KPhysAddrInvalid;
	}


TPhysAddr* RPageArray::RestrictPageNAStart(TUint aIndex, TIter& aPageList)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	if(!s)
		return 0;

	TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
	TPhysAddr page = *p;
	if(State(page) < RPageArray::ERestrictingNA)
		return 0;

	*p = (page&~EStateMask) | RPageArray::ERestrictingNA;

	s->Lock();

	aPageList.Set(iSegments,aIndex,aIndex+1);

	return p;
	}


TPhysAddr* RPageArray::StealPageStart(TUint aIndex, TIter& aPageList)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	__NK_ASSERT_DEBUG(s); // we only steal pages in the live list and these can not go away yet because we hold the RamAllocLock

	TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
	TPhysAddr page = *p;

	if(State(page)>EStealing)
		*p = (page&~EStateMask)|EStealing;

	s->Lock();

	aPageList.Set(iSegments,aIndex,aIndex+1);

	return p;
	}


TPhysAddr* RPageArray::MovePageStart(TUint aIndex, TIter& aPageList)
	{
	__NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	// The segment should always exist for a page that is being moved.
	__NK_ASSERT_DEBUG(s);

	TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
	TPhysAddr page = *p;
	if(State(page) <= RPageArray::EMoving)
		return NULL;

	*p = (page & ~EStateMask) | EMoving;

	aPageList.Set(iSegments, aIndex, aIndex+1);

	return p;
	}


void RPageArray::ReleasePage(TUint aIndex, TInt aDelta)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	__NK_ASSERT_DEBUG(s); // must exist because FindPageStart/AddPageStart locked it

	__NK_ASSERT_DEBUG(aDelta>=-1 && aDelta<=1);
	if(aDelta)
		s->AdjustAllocCount(aDelta);

	if(TSegment::Unlock(*pS)==0)
		{
		TRACE2(("RPageArray::ReleasePage delete segment=%d",pS-iSegments));
		}
	}


TPhysAddr RPageArray::Page(TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	if(!s)
		return ENotPresent;
	return s->iPages[aIndex&KPageArraySegmentMask];
	}


TPhysAddr* RPageArray::PageEntry(TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	if(!s)
		return NULL;
	return s->iPages + (aIndex & KPageArraySegmentMask);
	}


TUint RPageArray::PagingManagerData(TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);
	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	__NK_ASSERT_DEBUG(s);
	TPhysAddr* p = &s->iPages[aIndex&KPageArraySegmentMask];

	TPhysAddr entry = *p;
	if(IsPresent(entry))
		{
#ifdef _DEBUG
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(entry&~KPageMask);
		if(!pi)
			Kern::Printf("RPageArray::PagingManagerData bad entry 0x%08x",entry);
		__NK_ASSERT_DEBUG(pi);
#else
		SPageInfo* pi = SPageInfo::FromPhysAddr(entry);
#endif
		entry = pi->PagingManagerData();
		}
	__NK_ASSERT_DEBUG((entry&(EFlagsMask|EStateMask))==ENotPresent);

	return entry>>(EFlagsShift+EStateShift);
	}


void RPageArray::SetPagingManagerData(TUint aIndex, TUint aValue)
	{
	aValue = (aValue<<(EFlagsShift+EStateShift))|ENotPresent;

	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);
	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	__NK_ASSERT_DEBUG(s);
	TPhysAddr* p = &s->iPages[aIndex&KPageArraySegmentMask];

	TPhysAddr entry = *p;
	if(!IsPresent(entry))
		*p = aValue;
	else
		{
#ifdef _DEBUG
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(entry&~KPageMask);
		if(!pi)
			Kern::Printf("RPageArray::SetPagingManagerData bad entry 0x%08x",entry);
		__NK_ASSERT_DEBUG(pi);
#else
		SPageInfo* pi = SPageInfo::FromPhysAddr(entry);
#endif
		pi->SetPagingManagerData(aValue);
		}
	}


TPhysAddr RPageArray::PhysAddr(TUint aIndex)
	{
	__NK_ASSERT_DEBUG(MmuLock::IsHeld());
	__NK_ASSERT_DEBUG(aIndex<iNumSegments*KPageArraySegmentSize);

	TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
	TSegment* s = *pS;
	if(s)
		{
		TPhysAddr page = s->iPages[aIndex&KPageArraySegmentMask];
		if(IsPresent(page))
			{
			return page&~KPageMask;
			}
		}
	return KPhysAddrInvalid;
	}

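/**
Get the physical address of aCount pages starting at index aIndex.

@param aPhysicalAddress  Receives the physical address of the first page if
                         the pages are physically contiguous, otherwise
                         KPhysAddrInvalid.
@param aPhysicalPageList If not the null pointer, receives the physical
                         address of each page.

@return KErrNone if the pages are contiguous, 1 if they are discontiguous
        but a page list was supplied, or KErrNotFound if any page was not
        present.
*/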
TInt RPageArray::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
	{
	__NK_ASSERT_DEBUG(aCount);
	MmuLock::Lock();

	TPhysAddr* pageList = aPhysicalPageList;

	// get first page...
	TPhysAddr physStart = PhysAddr(aIndex++);
	if(physStart==KPhysAddrInvalid)
		{
		MmuLock::Unlock();
		return KErrNotFound;
		}
	if(pageList)
		*pageList++ = physStart;

	TPhysAddr nextPhys = physStart+KPageSize;

	TUint flash = 0;
	while(--aCount)
		{
		MmuLock::Flash(flash,KMaxPagesInOneGo);

		// get next page...
		TPhysAddr phys = PhysAddr(aIndex++);
		if(phys==KPhysAddrInvalid)
			{
			MmuLock::Unlock();
			return KErrNotFound;
			}
		if(pageList)
			*pageList++ = phys;

		// check for contiguity...
		if(phys!=nextPhys)
			nextPhys = KPhysAddrInvalid;
		else
			nextPhys += KPageSize;
		}

	MmuLock::Unlock();

	if(nextPhys==KPhysAddrInvalid)
		{
		// memory is discontiguous...
		if(!aPhysicalPageList)
			return KErrNotFound;
		aPhysicalAddress = KPhysAddrInvalid;
		return 1;
		}
	else
		{
		// memory is contiguous...
		aPhysicalAddress = physStart;
		return KErrNone;
		}
	}