// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
//
#include <plat_priv.h>
#include "mm.h"
#include "mmu.h"
#include "mpagearray.h"
#include "mslaballoc.h"


static RStaticSlabAllocator<RPageArray::TSegment,KPageArraySegmentBase,KPageArraySegmentEnd> PageSegmentAllocator;
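
// Each array entry is a TPhysAddr whose low bits hold the entry's state and
// flags (EStateMask, EFlagsMask); because physical pages are page aligned,
// the upper bits are free to hold the physical address itself. Segments are
// fixed-size blocks of entries carved out of a static slab region by
// PageSegmentAllocator, so a sparse RPageArray only pays for the segments
// which actually contain pages.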

//
// RPageArray::TSegment
//
RPageArray::TSegment* RPageArray::TSegment::New()
    {
    __NK_ASSERT_DEBUG(!MmuLock::IsHeld());

    // allocate segment...
    TSegment* s = PageSegmentAllocator.Alloc();
    if(!s)
        return s;

    // initialise segment with all entries empty...
    s->iCounts = 1; // lock count = 1, alloc count = 0
    TPhysAddr* p = s->iPages;
    TPhysAddr* pEnd = p+KPageArraySegmentSize;
    TPhysAddr nullPage = EEmptyEntry;
    do *p++ = nullPage;
    while(p<pEnd);

    return s;
    }
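
// Note: iCounts packs two counters into a single word which can be updated
// with one atomic operation: the low bits (KPageArraySegmentLockCountMask)
// hold the lock count, which keeps the segment alive, and the bits above
// KPageArraySegmentAllocCountShift hold the alloc count, the number of
// present entries. A new segment therefore starts as 'lock count = 1,
// alloc count = 0', and Unlock() deletes the segment when both reach zero.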
RPageArray::TSegment* RPageArray::TSegment::Delete(TSegment* aSegment)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aSegment->iCounts==0);
#ifdef _DEBUG
    TPhysAddr* p = aSegment->iPages;
    TPhysAddr* pEnd = p+KPageArraySegmentSize;
    do
        {
        TPhysAddr a = *p++;
        if(IsPresent(a))
            {
            Kern::Printf("TSegment Delete with allocated pages! [%d]=0x%08x",p-aSegment->iPages-1,a);
            __NK_ASSERT_DEBUG(0);
            }
        }
    while(p<pEnd);
#endif
    PageSegmentAllocator.Free(aSegment);
    return 0;
    }
FORCE_INLINE void RPageArray::TSegment::Lock(TUint aCount)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __e32_atomic_add_ord32(&iCounts, (TUint32)aCount);
    __NK_ASSERT_DEBUG((iCounts&KPageArraySegmentLockCountMask));
    }
/**
@return True if segment still exists, false if segment was deleted.
*/
TBool RPageArray::TSegment::Unlock(TSegment*& aSegment, TUint aCount)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    TSegment* s = aSegment;
    __NK_ASSERT_DEBUG(s);

    TUint oldCounts = (TUint)__e32_atomic_add_ord32(&s->iCounts, (TUint32)-(TInt)aCount);
    __NK_ASSERT_DEBUG(oldCounts&KPageArraySegmentLockCountMask); // lock count must have been non-zero before decrementing

#ifdef _DEBUG
    if((oldCounts&KPageArraySegmentLockCountMask)==aCount)
        {
        // check alloc count is consistent...
        TUint allocCount = s->iCounts>>KPageArraySegmentAllocCountShift;
        __NK_ASSERT_DEBUG(allocCount<=KPageArraySegmentSize);
        TUint realAllocCount = 0;
        TPhysAddr* p = s->iPages;
        TPhysAddr* pEnd = p+KPageArraySegmentSize;
        do
            {
            if(IsPresent(*p++))
                ++realAllocCount;
            }
        while(p<pEnd);
        if(realAllocCount!=allocCount)
            {
            Kern::Printf("TSegment::Unlock alloc count mismatch %u!=%u",realAllocCount,allocCount);
            __NK_ASSERT_DEBUG(0);
            }
        }
#endif

    if(oldCounts!=aCount)
        return oldCounts; // return 'true' to indicate segment still exists

    // counts have reached zero, so delete segment...
    aSegment = 0;
    return (TBool)Delete(s); // returns 'false'
    }
FORCE_INLINE void RPageArray::TSegment::AdjustAllocCount(TInt aDelta)
    {
    __NK_ASSERT_DEBUG((iCounts&KPageArraySegmentLockCountMask));
    __e32_atomic_add_ord32(&iCounts, TUint32(aDelta)<<KPageArraySegmentAllocCountShift);
    }
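
// Note: a negative aDelta also works here because the addition wraps modulo
// 2^32; shifting the two's-complement representation left by
// KPageArraySegmentAllocCountShift produces exactly the value which, when
// added atomically, decrements the alloc count field.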
void RPageArray::TSegment::Dump()
    {
    TUint allocCount = iCounts>>KPageArraySegmentAllocCountShift;
    TUint lockCount = iCounts&KPageArraySegmentLockCountMask;
    Kern::Printf("RPageArray::TSegment[0x%08x]::Dump() allocCount=%d lockCount=%d",this,allocCount,lockCount);
    for(TUint i=0; i<KPageArraySegmentSize; i+=4)
        Kern::Printf(" %08x %08x %08x %08x",iPages[i+0],iPages[i+1],iPages[i+2],iPages[i+3]);
    }

//
// RPageArray::TIter
//

TUint RPageArray::TIter::Pages(TPhysAddr*& aStart, TUint aMaxCount)
    {
    // MmuLock *may* be needed, depends if segments have been locked

    TUint index = iIndex;
    TUint size = iEndIndex-index;
    if(!size)
        return 0;

    TUint offset = index&KPageArraySegmentMask;
    aStart = iSegments[index>>KPageArraySegmentShift]->iPages+offset;

    TUint n = KPageArraySegmentSize-offset;
    if(n>size)
        n = size;
    if(n>aMaxCount)
        n = aMaxCount;
    return n;
    }
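
// An array index decomposes into (segment, entry) as
//   segment = index >> KPageArraySegmentShift
//   entry   = index &  KPageArraySegmentMask
// e.g. if KPageArraySegmentSize were 16 (an illustrative value only), index
// 37 would be entry 5 of segment 2. Pages() above therefore returns at most
// the run of entries up to the end of the current segment in one go.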
TUint RPageArray::TIter::AddFind(TIter& aPageList)
    {
    TRACE2(("RPageArray::TIter::AddFind range 0x%x..0x%x",iIndex,iEndIndex));

    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    if(index==endIndex)
        {
nothing_found:
        aPageList.iIndex = endIndex;
        aPageList.iEndIndex = endIndex;
        TRACE2(("RPageArray::TIter::AddFind returns 0x%x+0x%x",iEndIndex,0));
        return 0;
        }

    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    TPhysAddr* p;
    TUint limit;

    // scan for empty entries...
    do
        {
        // get segment...
        p = (*pS++)->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // scan segment...
        do
            {
            TPhysAddr page = *p;
            if(!IsPresent(page))
                goto find_start;
#ifdef _DEBUG
            if(State(page)!=ECommitted)
                {
                Kern::Printf("RPageArray::TIter::AddFind found unexpected page: %x",page);
                __NK_ASSERT_DEBUG(0);
                // *p = (page&~(EStateMask|EVetoed))|ECommitted; // mark page as allocated again
                }
#endif
            ++p;
            }
        while(++index<limit);
        }
    while(index<endIndex);
    goto nothing_found;

find_start:
    TUint startIndex = index;
    // scan for end of empty region...
    for(;;)
        {
        // scan rest of segment...
        do
            {
            if(IsPresent(*p++))
                goto find_end;
            }
        while(++index<limit);

        // check for end of region...
        if(index>=endIndex)
            break;

        // get next segment...
        p = (*pS++)->iPages;
        TUint nextIndex = index+KPageArraySegmentSize;
        limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        }

find_end:
    aPageList.iSegments = iSegments;
    aPageList.iIndex = startIndex;
    aPageList.iEndIndex = index;

    iIndex = index;
    TUint n = index-startIndex;
    TRACE2(("RPageArray::TIter::AddFind returns 0x%x+0x%x",startIndex,n));
    return n;
    }
void RPageArray::TIter::Add(TUint aCount, TPhysAddr* aPages)
    {
    // MmuLock NOT required because...
    // 1. AddStart has ensured all segments are allocated and locked (so they can't be deleted)
    // 2. AddFind returns an unallocated region. This can only be changed by Adding pages
    //    and we only allow one thread to do this at a time (i.e. the thread calling this function.)

    TRACE2(("RPageArray::TIter::Add 0x%x+0x%x",iIndex,aCount));
    __NK_ASSERT_DEBUG(aCount);

    TUint index = iIndex;
    TUint endIndex = index+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    do
        {
        // get segment...
        TSegment* s = *pS++;
        __NK_ASSERT_DEBUG(s);
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // add pages to segment...
        s->AdjustAllocCount(limit-index);
        do
            {
            __NK_ASSERT_DEBUG((*aPages&KPageMask)==0);
            __NK_ASSERT_DEBUG(!IsPresent(*p)); // AddFind only found not-present entries
            *p++ = *aPages++|ECommitted;
            }
        while(++index<limit);
        }
    while(index<endIndex);

    iIndex = index;
    }
void RPageArray::TIter::AddContiguous(TUint aCount, TPhysAddr aPhysAddr)
    {
    // MmuLock NOT required because...
    // 1. AddStart has ensured all segments are allocated and locked (so they can't be deleted)
    // 2. AddFind returns an unallocated region. This can only be changed by Adding pages
    //    and we only allow one thread to do this at a time (i.e. the thread calling this function.)

    TRACE2(("RPageArray::TIter::AddContiguous 0x%x+0x%x",iIndex,aCount));
    __NK_ASSERT_DEBUG(aCount);
    __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);

    TUint index = iIndex;
    TUint endIndex = index+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    do
        {
        // get segment...
        TSegment* s = *pS++;
        __NK_ASSERT_DEBUG(s);
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // add pages to segment...
        s->AdjustAllocCount(limit-index);
        do
            {
            __NK_ASSERT_DEBUG(!IsPresent(*p)); // AddFind only found not-present entries
            *p++ = aPhysAddr|ECommitted;
            aPhysAddr += KPageSize;
            }
        while(++index<limit);
        }
    while(index<endIndex);

    iIndex = index;
    }
void RPageArray::TIter::Added(TUint aCount, TUint aChanged)
    {
    __NK_ASSERT_DEBUG(aCount);
    __NK_ASSERT_DEBUG(aChanged<=aCount);
    TUint index = iIndex;
    __NK_ASSERT_DEBUG((index>>KPageArraySegmentShift)==((index+aCount-1)>>KPageArraySegmentShift));
    TSegment* s = iSegments[index>>KPageArraySegmentShift];
    __NK_ASSERT_DEBUG(s);
    __NK_ASSERT_DEBUG(s->iCounts&KPageArraySegmentLockCountMask);
    s->AdjustAllocCount(aChanged);
    }
TUint RPageArray::TIter::Find(TIter& aPageList)
    {
    TRACE2(("RPageArray::TIter::Find range 0x%x..0x%x",iIndex,iEndIndex));

    MmuLock::Lock();

    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;

    // search for first page...
    while(index<endIndex)
        {
        TSegment* s = *pS;
        if(!s)
            index = nextIndex; // whole segment missing, skip it...
        else
            {
            // search segment...
            TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
            TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
            do
                {
                if(RPageArray::IsPresent(*p++))
                    goto found;
                }
            while(++index<limit);
            }
        // next segment...
        ++pS;
        nextIndex = index+KPageArraySegmentSize;
        }
found:
    iIndex = index; // range now starts at the first present page

    // we can't flash or release the MmuLock until we've Locked the segment we found!

    // search for range of allocated pages...
    while(index<endIndex)
        {
        // check first entry...
        TSegment* s = *pS;
        __NK_ASSERT_DEBUG(s);
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        if(!RPageArray::IsPresent(*p++))
            break;

        // segment has pages, lock it...
        s->Lock();

        // scan rest of entries...
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        while(++index<limit)
            if(!RPageArray::IsPresent(*p++))
                goto done;

        // move on to next segment...
        ++pS;
        }
done:
    MmuLock::Unlock();

    aPageList.iSegments = iSegments;
    aPageList.iIndex = iIndex;
    aPageList.iEndIndex = index;
    TInt n = index-iIndex;
    TRACE2(("RPageArray::TIter::Find returns 0x%x+0x%x",iIndex,n));
    return n;
    }
void RPageArray::TIter::FindRelease(TUint aCount)
    {
    TUint index = iIndex;
    Skip(aCount);
    RPageArray::Release(iSegments,index,aCount);
    }
TUint RPageArray::TIter::RemoveFind(TIter& aPageList)
    {
    TRACE2(("RPageArray::TIter::RemoveFind range 0x%x..0x%x",iIndex,iEndIndex));

    MmuLock::Lock();

    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;

    // search for first page...
    while(index<endIndex)
        {
        TSegment* s = *pS;
        if(!s)
            index = nextIndex; // whole segment missing, skip it...
        else
            {
            // search segment...
            TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
            TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
            do
                {
                if(State(*p++)>=EDecommitting)
                    goto found;
                }
            while(++index<limit);
            }
        // next segment...
        ++pS;
        nextIndex = index+KPageArraySegmentSize;
        }
found:
    iIndex = index; // range now starts at the first removable page

    // we can't flash or release the MmuLock until we've Locked the segment we found!

    // search for range of allocated pages, marking them EDecommitting...
    while(index<endIndex)
        {
        // check first entry...
        TSegment* s = *pS;
        __NK_ASSERT_DEBUG(s);
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TPhysAddr page = *p++;
        if(State(page)<EDecommitting)
            break;
        p[-1] = (page&~EStateMask)|EDecommitting;

        // segment has pages, lock it...
        s->Lock();

        // scan rest of entries...
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        while(++index<limit)
            {
            TPhysAddr page = *p++;
            if(State(page)<EDecommitting)
                goto done;
            p[-1] = (page&~EStateMask)|EDecommitting;
            }

        // move on to next segment...
        ++pS;
        }
done:
    MmuLock::Unlock();

    aPageList.iSegments = iSegments;
    aPageList.iIndex = iIndex;
    aPageList.iEndIndex = index;
    TInt n = index-iIndex;
    TRACE2(("RPageArray::TIter::RemoveFind returns 0x%x+0x%x",iIndex,n));
    return n;
    }
TUint RPageArray::TIter::Remove(TUint aMaxCount, TPhysAddr* aPages)
    {
    TRACE2(("RPageArray::TIter::Remove 0x%x..0x%x max=0x%x",iIndex,iEndIndex,aMaxCount));

    __NK_ASSERT_DEBUG(aMaxCount);

    TUint count = 0;
    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    if(index==endIndex)
        return 0;

    MmuLock::Lock();

    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    do
        {
        // get segment...
        TSegment* s = *pS++;
        __NK_ASSERT_DEBUG(s);
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // remove pages from segment...
        do
            {
            TPhysAddr page = *p++;
            __NK_ASSERT_DEBUG(State(page)!=EStealing); // can't be stealing as that only happens with the RamAllocLock held, which we should already hold if freeing demand paged pages
            if(State(page)==EDecommitting || State(page)==EDecommitted)
                {
                // remove a page...
                if(page&EUnmapVetoed)
                    {
                    p[-1] = (page&~(EUnmapVetoed|EStateMask))|EDecommitted; // change to EDecommitted state
                    }
                else
                    {
                    p[-1] = EEmptyEntry;
                    s->AdjustAllocCount(-1);
                    TPhysAddr pagePhys = page&~KPageMask;
                    aPages[count++] = pagePhys;
                    TRACE2(("RPageArray::TIter::Remove index=0x%x returns 0x%08x",index,pagePhys));
                    if(count>=aMaxCount)
                        {
                        ++index;
                        goto done;
                        }
                    }
                // check not removing managed pages without the RamAllocLock...
                __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()
                    || SPageInfo::FromPhysAddr(page)->Type()!=SPageInfo::EManaged);
                }
            }
        while(++index<limit);
        }
    while(index<endIndex);

done:
    MmuLock::Unlock();
    iIndex = index;
    return count;
    }
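
// Removal is a two-phase protocol: RemoveFind() moves present entries to the
// transient EDecommitting state while locking their segments, mappings are
// then removed, and Remove() above frees each page unless some mapping set
// the EUnmapVetoed flag, in which case the entry is merely demoted to
// EDecommitted so that a later pass can retry. The State() comparisons rely
// on the TState values being ordered, with ECommitted the highest value.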
void RPageArray::TIter::VetoUnmap()
    {
    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    if(index==endIndex)
        return;

    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

    MmuLock::Lock();
    do
        {
        // get segment...
        TSegment* s = *pS++;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // veto pages in segment...
        do
            {
            TPhysAddr page = *p++;
            TRACE2(("RPageArray::TIter::VetoUnmap() yes/no=%d page=0x%08x",IsPresent(page) && TargetStateIsDecommitted(page),page));
            if(IsPresent(page) && TargetStateIsDecommitted(page))
                p[-1] = page|EUnmapVetoed;
            }
        while(++index<limit);
        }
    while(index<endIndex);
    MmuLock::Unlock();
    }
void RPageArray::TIter::VetoRestrict(TBool aPageMoving)
    {
    TUint index = iIndex;
    TUint endIndex = iEndIndex;
    if(index==endIndex)
        return;

    RPageArray::TState operation = aPageMoving ? RPageArray::EMoving : RPageArray::ERestrictingNA;

    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);

    MmuLock::Lock();
    do
        {
        // get segment...
        TSegment* s = *pS++;
        TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;

        // veto pages in segment...
        do
            {
            TPhysAddr page = *p++;
            TRACE2(("RPageArray::TIter::VetoRestrict() yes/no=%d page=0x%08x",State(page)==operation,page));
            if(State(page)==operation)
                {
                // to veto a 'restrict page' operation, we put the page back into the committed state...
                p[-1] = (page&~EStateMask)|ECommitted;
                }
            }
        while(++index<limit);
        }
    while(index<endIndex);
    MmuLock::Unlock();
    }
FORCE_INLINE void RPageArray::TIter::Set(RPageArray::TSegment** aSegments, TUint aIndex, TUint aEndIndex)
    {
    iSegments = aSegments;
    iIndex = aIndex;
    iEndIndex = aEndIndex;
    }

//
// RPageArray
//

void RPageArray::Init2A()
    {
    TInt r = PageSegmentAllocator.Construct();
    __NK_ASSERT_ALWAYS(r==KErrNone);
    }


void RPageArray::Init2B(DMutex* aLock)
    {
    // construct memory object for slabs...
    DMemoryObject* memory;
    TMappingCreateFlags mapFlags = (TMappingCreateFlags)(EMappingCreateFixedVirtual|EMappingCreateReserveAllResources);
    TMemoryAttributes memAttr = EMemoryAttributeStandard;
    TInt r = MM::InitFixedKernelMemory(memory, KPageArraySegmentBase, KPageArraySegmentEnd, KPageSize, EMemoryObjectUnpaged, EMemoryCreateNoWipe, memAttr, mapFlags);
    __NK_ASSERT_ALWAYS(r==KErrNone);
    MM::MemorySetLock(memory,aLock);
    PageSegmentAllocator.SetMemory(memory,1);
    }
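
// Note: initialisation is split in two phases because the slab allocator is
// needed before the memory manager can provide backing store: Init2A() only
// constructs PageSegmentAllocator, while Init2B() later attaches the memory
// object (and its lock) which lets the allocator map pages for new slabs.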
RPageArray::RPageArray()
    {
    __NK_ASSERT_DEBUG(!iSegments);
    }
TInt RPageArray::Construct(TUint aMaxPages, TBool aPreallocateMemory)
    {
    iNumSegments = (aMaxPages+KPageArraySegmentMask)>>KPageArraySegmentShift;
    iSegments = (TSegment**)Kern::AllocZ(iNumSegments*sizeof(TSegment*));
    if(!iSegments)
        return KErrNoMemory;

    if(!aPreallocateMemory)
        return KErrNone;

    return PreallocateMemory();
    }
TInt RPageArray::PreallocateMemory()
    {
    MmuLock::Lock();

    __NK_ASSERT_DEBUG(!iPreallocatedMemory);
    iPreallocatedMemory = true;

    TSegment** pS = iSegments;
    TSegment** pGEnd = pS+iNumSegments;
    do
        {
        if(!GetOrAllocateSegment(pS,1))
            {
            iNumSegments = pS-iSegments; // truncate to amount successfully allocated
            MmuLock::Unlock();
            return KErrNoMemory;
            }
        }
    while(++pS<pGEnd);

    MmuLock::Unlock();
    return KErrNone;
    }
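
// Note: preallocation works by taking one lock on every segment up front;
// since a segment is only deleted when its lock count falls to zero, this
// pins all segments in place until the destructor drops these locks again.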
RPageArray::~RPageArray()
    {
    TSegment** pS = iSegments;
    if(pS)
        {
        TSegment** pGEnd = pS+iNumSegments;
        if(!iPreallocatedMemory)
            {
            // check all segments have already been deleted...
            do
                {
                __NK_ASSERT_DEBUG(!*pS);
                }
            while(++pS<pGEnd);
            }
        else
            {
            // drop the lock taken by PreallocateMemory on each segment...
            MmuLock::Lock();
            do
                {
                __NK_ASSERT_DEBUG(*pS);
                TSegment::Unlock(*pS);
                __NK_ASSERT_DEBUG(!*pS);
                TRACE2(("RPageArray::~RPageArray delete segment=%d",pS-iSegments));
                }
            while(++pS<pGEnd);
            MmuLock::Unlock();
            }

        Kern::Free(iSegments);
        }
    }
RPageArray::TSegment* RPageArray::GetOrAllocateSegment(TSegment** aSegmentEntry, TUint aLockCount)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aLockCount);

    for(;;)
        {
        TSegment* s = *aSegmentEntry;
        if(s)
            {
            // use existing segment...
            s->Lock(aLockCount);
            return s;
            }

        // no segment, so allocate one...
        MmuLock::Unlock();
        s = TSegment::New();
        MmuLock::Lock();
        if(!s)
            return s;

        // if someone else allocated one...
        if(*aSegmentEntry)
            {
            // free the one we created...
            TSegment::Unlock(s);
            // retry with the segment which is already there...
            continue;
            }

        // use new segment (it was created with a lock count of one)...
        TRACE2(("RPageArray::GetOrAllocateSegment new segment=%d",aSegmentEntry-iSegments));
        *aSegmentEntry = s;
        if(--aLockCount)
            s->Lock(aLockCount);
        return s;
        }
    }
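
// Note: the retry loop above is needed because TSegment::New() cannot be
// called with the MmuLock held (it asserts !MmuLock::IsHeld()), so the lock
// is dropped around the allocation; by the time it is re-acquired another
// thread may have installed a segment at the same position, in which case
// the freshly created one is discarded and the existing one locked instead.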
TInt RPageArray::Alloc(TUint aIndex, TUint aCount)
    {
    TRACE2(("RPageArray::Alloc(0x%x,0x%x)",aIndex,aCount));
    __NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
    __NK_ASSERT_DEBUG(aIndex+aCount>=aIndex);

    MmuLock::Lock();

    TUint index = aIndex;
    TUint endIndex = aIndex+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    while(index<endIndex)
        {
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        TUint lockCount = limit-index;

        TSegment* s = GetOrAllocateSegment(pS++,lockCount);
        if(!s)
            goto no_memory;

        index = limit;
        }

    MmuLock::Unlock();
    return KErrNone;

no_memory:
    // free what we actually alloced...
    endIndex = index&~KPageArraySegmentMask;
    Free(aIndex,endIndex-aIndex);

    MmuLock::Unlock();
    return KErrNoMemory;
    }
void RPageArray::Free(TUint aIndex, TUint aCount)
    {
    TRACE2(("RPageArray::Free(0x%x,0x%x)",aIndex,aCount));
    __NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
    __NK_ASSERT_DEBUG(aIndex+aCount>aIndex);
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());

    TUint index = aIndex;
    TUint endIndex = aIndex+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    while(index<endIndex)
        {
        __NK_ASSERT_DEBUG(*pS);
        TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
        TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
        TSegment::Unlock(*pS,limit-index);
        index = limit;
        ++pS;
        }
    }
TInt RPageArray::AddStart(TUint aIndex, TUint aCount, TIter& aIter, TBool aAllowExisting)
    {
    TRACE2(("RPageArray::AddStart(0x%x,0x%x,?,%d)",aIndex,aCount,aAllowExisting));
    __NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
    __NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

    aIter.Set(iSegments,aIndex,aIndex+aCount);

    TInt r;
    MmuLock::Lock();

    TUint index = aIndex;
    TUint endIndex = aIndex+aCount;
    TSegment** pS = iSegments+(index>>KPageArraySegmentShift);
    while(index<endIndex)
        {
        TSegment* s = *pS;
        if(!s)
            {
            // no segment, so allocate one...
            MmuLock::Unlock();
            s = TSegment::New();
            MmuLock::Lock();
            if(!s)
                goto no_memory;

            // if someone else allocated one
            if(*pS)
                {
                // free the one we created...
                TSegment::Unlock(s);
                // retry with the segment which is already there...
                continue;
                }

            // use new segment (it was created with a lock count of one)...
            TRACE2(("RPageArray::AddStart new segment=%d",pS-iSegments));
            *pS = s;
            ++pS;
            index = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
            }
        else
            {
            TUint nextIndex = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
            if(aAllowExisting)
                {
                s->Lock();
                // just move on to next segment...
                ++pS;
                index = (index+KPageArraySegmentSize)&~KPageArraySegmentMask;
                }
            else
                {
                // check page entries are empty...
                TPhysAddr* p = s->iPages+(index&KPageArraySegmentMask);
                TUint limit = (nextIndex<endIndex) ? nextIndex : endIndex;
                do
                    {
                    if(IsPresent(*p++))
                        goto already_exists;
                    }
                while(++index<limit);

                // lock segment so that it doesn't go away...
                s->Lock();
                ++pS;
                }
            }
        }

    MmuLock::Unlock();
    return KErrNone;

no_memory:
    r = KErrNoMemory;
    goto error;

already_exists:
    r = KErrAlreadyExists;
error:
    MmuLock::Unlock();

    // unlock any segments that we locked...
    endIndex = index&~KPageArraySegmentMask;
    if(endIndex>aIndex)
        Release(iSegments,aIndex,endIndex-aIndex);

    return r;
    }
void RPageArray::AddEnd(TUint aIndex, TUint aCount)
    {
    Release(iSegments,aIndex,aCount);
    }
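
// A minimal sketch of the overall commit sequence, as a hypothetical caller
// might use it (illustrative only; the real callers are the memory manager
// classes, whose exact call sequence may differ, and 'pages' here is a
// hypothetical array of page-aligned physical addresses):
//
//   RPageArray::TIter iter;
//   TInt r = array.AddStart(index, count, iter, EFalse); // lock/allocate segments
//   if(r==KErrNone)
//       {
//       RPageArray::TIter pageList;
//       TUint n;
//       while((n=iter.AddFind(pageList))!=0)    // find a run of empty entries
//           pageList.Add(n, pages);             // commit pages into them
//       array.AddEnd(index, count);             // drop the segment locks
//       }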
void RPageArray::FindStart(TUint aIndex, TUint aCount, TIter& aIter)
    {
    TRACE2(("RPageArray::FindStart(0x%x,0x%x,?)",aIndex,aCount));
    __NK_ASSERT_DEBUG(aIndex+aCount<=iNumSegments*KPageArraySegmentSize);
    __NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

    aIter.Set(iSegments,aIndex,aIndex+aCount);
    }
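
// Similarly, a hypothetical lookup sequence (illustrative only):
//
//   RPageArray::TIter iter;
//   array.FindStart(index, count, iter);    // set the iteration range
//   RPageArray::TIter pageList;
//   TUint n;
//   while((n=iter.Find(pageList))!=0)       // locate and lock a present run
//       {
//       TPhysAddr* p;
//       TUint step = pageList.Pages(p, n);  // access entries segment by segment
//       // ...use the 'step' entries at p...
//       iter.FindRelease(n);                // unlock the segments again
//       }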
void RPageArray::Release(TSegment** aSegments, TUint aIndex, TUint aCount)
    {
    __NK_ASSERT_DEBUG(aIndex+aCount>aIndex);

    MmuLock::Lock();

    TSegment** pS = aSegments+(aIndex>>KPageArraySegmentShift);
    TSegment** pGLast = aSegments+((aIndex+aCount-1)>>KPageArraySegmentShift);
    __NK_ASSERT_DEBUG(pS<=pGLast);

    TUint flash = 0;
    do
        {
        MmuLock::Flash(flash,KMaxPagesInOneGo);
        if(TSegment::Unlock(*pS)==0)
            {
            TRACE2(("RPageArray::Release delete segment=%d",pS-aSegments));
            }
        }
    while(++pS<=pGLast);

    MmuLock::Unlock();
    }
TPhysAddr* RPageArray::AddPageStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    MmuLock::Lock();
    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = GetOrAllocateSegment(pS,1);
    MmuLock::Unlock();

    if(!s)
        return 0;

    aPageList.Set(iSegments,aIndex,aIndex+1);

    return s->iPages+(aIndex&KPageArraySegmentMask);
    }
TPhysAddr* RPageArray::RemovePageStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    MmuLock::Lock();

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(!s)
        {
        MmuLock::Unlock();
        return 0;
        }

    TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
    TPhysAddr page = *p;
    if(State(page)<EDecommitting)
        {
        MmuLock::Unlock();
        return 0;
        }

    *p = (page&~EStateMask)|EDecommitting;

    s->Lock(); // lock segment so that it doesn't go away...

    MmuLock::Unlock();

    aPageList.Set(iSegments,aIndex,aIndex+1);

    return p;
    }
TPhysAddr RPageArray::RemovePage(TPhysAddr* aPageEntry)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    TPhysAddr page = *aPageEntry;
    __NK_ASSERT_DEBUG(State(page)!=EStealing); // can't be stealing as that only happens with the RamAllocLock held, which we should already hold if freeing demand paged pages
    if(State(page)==EDecommitting || State(page)==EDecommitted)
        {
        // remove a page...
        if(page&EUnmapVetoed)
            {
            *aPageEntry = (page&~(EUnmapVetoed|EStateMask))|EDecommitted; // change to EDecommitted state
            }
        else
            {
            *aPageEntry = EEmptyEntry;
            return page&~KPageMask;
            }
        // check not removing managed pages without the RamAllocLock...
        __NK_ASSERT_DEBUG(RamAllocLock::IsHeld()
            || SPageInfo::FromPhysAddr(page)->Type()!=SPageInfo::EManaged);
        }
    return KPhysAddrInvalid;
    }
TPhysAddr* RPageArray::RestrictPageNAStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(!s)
        return 0;

    TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
    TPhysAddr page = *p;
    if(State(page) < RPageArray::ERestrictingNA)
        return 0;

    *p = (page&~EStateMask) | RPageArray::ERestrictingNA;

    s->Lock(); // lock segment so that it doesn't go away...

    aPageList.Set(iSegments,aIndex,aIndex+1);

    return p;
    }
TPhysAddr* RPageArray::StealPageStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    __NK_ASSERT_DEBUG(s); // we only steal pages in the live list and these can not go away yet because we hold the RamAllocLock

    TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
    TPhysAddr page = *p;
    if(State(page)>EStealing)
        *p = (page&~EStateMask)|EStealing;

    s->Lock(); // lock segment so that it doesn't go away...

    aPageList.Set(iSegments,aIndex,aIndex+1);

    return p;
    }
TPhysAddr* RPageArray::MovePageStart(TUint aIndex, TIter& aPageList)
    {
    __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex <= iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    // The segment should always exist for a page that is being moved.
    __NK_ASSERT_DEBUG(s);

    TPhysAddr* p = s->iPages+(aIndex&KPageArraySegmentMask);
    TPhysAddr page = *p;
    if(State(page) <= RPageArray::EMoving)
        return 0;

    *p = (page & ~EStateMask) | EMoving;

    aPageList.Set(iSegments, aIndex, aIndex+1);

    return p;
    }
void RPageArray::ReleasePage(TUint aIndex, TInt aDelta)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    __NK_ASSERT_DEBUG(s); // must exist because FindPageStart/AddPageStart locked it

    __NK_ASSERT_DEBUG(aDelta>=-1 && aDelta<=1);
    if(aDelta)
        s->AdjustAllocCount(aDelta);

    if(TSegment::Unlock(*pS)==0)
        {
        TRACE2(("RPageArray::ReleasePage delete segment=%d",pS-iSegments));
        }
    }
TPhysAddr RPageArray::Page(TUint aIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(!s)
        return ENotPresent;
    return s->iPages[aIndex&KPageArraySegmentMask];
    }
TPhysAddr* RPageArray::PageEntry(TUint aIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(!s)
        return 0;
    return s->iPages + (aIndex & KPageArraySegmentMask);
    }
TUint RPageArray::PagingManagerData(TUint aIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    __NK_ASSERT_DEBUG(s);
    TPhysAddr* p = &s->iPages[aIndex&KPageArraySegmentMask];

    TPhysAddr entry = *p;
    if(IsPresent(entry))
        {
#ifdef _DEBUG
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(entry&~KPageMask);
        if(!pi)
            Kern::Printf("RPageArray::PagingManagerData bad entry 0x%08x",entry);
        __NK_ASSERT_DEBUG(pi);
#else
        SPageInfo* pi = SPageInfo::FromPhysAddr(entry);
#endif
        entry = pi->PagingManagerData();
        }
    __NK_ASSERT_DEBUG((entry&(EFlagsMask|EStateMask))==ENotPresent);

    return entry>>(EFlagsShift+EStateShift);
    }
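
// Note: paging manager data lives in one of two places. While a page is not
// present, the value is stored directly in the (otherwise unused) array
// entry, shifted above the state/flag bits with ENotPresent in the low bits;
// once a page is present, the entry holds the physical address, so the value
// is kept in the page's SPageInfo instead. The accessors here hide this.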
void RPageArray::SetPagingManagerData(TUint aIndex, TUint aValue)
    {
    aValue = (aValue<<(EFlagsShift+EStateShift))|ENotPresent;

    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);
    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    __NK_ASSERT_DEBUG(s);
    TPhysAddr* p = &s->iPages[aIndex&KPageArraySegmentMask];

    TPhysAddr entry = *p;
    if(!IsPresent(entry))
        *p = aValue;
    else
        {
#ifdef _DEBUG
        SPageInfo* pi = SPageInfo::SafeFromPhysAddr(entry&~KPageMask);
        if(!pi)
            Kern::Printf("RPageArray::SetPagingManagerData bad entry 0x%08x",entry);
        __NK_ASSERT_DEBUG(pi);
#else
        SPageInfo* pi = SPageInfo::FromPhysAddr(entry);
#endif
        pi->SetPagingManagerData(aValue);
        }
    }
TPhysAddr RPageArray::PhysAddr(TUint aIndex)
    {
    __NK_ASSERT_DEBUG(MmuLock::IsHeld());
    __NK_ASSERT_DEBUG(aIndex<=iNumSegments*KPageArraySegmentSize);

    TSegment** pS = iSegments+(aIndex>>KPageArraySegmentShift);
    TSegment* s = *pS;
    if(s)
        {
        TPhysAddr page = s->iPages[aIndex&KPageArraySegmentMask];
        if(IsPresent(page))
            {
            return page&~KPageMask;
            }
        }
    return KPhysAddrInvalid;
    }
TInt RPageArray::PhysAddr(TUint aIndex, TUint aCount, TPhysAddr& aPhysicalAddress, TPhysAddr* aPhysicalPageList)
    {
    __NK_ASSERT_DEBUG(aCount);

    MmuLock::Lock();

    TUint32* pageList = aPhysicalPageList;

    // get first page...
    TPhysAddr physStart = PhysAddr(aIndex++);
    if(physStart==KPhysAddrInvalid)
        {
        MmuLock::Unlock();
        return KErrNotFound;
        }
    if(pageList)
        *pageList++ = physStart;

    TUint32 nextPhys = physStart+KPageSize;

    TUint flash = 0;
    while(--aCount)
        {
        MmuLock::Flash(flash,KMaxPagesInOneGo);

        // get next page...
        TPhysAddr phys = PhysAddr(aIndex++);
        if(phys==KPhysAddrInvalid)
            {
            MmuLock::Unlock();
            return KErrNotFound;
            }
        if(pageList)
            *pageList++ = phys;

        // check for contiguity...
        if(phys!=nextPhys)
            nextPhys = KPhysAddrInvalid;
        else
            nextPhys += KPageSize;
        }

    MmuLock::Unlock();

    if(nextPhys==KPhysAddrInvalid)
        {
        // memory is discontiguous...
        if(!aPhysicalPageList)
            return KErrNotFound;
        aPhysicalAddress = KPhysAddrInvalid;
        return 1;
        }

    // memory is contiguous...
    aPhysicalAddress = physStart;
    return KErrNone;
    }
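
// Hypothetical usage of the range query above (illustrative only, assuming
// the reconstructed return convention: KErrNone when the whole range is
// physically contiguous, a positive value when it is only available as a
// page list, and KErrNotFound when some page is missing):
//
//   TPhysAddr physAddr;
//   TPhysAddr pages[16];                 // assumed large enough for 'count'
//   TInt r = array.PhysAddr(index, count, physAddr, pages);
//   if(r==KErrNone)
//       ;// contiguous memory starting at physAddr
//   else if(r>=0)
//       ;// discontiguous; per-page addresses are in pages[]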