First public contribution.
// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// e32\memmodel\epoc\mmubase\ramalloc.cpp
//

//#define __VERIFY_LEASTMOVDIS

#include <plat_priv.h>
#include <e32btrace.h>

#ifndef __MEMMODEL_FLEXIBLE__
#include <mmubase.inl>
#include "mdefrag.inl"
#endif //__MEMMODEL_FLEXIBLE__
DRamAllocator* DRamAllocator::New()
	{
	return new DRamAllocator;
	}

DRamAllocator* DRamAllocator::New(const SRamInfo& aInfo, const SRamZone* aZoneInfo, TRamZoneCallback aZoneCallback)
	{
	DRamAllocator* pA=New();
	if (!pA)
		Panic(ECreateNoMemory);
	// If this fails then it won't return but panic
	pA->Create(aInfo,aZoneInfo, aZoneCallback);
	return pA;
	}

void DRamAllocator::Panic(TPanic aPanic)
	{
	Kern::Fault("RAM-ALLOC", aPanic);
	}
void HexDump32(const TAny* a, TInt n, const char* s)
	{
	const TUint32* p=(const TUint32*)a;
	// ...
	b.AppendNumFixedWidth(i,EHex,4);
	// ...
	b.AppendNumFixedWidth(*p++,EHex,8);
	// ...
	Kern::Printf("%S",&b);
	}

void HexDump8(const TAny* a, TInt n, const char* s)
	{
	const TUint8* p=(const TUint8*)a;
	// ...
	b.AppendNumFixedWidth(i,EHex,4);
	// ...
	b.AppendNumFixedWidth(*p++,EHex,2);
	// ...
	Kern::Printf("%S",&b);
	}
void DRamAllocator::DebugDump()
	{
	Kern::Printf("PageSize=%08x PageShift=%d",KPageSize,KPageShift);
	Kern::Printf("Total Pages=%x Total Free=%x",iTotalRamPages,iTotalFreeRamPages);
	Kern::Printf("Number of zones=%d, PowerState=%016lx",iNumZones,iZonePwrState);
	Kern::Printf("PhysAddrBase=%08x, PhysAddrTop=%08x",iPhysAddrBase,iPhysAddrTop);

	TUint i = 0;
	Kern::Printf("Zone Info:");
	for (; i<iNumZones; ++i)
		{
		SZone& z = iZones[i];
		TBitMapAllocator& b = *(z.iBma[KBmaAllPages]);
		Kern::Printf("%x: Avail %x Size %x Phys %08x PhysEnd %08x ID %08x FreePage %x Pref %02x",i,b.iAvail,b.iSize,
					z.iPhysBase, z.iPhysEnd, z.iId, z.iFreePages, z.iPref);
		Kern::Printf("Allocated Unknown %x Fixed %x Movable %x Discardable %x",iZones[i].iAllocPages[EPageUnknown],iZones[i].iAllocPages[EPageFixed],
					iZones[i].iAllocPages[EPageMovable],iZones[i].iAllocPages[EPageDiscard]);
		}

	Kern::Printf("Zone pref order:");
	SDblQueLink* link = iZonePrefList.First();
	for (; link != &iZonePrefList.iA; link = link->iNext)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		Kern::Printf("ID0x%x rank0x%x", zone.iId, zone.iPrefRank);
		}
	SZone& zone = *_LOFF(iZoneLeastMovDis, SZone, iPrefLink);
	Kern::Printf("iZoneLeastMovDis ID 0x%x rank 0x%x", zone.iId, iZoneLeastMovDisRank);
	}
TInt CountBanks(const SRamBank* aBankList)
	{
	TInt banks=0;
	for (; aBankList->iSize; ++banks, ++aBankList);
	return banks;
	}

TUint32 TotalBankSize(const SRamBank* aBankList)
	{
	TUint32 size=0;
	for (; aBankList->iSize; ++aBankList)
		size+=aBankList->iSize;
	return size;
	}
/**
Count how many zones have been specified and do some basic checks on their layout:
	Zones must be distinct, i.e. not overlap
	Zone IDs must be unique
	Zones must be page size aligned
	Zones must be big enough to cover all of the allocatable RAM
The end of the list is indicated by an SRamZone with iSize==0.
@param aZones The list of RAM zones to be set up
*/
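// Example (illustrative only; the values are hypothetical, not from any real
// base port): two page-aligned, non-overlapping zones with unique IDs,
// terminated by an entry with iSize==0, would pass the checks below:
//	{ iBase=0x80000000, iSize=0x00800000, iId=1, iPref=0 }	// 8MB zone
//	{ iBase=0x80800000, iSize=0x00800000, iId=2, iPref=1 }	// 8MB zone
//	{ iSize=0 }											// end of list marker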
void DRamAllocator::CountZones(const SRamZone* aZones)
	{
	TUint32 totalSize = 0;
	TUint32 pageMask = KPageSize-1;
	// Check that the zones don't overlap each other and, while running through
	// them, count how many there are.
	const SRamZone* pCurZ = aZones;
	for (; pCurZ->iSize != 0; pCurZ++)
		{
		// Verify zone addresses and alignment
		TUint32 curEnd = pCurZ->iBase + pCurZ->iSize - 1;
		__KTRACE_OPT(KMMU,Kern::Printf("curBase %x curEnd %x pageMask %x",pCurZ->iBase,curEnd,pageMask));
		if (curEnd <= pCurZ->iBase || (((curEnd + 1) | pCurZ->iBase) & pageMask))
			{
			Panic(EZonesAlignment);
			}
		if (pCurZ->iId == KRamZoneInvalidId)
			{
			Panic(EZonesIDInvalid);
			}
		// Check the flags are not set to invalid values
		if (pCurZ->iFlags & KRamZoneFlagInvalid)
			{
			Panic(EZonesFlagsInvalid);
			}

		iNumZones++;
		if (iNumZones > KMaxRamZones)
			{// Too many zones specified
			Panic(EZonesTooNumerousOrFew);
			}
		totalSize += pCurZ->iSize;

		// Verify this zone doesn't overlap any of the previous zones' address space
		const SRamZone* pTmpZ = aZones;
		for (; pTmpZ < pCurZ; pTmpZ++)
			{
			TUint32 tmpEnd = pTmpZ->iBase + pTmpZ->iSize - 1;
			if (tmpEnd >= pCurZ->iBase && pTmpZ->iBase <= curEnd)
				{
				Panic(EZonesNotDistinct);
				}
			if (pTmpZ->iId == pCurZ->iId)
				{
				Panic(EZonesIDNotUnique);
				}
			}
		}
	__KTRACE_OPT(KMMU,Kern::Printf("iNumZones=%d, totalSize=%x",iNumZones,totalSize));
	if (!iNumZones)
		{// no zones specified
		Panic(EZonesTooNumerousOrFew);
		}

	// Together all of the zones should cover the whole of the RAM
	if (totalSize>>KPageShift < iTotalRamPages)
		{
		Panic(EZonesIncomplete);
		}
	}
/**
Get the zone from the ID
@param aId ID of the zone to find
@return Pointer to the zone if a zone with matching ID is found, NULL otherwise
*/
SZone* DRamAllocator::ZoneFromId(TUint aId) const
	{
	SZone* pZ = iZones;
	const SZone* const pEndZone = iZones + iNumZones;
	for (; pZ < pEndZone; pZ++)
		if (aId == pZ->iId)
			return pZ;
	return NULL;
	}
/** Retrieve the physical base address and number of pages in the specified zone.

@param aZoneId The ID of the zone
@param aPhysBase Receives the base address of the zone
@param aNumPages Receives the number of pages in the zone

@return KErrNone if the zone was found, KErrArgument otherwise
*/
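// Usage sketch (illustrative only; kernel side, RAM alloc lock held;
// allocator is a hypothetical DRamAllocator pointer):
//	TPhysAddr base;
//	TUint pages;
//	if (allocator->GetZoneAddress(aZoneId, base, pages) == KErrNone)
//		{}// zone spans [base, base + (pages << KPageShift))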
TInt DRamAllocator::GetZoneAddress(TUint aZoneId, TPhysAddr& aPhysBase, TUint& aNumPages)
	{
	SZone* zone = ZoneFromId(aZoneId);
	if (zone == NULL)
		return KErrArgument;
	aPhysBase = zone->iPhysBase;
	aNumPages = zone->iPhysPages;
	return KErrNone;
	}
#ifdef __MEMMODEL_FLEXIBLE__
/**
@param aAddr The address of the page to find the zone of
@param aOffset Receives the page offset from the start of the zone that the page is in
*/
SZone* DRamAllocator::GetZoneAndOffset(TPhysAddr aAddr, TInt& aOffset)
	{
	// Get the zone from the SPageInfo of the page at aAddr
	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
	if (pageInfo == NULL)
		return NULL;

	// Perform a binary search for the RAM zone; we know aAddr is within a RAM
	// zone as pageInfo != NULL.
	SZone* left = iZones;
	SZone* mid = iZones + (iNumZones>>1);
	SZone* top = iZones + iNumZones - 1;

	while (mid->iPhysEnd < aAddr || mid->iPhysBase > aAddr)
		{
		if (mid->iPhysEnd < aAddr)
			left = mid + 1;
		else
			top = mid - 1;
		mid = left + ((top - left) >> 1);
		__ASSERT_DEBUG(left <= top && mid <= top && mid >= left, Panic(EAllocRamPagesInconsistent));
		}
	__ASSERT_DEBUG(mid->iPhysBase <= aAddr && mid->iPhysEnd >= aAddr, Panic(EAllocRamPagesInconsistent));
	aOffset = (aAddr - mid->iPhysBase) >> KPageShift;
	__ASSERT_DEBUG((TUint)aOffset < mid->iPhysPages, Panic(EAllocRamPagesInconsistent));
	return mid;
	}
#else
/**
@param aAddr The address of the page to find the zone of
@param aOffset Receives the page offset from the start of the zone that the page is in
*/
SZone* DRamAllocator::GetZoneAndOffset(TPhysAddr aAddr, TInt& aOffset)
	{
	// Get the zone from the SPageInfo of the page at aAddr
	SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr);
	if (pageInfo == NULL)
		return NULL;
	SZone* z = iZones + pageInfo->Zone();
	aOffset = (aAddr - z->iPhysBase) >> KPageShift;
	__ASSERT_DEBUG((TUint)aOffset < z->iPhysPages, Panic(EAllocRamPagesInconsistent));
	return z;
	}
#endif
/**
@param aId ID of the zone to get page counts for
@param aPageData Receives the page counts
@return KErrNone if the zone was found, KErrArgument otherwise
*/
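// Usage sketch (illustrative only; allocator is a hypothetical DRamAllocator
// pointer). The five counts always sum to the zone's iPhysPages:
//	SRamZonePageCount counts;
//	if (allocator->GetZonePageCount(aId, counts) == KErrNone)
//		total = counts.iFreePages + counts.iUnknownPages + counts.iFixedPages
//				+ counts.iMovablePages + counts.iDiscardablePages;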
TInt DRamAllocator::GetZonePageCount(TUint aId, SRamZonePageCount& aPageData)
	{
	// Search for the zone of ID aId
	const SZone* zone = ZoneFromId(aId);
	if (zone == NULL)
		return KErrArgument;
	aPageData.iFreePages = zone->iFreePages;
	aPageData.iUnknownPages = zone->iAllocPages[EPageUnknown];
	aPageData.iFixedPages = zone->iAllocPages[EPageFixed];
	aPageData.iMovablePages = zone->iAllocPages[EPageMovable];
	aPageData.iDiscardablePages = zone->iAllocPages[EPageDiscard];
	return KErrNone;
	}
/** Update the counts of free and allocated pages when pages are allocated
from the specified zone.
@param aZone The zone whose counts are being updated
@param aCount The number of pages being allocated
@param aType The type of the pages being allocated
*/
void DRamAllocator::ZoneAllocPages(SZone* aZone, TUint32 aCount, TZonePageType aType)
	{
#ifdef _DEBUG
	TUint32 free = aZone->iFreePages - aCount;
	TUint32 alloc = aZone->iAllocPages[aType] + aCount;
	TUint32 total_alloc = aZone->iAllocPages[EPageUnknown] +
						aZone->iAllocPages[EPageDiscard] +
						aZone->iAllocPages[EPageMovable] +
						aZone->iAllocPages[EPageFixed] + aCount;
	if (free > aZone->iFreePages ||
		alloc < aZone->iAllocPages[aType] ||
		free + total_alloc != aZone->iPhysPages ||
		iTotalFreeRamPages > iTotalRamPages)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("TotalFree %x TotalPages %x",iTotalFreeRamPages, iTotalRamPages));
		__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocPages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over
		__KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
					aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
		Panic(EZonesCountErr);
		}
	__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneAllocPages - aCount %x free %x, alloc %x",aCount,free,alloc));
	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
				aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));

	if (iAllowBmaVerify)
		{
		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
		TUint allocPages;
		if (aType == EPageFixed || aType == EPageUnknown)
			allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
		else
			allocPages = aZone->iAllocPages[aType];
		allocPages += aCount;
		__NK_ASSERT_DEBUG(aZone->iPhysPages - bmaType.iAvail == allocPages);
		__NK_ASSERT_DEBUG((TUint)bmaType.iAvail >= aZone->iFreePages - aCount);

//#define _FULL_VERIFY_TYPE_BMAS
#ifdef _FULL_VERIFY_TYPE_BMAS
		TUint offset = 0;
		TUint matchedPages = 0;
		TInt r = KErrNone;
		while (offset < aZone->iPhysPages && r == KErrNone)
			{
			r = NextAllocatedPage(aZone, offset, EPageTypes);
			if (bmaType.NotFree(offset, 1))
				matchedPages++;
			offset++;
			}
		__NK_ASSERT_DEBUG(matchedPages == allocPages);
#endif
		}
#endif // _DEBUG

	aZone->iAllocPages[aType] += aCount;
	aZone->iFreePages -= aCount;
	aZone->iFlags &= ~KRamZoneFlagMark; // clear the mark as this zone is active

	// Check if the power state of the zone needs to be changed
	if (iZonePowerFunc && !(iZonePwrState & (((TUint64)1) << aZone - iZones)))
		{//zone no longer empty so call variant to power RAM zone up if necessary
		iZonePwrState |= (((TUint64)1) << aZone - iZones);

		if (iZoneCallbackInitSent)
			{
			TInt ret = (*iZonePowerFunc)(ERamZoneOp_PowerUp, (TAny*)aZone->iId, (TUint*)&iZonePwrState);
			if (ret != KErrNone && ret != KErrNotSupported)
				{
				Panic(EZonesCallbackErr);
				}
			CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::ZoneAllocPages");
			}
		}
	// Re-order the zone preference list so that a RAM zone with more immovable
	// pages is more preferable and, secondary to that, a RAM zone that is not
	// empty is more preferable than one that is empty.
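	// Worked example (hypothetical): zones A and B share the same iPref. If B
	// now holds 60 fixed+unknown pages and A only 10, B is moved ahead of A so
	// later fixed allocations keep targeting B; an entirely empty zone is
	// likewise ranked behind, letting unused zones stay powered down.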
	while (&aZone->iPrefLink != iZonePrefList.First())
		{
		SZone* prevZ = _LOFF(aZone->iPrefLink.iPrev, SZone, iPrefLink);
		__NK_ASSERT_DEBUG(K::Initialising || prevZ->iPrefRank == aZone->iPrefRank - 1);
		if (prevZ->iPref == aZone->iPref &&
			(prevZ->iAllocPages[EPageFixed] + prevZ->iAllocPages[EPageUnknown] <
			aZone->iAllocPages[EPageFixed] + aZone->iAllocPages[EPageUnknown] ||
			prevZ->iFreePages == prevZ->iPhysPages))
			{
			__KTRACE_OPT(KMMU, Kern::Printf("a - Reorder aZone 0x%x free 0x%x before prevZ 0x%x free 0x%x", aZone->iId, aZone->iFreePages, prevZ->iId, prevZ->iFreePages));
			// Make this RAM zone more preferable.
			aZone->iPrefLink.Deque();
			aZone->iPrefLink.InsertBefore(&prevZ->iPrefLink);
			aZone->iPrefRank--;
			prevZ->iPrefRank++;

			if (iZoneLeastMovDis == &prevZ->iPrefLink)
				{// Ensure iZoneLeastMovDisRank is kept up to date.
				iZoneLeastMovDisRank = prevZ->iPrefRank;
				}
			if (iZoneLeastMovDis == &aZone->iPrefLink)
				{// Ensure iZoneLeastMovDisRank is kept up to date.
				iZoneLeastMovDisRank = aZone->iPrefRank;

				// aZone was the least preferable with movable and/or discardable so is it still?
				if (prevZ->iAllocPages[EPageMovable] || prevZ->iAllocPages[EPageDiscard])
					{// prevZ is now the least preferable RAM zone with movable and/or discardable.
					iZoneLeastMovDis = &prevZ->iPrefLink;
					iZoneLeastMovDisRank = prevZ->iPrefRank;
					__KTRACE_OPT(KMMU, Kern::Printf("aa - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
					}
				__KTRACE_OPT(KMMU, Kern::Printf("iZoneLeastMovDisRank 0x%x", iZoneLeastMovDisRank));
				}
			}
		else
			{
			break;
			}
		}

	// Now that the preference list has been re-ordered check whether
	// iZoneLeastMovDis needs updating.
	if (aType >= EPageMovable && iZoneLeastMovDisRank < aZone->iPrefRank)
		{
		iZoneLeastMovDis = &aZone->iPrefLink;
		iZoneLeastMovDisRank = aZone->iPrefRank;
		__KTRACE_OPT(KMMU, Kern::Printf("a - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
		}
	__NK_ASSERT_DEBUG(K::Initialising ||
		iZoneLeastMovDisRank == _LOFF(iZoneLeastMovDis, SZone, iPrefLink)->iPrefRank);
#ifdef __VERIFY_LEASTMOVDIS
	if (!K::Initialising)
		VerifyLeastPrefMovDis();
#endif
	}
/** Update the counts of free and allocated pages when pages are freed
back to the specified zone.
@param aZone The zone whose counts are being updated
@param aCount The number of pages being freed
@param aType The type of the pages being freed
*/
void DRamAllocator::ZoneFreePages(SZone* aZone, TUint32 aCount, TZonePageType aType)
	{
#ifdef _DEBUG
	TUint32 alloc = aZone->iAllocPages[aType] - aCount;
	TUint32 free = aZone->iFreePages + aCount;
	TUint32 total_alloc = aZone->iAllocPages[EPageUnknown] +
						aZone->iAllocPages[EPageDiscard] +
						aZone->iAllocPages[EPageMovable] +
						aZone->iAllocPages[EPageFixed] - aCount;
	if (free < aZone->iFreePages ||
		alloc > aZone->iAllocPages[aType] ||
		free + total_alloc != aZone->iPhysPages ||
		iTotalFreeRamPages > iTotalRamPages)
		{
		__KTRACE_OPT(KMMU,Kern::Printf("TotalFree %x TotalPages %x",iTotalFreeRamPages, iTotalRamPages));
		__KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over
		__KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
					aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));
		Panic(EZonesCountErr);
		}
	__ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent));
	__KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc));
	__KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown],
				aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard]));

	if (iAllowBmaVerify)
		{
		TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
		TUint allocPages;
		if (aType == EPageFixed || aType == EPageUnknown)
			allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed];
		else
			allocPages = aZone->iAllocPages[aType];
		allocPages -= aCount;
		__NK_ASSERT_DEBUG(aZone->iPhysPages - bmaType.iAvail == allocPages);
		__NK_ASSERT_DEBUG((TUint)bmaType.iAvail >= aZone->iFreePages + aCount);

#ifdef _FULL_VERIFY_TYPE_BMAS
		TUint offset = 0;
		TUint matchedPages = 0;
		TInt r = KErrNone;
		while (offset < aZone->iPhysPages && r == KErrNone)
			{
			r = NextAllocatedPage(aZone, offset, EPageTypes);
			if (bmaType.NotFree(offset, 1))
				matchedPages++;
			offset++;
			}
		__NK_ASSERT_DEBUG(matchedPages == allocPages);
#endif
		}
#endif // _DEBUG

	aZone->iAllocPages[aType] -= aCount;
	aZone->iFreePages += aCount;
	aZone->iFlags &= ~KRamZoneFlagMark; // clear the mark as this zone is active

	// Check if the power state of the zone needs to be changed.
	// Don't update iZonePwrState when a zone is being cleared to then be
	// claimed, as it shouldn't be powered off when it's about to be used.
	if (iZonePowerFunc && !(aZone->iFlags & KRamZoneFlagClaiming) &&
		aZone->iFreePages == aZone->iPhysPages)
		{// Zone is empty so call variant to power down RAM zone if desirable.
		TUint64 pwrMask = ~(((TUint64)1) << aZone - iZones);
		iZonePwrState &= pwrMask;

		// Don't invoke the callback until the initial callback has been sent.
		if (iZoneCallbackInitSent)
			{
			TInt ret = (*iZonePowerFunc)(ERamZoneOp_PowerDown, (TAny*)aZone->iId, (TUint*)&iZonePwrState);
			if (ret != KErrNone && ret != KErrNotSupported)
				{
				Panic(EZonesCallbackErr);
				}
			CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::ZoneFreePages");
			}
		}
	// Re-order the zone preference list so that a RAM zone with more immovable
	// pages is more preferable and, secondary to that, a RAM zone that is not
	// empty is more preferable than one that is empty.
	while (&aZone->iPrefLink != iZonePrefList.Last())
		{
		SZone* nextZ = _LOFF(aZone->iPrefLink.iNext, SZone, iPrefLink);
		__NK_ASSERT_DEBUG(K::Initialising || nextZ->iPrefRank == aZone->iPrefRank + 1);
		if (nextZ->iPref == aZone->iPref &&
			(nextZ->iAllocPages[EPageFixed] + nextZ->iAllocPages[EPageUnknown] >
			aZone->iAllocPages[EPageFixed] + aZone->iAllocPages[EPageUnknown] ||
			(nextZ->iFreePages != nextZ->iPhysPages &&
			aZone->iFreePages == aZone->iPhysPages)))
			{
			__KTRACE_OPT(KMMU, Kern::Printf("f - Reorder aZone 0x%x free 0x%x after nextZ 0x%x free 0x%x", aZone->iId, aZone->iFreePages, nextZ->iId, nextZ->iFreePages));
			// Make this RAM zone less preferable.
			aZone->iPrefLink.Deque();
			aZone->iPrefLink.InsertAfter(&nextZ->iPrefLink);
			aZone->iPrefRank++;
			nextZ->iPrefRank--;

			if (iZoneLeastMovDis == &aZone->iPrefLink)
				{// Ensure iZoneLeastMovDisRank is kept up to date.
				iZoneLeastMovDisRank = aZone->iPrefRank;
				}
			if (iZoneLeastMovDis == &nextZ->iPrefLink)
				{// Ensure iZoneLeastMovDisRank is kept up to date.
				iZoneLeastMovDisRank = nextZ->iPrefRank;
				if (aZone->iAllocPages[EPageMovable] || aZone->iAllocPages[EPageDiscard])
					{// aZone is now the least preferable RAM zone with movable and/or discardable.
					iZoneLeastMovDis = &aZone->iPrefLink;
					iZoneLeastMovDisRank = aZone->iPrefRank;
					__KTRACE_OPT(KMMU, Kern::Printf("aa - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId));
					}
				__KTRACE_OPT(KMMU, Kern::Printf("iZoneLeastMovDis Rank 0x%x", iZoneLeastMovDisRank));
				}
			}
		else
			{
			break;
			}
		}

	if (&aZone->iPrefLink == iZoneLeastMovDis &&
		!aZone->iAllocPages[EPageMovable] && !aZone->iAllocPages[EPageDiscard])
		{// This RAM zone no longer has movable or discardable pages and therefore
		// is also no longer the least preferable RAM zone with movable and/or
		// discardable pages, so find the new one.
		SZone* zonePrev;
		do
			{
			iZoneLeastMovDis = iZoneLeastMovDis->iPrev;
			iZoneLeastMovDisRank--;
			if (iZoneLeastMovDis == iZonePrefList.First())
				{// This is the most preferable RAM zone so can't go any further.
				break;
				}
			zonePrev = _LOFF(iZoneLeastMovDis, SZone, iPrefLink);
			__KTRACE_OPT(KMMU, Kern::Printf("f - iZoneLeastMovDis 0x%x", zonePrev->iId));
			}
		while (!zonePrev->iAllocPages[EPageMovable] && !zonePrev->iAllocPages[EPageDiscard]);
		}

	__NK_ASSERT_DEBUG(K::Initialising ||
		iZoneLeastMovDisRank == _LOFF(iZoneLeastMovDis, SZone, iPrefLink)->iPrefRank);
#ifdef __VERIFY_LEASTMOVDIS
	if (!K::Initialising)
		VerifyLeastPrefMovDis();
#endif
	}
/** Calculate the physical address order of the zones and temporarily store
the order in aZoneAddrOrder.
*/
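// Worked example (hypothetical bases): for zones with iBase values
// {0x88000000, 0x80000000, 0x84000000}, the counts of zones with a lower base
// are {2, 0, 1}, so aZoneAddrOrder becomes {1, 2, 0} - the zone indices in
// ascending physical address order.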
inline void DRamAllocator::SortRamZones(const SRamZone* aZones, TUint8* aZoneAddrOrder)
	{
	const SRamZone* const endZone = aZones + iNumZones;
	const SRamZone* zone = aZones;
	for (; zone < endZone; zone++)
		{
		// zoneIdx is the number of zones that have a lower base address than the
		// current zone and therefore it is the address index of the current zone
		TUint zoneIdx = 0;
		// search for any zones of lower base address
		const SRamZone* zone2 = aZones;
		for (; zone2 < endZone; zone2++)
			{
			if (zone2->iBase < zone->iBase)
				{
				zoneIdx++; // have another zone of lower base address
				}
			}
		aZoneAddrOrder[zoneIdx] = zone - aZones;
		}
	}
/** Initialise the SPageInfos for all pages in this zone with the
index of the zone.
@param aZone The zone the pages to be initialised are in
*/
inline TUint DRamAllocator::InitSPageInfos(const SZone* aZone)
	{
	TUint pagesUpdated = 0;
	if (aZone->iPhysBase > iPhysAddrTop || aZone->iPhysEnd < iPhysAddrBase)
		{// None of the zone is in allocatable RAM
		return pagesUpdated;
		}

	// Mark each allocatable page in this zone with the index of the zone
#ifndef __MEMMODEL_FLEXIBLE__	// The FMM doesn't store zone indices in SPageInfos.
	TUint8 zoneIndex = aZone - iZones;
#endif
	TPhysAddr addr = aZone->iPhysBase;
	for (; addr <= aZone->iPhysEnd; addr += KPageSize)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(addr);
		if (pi)
			{
#ifndef __MEMMODEL_FLEXIBLE__	// The FMM doesn't store zone indices in SPageInfos.
			pi->SetZone(zoneIndex);
#endif
			pagesUpdated++;
			}
		}
	return pagesUpdated;
	}
/** HAL Function for the RAM allocator.
*/
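// User-side usage sketch (illustrative only; assumes the usual EHalGroupRam
// dispatch is wired up to this handler, with the function IDs from u32hal.h):
//	TUint zones = 0;
//	UserSvr::HalFunction(EHalGroupRam, ERamHalGetZoneCount, &zones, 0);
//	SRamZoneUtilisation util;
//	UserSvr::HalFunction(EHalGroupRam, ERamHalGetZoneUtilisation, (TAny*)0, &util);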
TInt DRamAllocator::HalFunction(TInt aFunction, TAny* a1, TAny* a2)
	{
	switch(aFunction)
		{
		case ERamHalGetZoneCount:
			{
			kumemput32(a1, &iNumZones, sizeof(iNumZones));
			return KErrNone;
			}

		case ERamHalGetZoneConfig:
			{
			TUint zoneIndex = (TUint)a1;
			if (zoneIndex < iNumZones)
				{
				SZone* pZone = iZones + zoneIndex;
				struct SRamZoneConfig config;
				NKern::ThreadEnterCS();
				M::RamAllocLock(); // get mutex to ensure consistent set of values are read...
				config.iZoneId = pZone->iId;
				config.iZoneIndex = zoneIndex;
				config.iPhysBase = pZone->iPhysBase;
				config.iPhysEnd = pZone->iPhysEnd;
				config.iPhysPages = pZone->iPhysPages;
				config.iPref = pZone->iPref;
				config.iFlags = pZone->iFlags;
				M::RamAllocUnlock();
				NKern::ThreadLeaveCS();
				kumemput32(a2,&config,sizeof(config));
				return KErrNone;
				}
			return KErrArgument;
			}

		case ERamHalGetZoneUtilisation:
			{
			TUint zoneIndex = (TUint)a1;
			if (zoneIndex < iNumZones)
				{
				SZone* pZone = iZones + zoneIndex;
				struct SRamZoneUtilisation config;
				NKern::ThreadEnterCS();
				M::RamAllocLock(); // get mutex to ensure consistent set of values are read...
				config.iZoneId = pZone->iId;
				config.iZoneIndex = zoneIndex;
				config.iPhysPages = pZone->iPhysPages;
				config.iFreePages = pZone->iFreePages;
				config.iAllocUnknown = pZone->iAllocPages[EPageUnknown];
				config.iAllocFixed = pZone->iAllocPages[EPageFixed];
				config.iAllocMovable = pZone->iAllocPages[EPageMovable];
				config.iAllocDiscardable = pZone->iAllocPages[EPageDiscard];
				config.iAllocOther = 0;
				M::RamAllocUnlock();
				NKern::ThreadLeaveCS();
				kumemput32(a2,&config,sizeof(config));
				return KErrNone;
				}
			return KErrArgument;
			}

		default:
			return KErrNotSupported;
		}
	}
/**
Set up the RAM allocator with information on the RAM available in the system that
comes from the bootstrap/superpage. This is intended to be called from
DRamAllocator::New().

@see DRamAllocator::New()
@param aInfo Two lists of SRamBanks for available and reserved banks in RAM, respectively
@param aZones A list of the RAM zones in the system and their configuration/preferences
@param aZoneCallback Pointer to a base port callback function that will be invoked by this class
*/
void DRamAllocator::Create(const SRamInfo& aInfo, const SRamZone* aZones, TRamZoneCallback aZoneCallback)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::Create"));

	// SZone::iBma array assumes this and KBmaAllPages can't be the same as any
	// allocatable page type.
	__ASSERT_COMPILE(EPageFixed < KPageImmovable && EPageUnknown < KPageImmovable &&
					EPageDiscard >= KPageImmovable && EPageMovable >= KPageImmovable &&
					KBmaAllPages != EPageFixed && KBmaAllPages != EPageMovable &&
					KBmaAllPages != EPageDiscard);
	// NoAllocOfPageType() requires this
	__ASSERT_COMPILE( KRamZoneFlagNoFixed == 1 << (EPageFixed - KPageTypeAllocBase) &&
					KRamZoneFlagNoMovable == 1 << (EPageMovable - KPageTypeAllocBase) &&
					KRamZoneFlagNoDiscard == 1 << (EPageDiscard - KPageTypeAllocBase));

	// SZone::iPhysEnd and iPhysAddrTop rely on this when checking contiguous zones etc.
	__ASSERT_COMPILE(KPageShift != 0);

	///////////////////////////////////////////////////////////////////////////
	// Determine where all the allocatable RAM pages are, using the SRamBank
	// data passed to the kernel by the bootstrap
	//////////////////////////////////////////////////////////////////////////
	TUint num_boot_banks=CountBanks(aInfo.iBanks);
	TUint32 total_ram_size=TotalBankSize(aInfo.iBanks);
	__KTRACE_OPT(KMMU,Kern::Printf("#banks from bootstrap=%d",num_boot_banks));
	__KTRACE_OPT(KMMU,Kern::Printf("Total size=%08x",total_ram_size));
	iTotalRamPages=total_ram_size>>KPageShift;
	// Assume all pages are allocated as unknown for now
	iTotalFreeRamPages = 0;
	__KTRACE_OPT(KMMU,Kern::Printf("Total size=%08x, total pages=%08x",total_ram_size,iTotalRamPages));

	iPhysAddrBase=aInfo.iBanks[0].iBase;
	const SRamBank& last_boot_bank=aInfo.iBanks[num_boot_banks-1];
	iPhysAddrTop = last_boot_bank.iBase + last_boot_bank.iSize - 1;
	__KTRACE_OPT(KMMU,Kern::Printf("PA base=%08x, PA top=%08x",iPhysAddrBase,iPhysAddrTop));

	__ASSERT_DEBUG(iPhysAddrTop > iPhysAddrBase, Panic(ECreateInvalidRamBanks));
	///////////////////////////////////////////////////////////////////////////
	// Determine how many zones are required and allocate all the
	// data structures that will be required, permanent ones first then
	// temporary ones to avoid kernel heap fragmentation.
	///////////////////////////////////////////////////////////////////////////
	// Stop any RAM zone callback operations until the initial one has been sent
	iZoneCallbackInitSent = EFalse;
	if (aZones)
		{
		CountZones(aZones);
		iZonePowerFunc = aZoneCallback;
		}
	else
		{// maximum number of zones is the number of non-coalesced boot banks
		iNumZones = num_boot_banks;
		// No zones specified so don't worry about invoking the callback function
		iZonePowerFunc = NULL;
		}

	// Permanent heap allocation #1 - may be resized if no zones specified
	__KTRACE_OPT(KMMU,Kern::Printf("iNumZones=%d", iNumZones));
	iZones = (SZone*)Kern::AllocZ(iNumZones*sizeof(SZone));
	if (!iZones)
		{
		Panic(ECreateNoMemory);
		}
	///////////////////////////////////////////////////////////////////////////
	// Coalesce contiguous boot banks
	///////////////////////////////////////////////////////////////////////////
	SRamBank* physBanks = (SRamBank*)Kern::Alloc(num_boot_banks*sizeof(SRamBank));
	if (!physBanks)
		{
		Panic(ECreateNoMemory);
		}
	SRamBank* coalescedBank = physBanks;
	const SRamBank* const lastBank = aInfo.iBanks + num_boot_banks;
	TPhysAddr currentBase = aInfo.iBanks->iBase;
	TPhysAddr currentEnd = aInfo.iBanks->iBase + aInfo.iBanks->iSize;
	const SRamBank* nextBank = aInfo.iBanks + 1;
	for (; nextBank <= lastBank; ++nextBank)
		{
		// Create a new bank if the next bank isn't contiguous or if
		// it is the last bank
		if (nextBank == lastBank || nextBank->iBase != currentEnd)
			{
			coalescedBank->iBase = currentBase;
			coalescedBank->iSize = currentEnd - currentBase;
			// Mark all the SPageInfos for the pages in this bank as unused.
			// Needs to be done here to allow SPageInfo::SafeFromPhysAddr to work
			// which is used by InitSPageInfos()
			SPageInfo* pi = SPageInfo::FromPhysAddr(coalescedBank->iBase);
			SPageInfo* piBankEnd = pi + (coalescedBank->iSize >> KPageShift);
			for (; pi < piBankEnd; pi++)
				pi->SetUnused();
			coalescedBank++;
			__KTRACE_OPT(KMMU, Kern::Printf("Coalesced bank: %08x-%08x", currentBase, currentEnd));
			currentBase = nextBank->iBase;
			currentEnd = currentBase + nextBank->iSize;
			}
		else
			{
			currentEnd += nextBank->iSize;
			}
		}
	TUint num_coalesced_banks = coalescedBank - physBanks;
	__KTRACE_OPT(KMMU, Kern::Printf("#Coalesced banks: %d", num_coalesced_banks));
	///////////////////////////////////////////////////////////////////////////
	// Initialise the SZone objects and mark all the SPageInfos with the index
	// of the zone they are in.
	//////////////////////////////////////////////////////////////////////////
	// Assume everything is off so the base port will get notification every time
	// a new zone is required during the rest of the boot process.
	if (aZones != NULL)
		{
		SZone* newZone = iZones; // pointer to the zone being created

		// Create and fill zoneAddrOrder with address ordered indices to aZones
		TUint8* zoneAddrOrder = (TUint8*)Kern::Alloc(iNumZones);
		if (!zoneAddrOrder)
			{
			Panic(ECreateNoMemory);
			}
		SortRamZones(aZones, zoneAddrOrder);

		// Now go through each SRamZone in address order initialising the SZone
		// objects.
		TUint i = 0;
		TUint totalZonePages = 0;
		for (; i < iNumZones; i++)
			{
			const SRamZone& ramZone = *(aZones + zoneAddrOrder[i]);
			newZone->iPhysBase = ramZone.iBase;
			newZone->iPhysEnd = ramZone.iBase + ramZone.iSize - 1;
			newZone->iPhysPages = ramZone.iSize >> KPageShift;
			newZone->iAllocPages[EPageUnknown] = newZone->iPhysPages;
			newZone->iId = ramZone.iId;
			newZone->iPref = ramZone.iPref;
			newZone->iFlags = ramZone.iFlags;
			totalZonePages += InitSPageInfos(newZone);
			newZone++;
			}

		// iZones now points to all the SZone objects stored in address order
		Kern::Free(zoneAddrOrder);
		if (totalZonePages != iTotalRamPages)
			{// The zones don't cover all of the allocatable RAM.
			Panic(EZonesIncomplete);
			}
		}
	else
		{
		iNumZones = num_coalesced_banks;
		iZones = (SZone*)Kern::ReAlloc((TAny*)iZones, iNumZones*sizeof(SZone));
		if (iZones == NULL)
			{
			Panic(ECreateNoMemory);
			}
		// Create a zone for each coalesced boot bank
		SRamBank* bank = physBanks;
		SRamBank* bankEnd = physBanks + num_coalesced_banks;
		SZone* zone = iZones;
		for (; bank < bankEnd; bank++, zone++)
			{
			zone->iPhysBase = bank->iBase;
			zone->iPhysEnd = bank->iBase + bank->iSize - 1;
			zone->iPhysPages = bank->iSize >> KPageShift;
			zone->iAllocPages[EPageUnknown] = zone->iPhysPages;
			zone->iId = (TUint)bank; // doesn't matter what it is as long as it is unique
			InitSPageInfos(zone);
			}
		}
	// Delete the coalesced banks as they are no longer required
	Kern::Free(physBanks);
	//////////////////////////////////////////////////////////////////////////
	// Create each zone's bit map allocators now, as no temporary heap
	// cells are still allocated at this point.
	///////////////////////////////////////////////////////////////////////////
	const SZone* const endZone = iZones + iNumZones;
	SZone* zone = iZones;
	for (; zone < endZone; zone++)
		{// Create each BMA with all pages allocated as unknown.
		for (TUint i = 0; i < EPageTypes; i++)
			{
			// Only mark the all pages bma and fixed/unknown bma as allocated.
			TBool notAllocated = (i >= (TUint)EPageMovable);
			zone->iBma[i] = TBitMapAllocator::New(zone->iPhysPages, notAllocated);
			if (!zone->iBma[i])
				{
				Panic(ECreateNoMemory);
				}
			}
		}
	///////////////////////////////////////////////////////////////////////////
	// Unallocate each page in each bank so that it can be allocated when required.
	// Any page that exists outside a bank will remain allocated as EPageUnknown
	// and will therefore not be touched by the allocator.
	//////////////////////////////////////////////////////////////////////////
	// Temporarily fill the preference list so SetPhysicalRamState can succeed.
	// Block bma verifications as bma and alloc counts aren't consistent yet.
	iAllowBmaVerify = EFalse;
	const SZone* const lastZone = iZones + iNumZones;
	zone = iZones;
	for (; zone < lastZone; zone++)
		{
		iZonePrefList.Add(&zone->iPrefLink);
		}
	const SRamBank* const lastPhysBank = aInfo.iBanks + num_boot_banks;
	const SRamBank* bank = aInfo.iBanks;
	for (; bank < lastPhysBank; bank++)
		{// Free all the pages in this bank.
		SetPhysicalRamState(bank->iBase, bank->iSize, ETrue, EPageUnknown);
		}
	// Only now is it safe to enable bma verifications
	iAllowBmaVerify = ETrue;
	///////////////////////////////////////////////////////////////////////////
	// Sort the zones by preference and create a preference ordered linked list
	///////////////////////////////////////////////////////////////////////////
	zone = iZones;
	for (; zone < lastZone; zone++)
		{// clear all the zones from the preference list as not in preference order
		zone->iPrefLink.Deque();
		}
	SZone** prefOrder = (SZone**)Kern::AllocZ(iNumZones * sizeof(SZone*));
	if (!prefOrder)
		{
		Panic(ECreateNoMemory);
		}
	zone = iZones;
	for(; zone < lastZone; zone++)
		{
		TInt lowerZones = 0;
		// Find how many zones have a lower preference than this one
		const SZone* zone2 = iZones;
		for (; zone2 < lastZone; zone2++)
			{
			if (zone->iPref > zone2->iPref ||
				(zone->iPref == zone2->iPref && zone->iFreePages > zone2->iFreePages))
				{
				lowerZones++;
				}
			}
		while (prefOrder[lowerZones] != 0)
			{// Zone(s) of this preference and size already exist so
			// place this one after it/them
			lowerZones++;
			}
		prefOrder[lowerZones] = zone;
		}

	// Fill the preference ordered linked list
	SZone** const lastPref = prefOrder + iNumZones;
	SZone** prefZone = prefOrder;
	TUint prefRank = 0;
	for (; prefZone < lastPref; prefZone++, prefRank++)
		{
		SZone& zone = **prefZone;
		iZonePrefList.Add(&zone.iPrefLink);
		zone.iPrefRank = prefRank;
		}
	Kern::Free(prefOrder); // Remove temporary allocation
	///////////////////////////////////////////////////////////////////////////
	// Now mark any regions reserved by the base port as allocated and not
	// for use by the RAM allocator.
	///////////////////////////////////////////////////////////////////////////
	const SRamBank* pB = lastBank + 1; // first reserved block specifier
	for (; pB->iSize; ++pB)
		{
		__KTRACE_OPT(KMMU, Kern::Printf("Reserve physical block %08x+%x", pB->iBase, pB->iSize));
		TInt r = SetPhysicalRamState(pB->iBase, pB->iSize, EFalse, EPageFixed);
		__KTRACE_OPT(KMMU, Kern::Printf("Reserve returns %d", r));
		if (r != KErrNone)
			{
			Panic(ECreateInvalidReserveBank);
			}
#ifdef BTRACE_KERNEL_MEMORY
		BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, pB->iSize, pB->iBase);
		Epoc::DriverAllocdPhysRam += pB->iSize;
#endif
#ifndef __MEMMODEL_FLEXIBLE__	// Mmu::Init2Common() handles this in the FMM.
		// Synchronise the SPageInfos with any blocks that were reserved by
		// marking any reserved regions as locked
		TPhysAddr physAddrEnd = pB->iBase + pB->iSize;
		TPhysAddr physAddr = pB->iBase;
		for(; physAddr < physAddrEnd; physAddr += KPageSize)
			{
			SPageInfo* pi = SPageInfo::FromPhysAddr(physAddr);
			pi->Lock();
			}
#endif
		}
	//////////////////////////////////////////////////////////////////////////
	// Now that we have the RAM zone preference list and know how many
	// allocatable pages there are, set iZoneLeastMovDis to be the RAM zone
	// that will be used when half of the RAM is in use. This is a boot-up
	// optimisation to reduce the amount of moving and/or discarding that fixed
	// page allocations will have to make during boot.
	//////////////////////////////////////////////////////////////////////////
	TUint halfAllocatablePages = iTotalFreeRamPages >> 1;
	TUint pages = 0;
	SDblQueLink* link = &iZonePrefList.iA;
	do
		{
		link = link->iNext;
		__NK_ASSERT_DEBUG(link != &iZonePrefList.iA);
		SZone& zonePages = *_LOFF(link, SZone, iPrefLink);
		pages += zonePages.iFreePages;
		}
	while(pages < halfAllocatablePages);
	iZoneLeastMovDis = link;
	iZoneLeastMovDisRank = _LOFF(link, SZone, iPrefLink)->iPrefRank;

	// Reset general defrag links.
	iZoneGeneralPrefLink = NULL;
	iZoneGeneralTmpLink = NULL;

	__KTRACE_OPT(KMMU,DebugDump());
	}
void DRamAllocator::MarkPagesAllocated(TPhysAddr aAddr, TInt aCount, TZonePageType aType)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPagesAllocated(%x+%x)",aAddr,aCount));

	M::RamAllocIsLocked();

	// Don't allow unknown pages to be allocated, saves an extra 'if' when
	// creating bmaType.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	__ASSERT_DEBUG( !(TUint32(aAddr) & (KPageSize - 1)) &&
					(TUint32(aAddr) < TUint32(iPhysAddrTop)) &&
					(TUint32(aAddr) >= TUint32(iPhysAddrBase)) &&
					(TUint32((aCount << KPageShift) - 1 + aAddr) <= TUint32(iPhysAddrTop)),
					Panic(EDoMarkPagesAllocated1));

	iTotalFreeRamPages-=aCount;
	// Find the 1st zone the 1st set of allocations belong to
	TInt offset = 0;
	SZone* pZ = GetZoneAndOffset(aAddr,offset);
	if (pZ == NULL)
		{
		Panic(EDoMarkPagesAllocated1);
		}
	while(aCount)
		{
		TBitMapAllocator& bmaAll = *(pZ->iBma[KBmaAllPages]);
		TBitMapAllocator& bmaType = *(pZ->iBma[aType]);
		TInt count = Min(bmaAll.iSize - offset, aCount);
		bmaAll.Alloc(offset, count);
		bmaType.Alloc(offset, count);
		ZoneAllocPages(pZ, count, aType);
		aCount -= count;

		// If spanning zones then ensure the next zone is contiguous.
		__ASSERT_DEBUG(!aCount || ((pZ + 1)->iPhysBase != 0 && ((pZ + 1)->iPhysBase - 1) == pZ->iPhysEnd), Panic(EDoMarkPagesAllocated1));

		pZ++;		// zones in physical address order so move to the next one
		offset = 0;	// and reset offset to start of the zone
		}
	}
TInt DRamAllocator::MarkPageAllocated(TPhysAddr aAddr, TZonePageType aType)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPageAllocated %08x",aAddr));

	M::RamAllocIsLocked();

	// Don't allow unknown pages to be allocated, saves an extra 'if' when
	// creating bmaType.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	TInt n;
	SZone* z=GetZoneAndOffset(aAddr,n);
	if (!z)
		{
		return KErrArgument;
		}
	__KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
	TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
	TBitMapAllocator& bmaType = *(z->iBma[aType]);
	if (bmaAll.NotFree(n,1))
		{
		__KTRACE_OPT(KMMU,Kern::Printf("Page already allocated"));
		return KErrAlreadyExists; // page is already allocated
		}
	bmaAll.Alloc(n,1);
	bmaType.Alloc(n,1);
	--iTotalFreeRamPages;
	ZoneAllocPages(z, 1, aType);
	__KTRACE_OPT(KMMU,Kern::Printf("Total free RAM pages now = %d",iTotalFreeRamPages));

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocMarkAllocated, aType, aAddr);
#endif
	return KErrNone;
	}
TInt DRamAllocator::FreeRamPage(TPhysAddr aAddr, TZonePageType aType)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPage %08x",aAddr));

	M::RamAllocIsLocked();

#ifndef __MEMMODEL_FLEXIBLE__
	// Check the lock counter of the page
	if (aAddr != KPhysAddrInvalid)
		{
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aAddr);
		if(pi && pi->LockCount())
			Panic(EFreeingLockedPage);
		}
#endif
	// Don't allow unknown pages to be freed, saves an extra 'if' when
	// creating bmaType.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	TInt n;
	SZone* z=GetZoneAndOffset(aAddr,n);
	if (!z)
		{
		return KErrArgument;
		}
	__KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n));
	TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
	TBitMapAllocator& bmaType = *(z->iBma[aType]);
	bmaAll.Free(n);
	bmaType.Free(n);
	++iTotalFreeRamPages;
	ZoneFreePages(z, 1, aType);

#ifdef BTRACE_RAM_ALLOCATOR
	BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr);
#endif
	return KErrNone;
	}
void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages));

	M::RamAllocIsLocked();

#if defined(_DEBUG) && !defined(__MEMMODEL_FLEXIBLE__)
	// Check the lock counter for each page that is about to be freed.
	TInt pageNum = aNumPages;
	TPhysAddr* pageList = aPageList;
	while (pageNum--)
		{
		TPhysAddr pa = *pageList++;
		if (pa == KPhysAddrInvalid)
			continue;
		SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
		if(pi && pi->LockCount())
			Panic(EFreeingLockedPage);
		}
#endif
	while(aNumPages--)
		{
		TPhysAddr first_pa = *aPageList++;
		if (first_pa == KPhysAddrInvalid)
			{
			continue;
			}
		TInt ix;
		SZone* z = GetZoneAndOffset(first_pa,ix);
		if (!z)
			{
			continue;
			}
		TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]);
		TInt zp_rem = bmaAll.iSize - ix;
		__KTRACE_OPT(KMMU,Kern::Printf("1st PA=%08x Zone %d index %04x",first_pa,z-iZones,ix));
		TInt n = 1;
		TPhysAddr pa = first_pa + KPageSize;
		while (--zp_rem && aNumPages && *aPageList==pa)
			{
			--aNumPages;
			++aPageList;
			++n;
			pa += KPageSize;
			}
		__KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages));
		bmaAll.Free(ix,n);
		TBitMapAllocator& bmaType = *(z->iBma[aType]);
		bmaType.Free(ix,n);
		iTotalFreeRamPages += n;
		ZoneFreePages(z, n, aType);
#ifdef BTRACE_RAM_ALLOCATOR
		BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa);
#endif
		}
#ifdef BTRACE_RAM_ALLOCATOR
	BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocFreePagesEnd);
#endif
	}
/**
Attempt to clear up to the required number of discardable or movable pages
from the RAM zone.

@param aZone The RAM zone to clear.
@param aRequiredPages The maximum number of pages to clear.
*/
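// Behaviour sketch (hypothetical numbers): with aRequiredPages == 8 the loop
// below first discards up to 8 discardable pages; if only 5 could be
// discarded, up to 3 movable pages are then moved out into other RAM zones.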
void DRamAllocator::ZoneClearPages(SZone& aZone, TUint aRequiredPages)
	{
	__KTRACE_OPT(KMMU,
		Kern::Printf("ZoneClearPages: ID 0x%x, req 0x%x", aZone.iId, aRequiredPages));
	// Discard the required number of discardable pages.
	TUint offset = 0;
	TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard);
	while (r == KErrNone && aRequiredPages)
		{
		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
		TInt discarded = M::DiscardPage(physAddr, aZone.iId, EFalse);
		if (discarded == KErrNone)
			{// The page was successfully discarded.
			aRequiredPages--;
			}
		offset++;
		r = NextAllocatedPage(&aZone, offset, EPageDiscard);
		}
	// Move the required number of movable pages.
	offset = 0;
	r = NextAllocatedPage(&aZone, offset, EPageMovable);
	while(r == KErrNone && aRequiredPages)
		{
		TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase;
		TPhysAddr newAddr = KPhysAddrInvalid;
		if (M::MovePage(physAddr, newAddr, aZone.iId, EFalse) == KErrNone)
			{// The page was successfully moved.
#ifdef _DEBUG
			TInt newOffset = 0;
			SZone* newZone = GetZoneAndOffset(newAddr, newOffset);
			__NK_ASSERT_DEBUG(newZone != &aZone);
#endif
			aRequiredPages--;
			}
		offset++;
		r = NextAllocatedPage(&aZone, offset, EPageMovable);
		}
	}
/** Attempt to allocate pages into a particular zone. Pages will not
always be contiguous.

@param aPageList On return, will contain the addresses of any allocated pages
@param aZone The zone to allocate from
@param aNumPages The number of pages to allocate
@param aType The type of pages to allocate
@return The number of pages that were allocated
*/
TUint32 DRamAllocator::ZoneFindPages(TPhysAddr*& aPageList, SZone& aZone, TUint32 aNumPages, TZonePageType aType)
	{
	// Don't allow unknown pages to be allocated, saves an extra 'if' when
	// creating bmaType.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	TBitMapAllocator& bmaAll = *aZone.iBma[KBmaAllPages];
	TBitMapAllocator& bmaType = *(aZone.iBma[aType]);
	TPhysAddr zpb = aZone.iPhysBase;
	TInt got = bmaAll.AllocList(aNumPages, (TInt*)aPageList);
	if (got)
		{
		TPhysAddr* pE = aPageList + got;
		while(aPageList < pE)
			{
			TInt ix = *aPageList;
			*aPageList++ = zpb + (ix << KPageShift);
			__KTRACE_OPT(KMMU,Kern::Printf("Got page @%08x",zpb + (ix << KPageShift)));

			// Mark the page allocated on the page type bit map.
			bmaType.Alloc(ix, 1);
			}
		ZoneAllocPages(&aZone, got, aType);
#ifdef BTRACE_RAM_ALLOCATOR
		BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocRamPages, aType, got, *(pE-got));
#endif
		}
	return got;
	}
/**
Allocate discontiguous pages.

Fixed pages are always allocated into the most preferable RAM zone that has free,
movable or discardable pages in it. This is to avoid fixed pages being placed
in the less preferred RAM zones.

Movable and discardable pages are allocated into the RAM zones currently in use.
An empty RAM zone will only be used (switched on) if there are not enough free
pages in the in-use RAM zones. The pages will be allocated from the least
preferable RAM zone to be in use after the allocation, through to the more
preferred RAM zones.

If a valid zone is specified in aBlockedZoneId then that RAM zone will not be
allocated into. Also, if aBlockedZoneId and aBlockRest are set then the allocation
will stop if a RAM zone with ID aBlockedZoneId is reached.

@param aPageList On success, will contain the address of each allocated page
@param aNumPages The number of the pages to allocate
@param aType The type of the pages to allocate
@param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into.
The default value has no effect.
@param aBlockRest Set to ETrue to stop this allocation using any currently empty
RAM zones, EFalse to allow empty RAM zones to be used. Only
affects movable and discardable allocations.

@return 0 on success, the number of extra pages required to fulfill the request on failure.
*/
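// Kernel-side usage sketch (illustrative only; allocator is a hypothetical
// DRamAllocator pointer and the RAM alloc lock is held):
//	TPhysAddr pages[16];
//	TInt missing = allocator->AllocRamPages(pages, 16, EPageFixed);
//	if (missing == 0)
//		{}// pages[] now holds the physical address of each allocated page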
TInt DRamAllocator::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType, TUint aBlockedZoneId, TBool aBlockRest)
	{
	__KTRACE_OPT(KMMU,Kern::Printf("AllocRamPages 0x%x type%d",aNumPages, aType));

	M::RamAllocIsLocked();

	// Should never allocate unknown pages.
	__NK_ASSERT_DEBUG(aType != EPageUnknown);

	TPhysAddr* pageListBase = aPageList;
	TUint32 numMissing = aNumPages;

	if (aType == EPageFixed)
		{// Currently only a general defrag operation should set this and it won't
		// allocate fixed pages.
		__NK_ASSERT_DEBUG(!aBlockRest);
		if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages())
			{// Not enough free space and not enough freeable pages.
			goto exit;
			}

		// Search through each zone in preference order until all pages allocated or
		// we have reached the end of the preference list
		SDblQueLink* link = iZonePrefList.First();
		while (numMissing && link != &iZonePrefList.iA)
			{
			SZone& zone = *_LOFF(link, SZone, iPrefLink);
			// Get the link to the next zone before any potential reordering,
			// which would occur if the previous zone has the same preference and
			// more free space after this allocation.
			link = link->iNext;

			if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType))
				{// The flags disallow aType pages or all pages.
				__KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
				continue;
				}

			numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
			__KTRACE_OPT(KMMU, Kern::Printf("zone.iId 0x%x", zone.iId));

			if (numMissing &&
				(zone.iAllocPages[EPageMovable] || zone.iAllocPages[EPageDiscard]))
				{// Not all the required pages were allocated and there are still some
				// movable and discardable pages in this RAM zone.
				ZoneClearPages(zone, numMissing);

				// Have discarded and moved everything required or possible so
				// now allocate into the pages just freed.
				numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
				}
			}
		}
	else
		{
		if ((TUint)aNumPages > iTotalFreeRamPages)
			{// Not enough free pages to fulfill this request so return the amount required
			return aNumPages - iTotalFreeRamPages;
			}
		// Determine if there are enough free pages in the RAM zones in use.
		TUint totalFreeInUse = 0;
		SDblQueLink* link = iZoneLeastMovDis;
		for(; link != &iZonePrefList.iA; link = link->iPrev)
			{
			SZone& zone = *_LOFF(link, SZone, iPrefLink);
			if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType) ||
				(aBlockRest && (zone.iFlags & KRamZoneFlagGenDefragBlock)))
				{// The blocked RAM zone or flags disallow aType pages or all pages
				__KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
				continue;
				}
			totalFreeInUse += zone.iFreePages;
			}

		if (aBlockRest && totalFreeInUse < (TUint)aNumPages)
			{// Allocating as part of a general defragmentation and
			// can't allocate without using a RAM zone less preferable than
			// the current least preferable RAM zone with movable and/or
			// discardable pages.
			__NK_ASSERT_DEBUG(numMissing);
			goto exit;
			}

		SDblQueLink* leastClearable = iZoneLeastMovDis;
		while (totalFreeInUse < (TUint)aNumPages)
			{// The amount of free pages in the RAM zones with movable
			// and/or discardable isn't enough.
			leastClearable = leastClearable->iNext;
			if (leastClearable == &iZonePrefList.iA)
				{// There are no more RAM zones to allocate into.
				__NK_ASSERT_DEBUG(numMissing);
				goto exit;
				}
			SZone& zone = *_LOFF(leastClearable, SZone, iPrefLink);
			if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType))
				{// The flags disallow aType pages or all pages
				__KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
				continue;
				}
			totalFreeInUse += zone.iFreePages;
			}
		// Now that we know exactly how many RAM zones will be required do
		// the allocation. To reduce fixed allocations having to clear RAM
		// zones, allocate from the least preferable RAM zone to be used
		// to the most preferable RAM zone.
		link = leastClearable;
		while (numMissing)
			{
			__NK_ASSERT_DEBUG(link != &iZonePrefList.iA);
			SZone& zone = *_LOFF(link, SZone, iPrefLink);
			// Update the link before any reordering so we don't miss a RAM zone.
			link = link->iPrev;

			if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType) ||
				(aBlockRest && (zone.iFlags & KRamZoneFlagGenDefragBlock)))
				{// The blocked RAM zone or flags disallow aType pages or all pages
				__KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags));
				continue;
				}

			numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType);
			__KTRACE_OPT(KMMU, Kern::Printf("zone.iId 0x%x", zone.iId));
			}
		__NK_ASSERT_DEBUG(!numMissing);
		}

exit:
	// Update here so any call to FreeRamPages doesn't upset the count
	aNumPages -= numMissing; // set to the number of pages that were allocated
	iTotalFreeRamPages -= aNumPages;
	if (numMissing)
		{// Couldn't allocate all the required pages so free those that were allocated
		FreeRamPages(pageListBase, aNumPages, aType);
		}
#ifdef BTRACE_RAM_ALLOCATOR
	else
		{
		BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocRamPagesEnd);
		}
#endif
	return numMissing;
	}
/**
Attempt to allocate discontiguous pages from the specified RAM zones.

NOTE - This method only obeys the KRamZoneFlagNoAlloc and KRamZoneFlagClaiming
flags and not the others.
But as currently only EFixed pages will be allocated using this method that is
the desired behaviour.

@param aZoneIdList An array of the IDs of the RAM zones to allocate from.
@param aZoneIdCount The number of IDs in aZoneIdList.
@param aPageList On success, will contain the address of each allocated page.
@param aNumPages The number of the pages to allocate.
@param aType The type of the pages to allocate.

@return KErrNone on success, KErrNoMemory if the allocation couldn't succeed or
the RAM zone has the KRamZoneFlagNoAlloc flag set, KErrArgument if a zone of
aZoneIdList doesn't exist or if aNumPages is greater than the total number of
pages in the specified zones.
*/
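// Usage sketch (illustrative only; hypothetical zone IDs): allocate fixed
// pages from zones 1 and 2 only; on any failure the partially allocated
// pages are freed again before the error is returned:
//	TUint zoneIds[] = {1, 2};
//	TPhysAddr pages[8];
//	TInt r = allocator->ZoneAllocRamPages(zoneIds, 2, pages, 8, EPageFixed);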
TInt DRamAllocator::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType)
	{
	M::RamAllocIsLocked();
	__NK_ASSERT_DEBUG(aType == EPageFixed);

	__KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocRamPages 0x%x zones 0x%x",aNumPages, aZoneIdCount));

	TInt r = KErrNone;
	TUint* zoneIdPtr = aZoneIdList;
	TUint* zoneIdEnd = zoneIdPtr + aZoneIdCount;
	TUint numMissing = aNumPages;
	TUint physicalPages = 0;
	TPhysAddr* pageListBase = aPageList;

	// Always loop through all the RAM zones so that if an invalid ID is specified
	// it is always detected, whether all the specified RAM zones were required
	// for the allocation or not.
	for(; zoneIdPtr < zoneIdEnd; zoneIdPtr++)
		{
		SZone* zone = ZoneFromId(*zoneIdPtr);
		if (zone == NULL)
			{// Invalid zone ID.
			r = KErrArgument;
			break;
			}

		physicalPages += zone->iPhysPages;

		if (zone->iFlags & (KRamZoneFlagNoAlloc|KRamZoneFlagClaiming))
			{// If this RAM zone can't be allocated into then skip it.
			continue;
			}

		numMissing -= ZoneFindPages(aPageList, *zone, numMissing, aType);

		if (numMissing && aType == EPageFixed)
			{// Remove up to the required number of pages from the RAM zone
			// and reattempt the allocation.
			ZoneClearPages(*zone, numMissing);
			numMissing -= ZoneFindPages(aPageList, *zone, numMissing, aType);
			}
		}

	// Update iTotalFreeRamPages here so that if the allocation doesn't succeed then
	// FreeRamPages() will keep it consistent.
	TUint numAllocated = aNumPages - numMissing;
	iTotalFreeRamPages -= numAllocated;

	if (r == KErrArgument || physicalPages < (TUint)aNumPages)
		{// Invalid zone ID or the number of pages requested is too large.
		// This should fail regardless of whether the allocation failed or not.
		FreeRamPages(pageListBase, numAllocated, aType);
		return KErrArgument;
		}

	if (numMissing)
		{// Couldn't allocate all the required pages so free those that were allocated
		FreeRamPages(pageListBase, numAllocated, aType);
		return KErrNoMemory;
		}

	// Have allocated all the required pages.
#ifdef BTRACE_RAM_ALLOCATOR
	BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocZoneRamPagesEnd);
#endif
	return KErrNone;
	}
/**
Will return zones one at a time in the following search patterns until a suitable
zone has been found or it is determined that there is no suitable zone:
	- preference order
	- address order
Before the first call of a new search sequence the caller must set:
	iZoneTmpAddrIndex = -1;
	iZoneTmpPrefLink = iZonePrefList.First();

@param aZone On return this will be a pointer to the next zone to search.
@param aState The current search state, i.e. which of the zone orderings to follow.
It will be updated if necessary by this function.
@param aType The type of page to be allocated.
@param aBlockedZoneId The ID of a RAM zone not to allocate into.
@param aBlockRest ETrue if the allocation should fail as soon as a blocked zone
is reached, EFalse otherwise. (Currently not used)
@return ETrue when a suitable zone is found, EFalse when no more zones are available.
*/
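// Search-loop pattern (as used by AllocContiguousRam below; KRamZoneInvalidId
// here just stands for "no blocked zone"):
//	SZone* zone;
//	TZoneSearchState state = EZoneSearchPref;
//	iZoneTmpAddrIndex = -1;
//	iZoneTmpPrefLink = iZonePrefList.First();
//	while (NextAllocZone(zone, state, EPageFixed, KRamZoneInvalidId, EFalse))
//		{}// try to satisfy the request from *zone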
TBool DRamAllocator::NextAllocZone(SZone*& aZone, TZoneSearchState& aState, TZonePageType aType, TUint aBlockedZoneId, TBool aBlockRest)
	{
	TUint currentState = aState;
	TBool r = EFalse;

	for (; currentState < EZoneSearchEnd; currentState++)
		{
		if (currentState == EZoneSearchAddr)
			{
			iZoneTmpAddrIndex++;
			for (; iZoneTmpAddrIndex < (TInt)iNumZones; iZoneTmpAddrIndex++)
				{
				aZone = iZones + iZoneTmpAddrIndex;
				if (aBlockedZoneId != aZone->iId && !NoAllocOfPageType(*aZone, aType))
					{
					r = ETrue;
					goto exit;
					}
				}
			}
		else
			{
			while(iZoneTmpPrefLink != &iZonePrefList.iA)
				{
				aZone = _LOFF(iZoneTmpPrefLink, SZone, iPrefLink);
				iZoneTmpPrefLink = iZoneTmpPrefLink->iNext; // Update before any re-ordering
				if (aBlockedZoneId != aZone->iId && !NoAllocOfPageType(*aZone, aType))
					{
					r = ETrue;
					goto exit;
					}
				}
			}
		}
exit:
	__NK_ASSERT_DEBUG((r && currentState < EZoneSearchEnd) || (!r && currentState == EZoneSearchEnd));

	aState = (TZoneSearchState)currentState;
	return r;
	}
/**
Search through the zones for the requested contiguous RAM, first in preference
order then, if that fails, in address order.

@param aNumPages The number of contiguous pages to find
@param aPhysAddr Will contain the base address of any contiguous run if found
@param aType The page type of the memory to be allocated
@param aAlign Alignment specified as the alignment shift
@param aBlockedZoneId The ID of a zone that can't be allocated into, by default this has no effect
@param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached
in preference ordering. EFalse otherwise.

@return KErrNone on success, KErrNoMemory otherwise
*/
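// Alignment arithmetic (worked example, assuming 4KB pages so KPageShift==12):
// a request with aAlign==20 (1MB alignment) gives alignWrtPage==8 and
// alignmask==0xFF; a candidate page index is rounded up with
// (index + alignmask) & ~alignmask, so the returned physical address is a
// multiple of 1MB.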
1696 TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
1698 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
1700 M::RamAllocIsLocked();
1702 // No support for non-fixed pages as this will discard and move
1703 // pages if required.
1704 __NK_ASSERT_DEBUG(aType == EPageFixed);
1705 TInt alignWrtPage = Max(aAlign - KPageShift, 0);
1706 TUint32 alignmask = (1u << alignWrtPage) - 1;
1708 // Attempt to find enough pages searching in preference order first then
1710 TZoneSearchState searchState = EZoneSearchPref;
1712 SZone* prevZone = NULL;
1713 TInt carryAll = 0; // Carry for all pages bma, clear to start new run.
1714 TInt carryImmov = 0; // Carry for immovable pages bma, clear to start new run.
1717 iZoneTmpAddrIndex = -1;
1718 iZoneTmpPrefLink = iZonePrefList.First();
1719 while (NextAllocZone(zone, searchState, aType, aBlockedZoneId, aBlockRest))
1721 // Be sure to start from scratch if zone not contiguous with previous zone
1722 if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
1728 TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
1729 base = TInt(zone->iPhysBase >> KPageShift);
1731 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset));
1732 offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength);
1733 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
1736 {// Have found enough contiguous pages so return address of physical page
1737 // at the start of the region
1738 aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift;
1739 MarkPagesAllocated(aPhysAddr, aNumPages, aType);
1741 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
1742 #ifdef BTRACE_RAM_ALLOCATOR
1743 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
1748 {// No run found when looking in just the free pages so see if this
1749 // RAM zone could be used if pages were moved or discarded.
1750 if (aNumPages > KMaxFreeableContiguousPages)
1751 {// Can't move or discard any pages so move on to next RAM zone
1752 // taking any run at the end of this RAM zone into account.
1756 TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]);
1757 offset = 0; // Clear so searches whole of fixed BMA on the first pass.
1760 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset));
1761 offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset);
1762 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
1764 {// Have found a run in immovable page bma so attempt to clear
1765 // it for the allocation.
1766 TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift;
1767 TPhysAddr addrEnd = addrBase + (aNumPages << KPageShift);
1769 // Block the RAM zones containing the contiguous region
1770 // from being allocated into when pages are moved or replaced.
1771 TPhysAddr addr = addrBase;
1773 SZone* tmpZone = GetZoneAndOffset(addr, tmpOffset);
1774 while (addr < addrEnd-1)
1776 tmpZone->iFlags |= KRamZoneFlagTmpBlockAlloc;
1777 addr = tmpZone->iPhysEnd;
1782 TInt contigOffset = 0;
1783 SZone* contigZone = GetZoneAndOffset(addr, contigOffset);
1784 for (; addr != addrEnd; addr += KPageSize, contigOffset++)
1786 if (contigZone->iPhysEnd < addr)
1788 contigZone = GetZoneAndOffset(addr, contigOffset);
1789 __NK_ASSERT_DEBUG(contigZone != NULL);
1791 #ifdef _DEBUG // This page shouldn't be allocated as fixed, only movable or discardable.
1792 __NK_ASSERT_DEBUG(contigZone != NULL);
1793 __NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotAllocated(contigOffset, 1));
1794 SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(addr);
1795 __NK_ASSERT_DEBUG(pageInfo != NULL);
1798 TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, EFalse);
1799 if (moveRet != KErrNone && moveRet != KErrNotFound)
1800 {// This page couldn't be moved or discarded so
1801 // restart the search at the page after this one.
1802 __KTRACE_OPT(KMMU2,
1803 Kern::Printf("ContigMov fail offset %x moveRet %d addr %x carryImmov %x",
1804 offset, moveRet, addr, carryImmov));
1805 // Can't rely on RAM zone preference ordering being
1806 // the same so clear the carries and restart the search from
1807 // within the current RAM zone or skip onto the next
1808 // one if at the end of this one.
1811 offset = (addr < zone->iPhysBase)? 0 : contigOffset + 1;
1812 __KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset %x", offset));
1816 // Unblock the RAM zones containing the contiguous region.
1817 TPhysAddr flagAddr = addrBase;
1818 tmpZone = GetZoneAndOffset(flagAddr, tmpOffset);
1819 while (flagAddr < addrEnd-1)
1821 tmpZone->iFlags &= ~KRamZoneFlagTmpBlockAlloc;
1822 flagAddr = tmpZone->iPhysEnd;
1826 if (addr == addrEnd)
1827 {// Cleared all the required pages so allocate them.
1828 // Return address of physical page at the start of the region.
1829 aPhysAddr = addrBase;
1830 MarkPagesAllocated(aPhysAddr, aNumPages, aType);
1832 __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr));
1833 #ifdef BTRACE_RAM_ALLOCATOR
1834 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr);
1840 // Keep searching the immovable page bma of the current RAM zone until
1841 // we have gone past the end of the RAM zone or no run can be found.
1842 while (offset >= 0 && (TUint)offset < zone->iPhysPages);
1845 return KErrNoMemory;
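// A minimal usage sketch (with the RamAlloc mutex already held): request 16
// fixed pages on a 64KB boundary. 'allocator' is a hypothetical DRamAllocator
// pointer and the default aBlockedZoneId/aBlockRest arguments declared in the
// header are assumed.
//
//	TPhysAddr physAddr;
//	TInt r = allocator->AllocContiguousRam(16, physAddr, EPageFixed, 16);
//	if (r == KErrNone)
//		{// physAddr now holds the 64KB-aligned base of the 16-page run.
//		}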
1850 Attempt to allocate the contiguous RAM from the specified zone.
1852 NOTE - This method only obeys the KRamZoneFlagNoAlloc and KRamZoneFlagClaiming
1853 flags and not the others.
1854 However, as currently only EPageFixed pages are allocated using this method,
1855 that is the desired behaviour.
1857 @param aZoneIdList An array of the IDs of the RAM zones to allocate from.
1858 @param aZoneIdCount The number of the IDs listed by aZoneIdList.
1859 @param aSize The number of contiguous bytes to find
1860 @param aPhysAddr Will contain the base address of the contiguous run if found
1861 @param aType The page type of the memory to be allocated
1862 @param aAlign Alignment specified as the alignment shift
1864 @return KErrNone on success, KErrNoMemory if the allocation couldn't succeed or
1865 the RAM zone has the KRamZoneFlagNoAlloc flag set. KErrArgument if a zone in
1866 aZoneIdList doesn't exist or if aSize is larger than the total size of the zones.
1868 TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign)
1870 __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign));
1872 M::RamAllocIsLocked();
1873 __NK_ASSERT_DEBUG(aType == EPageFixed);
1876 TUint numPages = (aSize + KPageSize - 1) >> KPageShift;
1877 TInt carry = 0; // must be zero as this is always the start of a new run
1878 TInt alignWrtPage = Max(aAlign - KPageShift, 0);
1879 TUint32 alignmask = (1u << alignWrtPage) - 1;
1883 TUint physPages = 0;
1884 TUint* zoneIdPtr = aZoneIdList;
1885 TUint* zoneIdEnd = aZoneIdList + aZoneIdCount;
1886 SZone* prevZone = NULL;
1887 for (; zoneIdPtr < zoneIdEnd; zoneIdPtr++)
1889 SZone* zone = ZoneFromId(*zoneIdPtr);
1891 {// Couldn't find zone of this ID or it isn't large enough
1892 return KErrArgument;
1894 physPages += zone->iPhysPages;
1897 (zone->iFlags & (KRamZoneFlagNoAlloc|KRamZoneFlagClaiming)))
1898 {// Keep searching through the RAM zones if the allocation
1899 // has already succeeded (to ensure the ID list is always fully verified),
1900 // or if this zone is currently blocked for further allocations.
1904 // Be sure to start from scratch if zone not contiguous with previous zone
1905 if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
1912 TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
1913 base = TInt(zone->iPhysBase >> KPageShift);
1915 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: aBase=%08x aCarry=%08x", base, carry));
1916 offset = bmaAll.AllocAligned(numPages, alignWrtPage, base, EFalse, carry, len);
1917 __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
1920 if (physPages < numPages)
1921 {// The allocation requested is too large for the specified RAM zones.
1922 return KErrArgument;
1926 {// The allocation failed.
1927 return KErrNoMemory;
1930 // Have found enough contiguous pages so mark the pages allocated and
1931 // return address of physical page at the start of the region.
1932 aPhysAddr = TPhysAddr((base + offset - carry + alignmask) & ~alignmask) << KPageShift;
1933 MarkPagesAllocated(aPhysAddr, numPages, aType);
1935 __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam returns %08x",aPhysAddr));
1936 #ifdef BTRACE_RAM_ALLOCATOR
1937 BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, aType, numPages, aPhysAddr);
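// A minimal usage sketch (RamAlloc mutex held; zone IDs and the size are
// illustrative): carve a 128KB physically contiguous buffer out of two
// specific RAM zones. 'allocator' is a hypothetical DRamAllocator pointer.
//
//	TUint zoneIds[] = { 1, 2 };
//	TPhysAddr physAddr;
//	TInt r = allocator->ZoneAllocContiguousRam(zoneIds, 2, 0x20000, physAddr,
//			EPageFixed, 0);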
1944 Attempt to set the specified contiguous block of RAM pages to be either
1945 allocated or free.
1947 @param aBase The base address of the RAM to update.
1948 @param aSize The number of contiguous bytes of RAM to update.
1949 @param aState Set to ETrue to free the RAM, EFalse to allocate the RAM.
1950 @param aType The type of the pages being updated.
1952 @return KErrNone on success, KErrArgument if aBase is an invalid address,
1953 KErrGeneral if a page being marked free is already free,
1954 KErrInUse if the page being marked allocated is already allocated.
1956 TInt DRamAllocator::SetPhysicalRamState(TPhysAddr aBase, TInt aSize, TBool aState, TZonePageType aType)
1958 M::RamAllocIsLocked();
1960 __KTRACE_OPT(KMMU,Kern::Printf("SetPhysicalRamState(%08x,%x,%d)",aBase,aSize,aState?1:0));
1961 TUint32 pageMask = KPageSize-1;
1962 aSize += (aBase & pageMask);
1963 aBase &= ~pageMask;
1964 TInt npages = (aSize + pageMask) >> KPageShift;
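// For example, aBase == 0x80001800 and aSize == 0x2000: aSize becomes 0x2800,
// the base is rounded down to 0x80001000 and npages == (0x2800 + 0xFFF) >> 12 == 3.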
1965 __KTRACE_OPT(KMMU,Kern::Printf("Rounded base %08x npages=%x",aBase,npages));
1967 SZone* baseZone = GetZoneAndOffset(aBase, baseOffset);
1968 if (!baseZone || (TUint32)aSize > (iPhysAddrTop - aBase + 1))
1970 return KErrArgument;
1972 SZone* zone = baseZone;
1973 SZone* zoneEnd = iZones + iNumZones;
1974 TPhysAddr base = aBase;
1975 TInt pagesLeft = npages;
1976 TInt offset = baseOffset;
1977 TInt pageCount = -1;
1978 __KTRACE_OPT(KMMU2,Kern::Printf("Zone %x page index %x z=%08x zE=%08x n=%x base=%08x",zone->iId, offset, zone, zoneEnd, pagesLeft, base));
1979 for (; pagesLeft && zone < zoneEnd; ++zone)
1981 if (zone->iPhysBase + (offset << KPageShift) != base)
1982 {// Zone not contiguous with the current run of pages, so we have been
1983 // asked to set the state of non-existent pages.
1984 return KErrArgument;
1987 TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
1988 TInt zp_rem = bmaAll.iSize - offset;
1989 pageCount = Min(pagesLeft, zp_rem);
1990 __KTRACE_OPT(KMMU2,Kern::Printf("Zone %x pages %x+%x base %08x", zone->iId, offset, pageCount, base));
1993 if(bmaAll.NotAllocated(offset, pageCount))
2000 if(bmaAll.NotFree(offset, pageCount))
2005 pagesLeft -= pageCount;
2007 base += (TPhysAddr(pageCount) << KPageShift);
2011 return KErrArgument; // not all of the specified range exists
2014 iTotalFreeRamPages += (aState ? npages : -npages);
2016 offset = baseOffset;
2017 for (pagesLeft = npages; pagesLeft; pagesLeft -= pageCount)
2019 TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
2020 // Unknown and fixed pages share a bit map.
2021 TBitMapAllocator& bmaType = *(zone->iBma[(aType != EPageUnknown)? aType : EPageFixed]);
2022 TInt zp_rem = bmaAll.iSize - offset;
2023 pageCount = Min(pagesLeft, zp_rem);
2026 bmaAll.Free(offset, pageCount);
2027 bmaType.Free(offset, pageCount);
2028 ZoneFreePages(zone, pageCount, aType);
2032 bmaAll.Alloc(offset, pageCount);
2033 bmaType.Alloc(offset, pageCount);
2034 ZoneAllocPages(zone, pageCount, aType);
2036 __KTRACE_OPT(KMMU2,Kern::Printf("Zone %d pages %x+%x base %08x",zone-iZones, offset, pageCount, base));
2043 /** Update the allocated page counts for the zone that the page is allocated into.
2045 @param aPageInfo The page information structure of the page whose type is changing.
2046 @param aOldType The type the page was allocated as.
2047 @param aNewType The type the page is changing to.
2049 void DRamAllocator::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldType, TZonePageType aNewType)
2053 SZone* zone = GetZoneAndOffset(aPageInfo->PhysAddr(), offset);
2055 // *********** System lock may be held while this is invoked so don't do********
2056 // *********** anything too slow and definitely don't call zone callback********
2057 M::RamAllocIsLocked();
2058 CHECK_PRECONDITIONS((MASK_THREAD_CRITICAL) & ~MASK_NO_FAST_MUTEX, "DRamAllocator::ChangePageType");
2060 // Get the zone the page is in and, on debug builds, check that it is allocated.
2061 if (zone == NULL || zone->iBma[KBmaAllPages]->NotAllocated(offset, 1))
2063 Panic(EAllocRamPagesInconsistent);
2066 // Check if adjusting counts is valid, i.e. won't cause a roll over
2067 if (zone->iAllocPages[aOldType] - 1 > zone->iAllocPages[aOldType] ||
2068 zone->iAllocPages[aNewType] + 1 < zone->iAllocPages[aNewType])
2070 __KTRACE_OPT(KMMU, Kern::Printf("ChangePageType Alloc Unk %x Fx %x Mv %x Dis %x",zone->iAllocPages[EPageUnknown],
2071 zone->iAllocPages[EPageFixed], zone->iAllocPages[EPageMovable],zone->iAllocPages[EPageDiscard]));
2072 Panic(EZonesCountErr);
2076 // Update the counts and bmas
2077 zone->iAllocPages[aOldType]--;
2078 zone->iBma[aOldType]->Free(offset);
2079 zone->iAllocPages[aNewType]++;
2080 zone->iBma[aNewType]->Alloc(offset, 1);
2082 __KTRACE_OPT(KMMU2, Kern::Printf("ChangePageType Alloc Unk %x Fx %x Mv %x Dis %x",zone->iAllocPages[EPageUnknown],
2083 zone->iAllocPages[EPageFixed], zone->iAllocPages[EPageMovable],zone->iAllocPages[EPageDiscard]));
2084 #ifdef BTRACE_RAM_ALLOCATOR
2085 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocChangePageType, aNewType, aPageInfo->PhysAddr());
2090 Get the next page in this zone that is allocated after this one.
2092 @param aZone The zone to find the next allocated page in.
2093 @param aOffset On entry this is the offset from which the next allocated
2094 page in the zone should be found, on return it will be the offset
2095 of the next allocated page.
2096 @return KErrNone if a next allocated page could be found, KErrNotFound if no more pages in
2097 the zone after aOffset are allocated, KErrArgument if aOffset is outside the zone.
2099 TInt DRamAllocator::NextAllocatedPage(SZone* aZone, TUint& aOffset, TZonePageType aType) const
2101 const TUint KWordAlignMask = KMaxTUint32 << 5;
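// KWordAlignMask == 0xFFFFFFE0: anding with it rounds an offset down to a
// 32-page bitmap-word boundary, while (offset & ~KWordAlignMask) gives the
// bit position within that word.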
2103 M::RamAllocIsLocked();
2105 __NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
2106 // Makes things simpler for bma selection.
2107 __NK_ASSERT_DEBUG(aType != EPageUnknown);
2109 if (aOffset >= aZone->iPhysPages)
2110 {// Starting point is outside the zone
2111 return KErrArgument;
2114 TUint offset = aOffset;
2115 TUint endOffset = aZone->iPhysPages;
2116 TUint endOffsetAligned = endOffset & KWordAlignMask;
2118 // Select the BMA to search,
2119 TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
2120 TUint32* map = &(aZone->iBma[bmaIndex]->iMap[offset >> 5]);
2121 TUint32 bits = *map++;
2123 // Set bits for pages before 'offset' (i.e. ones we want to ignore)...
2124 bits |= ~(KMaxTUint32 >> (offset & ~KWordAlignMask));
2126 // Find the first bit map word from aOffset in aZone with allocated pages
2127 while (bits == KMaxTUint32 && offset < endOffsetAligned)
2130 offset = (offset + 32) & KWordAlignMask;
2133 if (offset >= endOffsetAligned && endOffset != endOffsetAligned)
2134 {// Have reached the last bit mask word so set the bits that are
2135 // outside of the zone so that they are ignored.
2136 bits |= KMaxTUint32 >> (endOffset - endOffsetAligned);
2139 if (bits == KMaxTUint32)
2140 {// No allocated pages found after aOffset in aZone.
2141 return KErrNotFound;
2144 // Now we have bits with allocated pages in it so determine the exact
2145 // offset of the next allocated page
2146 TUint32 mask = 0x80000000 >> (offset & ~KWordAlignMask);
2153 if (offset >= endOffset)
2154 {// Reached the end of the zone without finding an allocated page after aOffset
2155 return KErrNotFound;
2158 // Should definitely have found an allocated page within aZone's pages
2159 __NK_ASSERT_DEBUG(mask != 0 && !(bits & mask) && offset < aZone->iPhysPages);
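// The scan above works a bitmap word at a time. A standalone sketch of the
// same technique (assuming, as in TBitMapAllocator, that a 1 bit means the
// page is free and a 0 bit means it is allocated, with the MSB of each word
// holding the lowest-numbered page):
//
//	TInt NextZeroBit(const TUint32* aMap, TUint aBits, TUint aFrom)
//		{
//		TUint offset = aFrom;
//		// Force the bits below aFrom to 1 so that they are ignored.
//		TUint32 bits = aMap[offset >> 5] | ~(KMaxTUint32 >> (offset & 31));
//		while (bits == KMaxTUint32)
//			{// Whole word is free pages; move to the next word.
//			offset = (offset + 32) & ~31u;
//			if (offset >= aBits)
//				return KErrNotFound;
//			bits = aMap[offset >> 5];
//			}
//		TUint32 mask = 0x80000000u >> (offset & 31);
//		while (bits & mask)
//			{// Walk down from the MSB to the first 0 bit.
//			mask >>= 1;
//			offset++;
//			}
//		return offset < aBits ? (TInt)offset : KErrNotFound;
//		}
//
// The real code additionally masks off the bits beyond the end of the zone
// in the final word, as done above with endOffsetAligned.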
2166 See if any of the least preferable RAM zones can be emptied. If they can then
2167 initialise the allocator for a general defragmentation operation.
2169 Stage 0 of the general defrag ensures that there are enough free
2170 pages in the more preferable RAM zones, i.e. those that will remain in use
2171 after the general defrag, for the movable page allocations. This is achieved
2172 by discarding the required number of discardable pages from those more
2173 preferable RAM zones.
2176 @param aStage On return this will contain the stage the general
2177 defragmentation should begin at. I.e. if no RAM
2178 zones can be cleared then just perform the final
2179 tidying stage.
2180 @param aRequiredToBeDiscarded On return this will contain the number of
2181 discardable pages that need to be discarded
2182 from the RAM zones to be in use after the
2183 general defrag.
2184 @return Pointer to the RAM zone object that may potentially have pages
2185 discarded by the general defrag. This will be NULL if no suitable
2186 RAM zone could be found.
2188 SZone* DRamAllocator::GeneralDefragStart0(TGenDefragStage& aStage, TUint& aRequiredToBeDiscarded)
2191 if (!K::Initialising)
2193 M::RamAllocIsLocked();
2194 #ifdef __VERIFY_LEASTMOVDIS
2195 VerifyLeastPrefMovDis();
2198 // Any previous general defrag operation must have ended.
2199 __NK_ASSERT_DEBUG(iZoneGeneralPrefLink == NULL);
2200 __NK_ASSERT_DEBUG(iZoneGeneralTmpLink == NULL);
2205 // Only have one RAM zone so a defrag can't do anything.
2209 // Determine how many movable or discardable pages are required to be allocated.
2210 TUint requiredPagesDis = 0;
2211 TUint requiredPagesMov = 0;
2212 TUint firstClearableInUseRank = 0;
2213 SDblQueLink* link = iZoneLeastMovDis;
2216 SZone& zone = *_LOFF(link, SZone, iPrefLink);
2217 requiredPagesDis += zone.iAllocPages[EPageDiscard];
2218 requiredPagesMov += zone.iAllocPages[EPageMovable];
2220 if (!firstClearableInUseRank &&
2221 (zone.iAllocPages[EPageMovable] || zone.iAllocPages[EPageDiscard]) &&
2222 !zone.iAllocPages[EPageFixed] && !zone.iAllocPages[EPageUnknown])
2223 {// This is the least preferable RAM zone that has movable or
2224 // discardable pages but may be clearable as it has no immovable pages.
2225 firstClearableInUseRank = zone.iPrefRank;
2228 // Reset the KRamZoneFlagGenDefrag flag bit for each RAM zone to be defragmented.
2229 zone.iFlags &= ~(KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock);
2233 while (link != &iZonePrefList.iA);
2235 // Adjust the number of discardable pages for those that are freeable.
2236 // Dirty pages will be moved rather than discarded so they are not freeable
2237 // and we must make sure that we have enough space in zones for these dirty
2238 // paged pages.
2239 __NK_ASSERT_DEBUG(requiredPagesDis >= (TUint)M::NumberOfFreeDpPages());
2240 requiredPagesDis -= M::NumberOfFreeDpPages();
2241 TUint totalDirtyPagesDis = M::NumberOfDirtyDpPages();
2242 if (requiredPagesDis < totalDirtyPagesDis)
2243 requiredPagesDis = totalDirtyPagesDis;
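// For example, with illustrative numbers: requiredPagesDis == 100,
// M::NumberOfFreeDpPages() == 30 and M::NumberOfDirtyDpPages() == 80 give
// 100 - 30 == 70, which is below the 80 dirty pages that must be moved
// rather than discarded, so requiredPagesDis becomes 80.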
2245 // Determine which is the least preferable RAM zone that needs to be
2246 // in use for required number of movable and discardable pages.
2247 TUint onlyPagesDis = 0; // Number of pages in RAM zones for discard only.
2248 TUint onlyPagesMov = 0; // Number of pages in RAM zones for movable only.
2249 TUint totalPagesDis = 0; // Total pages found so far for discardable pages.
2250 TUint totalPagesMov = 0; // Total pages found so far for movable pages.
2251 TUint totalCurrentDis = 0; // Number of allocated discardable pages found in
2252 // RAM zones to be in use after the general defrag.
2253 TUint totalCurrentMov = 0; // Number of allocated movable pages found in
2254 // RAM zones to be in use after the general defrag.
2255 TUint totalCurrentFree = 0; // The current number of free pages in the RAM zones
2256 // to be in use after the general defrag.
2257 iZoneGeneralPrefLink = &iZonePrefList.iA;
2258 while (iZoneGeneralPrefLink != iZoneLeastMovDis &&
2259 (requiredPagesMov > totalPagesMov ||
2260 requiredPagesDis > totalPagesDis))
2262 iZoneGeneralPrefLink = iZoneGeneralPrefLink->iNext;
2263 SZone& zone = *_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink);
2264 // Update the current totals.
2265 totalCurrentDis += zone.iAllocPages[EPageDiscard];
2266 totalCurrentMov += zone.iAllocPages[EPageMovable];
2267 totalCurrentFree += zone.iFreePages;
2269 TBool onlyAllocDis = NoAllocOfPageType(zone, EPageMovable);
2270 TBool onlyAllocMov = NoAllocOfPageType(zone, EPageDiscard);
2271 if (!onlyAllocMov || !onlyAllocDis)
2272 {// Either movable, discardable or both can be allocated in this zone.
2273 TUint zonePagesFree = zone.iFreePages;
2274 TUint zonePagesDis = zone.iAllocPages[EPageDiscard];
2275 TUint zonePagesMov = zone.iAllocPages[EPageMovable];
2276 // Total pages in this RAM zone that can be used for either
2277 // discardable or movable pages.
2278 TUint zonePagesGen = zonePagesDis + zonePagesMov + zonePagesFree;
2281 if (requiredPagesDis > totalPagesDis)
2282 {// No further discardable pages can be allocated into
2283 // this RAM zone but consider any that already are.
2284 TUint usedPages = Min( (TInt)zonePagesDis,
2285 requiredPagesDis - totalPagesDis);
2286 totalPagesDis += usedPages;
2287 zonePagesDis -= usedPages;
2289 TUint zoneOnlyMov = zonePagesDis + zonePagesMov + zonePagesFree;
2290 onlyPagesMov += zoneOnlyMov;
2291 totalPagesMov += zoneOnlyMov;
2292 __KTRACE_OPT(KMMU2, Kern::Printf("onlyMov ID%x tot %x",
2293 zone.iId, zoneOnlyMov));
2294 zonePagesGen = 0; // These pages aren't general purpose.
2298 if (requiredPagesMov > totalPagesMov)
2299 {// No further movable pages can be allocated into
2300 // this RAM zone but consider any that already are.
2301 TUint usedPages = Min( (TInt)zonePagesMov,
2302 requiredPagesMov - totalPagesMov);
2303 totalPagesMov += usedPages;
2304 zonePagesMov -= usedPages;
2306 TUint zoneOnlyDis = zonePagesDis + zonePagesMov + zonePagesFree;
2307 onlyPagesDis += zoneOnlyDis;
2308 totalPagesDis += zoneOnlyDis;
2309 __KTRACE_OPT(KMMU2, Kern::Printf("onlyDis ID%x tot %x",
2310 zone.iId, zoneOnlyDis));
2311 zonePagesGen = 0; // These pages aren't general purpose.
2314 if (requiredPagesDis > totalPagesDis)
2315 {// Need some discardable pages so first steal any spare
2316 // movable pages for discardable allocations.
2317 if (totalPagesMov > requiredPagesMov)
2318 {// Use any spare movable pages that can also be
2319 // used for discardable allocations for discardable.
2320 __NK_ASSERT_DEBUG(onlyPagesMov);
2321 TUint spareMovPages = Min((TInt)(totalPagesMov - onlyPagesMov),
2322 totalPagesMov - requiredPagesMov);
2323 totalPagesMov -= spareMovPages;
2324 totalPagesDis += spareMovPages;
2325 __KTRACE_OPT(KMMU2, Kern::Printf("genDis Mov ID%x used%x",
2326 zone.iId, spareMovPages));
2328 if (requiredPagesDis > totalPagesDis)
2330 // Need more discardable pages but only grab those required.
2331 TUint usedPages = Min( (TInt) zonePagesGen,
2332 requiredPagesDis - totalPagesDis);
2333 totalPagesDis += usedPages;
2334 zonePagesGen -= usedPages;
2335 __KTRACE_OPT(KMMU2, Kern::Printf("genDis ID%x used%x",
2336 zone.iId, usedPages));
2339 if (requiredPagesMov > totalPagesMov)
2340 {// Need some movable pages so first steal any spare
2341 // discardable pages for movable allocations.
2342 if (totalPagesDis > requiredPagesDis)
2343 {// Use any spare discardable pages that can also be
2344 // used for movable allocations for movable.
2345 __NK_ASSERT_DEBUG(onlyPagesDis);
2346 TUint spareDisPages = Min((TInt)(totalPagesDis - onlyPagesDis),
2347 totalPagesDis - requiredPagesDis);
2348 totalPagesDis -= spareDisPages;
2349 totalPagesMov += spareDisPages;
2350 __KTRACE_OPT(KMMU2, Kern::Printf("genMov Dis ID%x used%x",
2351 zone.iId, spareDisPages));
2353 if (requiredPagesMov > totalPagesMov)
2354 {// Still need some movable pages so grab them from this zone.
2355 // Just grab all of the general pages left as discard pages will
2356 // have already grabbed some if it had needed to.
2357 totalPagesMov += zonePagesGen;
2358 __KTRACE_OPT(KMMU2, Kern::Printf("genMov ID%x used%x",
2359 zone.iId, zonePagesGen));
2365 __KTRACE_OPT(KMMU, Kern::Printf("gen least in use ID 0x%x",
2366 (_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink))->iId));
2367 __NK_ASSERT_DEBUG(_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink)->iPrefRank <=
2368 iZoneLeastMovDisRank);
2370 if (iZoneGeneralPrefLink != iZoneLeastMovDis &&
2371 firstClearableInUseRank > _LOFF(iZoneGeneralPrefLink, SZone, iPrefLink)->iPrefRank)
2372 {// We can reduce the number of RAM zones in use so block all the RAM
2373 // zones not to be in use after the defrag from being allocated into
2374 // by the general defrag.
2375 link = iZoneLeastMovDis;
2376 while (link != iZoneGeneralPrefLink)
2378 SZone& zone = *_LOFF(link, SZone, iPrefLink);
2379 zone.iFlags |= KRamZoneFlagGenDefragBlock;
2383 // Determine how many pages will need to be discarded to allow general
2384 // defrag to succeed in using the minimum RAM zones required.
2385 if (requiredPagesDis > totalCurrentDis)
2386 {// Need to replace some discardable pages in RAM zones to be
2387 // cleared with pages in the RAM zones to be in use after the
2388 // general defrag.
2389 __NK_ASSERT_DEBUG(totalCurrentFree >= requiredPagesDis - totalCurrentDis);
2390 totalCurrentFree -= requiredPagesDis - totalCurrentDis;
2392 TUint totalForMov = totalCurrentFree + totalCurrentMov;
2393 if (requiredPagesMov > totalForMov)
2394 {// Need to discard some pages from the least preferable RAM zone to be
2395 // in use after the general defrag for the movable pages to be moved to.
2396 aRequiredToBeDiscarded = requiredPagesMov - totalForMov;
2397 __NK_ASSERT_DEBUG(aRequiredToBeDiscarded <= totalCurrentDis);
2398 __NK_ASSERT_DEBUG(totalCurrentDis - aRequiredToBeDiscarded >= requiredPagesDis);
2401 // This stage should discard pages from the least preferable RAM zones
2402 // to be in use after the general defrag to save the pages having to
2403 // be moved again by the final stage.
2404 iZoneGeneralStage = EGenDefragStage0;
2405 aStage = EGenDefragStage1; // Defrag::GeneralDefrag() requires this.
2406 iZoneGeneralTmpLink = iZoneGeneralPrefLink;
2407 return GeneralDefragNextZone0();
2410 // General defrag can't clear any RAM zones so jump to tidying stage.
2411 aStage = EGenDefragStage2;
2412 iZoneGeneralStage = EGenDefragStage2;
2418 Find the next RAM zone that is suitable for stage 0 of a general defrag.
2419 This should only be called after a preceding call to
2420 DRamAllocator::GeneralDefragStart0().
2422 This goes through the RAM zones from the least preferable to be in use
2423 after the general defrag to the most preferable RAM zone. It will
2424 return each time it finds a RAM zone with discardable pages allocated into it.
2426 @return Pointer to the RAM zone object that may potentially have pages
2427 discarded by the general defrag. This will be NULL if no suitable
2428 RAM zone could be found.
2430 SZone* DRamAllocator::GeneralDefragNextZone0()
2432 M::RamAllocIsLocked();
2433 // A general defrag operation must be in progress.
2434 __NK_ASSERT_DEBUG(iZoneGeneralPrefLink != NULL);
2435 __NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
2436 __NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage0);
2438 while (iZoneGeneralTmpLink != &iZonePrefList.iA)
2440 SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);
2442 // Save the RAM zone that is currently more preferable than this one
2443 // before any reordering.
2444 iZoneGeneralTmpLink = iZoneGeneralTmpLink->iPrev;
2446 if (zone->iFlags & KRamZoneFlagGenDefrag)
2447 {// This zone has been selected for a general defrag already.
2448 __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext0 zone ID 0x%x already defragmented",
2452 zone->iFlags |= KRamZoneFlagGenDefrag;
2453 if (zone->iAllocPages[EPageDiscard])
2455 // A RAM zone that may have pages discarded by a general defrag has been found.
2456 __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext0 zone ID 0x%x", zone->iId));
2465 Initialise this stage of a general defrag operation which will attempt
2466 to clear all the RAM zones not to be in use once the general defrag
2467 has completed.
2469 @return Pointer to the RAM zone object that may potentially be cleared
2470 by the general defrag. This will be NULL if no suitable
2471 RAM zone could be found.
2473 SZone* DRamAllocator::GeneralDefragStart1()
2475 M::RamAllocIsLocked();
2476 __NK_ASSERT_DEBUG(iNumZones == 1 || iZoneGeneralPrefLink != NULL);
2480 {// On a device with only one RAM zone a defrag can't do anything, so return NULL.
2484 // Clear the general defrag flags of each RAM zone to be defragmented.
2485 SDblQueLink* link = iZoneGeneralPrefLink;
2486 for (; link != &iZonePrefList.iA; link = link->iPrev)
2488 SZone& zone = *_LOFF(link, SZone, iPrefLink);
2489 zone.iFlags &= ~KRamZoneFlagGenDefrag;
2492 // Flags cleared so now start this stage from the least preferable RAM zone
2493 // currently in use.
2494 iZoneGeneralTmpLink = iZoneLeastMovDis;
2495 iZoneGeneralStage = EGenDefragStage1;
2496 return GeneralDefragNextZone1();
2501 Find the next RAM zone that is suitable for stage 1 of a general defrag.
2502 This should only be called after a preceding call to
2503 DRamAllocator::GeneralDefragStart1().
2505 This goes through the RAM zones from the least preferable currently
2506 with movable or discardable pages allocated into it to the least
2507 preferable RAM zone that is to be in use after the general defrag.
2508 It will return each time it finds a RAM zone with movable and/or
2509 discardable pages allocated into it.
2511 @return Pointer to the RAM zone object that may potentially be cleared by a
2512 general defrag. This will be NULL if no suitable zone could be found.
2514 SZone* DRamAllocator::GeneralDefragNextZone1()
2516 M::RamAllocIsLocked();
2517 // A general defrag operation must be in progress.
2518 __NK_ASSERT_DEBUG(iZoneGeneralPrefLink != NULL);
2519 __NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
2520 __NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage1);
2523 // If we hit the target least preferable RAM zone to be in use once
2524 // the defrag has completed then stop this stage of the general defrag.
2526 // Should never skip past iZoneGeneralPrefLink.
2527 __NK_ASSERT_DEBUG(iZoneGeneralTmpLink != &iZonePrefList.iA);
2529 while (iZoneGeneralTmpLink != iZoneGeneralPrefLink)
2531 SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);
2533 // Save the RAM zone that is currently more preferable than this one
2534 // before any reordering.
2535 iZoneGeneralTmpLink = iZoneGeneralTmpLink->iPrev;
2537 if (zone->iFlags & KRamZoneFlagGenDefrag)
2538 {// This zone has been selected for a general defrag already.
2539 __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 zone ID 0x%x already defragmented",
2543 zone->iFlags |= KRamZoneFlagGenDefrag;
2544 if (zone->iAllocPages[EPageMovable] || zone->iAllocPages[EPageDiscard])
2546 // A RAM zone that may be cleared by a general defrag has been found.
2547 __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 zone ID 0x%x", zone->iId));
2551 __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 reached general target"));
2557 Initialise stage 2 of a general defrag operation.
2559 Stage 2 creates room for fixed page allocations in the more preferable RAM
2560 zones in use by moving pages into the least preferable RAM zones in use.
2562 @return Pointer to the RAM zone object that may potentially be cleared of
2563 movable and discardable pages by the general defrag. This will be
2564 NULL if no suitable zone could be found.
2566 SZone* DRamAllocator::GeneralDefragStart2()
2568 M::RamAllocIsLocked();
2569 __NK_ASSERT_DEBUG(iNumZones == 1 || iZoneGeneralPrefLink != NULL);
2573 {// On a device with only one RAM zone a defrag can't do anything, so return NULL.
2577 // Clear the general defrag flags of each RAM zone to be defragmented.
2578 SDblQueLink* link = iZoneLeastMovDis;
2579 for (; link != &iZonePrefList.iA; link = link->iPrev)
2581 SZone& zone = *_LOFF(link, SZone, iPrefLink);
2582 zone.iFlags &= ~(KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock);
2585 // Flags cleared so now start the 2nd stage from the most preferable RAM zone.
2586 iZoneGeneralTmpLink = iZonePrefList.First();
2587 iZoneGeneralStage = EGenDefragStage2;
2588 return GeneralDefragNextZone2();
2593 Find the next RAM zone that is suitable for this stage of general defrag.
2594 This should only be called after a preceding call to
2595 DRamAllocator::GeneralDefragStart2().
2597 This goes through the RAM zones from the most preferable to the least
2598 preferable RAM zone that has movable and/or discardable pages allocated
2599 into it. It will return each time it finds a RAM zone with movable and/or
2600 discardable pages allocated into it.
2602 @return Pointer to the RAM zone object that may potentially be cleared of
2603 movable and discardable pages by the general defrag. This will be
2604 NULL if no suitable zone could be found.
2606 SZone* DRamAllocator::GeneralDefragNextZone2()
2608 M::RamAllocIsLocked();
2609 __NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
2610 __NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage2);
2613 while (iZoneGeneralTmpLink != iZoneLeastMovDis)
2615 SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);
2617 // Save the RAM zone that is currently less preferable than this one
2618 // before any reordering.
2619 iZoneGeneralTmpLink = iZoneGeneralTmpLink->iNext;
2621 if (zone->iFlags & KRamZoneFlagGenDefrag)
2622 {// This zone has been selected for a general defrag already.
2623 __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 zone ID 0x%x already defragmented", zone->iId));
2626 zone->iFlags |= KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock;
2627 if (zone->iAllocPages[EPageMovable] || zone->iAllocPages[EPageDiscard])
2628 {// A RAM zone that may be cleared by a general defrag has been found.
2629 __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 zone ID 0x%x", zone->iId));
2633 __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 reached general target"));
2638 Inform the allocator that a general defragmentation operation has completed.
2641 void DRamAllocator::GeneralDefragEnd()
2644 if (!K::Initialising)
2646 M::RamAllocIsLocked();
2647 #ifdef __VERIFY_LEASTMOVDIS
2648 VerifyLeastPrefMovDis();
2652 // Reset the general defrag preference link as it is no longer required.
2653 iZoneGeneralPrefLink = NULL;
2654 iZoneGeneralTmpLink = NULL;
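// A minimal sketch of how a defrag driver might sequence the three stages,
// assuming it holds the RamAlloc mutex throughout; 'alloc' is a hypothetical
// DRamAllocator pointer, DiscardFrom()/ClearZone()/TidyZone() stand in for
// the real page moving and discarding operations, and the aStage short-cut
// returned by GeneralDefragStart0() is ignored for brevity:
//
//	TGenDefragStage stage;
//	TUint toDiscard = 0;
//	SZone* zone = alloc->GeneralDefragStart0(stage, toDiscard);
//	for (; zone; zone = alloc->GeneralDefragNextZone0())
//		DiscardFrom(zone, toDiscard);	// Stage 0: make room in the kept zones.
//	for (zone = alloc->GeneralDefragStart1(); zone; zone = alloc->GeneralDefragNextZone1())
//		ClearZone(zone);	// Stage 1: empty the zones not needed afterwards.
//	for (zone = alloc->GeneralDefragStart2(); zone; zone = alloc->GeneralDefragNextZone2())
//		TidyZone(zone);		// Stage 2: move pages to less preferred kept zones.
//	alloc->GeneralDefragEnd();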
2659 Calculate the number of free pages in all the RAM zones to be in use
2660 once the general defragmentation operation has completed.
2662 @param aType The type of free pages to find in the higher priority zones.
2663 @return The number of free pages in the RAM zones intended to be in use
2664 after the general defrag operation has completed.
2666 TUint DRamAllocator::GenDefragFreePages(TZonePageType aType) const
2668 M::RamAllocIsLocked();
2670 if (iZoneGeneralStage == EGenDefragStage2)
2671 {// Second stage of general defrag, where we don't have to empty the RAM zone.
2674 TUint totalFree = 0;
2675 SDblQueLink* link = iZoneGeneralPrefLink;
2676 for (; link != &iZonePrefList.iA; link = link->iPrev)
2678 SZone& zone = *_LOFF(link, SZone, iPrefLink);
2679 if (NoAllocOfPageType(zone, aType) ||
2680 zone.iFlags & KRamZoneFlagGenDefragBlock)
2684 // This zone has free space for this type of page
2685 totalFree += zone.iFreePages;
2691 /** Mark the RAM zone as being claimed to stop any further allocations.
2692 @param aZone The zone to stop allocations to.
2694 @pre RamAlloc mutex held.
2695 @post RamAlloc mutex held.
2697 void DRamAllocator::ZoneClaimStart(SZone& aZone)
2699 M::RamAllocIsLocked();
2700 __NK_ASSERT_DEBUG(!(aZone.iFlags & KRamZoneFlagClaiming));
2702 aZone.iFlags |= KRamZoneFlagClaiming;
2704 #ifdef BTRACE_RAM_ALLOCATOR
2705 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
2710 /** Mark the RAM zone as not being claimed to allow allocations.
2711 @param aZone The zone to allow allocations into.
2713 @pre RamAlloc mutex held.
2714 @post RamAlloc mutex held.
2716 void DRamAllocator::ZoneClaimEnd(SZone& aZone)
2718 M::RamAllocIsLocked();
2719 __NK_ASSERT_DEBUG(aZone.iFlags & KRamZoneFlagClaiming);
2721 aZone.iFlags &= ~KRamZoneFlagClaiming;
2723 #ifdef BTRACE_RAM_ALLOCATOR
2724 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
2728 /** Mark the RAM zone so that any allocations or frees from it can be detected.
2729 Useful for defragging.
2730 @param aZone The zone to mark.
2731 @pre RamAlloc mutex held
2732 @post RamAlloc mutex held
2734 void DRamAllocator::ZoneMark(SZone& aZone)
2736 M::RamAllocIsLocked();
2737 __NK_ASSERT_DEBUG(!(aZone.iFlags & KRamZoneFlagMark));
2739 aZone.iFlags |= KRamZoneFlagMark;
2741 #ifdef BTRACE_RAM_ALLOCATOR
2742 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
2746 /** Unmark the RAM zone.
2747 Useful for defragging.
2748 @param aZone The zone to unmark.
2749 @return ETrue if the RAM zone is inactive, EFalse otherwise.
2750 @pre RamAlloc mutex held
2751 @post RamAlloc mutex held
2753 TBool DRamAllocator::ZoneUnmark(SZone& aZone)
2755 M::RamAllocIsLocked();
2757 TInt r = aZone.iFlags & KRamZoneFlagMark;
2758 aZone.iFlags &= ~KRamZoneFlagMark;
2760 #ifdef BTRACE_RAM_ALLOCATOR
2761 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
2766 /** Determine whether it is OK to allocate the specified page type
2767 to this RAM zone.
2769 This should be used by all functions that search through the zones when
2770 attempting to allocate pages.
2772 @return ETrue if this page type shouldn't be allocated into the RAM zone,
2773 EFalse if it is OK to allocate that page type into the RAM zone.
2775 TBool DRamAllocator::NoAllocOfPageType(SZone& aZone, TZonePageType aType) const
2777 TUint8 flagMask = 1 << (aType - KPageTypeAllocBase);
2778 return (aZone.iFlags & (KRamZoneFlagClaiming|KRamZoneFlagNoAlloc|KRamZoneFlagTmpBlockAlloc)) ||
2779 (aZone.iFlags & flagMask);
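// For example, if EPageFixed is the first allocatable type (so that
// aType - KPageTypeAllocBase == 0), flagMask == 0x01, i.e. bit 0 of the zone
// flags is that zone's 'no fixed pages' bit; the allocation is also refused
// while any of the blanket claiming/no-alloc/temporary-block flags are set.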
2783 /** Updates the flags of the specified RAM zone.
2785 @param aId The ID of the RAM zone to modify.
2786 @param aClearMask The bit flags to clear.
2787 @param aSetMask The bit flags to set.
2789 @return KErrNone on success, KErrArgument if no RAM zone with ID aId is found or
2790 if aSetMask contains invalid flags.
2792 @pre RamAlloc mutex held
2793 @post RamAlloc mutex held
2795 TInt DRamAllocator::ModifyZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
2797 M::RamAllocIsLocked();
2799 SZone* zone = ZoneFromId(aId);
2800 if (zone == NULL || (aSetMask & KRamZoneFlagInvalid))
2801 {// aId invalid or an invalid flag bit was requested to be set.
2802 return KErrArgument;
2804 zone->iFlags &= ~aClearMask;
2805 zone->iFlags |= aSetMask;
2807 __KTRACE_OPT(KMMU, Kern::Printf("Zone %x Flags %x", zone->iId, zone->iFlags));
2809 #ifdef BTRACE_RAM_ALLOCATOR
2810 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, zone->iId, zone->iFlags);
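// A minimal usage sketch (RamAlloc mutex held; the zone ID is illustrative;
// 'iRamAllocator' is a hypothetical pointer to this allocator): temporarily
// stop, then re-allow, all further allocations into zone 5.
//
//	TInt r = iRamAllocator->ModifyZoneFlags(5, 0, KRamZoneFlagNoAlloc);
//	// ... perform the operation that needs the zone left untouched ...
//	r = iRamAllocator->ModifyZoneFlags(5, KRamZoneFlagNoAlloc, 0);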
2816 /** Invoke the RAM zone callback function to inform the variant of the RAM zones
2817 in use so far by the system.
2818 This is designed to be invoked only once, during boot in MmuBase::Init2().
2820 void DRamAllocator::InitialCallback()
2822 __NK_ASSERT_DEBUG(iZoneCallbackInitSent == EFalse);
2825 TInt ret = (*iZonePowerFunc)(ERamZoneOp_Init, NULL, (TUint*)&iZonePwrState);
2826 if (ret != KErrNone && ret != KErrNotSupported)
2828 Panic(EZonesCallbackErr);
2830 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::InitialCallback");
2832 iZoneCallbackInitSent = ETrue;
2836 #ifdef BTRACE_RAM_ALLOCATOR
2838 Structure for outputting zone information to BTrace that couldn't fit into the
2839 first 2 words of the BTraceN call.
2841 struct TRamAllocBtraceZone
2850 This will be invoked when BTrace starts logging BTrace::ERamAllocator category
2851 traces.
2852 It outputs the zone configuration and the base addresses of any contiguous block
2853 of allocated RAM.
2855 void DRamAllocator::SendInitialBtraceLogs(void)
2857 M::RamAllocIsLocked();
2858 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::SendInitialBtraceLogs");
2860 // Output the zone information
2861 TRamAllocBtraceZone bZone;
2862 BTrace4(BTrace::ERamAllocator, BTrace::ERamAllocZoneCount, iNumZones);
2863 const SZone* zone = iZones;
2864 const SZone* const endZone = iZones + iNumZones;
2865 for (; zone < endZone; zone++)
2867 bZone.iId = zone->iId;
2868 bZone.iPref = zone->iPref;
2869 bZone.iFlags = zone->iFlags;
2870 BTraceN(BTrace::ERamAllocator, BTrace::ERamAllocZoneConfig, zone->iPhysPages,
2871 zone->iPhysBase, &bZone, sizeof(TRamAllocBtraceZone));
2874 // Search through zones and output each contiguous region of allocated pages
2875 for (zone = iZones; zone < endZone; zone++)
2877 if (zone->iFreePages != zone->iPhysPages)
2880 TInt totalPages = 0;
2881 TUint32 runStart = 0;
2882 while ((TUint)totalPages != zone->iPhysPages - zone->iFreePages)
2884 // find set of contiguous pages that have been allocated
2885 // runStart will be set to first page of allocated run if one found
2886 for (;runStart < zone->iPhysPages && zone->iBma[KBmaAllPages]->NotAllocated(runStart,1); runStart++);
2888 // find last allocated page of this run
2889 TUint32 runEnd = runStart + 1;
2890 for (;runEnd < zone->iPhysPages && zone->iBma[KBmaAllPages]->NotFree(runEnd,1); runEnd++);
2892 pageCount = runEnd - runStart;
2894 {// have a run of allocated pages so output BTrace
2895 TPhysAddr baseAddr = (runStart << KPageShift) + zone->iPhysBase;
2896 __KTRACE_OPT(KMMU2, Kern::Printf("offset %x physBase %x pages %x baseAddr %08x",runStart, zone->iPhysBase, pageCount, baseAddr));
2897 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocBootAllocation, pageCount, baseAddr);
2898 runStart += pageCount;
2899 totalPages += pageCount;
2904 BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocBootAllocationEnd);
2906 #endif // BTRACE_RAM_ALLOCATOR
2908 TInt DRamAllocator::ClaimPhysicalRam(TPhysAddr aBase, TInt aSize)
2910 TInt ret = SetPhysicalRamState(aBase,aSize,EFalse, EPageFixed);
2911 #ifdef BTRACE_RAM_ALLOCATOR
2912 if (ret == KErrNone)
2914 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocClaimRam, aSize, aBase);
2920 TInt DRamAllocator::FreePhysicalRam(TPhysAddr aBase, TInt aSize)
2922 TInt ret = SetPhysicalRamState(aBase,aSize,ETrue, EPageFixed);
2923 #ifdef BTRACE_RAM_ALLOCATOR
2924 if (ret == KErrNone)
2926 BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePhysical, aSize, aBase);
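// A typical pairing (addresses and sizes illustrative; 'allocator' is a
// hypothetical DRamAllocator pointer): claim a known 16KB physical buffer
// and later release it.
//
//	TInt r = allocator->ClaimPhysicalRam(0x30000000, 0x4000);
//	if (r == KErrNone)
//		{
//		// ... use the four claimed pages ...
//		allocator->FreePhysicalRam(0x30000000, 0x4000);
//		}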
2933 TInt DRamAllocator::FreeRamInBytes()
2935 return iTotalFreeRamPages<<KPageShift;
2938 TUint DRamAllocator::FreeRamInPages()
2940 return iTotalFreeRamPages;
2943 TUint DRamAllocator::TotalPhysicalRamPages()
2945 return iTotalRamPages;
2948 #ifdef __VERIFY_LEASTMOVDIS
2949 void DRamAllocator::VerifyLeastPrefMovDis()
2951 // Shouldn't have any movable or discardable pages in any RAM
2952 // zone less preferable than iZoneLeastMovDis
2953 SDblQueLink* tmpLink = iZoneLeastMovDis->iNext;
2954 while (tmpLink != &iZonePrefList.iA)
2956 SZone& zone = *_LOFF(tmpLink, SZone, iPrefLink);
2957 if (zone.iAllocPages[EPageMovable] != 0 ||
2958 zone.iAllocPages[EPageDiscard] != 0)
2961 __NK_ASSERT_DEBUG(0);
2963 tmpLink = tmpLink->iNext;