// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\memmodel\epoc\mmubase\ramalloc.cpp
//
//

/**
 @file
 @internalComponent
*/
//#define __VERIFY_LEASTMOVDIS

#include
#include
#include

#ifndef __MEMMODEL_FLEXIBLE__
#include
#else
#include "mdefrag.inl"
#endif //__MEMMODEL_FLEXIBLE__

DRamAllocator* DRamAllocator::New()
	{
	return new DRamAllocator;
	}

DRamAllocator* DRamAllocator::New(const SRamInfo& aInfo, const SRamZone* aZoneInfo, TRamZoneCallback aZoneCallback)
	{
	DRamAllocator* pA=New();
	if (!pA)
		Panic(ECreateNoMemory);
	// If this fails then it won't return but panic
	pA->Create(aInfo,aZoneInfo, aZoneCallback);
	return pA;
	}

void DRamAllocator::Panic(TPanic aPanic)
	{
	Kern::Fault("RAM-ALLOC", aPanic);
	}

#ifdef KMMU
void HexDump32(const TAny* a, TInt n, const char* s)
	{
	const TUint32* p=(const TUint32*)a;
	Kern::Printf(s);
	TInt i=0;
	while(n)
		{
		TBuf8<80> b;
		b.AppendNumFixedWidth(i,EHex,4);
		b.Append(':');
		TInt m=Min(n,4);
		n-=m;
		i+=m;
		while(m--)
			{
			b.Append(' ');
			b.AppendNumFixedWidth(*p++,EHex,8);
			}
		Kern::Printf("%S",&b);
		}
	}

void HexDump8(const TAny* a, TInt n, const char* s)
	{
	const TUint8* p=(const TUint8*)a;
	Kern::Printf(s);
	TInt i=0;
	while(n)
		{
		TBuf8<80> b;
		b.AppendNumFixedWidth(i,EHex,4);
		b.Append(':');
		TInt m=Min(n,16);
		n-=m;
		i+=m;
		while(m--)
			{
			b.Append(' ');
			b.AppendNumFixedWidth(*p++,EHex,2);
			}
		Kern::Printf("%S",&b);
		}
	}

void DRamAllocator::DebugDump()
	{
	Kern::Printf("PageSize=%08x PageShift=%d",KPageSize,KPageShift);
	Kern::Printf("Total Pages=%x Total Free=%x",iTotalRamPages,iTotalFreeRamPages);
	Kern::Printf("Number of zones=%d, PowerState=%016lx",iNumZones,iZonePwrState);
	Kern::Printf("PhysAddrBase=%08x, PhysAddrTop=%08x",iPhysAddrBase,iPhysAddrTop);

	TUint i = 0;
	Kern::Printf("Zone Info:");
	for (; i<iNumZones; i++)
		{
		// (Per-zone detail output not recovered from this listing.)
		}
	SDblQueLink* link = iZonePrefList.First();
	for (; link != &iZonePrefList.iA; link = link->iNext)
		{
		SZone& zone = *_LOFF(link, SZone, iPrefLink);
		Kern::Printf("ID0x%x rank0x%x", zone.iId, zone.iPrefRank);
		}
	SZone& zone = *_LOFF(iZoneLeastMovDis, SZone, iPrefLink);
	Kern::Printf("iZoneLeastMovDis ID 0x%x rank 0x%x", zone.iId, iZoneLeastMovDisRank);
	}
#endif

TInt CountBanks(const SRamBank* aBankList)
	{
	TInt banks=0;
	for (; aBankList->iSize; ++banks, ++aBankList);
	return banks;
	}

TUint32 TotalBankSize(const SRamBank* aBankList)
	{
	TUint32 size=0;
	for (; aBankList->iSize; ++aBankList)
		size+=aBankList->iSize;
	return size;
	}

/**
Count how many zones have been specified and do some basic checks on
their layout: sl@0: Zones must be distinct, i.e. not overlap sl@0: Zone ID must be unique sl@0: Zones must be page size aligned sl@0: Zones must be big enough to cover all of the allocatable RAM sl@0: The end of the list is indicated by a SRamZone.iSize==0. sl@0: @param aZones The list of RAM zones to be setup sl@0: */ sl@0: void DRamAllocator::CountZones(const SRamZone* aZones) sl@0: { sl@0: TUint32 totalSize = 0; sl@0: TUint32 pageMask = KPageSize-1; sl@0: // Check zones don't overlap each other and while running through the zones sl@0: // calculate how many there are sl@0: const SRamZone* pCurZ = aZones; sl@0: for (; pCurZ->iSize != 0; pCurZ++) sl@0: { sl@0: // Verify zone addresses and alignment sl@0: TUint32 curEnd = pCurZ->iBase + pCurZ->iSize - 1; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("curBase %x curEnd %x pageMask %x",pCurZ->iBase,curEnd,pageMask)); sl@0: if (curEnd <= pCurZ->iBase || (((curEnd + 1) | pCurZ->iBase) & pageMask)) sl@0: { sl@0: Panic(EZonesAlignment); sl@0: } sl@0: sl@0: if (pCurZ->iId == KRamZoneInvalidId) sl@0: { sl@0: Panic(EZonesIDInvalid); sl@0: } sl@0: // Check the flags are not set to invalid values sl@0: if (pCurZ->iFlags & KRamZoneFlagInvalid) sl@0: { sl@0: Panic(EZonesFlagsInvalid); sl@0: } sl@0: sl@0: iNumZones++; sl@0: if (iNumZones > KMaxRamZones) sl@0: {// Too many zones specified sl@0: Panic(EZonesTooNumerousOrFew); sl@0: } sl@0: totalSize += pCurZ->iSize; sl@0: sl@0: // Verify this zone doesn't overlap any of the previous zones' address space sl@0: const SRamZone* pTmpZ = aZones; sl@0: for (; pTmpZ < pCurZ; pTmpZ++) sl@0: { sl@0: TUint32 tmpEnd = pTmpZ->iBase + pTmpZ->iSize - 1; sl@0: if (tmpEnd >= pCurZ->iBase && pTmpZ->iBase <= curEnd) sl@0: { sl@0: Panic(EZonesNotDistinct); sl@0: } sl@0: if(pTmpZ->iId == pCurZ->iId) sl@0: { sl@0: Panic(EZonesIDNotUnique); sl@0: } sl@0: } sl@0: } sl@0: __KTRACE_OPT(KMMU,Kern::Printf("iNumZones=%d, totalSize=%x",iNumZones,totalSize)); sl@0: if (!iNumZones) sl@0: {// no zones specified sl@0: Panic(EZonesTooNumerousOrFew); sl@0: } sl@0: sl@0: // Together all of the zones should cover the whole of the RAM sl@0: if (totalSize>>KPageShift < iTotalRamPages) sl@0: { sl@0: Panic(EZonesIncomplete); sl@0: } sl@0: } sl@0: sl@0: sl@0: /** sl@0: Get the zone from the ID sl@0: @param aId ID of zone to find sl@0: @return Pointer to the zone if zone of matching ID found, NULL otherwise sl@0: */ sl@0: SZone* DRamAllocator::ZoneFromId(TUint aId) const sl@0: { sl@0: SZone* pZ = iZones; sl@0: const SZone* const pEndZone = iZones + iNumZones; sl@0: for (; pZ < pEndZone; pZ++) sl@0: { sl@0: if (aId == pZ->iId) sl@0: { sl@0: return pZ; sl@0: } sl@0: } sl@0: return NULL; sl@0: } sl@0: sl@0: /** Retrieve the physical base address and number of pages in the specified zone. 
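
Illustrative sketch only (not from this file): 'allocator' below stands for the
DRamAllocator instance and the zone ID is hypothetical. A caller holding the RAM
allocator mutex could query a zone's extent like this:

@code
TPhysAddr base;
TUint pages;
if (allocator->GetZoneAddress(aZoneId, base, pages) == KErrNone)
	{
	// The zone spans base to base + (pages << KPageShift) - 1.
	}
@endcode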
sl@0: sl@0: @param aZoneId The ID of the zone sl@0: @param aPhysBaseAddr Receives the base address of the zone sl@0: @param aNumPages Receives the number of pages in the zone sl@0: sl@0: @return KErrNone if zone found, KErrArgument if zone couldn't be found sl@0: */ sl@0: TInt DRamAllocator::GetZoneAddress(TUint aZoneId, TPhysAddr& aPhysBase, TUint& aNumPages) sl@0: { sl@0: SZone* zone = ZoneFromId(aZoneId); sl@0: if (zone == NULL) sl@0: { sl@0: return KErrArgument; sl@0: } sl@0: aPhysBase = zone->iPhysBase; sl@0: aNumPages = zone->iPhysPages; sl@0: return KErrNone; sl@0: } sl@0: sl@0: #ifdef __MEMMODEL_FLEXIBLE__ sl@0: /** sl@0: @param aAddr The address of page to find the zone of sl@0: @param aOffset The page offset from the start of the zone that the page is in sl@0: */ sl@0: SZone* DRamAllocator::GetZoneAndOffset(TPhysAddr aAddr, TInt& aOffset) sl@0: { sl@0: // Get the zone from the SPageInfo of the page at aAddr sl@0: SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr); sl@0: if (pageInfo == NULL) sl@0: { sl@0: return NULL; sl@0: } sl@0: sl@0: // Perform a binary search for the RAM zone, we know aAddr is within a RAM sl@0: // zone as pageInfo != NULL. sl@0: SZone* left = iZones; sl@0: SZone* mid = iZones + (iNumZones>>1); sl@0: SZone* top = iZones + iNumZones - 1; sl@0: sl@0: while (mid->iPhysEnd < aAddr || mid->iPhysBase > aAddr) sl@0: { sl@0: if (mid->iPhysEnd < aAddr) sl@0: left = mid + 1; sl@0: else sl@0: top = mid - 1; sl@0: mid = left + ((top - left) >> 1); sl@0: __ASSERT_DEBUG(left <= top && mid <= top && mid >= left, Panic(EAllocRamPagesInconsistent)); sl@0: } sl@0: __ASSERT_DEBUG(mid->iPhysBase <= aAddr && mid->iPhysEnd >= aAddr, Panic(EAllocRamPagesInconsistent)); sl@0: aOffset = (aAddr - mid->iPhysBase) >> KPageShift; sl@0: __ASSERT_DEBUG((TUint)aOffset < mid->iPhysPages, Panic(EAllocRamPagesInconsistent)); sl@0: return mid; sl@0: } sl@0: #else sl@0: /** sl@0: @param aAddr The address of page to find the zone of sl@0: @param aOffset The page offset from the start of the zone that the page is in sl@0: */ sl@0: SZone* DRamAllocator::GetZoneAndOffset(TPhysAddr aAddr, TInt& aOffset) sl@0: { sl@0: // Get the zone from the SPageInfo of the page at aAddr sl@0: SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(aAddr); sl@0: if (pageInfo == NULL) sl@0: { sl@0: return NULL; sl@0: } sl@0: SZone* z = iZones + pageInfo->Zone(); sl@0: aOffset = (aAddr - z->iPhysBase) >> KPageShift; sl@0: __ASSERT_DEBUG((TUint)aOffset < z->iPhysPages, Panic(EAllocRamPagesInconsistent)); sl@0: return z; sl@0: } sl@0: #endif sl@0: /** sl@0: @param aId ID of zone to get page count for sl@0: @param aPageData store for page counts sl@0: @return KErrNone if zone found, KErrArgument otherwise sl@0: */ sl@0: TInt DRamAllocator::GetZonePageCount(TUint aId, SRamZonePageCount& aPageData) sl@0: { sl@0: // Search for the zone of ID aId sl@0: const SZone* zone = ZoneFromId(aId); sl@0: if (zone == NULL) sl@0: { sl@0: return KErrArgument; sl@0: } sl@0: aPageData.iFreePages = zone->iFreePages; sl@0: aPageData.iUnknownPages = zone->iAllocPages[EPageUnknown]; sl@0: aPageData.iFixedPages = zone->iAllocPages[EPageFixed]; sl@0: aPageData.iMovablePages = zone->iAllocPages[EPageMovable]; sl@0: aPageData.iDiscardablePages = zone->iAllocPages[EPageDiscard]; sl@0: sl@0: return KErrNone; sl@0: } sl@0: sl@0: sl@0: /** Update the count of free and allocated pages for the zone with sl@0: @param aZone The index of the zone whose counts are being updated sl@0: @param aCount The no of pages being allocated sl@0: @param aType The type 
of the pages being allocated sl@0: */ sl@0: void DRamAllocator::ZoneAllocPages(SZone* aZone, TUint32 aCount, TZonePageType aType) sl@0: { sl@0: #ifdef _DEBUG sl@0: TUint32 free = aZone->iFreePages - aCount; sl@0: TUint32 alloc = aZone->iAllocPages[aType] + aCount; sl@0: TUint32 total_alloc = aZone->iAllocPages[EPageUnknown] + sl@0: aZone->iAllocPages[EPageDiscard] + sl@0: aZone->iAllocPages[EPageMovable] + sl@0: aZone->iAllocPages[EPageFixed] + aCount; sl@0: if (free > aZone->iFreePages || sl@0: alloc < aZone->iAllocPages[aType] || sl@0: free + total_alloc != aZone->iPhysPages || sl@0: iTotalFreeRamPages > iTotalRamPages) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("TotalFree %x TotalPages %x",iTotalFreeRamPages, iTotalRamPages)); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], sl@0: aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); sl@0: Panic(EZonesCountErr); sl@0: } sl@0: __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent)); sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], sl@0: aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); sl@0: sl@0: if (iAllowBmaVerify) sl@0: { sl@0: TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]); sl@0: TUint allocPages; sl@0: if (aType == EPageFixed || aType == EPageUnknown) sl@0: allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed]; sl@0: else sl@0: allocPages = aZone->iAllocPages[aType]; sl@0: allocPages += aCount; sl@0: __NK_ASSERT_DEBUG(aZone->iPhysPages - bmaType.iAvail == allocPages); sl@0: __NK_ASSERT_DEBUG((TUint)bmaType.iAvail >= aZone->iFreePages - aCount); sl@0: sl@0: //#define _FULL_VERIFY_TYPE_BMAS sl@0: #ifdef _FULL_VERIFY_TYPE_BMAS sl@0: TUint offset = 0; sl@0: TUint matchedPages = 0; sl@0: TInt r = KErrNone; sl@0: while (offset < aZone->iPhysPages && r == KErrNone) sl@0: { sl@0: r = NextAllocatedPage(aZone, offset, EPageTypes); sl@0: if (bmaType.NotFree(offset, 1)) sl@0: { sl@0: matchedPages++; sl@0: } sl@0: offset++; sl@0: } sl@0: __NK_ASSERT_DEBUG(matchedPages == allocPages); sl@0: #endif sl@0: } sl@0: #endif sl@0: sl@0: // Update counts sl@0: aZone->iAllocPages[aType] += aCount; sl@0: aZone->iFreePages -= aCount; sl@0: aZone->iFlags &= ~KRamZoneFlagMark; // clear the mark as this zone is active sl@0: sl@0: // Check if power state of zone needs to be changed sl@0: if (iZonePowerFunc && !(iZonePwrState & (((TUint64)1) << aZone - iZones))) sl@0: {//zone no longer empty so call variant to power RAM zone up if necessary sl@0: iZonePwrState |= (((TUint64)1) << aZone - iZones); sl@0: sl@0: if (iZoneCallbackInitSent) sl@0: { sl@0: TInt ret = (*iZonePowerFunc)(ERamZoneOp_PowerUp, (TAny*)aZone->iId, (TUint*)&iZonePwrState); sl@0: if (ret != KErrNone && ret != KErrNotSupported) sl@0: { sl@0: Panic(EZonesCallbackErr); sl@0: } sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::ZoneAllocPages"); sl@0: } sl@0: } sl@0: sl@0: // Re-order the zone preference list so that a RAM zone with more immovable pages sl@0: // is more preferable and secondary to that a RAM zone that is not empty is more 
sl@0: // preferable than one that is empty. sl@0: while (&aZone->iPrefLink != iZonePrefList.First()) sl@0: { sl@0: SZone* prevZ = _LOFF(aZone->iPrefLink.iPrev, SZone, iPrefLink); sl@0: __NK_ASSERT_DEBUG(K::Initialising || prevZ->iPrefRank == aZone->iPrefRank - 1); sl@0: if (prevZ->iPref == aZone->iPref && sl@0: (prevZ->iAllocPages[EPageFixed] + prevZ->iAllocPages[EPageUnknown] < sl@0: aZone->iAllocPages[EPageFixed] + aZone->iAllocPages[EPageUnknown] || sl@0: prevZ->iFreePages == prevZ->iPhysPages)) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("a - Reorder aZone 0x%x free 0x%x before prevZ 0x%x free 0x%x", aZone->iId, aZone->iFreePages, prevZ->iId, prevZ->iFreePages)); sl@0: // Make this RAM zone more preferable. sl@0: aZone->iPrefLink.Deque(); sl@0: aZone->iPrefLink.InsertBefore(&prevZ->iPrefLink); sl@0: aZone->iPrefRank--; sl@0: prevZ->iPrefRank++; sl@0: sl@0: if (iZoneLeastMovDis == &prevZ->iPrefLink) sl@0: {// Ensure iZoneLeastMovDisRank is kept up to date. sl@0: iZoneLeastMovDisRank = prevZ->iPrefRank; sl@0: } sl@0: if (iZoneLeastMovDis == &aZone->iPrefLink) sl@0: {// Ensure iZoneLeastMovDisRank is kept up to date. sl@0: iZoneLeastMovDisRank = aZone->iPrefRank; sl@0: // aZone was the least preferable with movable and/or discardable so is it still? sl@0: if (prevZ->iAllocPages[EPageMovable] || prevZ->iAllocPages[EPageDiscard]) sl@0: {// prevZ is now the least preferable RAM zone with movable and/or discardable. sl@0: iZoneLeastMovDis = &prevZ->iPrefLink; sl@0: iZoneLeastMovDisRank = prevZ->iPrefRank; sl@0: __KTRACE_OPT(KMMU, Kern::Printf("aa - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId)); sl@0: } sl@0: __KTRACE_OPT(KMMU, Kern::Printf("iZoneLeastMovDisRank 0x%x", iZoneLeastMovDisRank)); sl@0: } sl@0: } sl@0: else sl@0: { sl@0: break; sl@0: } sl@0: } sl@0: sl@0: // Now that the preference list has been re-ordered check whether sl@0: // iZoneLeastMovDis needs updating. 
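	// If this was a movable or discardable allocation and aZone is now ranked
	// below the current least-preferable zone holding movable or discardable
	// pages, aZone becomes the new iZoneLeastMovDis.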
sl@0: if (aType >= EPageMovable && iZoneLeastMovDisRank < aZone->iPrefRank) sl@0: { sl@0: iZoneLeastMovDis = &aZone->iPrefLink; sl@0: iZoneLeastMovDisRank = aZone->iPrefRank; sl@0: __KTRACE_OPT(KMMU, Kern::Printf("a - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId)); sl@0: } sl@0: __NK_ASSERT_DEBUG( K::Initialising || sl@0: iZoneLeastMovDisRank == _LOFF(iZoneLeastMovDis, SZone, iPrefLink)->iPrefRank); sl@0: #ifdef __VERIFY_LEASTMOVDIS sl@0: if (!K::Initialising) sl@0: VerifyLeastPrefMovDis(); sl@0: #endif sl@0: } sl@0: sl@0: sl@0: /** Update the count of free and allocated pages for the zone with sl@0: @param aZone The index of the zone whose counts are being updated sl@0: @param aCount The no of pages being freed sl@0: @param aType The type of the pages being freed sl@0: */ sl@0: void DRamAllocator::ZoneFreePages(SZone* aZone, TUint32 aCount, TZonePageType aType) sl@0: { sl@0: #ifdef _DEBUG sl@0: TUint32 alloc = aZone->iAllocPages[aType] - aCount; sl@0: TUint32 free = aZone->iFreePages + aCount; sl@0: TUint32 total_alloc = aZone->iAllocPages[EPageUnknown] + sl@0: aZone->iAllocPages[EPageDiscard] + sl@0: aZone->iAllocPages[EPageMovable] + sl@0: aZone->iAllocPages[EPageFixed] - aCount; sl@0: if (free < aZone->iFreePages || sl@0: alloc > aZone->iAllocPages[aType] || sl@0: free + total_alloc != aZone->iPhysPages || sl@0: iTotalFreeRamPages > iTotalRamPages) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("TotalFree %x TotalPages %x",iTotalFreeRamPages, iTotalRamPages)); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); // counts rolled over sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], sl@0: aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); sl@0: Panic(EZonesCountErr); sl@0: } sl@0: __ASSERT_DEBUG(free == (TUint32)aZone->iBma[KBmaAllPages]->iAvail, Panic(EAllocRamPagesInconsistent)); sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("ZoneFreePages - aCount %x free %x, alloc %x",aCount,free,alloc)); sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("Alloc Unk %x Fx %x Mv %x Dis %x",aZone->iAllocPages[EPageUnknown], sl@0: aZone->iAllocPages[EPageFixed], aZone->iAllocPages[EPageMovable],aZone->iAllocPages[EPageDiscard])); sl@0: sl@0: if (iAllowBmaVerify) sl@0: { sl@0: TBitMapAllocator& bmaType = *(aZone->iBma[(aType != EPageUnknown)? aType : EPageFixed]); sl@0: TUint allocPages; sl@0: if (aType == EPageFixed || aType == EPageUnknown) sl@0: allocPages = aZone->iAllocPages[EPageUnknown] + aZone->iAllocPages[EPageFixed]; sl@0: else sl@0: allocPages = aZone->iAllocPages[aType]; sl@0: allocPages -= aCount; sl@0: __NK_ASSERT_DEBUG(aZone->iPhysPages - bmaType.iAvail == allocPages); sl@0: __NK_ASSERT_DEBUG((TUint)bmaType.iAvail >= aZone->iFreePages + aCount); sl@0: sl@0: #ifdef _FULL_VERIFY_TYPE_BMAS sl@0: TUint offset = 0; sl@0: TUint matchedPages = 0; sl@0: TInt r = KErrNone; sl@0: while(offset < aZone->iPhysPages && r == KErrNone) sl@0: { sl@0: r = NextAllocatedPage(aZone, offset, EPageTypes); sl@0: if (bmaType.NotFree(offset, 1)) sl@0: { sl@0: matchedPages++; sl@0: } sl@0: offset++; sl@0: } sl@0: __NK_ASSERT_DEBUG(matchedPages == allocPages); sl@0: #endif sl@0: } sl@0: #endif sl@0: sl@0: // Update counts sl@0: aZone->iAllocPages[aType] -= aCount; sl@0: aZone->iFreePages += aCount; sl@0: aZone->iFlags &= ~KRamZoneFlagMark; // clear the mark as this zone is active sl@0: sl@0: // Check if power state of zone needs to be changed. 
sl@0: // Don't update iZonePwrState when a zone is being cleared to then be sl@0: // claimed as it shouldn't be powered off as it's about to be used. sl@0: if (iZonePowerFunc && !(aZone->iFlags & KRamZoneFlagClaiming) && sl@0: aZone->iFreePages == aZone->iPhysPages) sl@0: {// Zone is empty so call variant to power down RAM zone if desirable. sl@0: TUint64 pwrMask = ~(((TUint64)1) << aZone - iZones); sl@0: iZonePwrState &= pwrMask; sl@0: sl@0: // Don't invoke callback until Init callback sent. sl@0: if (iZoneCallbackInitSent) sl@0: { sl@0: TInt ret = (*iZonePowerFunc)(ERamZoneOp_PowerDown, (TAny*)aZone->iId, (TUint*)&iZonePwrState); sl@0: if (ret != KErrNone && ret != KErrNotSupported) sl@0: { sl@0: Panic(EZonesCallbackErr); sl@0: } sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::ZoneFreePages"); sl@0: } sl@0: } sl@0: sl@0: // Re-order the zone preference list so that a RAM zone with more immovable pages sl@0: // is more preferable and secondary to that a RAM zone that is not empty is more sl@0: // preferable than one that is empty. sl@0: while (&aZone->iPrefLink != iZonePrefList.Last()) sl@0: { sl@0: SZone* nextZ = _LOFF(aZone->iPrefLink.iNext, SZone, iPrefLink); sl@0: __NK_ASSERT_DEBUG(K::Initialising || nextZ->iPrefRank == aZone->iPrefRank + 1); sl@0: if (nextZ->iPref == aZone->iPref && sl@0: (nextZ->iAllocPages[EPageFixed] + nextZ->iAllocPages[EPageUnknown] > sl@0: aZone->iAllocPages[EPageFixed] + aZone->iAllocPages[EPageUnknown] || sl@0: (nextZ->iFreePages != nextZ->iPhysPages && sl@0: aZone->iFreePages == aZone->iPhysPages))) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("f - Reorder aZone 0x%x free 0x%x after nextZ 0x%x free 0x%x", aZone->iId, aZone->iFreePages, nextZ->iId, nextZ->iFreePages)); sl@0: // Make this RAM zone less preferable. sl@0: aZone->iPrefLink.Deque(); sl@0: aZone->iPrefLink.InsertAfter(&nextZ->iPrefLink); sl@0: aZone->iPrefRank++; sl@0: nextZ->iPrefRank--; sl@0: sl@0: if (iZoneLeastMovDis == &aZone->iPrefLink) sl@0: {// Ensure iZoneLeastMovDisRank is kept up to date. sl@0: iZoneLeastMovDisRank = aZone->iPrefRank; sl@0: } sl@0: if (iZoneLeastMovDis == &nextZ->iPrefLink) sl@0: {// Ensure iZoneLeastMovDisRank is kept up to date. sl@0: iZoneLeastMovDisRank = nextZ->iPrefRank; sl@0: if (aZone->iAllocPages[EPageMovable] || aZone->iAllocPages[EPageDiscard]) sl@0: {// aZone is now the least preferable RAM zone with movable and/or discardable. sl@0: iZoneLeastMovDis = &aZone->iPrefLink; sl@0: iZoneLeastMovDisRank = aZone->iPrefRank; sl@0: __KTRACE_OPT(KMMU, Kern::Printf("aa - iZoneleastInUse ID 0x%x", (_LOFF(iZoneLeastMovDis, SZone, iPrefLink))->iId)); sl@0: } sl@0: __KTRACE_OPT(KMMU, Kern::Printf("iZoneLeastMovDis Rank 0x%x", iZoneLeastMovDisRank)); sl@0: } sl@0: } sl@0: else sl@0: { sl@0: break; sl@0: } sl@0: } sl@0: if (&aZone->iPrefLink == iZoneLeastMovDis && sl@0: !aZone->iAllocPages[EPageMovable] && !aZone->iAllocPages[EPageDiscard]) sl@0: {// This RAM zone no longer has movable or discardable and therefore it sl@0: // is also no longer the least preferable RAM zone with movable and/or sl@0: // discardable. sl@0: SZone* zonePrev; sl@0: do sl@0: { sl@0: iZoneLeastMovDis = iZoneLeastMovDis->iPrev; sl@0: iZoneLeastMovDisRank--; sl@0: if (iZoneLeastMovDis == iZonePrefList.First()) sl@0: {// This the most preferable RAM zone so can't go any further. 
sl@0: break; sl@0: } sl@0: zonePrev = _LOFF(iZoneLeastMovDis, SZone, iPrefLink); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("f - iZoneLeastMovDis 0x%x", zonePrev->iId)); sl@0: } sl@0: while (!zonePrev->iAllocPages[EPageMovable] && !zonePrev->iAllocPages[EPageDiscard]); sl@0: sl@0: __NK_ASSERT_DEBUG( K::Initialising || sl@0: iZoneLeastMovDisRank == _LOFF(iZoneLeastMovDis, SZone, iPrefLink)->iPrefRank); sl@0: sl@0: #ifdef __VERIFY_LEASTMOVDIS sl@0: if (!K::Initialising) sl@0: VerifyLeastPrefMovDis(); sl@0: #endif sl@0: } sl@0: } sl@0: sl@0: sl@0: /** Calculate the physical address order of the zones and temporally store sl@0: the order in aZoneAddrOrder sl@0: */ sl@0: inline void DRamAllocator::SortRamZones(const SRamZone* aZones, TUint8* aZoneAddrOrder) sl@0: { sl@0: const SRamZone* const endZone = aZones + iNumZones; sl@0: const SRamZone* zone = aZones; sl@0: for (; zone < endZone; zone++) sl@0: { sl@0: // zoneIdx is the number of zones that have a lower base address than the sl@0: // current zone and therefore it is the address index of the current zone sl@0: TInt zoneIdx = 0; sl@0: // search for any zones of lower base address sl@0: const SRamZone* zone2 = aZones; sl@0: for (; zone2 < endZone; zone2++) sl@0: { sl@0: if (zone2->iBase < zone->iBase) sl@0: { sl@0: zoneIdx++; // have another zone of lower base address sl@0: } sl@0: } sl@0: aZoneAddrOrder[zoneIdx] = zone - aZones; sl@0: } sl@0: } sl@0: sl@0: sl@0: /** Initialise SPageInfos for all pages in this zone with the sl@0: index of the zone. sl@0: @param aZone The zone the pages to be initialised are in sl@0: */ sl@0: inline TUint DRamAllocator::InitSPageInfos(const SZone* aZone) sl@0: { sl@0: TUint pagesUpdated = 0; sl@0: if (aZone->iPhysBase > iPhysAddrTop || aZone->iPhysEnd < iPhysAddrBase) sl@0: {// None of the zone is in allocatable RAM sl@0: return pagesUpdated; sl@0: } sl@0: sl@0: // Mark each allocatable page in this zone with the index of the zone sl@0: #ifndef __MEMMODEL_FLEXIBLE__ sl@0: TUint8 zoneIndex = aZone - iZones; sl@0: #endif sl@0: TPhysAddr addr = aZone->iPhysBase; sl@0: for (; addr <= aZone->iPhysEnd; addr += KPageSize) sl@0: { sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(addr); sl@0: if (pi) sl@0: { sl@0: #ifndef __MEMMODEL_FLEXIBLE__ // The FMM doesn't store zone indices in SPageInfos. sl@0: pi->SetZone(zoneIndex); sl@0: #endif sl@0: pagesUpdated++; sl@0: } sl@0: } sl@0: return pagesUpdated; sl@0: } sl@0: sl@0: /** HAL Function for the RAM allocator. sl@0: */ sl@0: TInt DRamAllocator::HalFunction(TInt aFunction, TAny* a1, TAny* a2) sl@0: { sl@0: switch(aFunction) sl@0: { sl@0: case ERamHalGetZoneCount: sl@0: { sl@0: kumemput32(a1, &iNumZones, sizeof(iNumZones)); sl@0: return KErrNone; sl@0: } sl@0: sl@0: case ERamHalGetZoneConfig: sl@0: { sl@0: TUint zoneIndex = (TUint)a1; sl@0: if (zoneIndex < iNumZones) sl@0: { sl@0: SZone* pZone = iZones + zoneIndex; sl@0: struct SRamZoneConfig config; sl@0: NKern::ThreadEnterCS(); sl@0: M::RamAllocLock(); // get mutex to ensure consistent set of values are read... 
sl@0: config.iZoneId = pZone->iId; sl@0: config.iZoneIndex = zoneIndex; sl@0: config.iPhysBase = pZone->iPhysBase; sl@0: config.iPhysEnd = pZone->iPhysEnd; sl@0: config.iPhysPages = pZone->iPhysPages; sl@0: config.iPref = pZone->iPref; sl@0: config.iFlags = pZone->iFlags; sl@0: M::RamAllocUnlock(); sl@0: NKern::ThreadLeaveCS(); sl@0: kumemput32(a2,&config,sizeof(config)); sl@0: return KErrNone; sl@0: } sl@0: return KErrNotFound; sl@0: } sl@0: sl@0: case ERamHalGetZoneUtilisation: sl@0: { sl@0: TUint zoneIndex = (TUint)a1; sl@0: if (zoneIndex < iNumZones) sl@0: { sl@0: SZone* pZone = iZones + zoneIndex; sl@0: struct SRamZoneUtilisation config; sl@0: NKern::ThreadEnterCS(); sl@0: M::RamAllocLock(); // get mutex to ensure consistent set of values are read... sl@0: config.iZoneId = pZone->iId; sl@0: config.iZoneIndex = zoneIndex; sl@0: config.iPhysPages = pZone->iPhysPages; sl@0: config.iFreePages = pZone->iFreePages; sl@0: config.iAllocUnknown = pZone->iAllocPages[EPageUnknown]; sl@0: config.iAllocFixed = pZone->iAllocPages[EPageFixed]; sl@0: config.iAllocMovable = pZone->iAllocPages[EPageMovable]; sl@0: config.iAllocDiscardable = pZone->iAllocPages[EPageDiscard]; sl@0: config.iAllocOther = 0; sl@0: M::RamAllocUnlock(); sl@0: NKern::ThreadLeaveCS(); sl@0: kumemput32(a2,&config,sizeof(config)); sl@0: return KErrNone; sl@0: } sl@0: return KErrNotFound; sl@0: } sl@0: sl@0: default: sl@0: { sl@0: return KErrNotSupported; sl@0: } sl@0: } sl@0: } sl@0: sl@0: /** sl@0: Setup the ram allocator with information of the RAM available in the system that sl@0: comes from the bootstrap/superpage. This is intended to be called from sl@0: DRamAllocator::New(). sl@0: @internalComponent sl@0: @see DRamAllocator::New() sl@0: @param aInfo Two lists of SRamBanks for available and reserved banks in RAM, respectively sl@0: @param aZones A list of the ram zones in the system and their configuration/preferences sl@0: @param aZoneCallback Pointer to a base port call back function that will be invoked by this class sl@0: */ sl@0: void DRamAllocator::Create(const SRamInfo& aInfo, const SRamZone* aZones, TRamZoneCallback aZoneCallback) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::Create")); sl@0: sl@0: // SZone::iBma array assumes this and KBmaAllPages can't be the same as any sl@0: // allocatable page type. sl@0: __ASSERT_COMPILE(EPageFixed < KPageImmovable && EPageUnknown < KPageImmovable && sl@0: EPageDiscard >= KPageImmovable && EPageMovable >= KPageImmovable && sl@0: KBmaAllPages != EPageFixed && KBmaAllPages != EPageMovable && sl@0: KBmaAllPages != EPageDiscard); sl@0: // NoAllocOfPageType() requires this sl@0: __ASSERT_COMPILE( KRamZoneFlagNoFixed == 1 << (EPageFixed - KPageTypeAllocBase) && sl@0: KRamZoneFlagNoMovable == 1 << (EPageMovable - KPageTypeAllocBase) && sl@0: KRamZoneFlagNoDiscard == 1 << (EPageDiscard - KPageTypeAllocBase)); sl@0: sl@0: // SZone::iPhysEnd and iPhysAddrTop rely on this when checking contiguous zones etc. 
sl@0: __ASSERT_COMPILE(KPageShift != 0); sl@0: sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: // Determine where all the allocatable RAM pages are, using the SRamBank sl@0: // data passed to the kernel by the bootstrap sl@0: ////////////////////////////////////////////////////////////////////////// sl@0: TUint num_boot_banks=CountBanks(aInfo.iBanks); sl@0: TUint32 total_ram_size=TotalBankSize(aInfo.iBanks); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("#banks from bootstrap=%d",num_boot_banks)); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Total size=%08x",total_ram_size)); sl@0: iTotalRamPages=total_ram_size>>KPageShift; sl@0: // Assume all pages are allocated as unknown for now sl@0: iTotalFreeRamPages = 0; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Total size=%08x, total pages=%08x",total_ram_size,iTotalRamPages)); sl@0: sl@0: iPhysAddrBase=aInfo.iBanks[0].iBase; sl@0: const SRamBank& last_boot_bank=aInfo.iBanks[num_boot_banks-1]; sl@0: iPhysAddrTop = last_boot_bank.iBase + last_boot_bank.iSize - 1; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("PA base=%08x, PA top=%08x",iPhysAddrBase,iPhysAddrTop)); sl@0: sl@0: __ASSERT_DEBUG(iPhysAddrTop > iPhysAddrBase, Panic(ECreateInvalidRamBanks)); sl@0: sl@0: sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: // Determine how many zones are required and allocate all the sl@0: // data structures that will be required, permanent one first then sl@0: // temporary ones to avoid kernel heap fragmentation. sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: // Stop any RAM zone callback operations until the initial one has been sent sl@0: iZoneCallbackInitSent = EFalse; sl@0: if (aZones) sl@0: { sl@0: CountZones(aZones); sl@0: iZonePowerFunc = aZoneCallback; sl@0: } sl@0: else sl@0: {// maximum number of zone is number of non-coalesced boot banks sl@0: iNumZones = num_boot_banks; sl@0: // No zones specified so don't worry about invoking callback function sl@0: iZonePowerFunc = NULL; sl@0: } sl@0: sl@0: // Permenant heap allocation #1 - may be resized if no zones specified sl@0: __KTRACE_OPT(KMMU,Kern::Printf("iNumZones=%d", iNumZones)); sl@0: iZones = (SZone*)Kern::AllocZ(iNumZones*sizeof(SZone)); sl@0: if (!iZones) sl@0: { sl@0: Panic(ECreateNoMemory); sl@0: } sl@0: sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: // Coalesce contiguous boot banks sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: SRamBank* physBanks = (SRamBank*)Kern::Alloc(num_boot_banks*sizeof(SRamBank)); sl@0: if (!physBanks) sl@0: { sl@0: Panic(ECreateNoMemory); sl@0: } sl@0: SRamBank* coalescedBank = physBanks; sl@0: const SRamBank* const lastBank = aInfo.iBanks + num_boot_banks; sl@0: TPhysAddr currentBase = aInfo.iBanks->iBase; sl@0: TPhysAddr currentEnd = aInfo.iBanks->iBase + aInfo.iBanks->iSize; sl@0: const SRamBank* nextBank = aInfo.iBanks + 1; sl@0: for (; nextBank <= lastBank; ++nextBank) sl@0: { sl@0: // Create new bank if the next bank isn't contiguous or if sl@0: // it is the last bank sl@0: if (nextBank == lastBank || nextBank->iBase != currentEnd) sl@0: { sl@0: coalescedBank->iBase = currentBase; sl@0: coalescedBank->iSize = currentEnd - currentBase; sl@0: // Mark all the SPageInfos for the pages in this bank as unused. 
sl@0: // Needs to be done here to allow SPageInfo::SafeFromPhysAddr to work sl@0: // which is used by InitSPageInfos() sl@0: SPageInfo* pi = SPageInfo::FromPhysAddr(coalescedBank->iBase); sl@0: SPageInfo* piBankEnd = pi + (coalescedBank->iSize >> KPageShift); sl@0: for (; pi < piBankEnd; pi++) sl@0: { sl@0: pi->SetUnused(); sl@0: } sl@0: ++coalescedBank; sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Coalesced bank: %08x-%08x", currentBase, currentEnd)); sl@0: currentBase = nextBank->iBase; sl@0: currentEnd = currentBase + nextBank->iSize; sl@0: } sl@0: else sl@0: { sl@0: currentEnd += nextBank->iSize; sl@0: } sl@0: } sl@0: TUint num_coalesced_banks = coalescedBank - physBanks; sl@0: __KTRACE_OPT(KMMU, Kern::Printf("#Coalesced banks: %d", num_coalesced_banks)); sl@0: sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: // Initialise the SZone objects and mark all the SPageInfos with the index sl@0: // of zone they are in. sl@0: ////////////////////////////////////////////////////////////////////////// sl@0: // Assume everything is off so base port will get notification every time the sl@0: // a new zone is required during the rest of boot process. sl@0: if (aZones != NULL) sl@0: { sl@0: SZone* newZone = iZones; // pointer to zone being created sl@0: sl@0: // Create and fill zoneAddrOrder with address ordered indices to aZones sl@0: TUint8* zoneAddrOrder = (TUint8*)Kern::Alloc(iNumZones); sl@0: if (!zoneAddrOrder) sl@0: { sl@0: Panic(ECreateNoMemory); sl@0: } sl@0: SortRamZones(aZones, zoneAddrOrder); sl@0: sl@0: // Now go through each SRamZone in address order initialising the SZone sl@0: // objects. sl@0: TUint i = 0; sl@0: TUint totalZonePages = 0; sl@0: for (; i < iNumZones; i++) sl@0: { sl@0: const SRamZone& ramZone = *(aZones + zoneAddrOrder[i]); sl@0: newZone->iPhysBase = ramZone.iBase; sl@0: newZone->iPhysEnd = ramZone.iBase + ramZone.iSize - 1; sl@0: newZone->iPhysPages = ramZone.iSize >> KPageShift; sl@0: newZone->iAllocPages[EPageUnknown] = newZone->iPhysPages; sl@0: newZone->iId = ramZone.iId; sl@0: newZone->iPref = ramZone.iPref; sl@0: newZone->iFlags = ramZone.iFlags; sl@0: totalZonePages += InitSPageInfos(newZone); sl@0: newZone++; sl@0: } sl@0: sl@0: // iZones now points to all the SZone objects stored in address order sl@0: Kern::Free(zoneAddrOrder); sl@0: if (totalZonePages != iTotalRamPages) sl@0: {// The zones don't cover all of the allocatable RAM. sl@0: Panic(EZonesIncomplete); sl@0: } sl@0: } sl@0: else sl@0: { sl@0: iNumZones = num_coalesced_banks; sl@0: iZones = (SZone*)Kern::ReAlloc((TAny*)iZones, iNumZones*sizeof(SZone)); sl@0: if (iZones == NULL) sl@0: { sl@0: Panic(ECreateNoMemory); sl@0: } sl@0: // Create a zone for each coalesced boot bank sl@0: SRamBank* bank = physBanks; sl@0: SRamBank* bankEnd = physBanks + num_coalesced_banks; sl@0: SZone* zone = iZones; sl@0: for (; bank < bankEnd; bank++, zone++) sl@0: { sl@0: zone->iPhysBase = bank->iBase; sl@0: zone->iPhysEnd = bank->iBase + bank->iSize - 1; sl@0: zone->iPhysPages = bank->iSize >> KPageShift; sl@0: zone->iAllocPages[EPageUnknown] = zone->iPhysPages; sl@0: zone->iId = (TUint)bank; // doesn't matter what it is as long as it is unique sl@0: InitSPageInfos(zone); sl@0: } sl@0: } sl@0: // Delete the coalesced banks as no longer required sl@0: Kern::Free(physBanks); sl@0: sl@0: ////////////////////////////////////////////////////////////////////////// sl@0: // Create each zones' bit map allocator now as no temporary heap sl@0: // cells still allocated at this point. 
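	// The loop below creates the per-zone bit map allocators (iBma). The maps
	// used for movable and discardable pages start with every page free, while
	// the "all pages" and fixed/unknown maps start fully allocated, matching
	// the initial "everything allocated as unknown" state; the allocatable
	// pages are then freed bank by bank further below via SetPhysicalRamState().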
sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: const SZone* const endZone = iZones + iNumZones; sl@0: SZone* zone = iZones; sl@0: for (; zone < endZone; zone++) sl@0: {// Create each BMA with all pages allocated as unknown. sl@0: for (TUint i = 0; i < EPageTypes; i++) sl@0: { sl@0: // Only mark the all pages bma and fixed/unknown bma as allocated. sl@0: TBool notAllocated = (i >= (TUint)EPageMovable); sl@0: zone->iBma[i] = TBitMapAllocator::New(zone->iPhysPages, notAllocated); sl@0: if (!zone->iBma[i]) sl@0: { sl@0: Panic(ECreateNoMemory); sl@0: } sl@0: } sl@0: } sl@0: sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: // Unallocate each page in each bank so that it can be allocated when required. sl@0: // Any page that exists outside a bank will remain allocated as EPageUnknown sl@0: // and will therefore not be touched by the allocator. sl@0: ////////////////////////////////////////////////////////////////////////// sl@0: // Temporarily fill preference list so SetPhysicalRamState can succeed sl@0: #ifdef _DEBUG sl@0: // Block bma verificaitons as bma and alloc counts aren't consistent yet. sl@0: iAllowBmaVerify = EFalse; sl@0: #endif sl@0: const SZone* const lastZone = iZones + iNumZones; sl@0: zone = iZones; sl@0: for (; zone < lastZone; zone++) sl@0: { sl@0: iZonePrefList.Add(&zone->iPrefLink); sl@0: } sl@0: const SRamBank* const lastPhysBank = aInfo.iBanks + num_boot_banks; sl@0: const SRamBank* bank = aInfo.iBanks; sl@0: for (; bank < lastPhysBank; bank++) sl@0: {// Free all the pages in this bank. sl@0: SetPhysicalRamState(bank->iBase, bank->iSize, ETrue, EPageUnknown); sl@0: } sl@0: #ifdef _DEBUG sl@0: // Only now is it safe to enable bma verifications sl@0: iAllowBmaVerify = ETrue; sl@0: #endif sl@0: sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: // Sort the zones by preference and create a preference ordered linked list sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: zone = iZones; sl@0: for (; zone < lastZone; zone++) sl@0: {// clear all the zones from the preference list as not in preference order sl@0: zone->iPrefLink.Deque(); sl@0: } sl@0: SZone** prefOrder = (SZone**)Kern::AllocZ(iNumZones * sizeof(SZone*)); sl@0: if (!prefOrder) sl@0: { sl@0: Panic(ECreateNoMemory); sl@0: } sl@0: zone = iZones; sl@0: for(; zone < lastZone; zone++) sl@0: { sl@0: TInt lowerZones = 0; sl@0: // Find how many zones that have a lower preference than this one sl@0: const SZone* zone2 = iZones; sl@0: for (; zone2 < lastZone; zone2++) sl@0: { sl@0: if (zone->iPref > zone2->iPref || sl@0: zone->iPref == zone2->iPref && zone->iFreePages > zone2->iFreePages) sl@0: { sl@0: lowerZones++; sl@0: } sl@0: } sl@0: while (prefOrder[lowerZones] != 0) sl@0: {// Zone(s) of this preference and size already exist so sl@0: // place this one after it/them sl@0: lowerZones++; sl@0: } sl@0: prefOrder[lowerZones] = zone; sl@0: } sl@0: // Fill preference ordered linked list sl@0: SZone** const lastPref = prefOrder + iNumZones; sl@0: SZone** prefZone = prefOrder; sl@0: TUint prefRank = 0; sl@0: for (; prefZone < lastPref; prefZone++, prefRank++) sl@0: { sl@0: SZone& zone = **prefZone; sl@0: iZonePrefList.Add(&zone.iPrefLink); sl@0: zone.iPrefRank = prefRank; sl@0: } sl@0: Kern::Free(prefOrder); // Remove temporary allocation sl@0: sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: // Now mark any regions reserved by the base port 
as allocated and not sl@0: // for use by the RAM allocator. sl@0: /////////////////////////////////////////////////////////////////////////// sl@0: const SRamBank* pB = lastBank + 1; // first reserved block specifier sl@0: for (; pB->iSize; ++pB) sl@0: { sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Reserve physical block %08x+%x", pB->iBase, pB->iSize)); sl@0: TInt r = SetPhysicalRamState(pB->iBase, pB->iSize, EFalse, EPageFixed); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Reserve returns %d", r)); sl@0: if (r!=KErrNone) sl@0: { sl@0: Panic(ECreateInvalidReserveBank); sl@0: } sl@0: #ifdef BTRACE_KERNEL_MEMORY sl@0: BTrace8(BTrace::EKernelMemory, BTrace::EKernelMemoryDrvPhysAlloc, pB->iSize, pB->iBase); sl@0: Epoc::DriverAllocdPhysRam += pB->iSize; sl@0: #endif sl@0: #ifndef __MEMMODEL_FLEXIBLE__ // Mmu::Init2Common() handles this in FMM. sl@0: // Synchronise the SPageInfo with any blocks that were reserved by sl@0: // marking any reserved regions as locked sl@0: TPhysAddr physAddrEnd = pB->iBase + pB->iSize; sl@0: TPhysAddr physAddr = pB->iBase; sl@0: for(; physAddr < physAddrEnd; physAddr += KPageSize) sl@0: { sl@0: SPageInfo* pi = SPageInfo::FromPhysAddr(physAddr); sl@0: pi->Lock(); sl@0: } sl@0: #endif sl@0: } sl@0: sl@0: ////////////////////////////////////////////////////////////////////////// sl@0: // Now that we have have the RAM zone preference list and know how many sl@0: // allocatable pages there are, set iZoneLeastMovDis to be the RAM zone sl@0: // that will be used when half of the RAM is in use. This a boot up sl@0: // optimisation to reduce the amount of moving and/or discarding fixed page sl@0: // allocations will have to make during boot. sl@0: ////////////////////////////////////////////////////////////////////////// sl@0: TUint halfAllocatablePages = iTotalFreeRamPages >> 1; sl@0: TUint pages = 0; sl@0: SDblQueLink* link = &iZonePrefList.iA; sl@0: do sl@0: { sl@0: link = link->iNext; sl@0: __NK_ASSERT_DEBUG(link != &iZonePrefList.iA); sl@0: SZone& zonePages = *_LOFF(link, SZone, iPrefLink); sl@0: pages += zonePages.iFreePages; sl@0: } sl@0: while(pages < halfAllocatablePages); sl@0: iZoneLeastMovDis = link; sl@0: iZoneLeastMovDisRank = _LOFF(link, SZone, iPrefLink)->iPrefRank; sl@0: sl@0: // Reset general defrag links. sl@0: iZoneGeneralPrefLink = NULL; sl@0: iZoneGeneralTmpLink = NULL; sl@0: sl@0: __KTRACE_OPT(KMMU,DebugDump()); sl@0: } sl@0: sl@0: sl@0: void DRamAllocator::MarkPagesAllocated(TPhysAddr aAddr, TInt aCount, TZonePageType aType) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPagesAllocated(%x+%x)",aAddr,aCount)); sl@0: sl@0: M::RamAllocIsLocked(); sl@0: sl@0: // Don't allow unknown pages to be allocated, saves extra 'if' when sl@0: // creating bmaType. 
sl@0: __NK_ASSERT_DEBUG(aType != EPageUnknown); sl@0: sl@0: __ASSERT_DEBUG( !(TUint32(aAddr) & (KPageSize - 1)) && sl@0: (TUint32(aAddr) < TUint32(iPhysAddrTop)) && sl@0: (TUint32(aAddr) >= TUint32(iPhysAddrBase))&& sl@0: (TUint32((aCount << KPageShift) -1 + aAddr) <= TUint32(iPhysAddrTop)), sl@0: Panic(EDoMarkPagesAllocated1)); sl@0: sl@0: iTotalFreeRamPages-=aCount; sl@0: // Find the 1st zone the 1st set of allocations belong to sl@0: TInt offset = 0; sl@0: SZone* pZ = GetZoneAndOffset(aAddr,offset); sl@0: if (pZ == NULL) sl@0: {//aAddr not in RAM sl@0: Panic(EDoMarkPagesAllocated1); sl@0: } sl@0: while(aCount) sl@0: { sl@0: TBitMapAllocator& bmaAll = *(pZ->iBma[KBmaAllPages]); sl@0: TBitMapAllocator& bmaType = *(pZ->iBma[aType]); sl@0: TInt count = Min(bmaAll.iSize - offset, aCount); sl@0: bmaAll.Alloc(offset, count); sl@0: bmaType.Alloc(offset, count); sl@0: ZoneAllocPages(pZ, count, aType); sl@0: aCount -= count; sl@0: sl@0: // If spanning zones then ensure the next zone is contiguous. sl@0: __ASSERT_DEBUG(!aCount || ((pZ + 1)->iPhysBase != 0 && ((pZ + 1)->iPhysBase - 1) == pZ->iPhysEnd), Panic(EDoMarkPagesAllocated1)); sl@0: sl@0: pZ++; // zones in physical address order so move to next one sl@0: offset = 0; // and reset offset to start of the zone sl@0: } sl@0: } sl@0: sl@0: TInt DRamAllocator::MarkPageAllocated(TPhysAddr aAddr, TZonePageType aType) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("DRamAllocator::MarkPageAllocated %08x",aAddr)); sl@0: sl@0: M::RamAllocIsLocked(); sl@0: sl@0: // Don't allow unknown pages to be allocated, saves extra 'if' when sl@0: // creating bmaType. sl@0: __NK_ASSERT_DEBUG(aType != EPageUnknown); sl@0: sl@0: TInt n; sl@0: SZone* z=GetZoneAndOffset(aAddr,n); sl@0: if (!z) sl@0: { sl@0: return KErrArgument; sl@0: } sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n)); sl@0: TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]); sl@0: TBitMapAllocator& bmaType = *(z->iBma[aType]); sl@0: if (bmaAll.NotFree(n,1)) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Page already allocated")); sl@0: return KErrAlreadyExists; // page is already allocated sl@0: } sl@0: bmaAll.Alloc(n,1); sl@0: bmaType.Alloc(n,1); sl@0: --iTotalFreeRamPages; sl@0: ZoneAllocPages(z, 1, aType); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Total free RAM pages now = %d",iTotalFreeRamPages)); sl@0: sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocMarkAllocated, aType, aAddr); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: sl@0: TInt DRamAllocator::FreeRamPage(TPhysAddr aAddr, TZonePageType aType) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("FreeRamPage %08x",aAddr)); sl@0: sl@0: M::RamAllocIsLocked(); sl@0: sl@0: #ifdef _DEBUG sl@0: #ifndef __MEMMODEL_FLEXIBLE__ sl@0: // Check lock counter of the page sl@0: if (aAddr != KPhysAddrInvalid) sl@0: { sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(aAddr); sl@0: if(pi && pi->LockCount()) sl@0: Panic(EFreeingLockedPage); sl@0: } sl@0: #endif sl@0: // Don't allow unknown pages to be freed, saves extra 'if' when sl@0: // creating bmaType. 
sl@0: __NK_ASSERT_DEBUG(aType != EPageUnknown); sl@0: #endif sl@0: sl@0: TInt n; sl@0: SZone* z=GetZoneAndOffset(aAddr,n); sl@0: if (!z) sl@0: { sl@0: return KErrArgument; sl@0: } sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("Zone index %d page index %04x",z-iZones,n)); sl@0: TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]); sl@0: TBitMapAllocator& bmaType = *(z->iBma[aType]); sl@0: bmaAll.Free(n); sl@0: bmaType.Free(n); sl@0: ++iTotalFreeRamPages; sl@0: ZoneFreePages(z, 1, aType); sl@0: sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePage, aType, aAddr); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: sl@0: void DRamAllocator::FreeRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("FreeRamPages count=%08x",aNumPages)); sl@0: sl@0: M::RamAllocIsLocked(); sl@0: sl@0: #if defined(_DEBUG) && !defined(__MEMMODEL_FLEXIBLE__) sl@0: // Check lock counter for each page that is about to be freed. sl@0: TInt pageNum = aNumPages; sl@0: TPhysAddr* pageList = aPageList; sl@0: while (pageNum--) sl@0: { sl@0: TPhysAddr pa = *pageList++; sl@0: if (pa == KPhysAddrInvalid) sl@0: continue; sl@0: SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa); sl@0: if(pi && pi->LockCount()) sl@0: Panic(EFreeingLockedPage); sl@0: } sl@0: #endif sl@0: sl@0: while(aNumPages--) sl@0: { sl@0: TPhysAddr first_pa = *aPageList++; sl@0: if (first_pa == KPhysAddrInvalid) sl@0: { sl@0: continue; sl@0: } sl@0: TInt ix; sl@0: SZone* z = GetZoneAndOffset(first_pa,ix); sl@0: if (!z) sl@0: { sl@0: continue; sl@0: } sl@0: TBitMapAllocator& bmaAll = *(z->iBma[KBmaAllPages]); sl@0: TInt zp_rem = bmaAll.iSize - ix; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("1st PA=%08x Zone %d index %04x",first_pa,z-iZones,ix)); sl@0: TInt n = 1; sl@0: TPhysAddr pa = first_pa + KPageSize; sl@0: while (--zp_rem && aNumPages && *aPageList==pa) sl@0: { sl@0: ++n; sl@0: --aNumPages; sl@0: ++aPageList; sl@0: pa += KPageSize; sl@0: } sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("%d consecutive pages, zp_rem=%x, %d remaining pages",n,zp_rem,aNumPages)); sl@0: bmaAll.Free(ix,n); sl@0: TBitMapAllocator& bmaType = *(z->iBma[aType]); sl@0: bmaType.Free(ix,n); sl@0: iTotalFreeRamPages += n; sl@0: ZoneFreePages(z, n, aType); sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocFreePages, aType, n, first_pa); sl@0: #endif sl@0: } sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocFreePagesEnd); sl@0: #endif sl@0: } sl@0: sl@0: /** sl@0: Attempt to clear upto the required amount of discardable or movable pages sl@0: from the RAM zone. sl@0: sl@0: @param aZone The RAM zone to clear. sl@0: @param aRequiredPages The maximum number of pages to clear. sl@0: */ sl@0: void DRamAllocator::ZoneClearPages(SZone& aZone, TUint aRequiredPages) sl@0: { sl@0: __KTRACE_OPT(KMMU, sl@0: Kern::Printf("ZoneClearPages: ID 0x%x, req 0x%x", aZone.iId, aRequiredPages)); sl@0: // Discard the required number of discardable pages. sl@0: TUint offset = 0; sl@0: TInt r = NextAllocatedPage(&aZone, offset, EPageDiscard); sl@0: while (r == KErrNone && aRequiredPages) sl@0: { sl@0: TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase; sl@0: TInt discarded = M::DiscardPage(physAddr, aZone.iId, EFalse); sl@0: if (discarded == KErrNone) sl@0: {// The page was successfully discarded. 
sl@0: aRequiredPages--; sl@0: } sl@0: offset++; sl@0: r = NextAllocatedPage(&aZone, offset, EPageDiscard); sl@0: } sl@0: // Move the required number of movable pages. sl@0: offset = 0; sl@0: r = NextAllocatedPage(&aZone, offset, EPageMovable); sl@0: while(r == KErrNone && aRequiredPages) sl@0: { sl@0: TPhysAddr physAddr = (offset << KPageShift) + aZone.iPhysBase; sl@0: TPhysAddr newAddr = KPhysAddrInvalid; sl@0: if (M::MovePage(physAddr, newAddr, aZone.iId, EFalse) == KErrNone) sl@0: {// The page was successfully moved. sl@0: #ifdef _DEBUG sl@0: TInt newOffset = 0; sl@0: SZone* newZone = GetZoneAndOffset(newAddr, newOffset); sl@0: __NK_ASSERT_DEBUG(newZone != &aZone); sl@0: #endif sl@0: aRequiredPages--; sl@0: } sl@0: offset++; sl@0: r = NextAllocatedPage(&aZone, offset, EPageMovable); sl@0: } sl@0: } sl@0: sl@0: /** Attempt to allocate pages into a particular zone. Pages will not sl@0: always be contiguous. sl@0: sl@0: @param aPageList On return it will contain the addresses of any allocated pages sl@0: @param aZone The zone to allocate from sl@0: @param aNumPages The number of pages to allocate sl@0: @param aType The type of pages to allocate sl@0: @return The number of pages that were allocated sl@0: */ sl@0: TUint32 DRamAllocator::ZoneFindPages(TPhysAddr*& aPageList, SZone& aZone, TUint32 aNumPages, TZonePageType aType) sl@0: { sl@0: // Don't allow unknown pages to be allocated, saves extra 'if' when sl@0: // creating bmaType. sl@0: __NK_ASSERT_DEBUG(aType != EPageUnknown); sl@0: sl@0: TBitMapAllocator& bmaAll = *aZone.iBma[KBmaAllPages]; sl@0: TBitMapAllocator& bmaType = *(aZone.iBma[aType]); sl@0: TPhysAddr zpb = aZone.iPhysBase; sl@0: TInt got = bmaAll.AllocList(aNumPages, (TInt*)aPageList); sl@0: if (got) sl@0: { sl@0: TPhysAddr* pE = aPageList + got; sl@0: while(aPageList < pE) sl@0: { sl@0: TInt ix = *aPageList; sl@0: *aPageList++ = zpb + (ix << KPageShift); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Got page @%08x",zpb + (ix << KPageShift))); sl@0: sl@0: // Mark the page allocated on the page type bit map. sl@0: bmaType.Alloc(ix, 1); sl@0: } sl@0: ZoneAllocPages(&aZone, got, aType); sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocRamPages, aType, got, *(pE-got)); sl@0: #endif sl@0: } sl@0: return got; sl@0: } sl@0: sl@0: /** sl@0: Allocate discontiguous pages. sl@0: sl@0: Fixed pages are always allocated into the most preferable RAM zone that has free, sl@0: movable or discardable pages in it. This is to avoid fixed pages being placed sl@0: in the less preferred RAM zones. sl@0: sl@0: Movable and discardable pages are allocated into the RAM zones currently in use. sl@0: An empty RAM zone will only be used (switched on) if there are not enough free sl@0: pages in the in use RAM zones. The pages will be allocated from the least sl@0: preferable RAM to be in use after the allocation to the more preferred RAM zones. sl@0: sl@0: If a valid zone is specified in aBlockedZoneId then that RAM zone will not be sl@0: allocated into. Also, if aBlockedZoneId and aBlockRest is set then the allocation sl@0: will stop if aBlockZoneId sl@0: sl@0: @param aPageList On success, will contain the address of each allocated page sl@0: @param aNumPages The number of the pages to allocate sl@0: @param aType The type of the pages to allocate sl@0: @param aBlockedZoneId The ID of the RAM zone that shouldn't be allocated into. sl@0: The default value has no effect. 
sl@0: @param aBlockRest Set to ETrue to stop this allocation using any currently empty sl@0: RAM zones, EFalse to allow empty RAM zones to be used. Only sl@0: effects movable and discardable allocations. sl@0: sl@0: @return 0 on success, the number of extra pages required to fulfill the request on failure. sl@0: */ sl@0: TInt DRamAllocator::AllocRamPages(TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType, TUint aBlockedZoneId, TBool aBlockRest) sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocRamPages 0x%x type%d",aNumPages, aType)); sl@0: sl@0: M::RamAllocIsLocked(); sl@0: sl@0: // Should never allocate unknown pages. sl@0: __NK_ASSERT_DEBUG(aType != EPageUnknown); sl@0: sl@0: TPhysAddr* pageListBase = aPageList; sl@0: TUint32 numMissing = aNumPages; sl@0: sl@0: if (aType == EPageFixed) sl@0: {// Currently only a general defrag operation should set this and it won't sl@0: // allocate fixed pages. sl@0: __NK_ASSERT_DEBUG(!aBlockRest); sl@0: if ((TUint)aNumPages > iTotalFreeRamPages + M::NumberOfFreeDpPages()) sl@0: {// Not enough free space and not enough freeable pages. sl@0: goto exit; sl@0: } sl@0: sl@0: // Search through each zone in preference order until all pages allocated or sl@0: // have reached the end of the preference list sl@0: SDblQueLink* link = iZonePrefList.First(); sl@0: while (numMissing && link != &iZonePrefList.iA) sl@0: { sl@0: SZone& zone = *_LOFF(link, SZone, iPrefLink); sl@0: // Get the link to next zone before any potential reordering. sl@0: // Which would occur if previous zone is same preference and has sl@0: // more free space after this allocation. sl@0: link = link->iNext; sl@0: sl@0: if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType)) sl@0: {// The flags disallow aType pages or all pages. sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags)); sl@0: continue; sl@0: } sl@0: sl@0: numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("zone.iId 0x%x", zone.iId)); sl@0: sl@0: if (numMissing && sl@0: (zone.iAllocPages[EPageMovable] || zone.iAllocPages[EPageDiscard])) sl@0: {// Not all the required pages where allocated and there are still some sl@0: // movable and discardable pages in this RAM zone. sl@0: ZoneClearPages(zone, numMissing); sl@0: sl@0: // Have discarded and moved everything required or possible so sl@0: // now allocate into the pages just freed. sl@0: numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType); sl@0: } sl@0: } sl@0: } sl@0: else sl@0: { sl@0: if ((TUint)aNumPages > iTotalFreeRamPages) sl@0: {// Not enough free pages to fulfill this request so return amount required sl@0: return aNumPages - iTotalFreeRamPages; sl@0: } sl@0: sl@0: // Determine if there are enough free pages in the RAM zones in use. 
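	// Walk from the least-preferable RAM zone currently holding movable or
	// discardable pages (iZoneLeastMovDis) back towards the most preferable
	// zone, summing the free pages of every zone this allocation may use.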
sl@0: TUint totalFreeInUse = 0; sl@0: SDblQueLink* link = iZoneLeastMovDis; sl@0: for(; link != &iZonePrefList.iA; link = link->iPrev) sl@0: { sl@0: SZone& zone = *_LOFF(link, SZone, iPrefLink); sl@0: if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType) || sl@0: (aBlockRest && (zone.iFlags & KRamZoneFlagGenDefragBlock))) sl@0: {// The blocked RAM zone or flags disallow aType pages or all pages sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags)); sl@0: continue; sl@0: } sl@0: totalFreeInUse += zone.iFreePages; sl@0: } sl@0: sl@0: if (aBlockRest && totalFreeInUse < (TUint)aNumPages) sl@0: {// Allocating as part of a general defragmentation and sl@0: // can't allocate without using a RAM zone less preferable than sl@0: // the current least prefeable RAM zone with movable and/or sl@0: //discardable. sl@0: __NK_ASSERT_DEBUG(numMissing); sl@0: goto exit; sl@0: } sl@0: sl@0: SDblQueLink* leastClearable = iZoneLeastMovDis; sl@0: while (totalFreeInUse < (TUint)aNumPages) sl@0: {// The amount of free pages in the RAM zones with movable sl@0: // and/or discardable isn't enough. sl@0: leastClearable = leastClearable->iNext; sl@0: if (leastClearable == &iZonePrefList.iA) sl@0: {// There are no more RAM zones to allocate into. sl@0: __NK_ASSERT_DEBUG(numMissing); sl@0: goto exit; sl@0: } sl@0: SZone& zone = *_LOFF(leastClearable, SZone, iPrefLink); sl@0: if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType)) sl@0: {// The flags disallow aType pages or all pages sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags)); sl@0: continue; sl@0: } sl@0: totalFreeInUse += zone.iFreePages; sl@0: } sl@0: // Now that we know exactly how many RAM zones will be required do sl@0: // the allocation. To reduce fixed allocations having to clear RAM sl@0: // zones, allocate from the least preferable RAM to be used sl@0: // to the most preferable RAM zone. sl@0: link = leastClearable; sl@0: while (numMissing) sl@0: { sl@0: __NK_ASSERT_DEBUG(link != &iZonePrefList.iA); sl@0: SZone& zone = *_LOFF(link, SZone, iPrefLink); sl@0: // Update the link before any reordering so we don't miss a RAM zone. sl@0: link = link->iPrev; sl@0: sl@0: if (zone.iId == aBlockedZoneId || NoAllocOfPageType(zone, aType) || sl@0: (aBlockRest && (zone.iFlags & KRamZoneFlagGenDefragBlock))) sl@0: {// The blocked RAM zone or flags disallow aType pages or all pages sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("ARP Flags 0x%08x", zone.iFlags)); sl@0: continue; sl@0: } sl@0: sl@0: numMissing -= ZoneFindPages(aPageList, zone, numMissing, aType); sl@0: __KTRACE_OPT(KMMU, Kern::Printf("zone.iId 0x%x", zone.iId)); sl@0: } sl@0: __NK_ASSERT_DEBUG(!numMissing); sl@0: } sl@0: sl@0: exit: sl@0: // Update here so any call to FreeRamPages doesn't upset count sl@0: aNumPages -= numMissing; //set to number of pages that are allocated sl@0: iTotalFreeRamPages -= aNumPages; sl@0: sl@0: if (numMissing) sl@0: {// Couldn't allocate all required pages so free those that were allocated sl@0: FreeRamPages(pageListBase, aNumPages, aType); sl@0: } sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: else sl@0: { sl@0: BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocRamPagesEnd); sl@0: } sl@0: #endif sl@0: return numMissing; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Attempt to allocate discontiguous pages from the specified RAM zone. sl@0: sl@0: NOTE - This method only obeys the KRamZoneFlagNoAlloc and KRamZoneFlagClaiming sl@0: flags and not the others. 
sl@0: But as currently only EFixed pages will be allocated using this method that is sl@0: the desired behaviour. sl@0: sl@0: @param aZoneIdList An array of the IDs of the RAM zones to allocate from. sl@0: @param aZoneIdCount The number of IDs in aZoneIdList. sl@0: @param aPageList On success, will contain the address of each allocated page. sl@0: @param aNumPages The number of the pages to allocate. sl@0: @param aType The type of the pages to allocate. sl@0: sl@0: @return KErrNone on success, KErrNoMemory if allocation couldn't succeed or sl@0: the RAM zone has the KRamZoneFlagNoAlloc flag set, KErrArgument if a zone of sl@0: aZoneIdList doesn't exist or aNumPages is greater than the total pages in the zone. sl@0: */ sl@0: TInt DRamAllocator::ZoneAllocRamPages(TUint* aZoneIdList, TUint aZoneIdCount, TPhysAddr* aPageList, TInt aNumPages, TZonePageType aType) sl@0: { sl@0: M::RamAllocIsLocked(); sl@0: __NK_ASSERT_DEBUG(aType == EPageFixed); sl@0: sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocRamPages 0x%x zones 0x%x",aNumPages, aZoneIdCount)); sl@0: sl@0: TInt r = KErrNone; sl@0: TUint* zoneIdPtr = aZoneIdList; sl@0: TUint* zoneIdEnd = zoneIdPtr + aZoneIdCount; sl@0: TUint numMissing = aNumPages; sl@0: TUint physicalPages = 0; sl@0: TPhysAddr* pageListBase = aPageList; sl@0: sl@0: // Always loop through all the RAM zones so that if an invalid ID is specified sl@0: // it is always detected whether all the specified RAM zones were required sl@0: // for the allocation or not. sl@0: for(; zoneIdPtr < zoneIdEnd; zoneIdPtr++) sl@0: { sl@0: SZone* zone = ZoneFromId(*zoneIdPtr); sl@0: sl@0: if (zone == NULL) sl@0: {// Invalid zone ID. sl@0: r = KErrArgument; sl@0: break; sl@0: } sl@0: sl@0: physicalPages += zone->iPhysPages; sl@0: sl@0: if (zone->iFlags & (KRamZoneFlagNoAlloc|KRamZoneFlagClaiming)) sl@0: {// If this RAM zone can't be allocated into then skip it. sl@0: continue; sl@0: } sl@0: sl@0: numMissing -= ZoneFindPages(aPageList, *zone, numMissing, aType); sl@0: sl@0: if (numMissing && aType == EPageFixed) sl@0: {// Remove up to required number of pages from the RAM zone sl@0: // and reattempt the allocation. sl@0: ZoneClearPages(*zone, numMissing); sl@0: numMissing -= ZoneFindPages(aPageList, *zone, numMissing, aType); sl@0: } sl@0: } sl@0: sl@0: // Update iTotalFreeRamPages here so that if allocation doesn't succeed then sl@0: // FreeRamPages() will keep it consistent. sl@0: TUint numAllocated = aNumPages - numMissing; sl@0: iTotalFreeRamPages -= numAllocated; sl@0: sl@0: if (r == KErrArgument || physicalPages < (TUint)aNumPages) sl@0: {// Invalid zone ID or the number of pages requested is too large. sl@0: // This should fail regardless of whether the allocation failed or not. sl@0: FreeRamPages(pageListBase, numAllocated, aType); sl@0: return KErrArgument; sl@0: } sl@0: sl@0: if (numMissing) sl@0: {// Couldn't allocate all required pages so free those that were allocated sl@0: FreeRamPages(pageListBase, numAllocated, aType); sl@0: return KErrNoMemory; sl@0: } sl@0: sl@0: // Have allocated all the required pages. 
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocZoneRamPagesEnd);
sl@0: #endif
sl@0: return KErrNone;
sl@0: }
sl@0: 
sl@0: 
sl@0: /**
sl@0: Will return zones one at a time in the following search patterns until a suitable
sl@0: zone has been found or it is determined that there is no suitable zone:
sl@0: - preference order
sl@0: - address order
sl@0: Before the first call of a new search sequence the caller must set:
sl@0: iZoneTmpAddrIndex = -1;
sl@0: iZoneTmpPrefLink = iZonePrefList.First();
sl@0: 
sl@0: @param aZone On return this will be a pointer to the next zone to search.
sl@0: @param aState The current search state, i.e. which of the zone orderings to follow.
sl@0: It will be updated if necessary by this function.
sl@0: @param aType The type of page to be allocated.
sl@0: @param aBlockedZoneId The ID of a RAM zone to not allocate into.
sl@0: @param aBlockRest ETrue if allocation should fail as soon as a blocked zone is reached,
sl@0: EFalse otherwise. (Currently not used)
sl@0: @return ETrue if a suitable zone is found, EFalse when the allocation is not possible.
sl@0: */
sl@0: TBool DRamAllocator::NextAllocZone(SZone*& aZone, TZoneSearchState& aState, TZonePageType aType, TUint aBlockedZoneId, TBool aBlockRest)
sl@0: {
sl@0: TUint currentState = aState;
sl@0: TBool r = EFalse;
sl@0: 
sl@0: for (; currentState < EZoneSearchEnd; currentState++)
sl@0: {
sl@0: if (currentState == EZoneSearchAddr)
sl@0: {
sl@0: iZoneTmpAddrIndex++;
sl@0: for (; iZoneTmpAddrIndex < (TInt)iNumZones; iZoneTmpAddrIndex++)
sl@0: {
sl@0: aZone = iZones + iZoneTmpAddrIndex;
sl@0: if (aBlockedZoneId != aZone->iId && !NoAllocOfPageType(*aZone, aType))
sl@0: {
sl@0: r = ETrue;
sl@0: goto exit;
sl@0: }
sl@0: }
sl@0: }
sl@0: else
sl@0: {
sl@0: while(iZoneTmpPrefLink != &iZonePrefList.iA)
sl@0: {
sl@0: aZone = _LOFF(iZoneTmpPrefLink, SZone, iPrefLink);
sl@0: iZoneTmpPrefLink = iZoneTmpPrefLink->iNext; // Update before any re-ordering
sl@0: if (aBlockedZoneId != aZone->iId && !NoAllocOfPageType(*aZone, aType))
sl@0: {
sl@0: r = ETrue;
sl@0: goto exit;
sl@0: }
sl@0: }
sl@0: }
sl@0: }
sl@0: exit:
sl@0: __NK_ASSERT_DEBUG((r && currentState < EZoneSearchEnd) || (!r && currentState == EZoneSearchEnd));
sl@0: 
sl@0: aState = (TZoneSearchState)currentState;
sl@0: return r;
sl@0: }
sl@0: 
sl@0: /**
sl@0: Search through the zones for the requested contiguous RAM, first in preference
sl@0: order then, if that fails, in address order.
sl@0: 
sl@0: @param aNumPages The number of contiguous pages to find
sl@0: @param aPhysAddr Will contain the base address of any contiguous run if found
sl@0: @param aType The page type of the memory to be allocated
sl@0: @param aAlign Alignment specified as the alignment shift
sl@0: @param aBlockedZoneId The ID of a zone that can't be allocated into, by default this has no effect
sl@0: @param aBlockRest Set to ETrue to stop allocation as soon as aBlockedZoneId is reached
sl@0: in preference ordering. EFalse otherwise.
sl@0: 
sl@0: @return KErrNone on success, KErrNoMemory otherwise
sl@0: */
sl@0: TInt DRamAllocator::AllocContiguousRam(TUint aNumPages, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign, TUint aBlockedZoneId, TBool aBlockRest)
sl@0: {
sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam size %08x align %d",aNumPages,aAlign));
sl@0: 
sl@0: M::RamAllocIsLocked();
sl@0: 
sl@0: // No support for non-fixed pages as this will discard and move
sl@0: // pages if required.
sl@0: __NK_ASSERT_DEBUG(aType == EPageFixed); sl@0: TInt alignWrtPage = Max(aAlign - KPageShift, 0); sl@0: TUint32 alignmask = (1u << alignWrtPage) - 1; sl@0: sl@0: // Attempt to find enough pages searching in preference order first then sl@0: // in address order sl@0: TZoneSearchState searchState = EZoneSearchPref; sl@0: SZone* zone; sl@0: SZone* prevZone = NULL; sl@0: TInt carryAll = 0; // Carry for all pages bma, clear to start new run. sl@0: TInt carryImmov = 0; // Carry for immovable pages bma, clear to start new run. sl@0: TInt base = 0; sl@0: TInt offset = 0; sl@0: iZoneTmpAddrIndex = -1; sl@0: iZoneTmpPrefLink = iZonePrefList.First(); sl@0: while (NextAllocZone(zone, searchState, aType, aBlockedZoneId, aBlockRest)) sl@0: { sl@0: // Be sure to start from scratch if zone not contiguous with previous zone sl@0: if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd)) sl@0: { sl@0: carryAll = 0; sl@0: carryImmov = 0; sl@0: } sl@0: prevZone = zone; sl@0: TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]); sl@0: base = TInt(zone->iPhysBase >> KPageShift); sl@0: TInt runLength; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryAll=%08x offset=%08x", base, carryAll, offset)); sl@0: offset = bmaAll.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryAll, runLength); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset)); sl@0: sl@0: if (offset >= 0) sl@0: {// Have found enough contiguous pages so return address of physical page sl@0: // at the start of the region sl@0: aPhysAddr = TPhysAddr((base + offset - carryAll + alignmask) & ~alignmask) << KPageShift; sl@0: MarkPagesAllocated(aPhysAddr, aNumPages, aType); sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr)); sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: else sl@0: {// No run found when looking in just the free pages so see if this sl@0: // RAM zone could be used if pages where moved or discarded. sl@0: if (aNumPages > KMaxFreeableContiguousPages) sl@0: {// Can't move or discard any pages so move on to next RAM zone sl@0: // taking any run at the end of this RAM zone into account. sl@0: carryImmov = 0; sl@0: continue; sl@0: } sl@0: TBitMapAllocator& bmaImmov = *(zone->iBma[EPageFixed]); sl@0: offset = 0; // Clear so searches whole of fixed BMA on the first pass. sl@0: do sl@0: { sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: base=%08x carryImmov=%08x offset=%08x", base, carryImmov, offset)); sl@0: offset = bmaImmov.AllocAligned(aNumPages, alignWrtPage, base, EFalse, carryImmov, runLength, offset); sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset)); sl@0: if (offset >= 0) sl@0: {// Have found a run in immovable page bma so attempt to clear sl@0: // it for the allocation. sl@0: TPhysAddr addrBase = TPhysAddr((base + offset - carryImmov + alignmask) & ~alignmask) << KPageShift; sl@0: TPhysAddr addrEnd = addrBase + (aNumPages << KPageShift); sl@0: sl@0: // Block the RAM zones containing the contiguous region sl@0: // from being allocated into when pages are moved or replaced. 
sl@0: TPhysAddr addr = addrBase; sl@0: TInt tmpOffset; sl@0: SZone* tmpZone = GetZoneAndOffset(addr, tmpOffset); sl@0: while (addr < addrEnd-1) sl@0: { sl@0: tmpZone->iFlags |= KRamZoneFlagTmpBlockAlloc; sl@0: addr = tmpZone->iPhysEnd; sl@0: tmpZone++; sl@0: } sl@0: sl@0: addr = addrBase; sl@0: TInt contigOffset = 0; sl@0: SZone* contigZone = GetZoneAndOffset(addr, contigOffset); sl@0: for (; addr != addrEnd; addr += KPageSize, contigOffset++) sl@0: { sl@0: if (contigZone->iPhysEnd < addr) sl@0: { sl@0: contigZone = GetZoneAndOffset(addr, contigOffset); sl@0: __NK_ASSERT_DEBUG(contigZone != NULL); sl@0: } sl@0: #ifdef _DEBUG // This page shouldn't be allocated as fixed, only movable or discardable. sl@0: __NK_ASSERT_DEBUG(contigZone != NULL); sl@0: __NK_ASSERT_DEBUG(contigZone->iBma[EPageFixed]->NotAllocated(contigOffset, 1)); sl@0: SPageInfo* pageInfo = SPageInfo::SafeFromPhysAddr(addr); sl@0: __NK_ASSERT_DEBUG(pageInfo != NULL); sl@0: #endif sl@0: TPhysAddr newAddr; sl@0: TInt moveRet = M::MovePage(addr, newAddr, contigZone->iId, EFalse); sl@0: if (moveRet != KErrNone && moveRet != KErrNotFound) sl@0: {// This page couldn't be moved or discarded so sl@0: // restart the search the page after this one. sl@0: __KTRACE_OPT(KMMU2, sl@0: Kern::Printf("ContigMov fail offset %x moveRet %d addr %x carryImmov %x", sl@0: offset, moveRet, addr, carryImmov)); sl@0: // Can't rely on RAM zone preference ordering being sl@0: // the same so clear carrys and restart search from sl@0: // within the current RAM zone or skip onto the next sl@0: // one if at the end of this one. sl@0: carryImmov = 0; sl@0: carryAll = 0; sl@0: offset = (addr < zone->iPhysBase)? 0 : contigOffset + 1; sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("ContigMov fail offset %x", offset)); sl@0: break; sl@0: } sl@0: } sl@0: // Unblock the RAM zones containing the contiguous region. sl@0: TPhysAddr flagAddr = addrBase; sl@0: tmpZone = GetZoneAndOffset(flagAddr, tmpOffset); sl@0: while (flagAddr < addrEnd-1) sl@0: { sl@0: tmpZone->iFlags &= ~KRamZoneFlagTmpBlockAlloc; sl@0: flagAddr = tmpZone->iPhysEnd; sl@0: tmpZone++; sl@0: } sl@0: sl@0: if (addr == addrEnd) sl@0: {// Cleared all the required pages so allocate them. sl@0: // Return address of physical page at the start of the region. sl@0: aPhysAddr = addrBase; sl@0: MarkPagesAllocated(aPhysAddr, aNumPages, aType); sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocContiguousRam returns %08x",aPhysAddr)); sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocContiguousRam, aType, aNumPages, aPhysAddr); sl@0: #endif sl@0: return KErrNone; sl@0: } sl@0: } sl@0: } sl@0: // Keep searching immovable page bma of the current RAM zone until sl@0: // gone past end of RAM zone or no run can be found. sl@0: while (offset >= 0 && (TUint)offset < zone->iPhysPages); sl@0: } sl@0: } sl@0: return KErrNoMemory; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Attempt to allocate the contiguous RAM from the specified zone. sl@0: sl@0: NOTE - This method only obeys the KRamZoneFlagNoAlloc and KRamZoneFlagClaiming sl@0: flags and not the others. sl@0: But as currently only EFixed pages will be allocated using this method that is sl@0: the desired behaviour. sl@0: sl@0: @param aZoneIdList An array of the IDs of the RAM zones to allocate from. sl@0: @param aZoneIdCount The number of the IDs listed by aZoneIdList. 
sl@0: @param aSize The number of contiguous bytes to find
sl@0: @param aPhysAddr Will contain the base address of the contiguous run if found
sl@0: @param aType The page type of the memory to be allocated
sl@0: @param aAlign Alignment specified as the alignment shift
sl@0: 
sl@0: @return KErrNone on success, KErrNoMemory if allocation couldn't succeed or
sl@0: the RAM zone has the KRamZoneFlagNoAlloc flag set. KErrArgument if a zone of
sl@0: aZoneIdList doesn't exist or if aSize is larger than the total size of the specified zones.
sl@0: */
sl@0: TInt DRamAllocator::ZoneAllocContiguousRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TZonePageType aType, TInt aAlign)
sl@0: {
sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam zones 0x%x size 0x%08x align %d",aZoneIdCount, aSize, aAlign));
sl@0: 
sl@0: M::RamAllocIsLocked();
sl@0: __NK_ASSERT_DEBUG(aType == EPageFixed);
sl@0: 
sl@0: 
sl@0: TUint numPages = (aSize + KPageSize - 1) >> KPageShift;
sl@0: TInt carry = 0; // must be zero as this is always the start of a new run
sl@0: TInt alignWrtPage = Max(aAlign - KPageShift, 0);
sl@0: TUint32 alignmask = (1u << alignWrtPage) - 1;
sl@0: TInt offset = -1;
sl@0: TInt base = 0;
sl@0: 
sl@0: TUint physPages = 0;
sl@0: TUint* zoneIdPtr = aZoneIdList;
sl@0: TUint* zoneIdEnd = aZoneIdList + aZoneIdCount;
sl@0: SZone* prevZone = NULL;
sl@0: for (; zoneIdPtr < zoneIdEnd; zoneIdPtr++)
sl@0: {
sl@0: SZone* zone = ZoneFromId(*zoneIdPtr);
sl@0: if (zone == NULL)
sl@0: {// Couldn't find a zone with this ID.
sl@0: return KErrArgument;
sl@0: }
sl@0: physPages += zone->iPhysPages;
sl@0: 
sl@0: if (offset >= 0 ||
sl@0: (zone->iFlags & (KRamZoneFlagNoAlloc|KRamZoneFlagClaiming)))
sl@0: {// Keep searching through the RAM zones if the allocation
sl@0: // has succeeded, to ensure the ID list is always fully verified, or
sl@0: // if this zone is currently blocked for further allocations.
sl@0: continue;
sl@0: }
sl@0: 
sl@0: // Be sure to start from scratch if zone not contiguous with previous zone
sl@0: if (prevZone && (zone->iPhysBase == 0 || (zone->iPhysBase - 1) != prevZone->iPhysEnd))
sl@0: {
sl@0: carry = 0;
sl@0: }
sl@0: prevZone = zone;
sl@0: 
sl@0: TInt len;
sl@0: TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]);
sl@0: base = TInt(zone->iPhysBase >> KPageShift);
sl@0: 
sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: aBase=%08x aCarry=%08x", base, carry));
sl@0: offset = bmaAll.AllocAligned(numPages, alignWrtPage, base, EFalse, carry, len);
sl@0: __KTRACE_OPT(KMMU,Kern::Printf("AllocAligned: offset=%08x", offset));
sl@0: }
sl@0: 
sl@0: if (physPages < numPages)
sl@0: {// The allocation requested is too large for the specified RAM zones.
sl@0: return KErrArgument;
sl@0: }
sl@0: 
sl@0: if (offset < 0)
sl@0: {// The allocation failed.
sl@0: return KErrNoMemory;
sl@0: }
sl@0: 
sl@0: // Have found enough contiguous pages so mark the pages allocated and
sl@0: // return address of physical page at the start of the region.
sl@0: aPhysAddr = TPhysAddr((base + offset - carry + alignmask) & ~alignmask) << KPageShift;
sl@0: MarkPagesAllocated(aPhysAddr, numPages, aType);
sl@0: 
sl@0: __KTRACE_OPT(KMMU,Kern::Printf("ZoneAllocContiguousRam returns %08x",aPhysAddr));
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: BTrace12(BTrace::ERamAllocator, BTrace::ERamAllocZoneContiguousRam, aType, numPages, aPhysAddr);
sl@0: #endif
sl@0: return KErrNone;
sl@0: }
sl@0: 
sl@0: 
sl@0: /**
sl@0: Attempt to set the specified contiguous block of RAM pages to be either
sl@0: allocated or free.
sl@0: sl@0: @param aBase The base address of the RAM to update. sl@0: @param aSize The number of contiguous bytes of RAM to update. sl@0: @param aState Set to ETrue to free the RAM, EFalse to allocate the RAM. sl@0: @param aType The type of the pages being updated. sl@0: sl@0: @return KErrNone on success, KErrArgument if aBase is an invalid address, sl@0: KErrGeneral if a page being marked free is already free, sl@0: KErrInUse if the page being marked allocated is already allocated. sl@0: */ sl@0: TInt DRamAllocator::SetPhysicalRamState(TPhysAddr aBase, TInt aSize, TBool aState, TZonePageType aType) sl@0: { sl@0: M::RamAllocIsLocked(); sl@0: sl@0: __KTRACE_OPT(KMMU,Kern::Printf("SetPhysicalRamState(%08x,%x,%d)",aBase,aSize,aState?1:0)); sl@0: TUint32 pageMask = KPageSize-1; sl@0: aSize += (aBase & pageMask); sl@0: aBase &= ~pageMask; sl@0: TInt npages = (aSize + pageMask) >> KPageShift; sl@0: __KTRACE_OPT(KMMU,Kern::Printf("Rounded base %08x npages=%x",aBase,npages)); sl@0: TInt baseOffset; sl@0: SZone* baseZone = GetZoneAndOffset(aBase, baseOffset); sl@0: if (!baseZone || (TUint32)aSize > (iPhysAddrTop - aBase + 1)) sl@0: { sl@0: return KErrArgument; sl@0: } sl@0: SZone* zone = baseZone; sl@0: SZone* zoneEnd = iZones + iNumZones; sl@0: TPhysAddr base = aBase; sl@0: TInt pagesLeft = npages; sl@0: TInt offset = baseOffset; sl@0: TInt pageCount = -1; sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("Zone %x page index %x z=%08x zE=%08x n=%x base=%08x",zone->iId, offset, zone, zoneEnd, pagesLeft, base)); sl@0: for (; pagesLeft && zone < zoneEnd; ++zone) sl@0: { sl@0: if (zone->iPhysBase + (offset << KPageShift) != base) sl@0: {// Zone not contiguous with current run of page, so have been sl@0: // asked to set the state of non-existent pages. sl@0: return KErrArgument; sl@0: } sl@0: sl@0: TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]); sl@0: TInt zp_rem = bmaAll.iSize - offset; sl@0: pageCount = Min(pagesLeft, zp_rem); sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("Zone %x pages %x+%x base %08x", zone->iId, offset, pageCount, base)); sl@0: if(aState) sl@0: { sl@0: if(bmaAll.NotAllocated(offset, pageCount)) sl@0: { sl@0: return KErrGeneral; sl@0: } sl@0: } sl@0: else sl@0: { sl@0: if(bmaAll.NotFree(offset, pageCount)) sl@0: { sl@0: return KErrInUse; sl@0: } sl@0: } sl@0: pagesLeft -= pageCount; sl@0: offset = 0; sl@0: base += (TPhysAddr(pageCount) << KPageShift); sl@0: } sl@0: if (pagesLeft) sl@0: { sl@0: return KErrArgument; // not all of the specified range exists sl@0: } sl@0: sl@0: iTotalFreeRamPages += (aState ? npages : -npages); sl@0: zone = baseZone; sl@0: offset = baseOffset; sl@0: for (pagesLeft = npages; pagesLeft; pagesLeft -= pageCount) sl@0: { sl@0: TBitMapAllocator& bmaAll = *(zone->iBma[KBmaAllPages]); sl@0: // Unknown and fixed pages share a bit map. sl@0: TBitMapAllocator& bmaType = *(zone->iBma[(aType != EPageUnknown)? 
aType : EPageFixed]);
sl@0: TInt zp_rem = bmaAll.iSize - offset;
sl@0: pageCount = Min(pagesLeft, zp_rem);
sl@0: if (aState)
sl@0: {
sl@0: bmaAll.Free(offset, pageCount);
sl@0: bmaType.Free(offset, pageCount);
sl@0: ZoneFreePages(zone, pageCount, aType);
sl@0: }
sl@0: else
sl@0: {
sl@0: bmaAll.Alloc(offset, pageCount);
sl@0: bmaType.Alloc(offset, pageCount);
sl@0: ZoneAllocPages(zone, pageCount, aType);
sl@0: }
sl@0: __KTRACE_OPT(KMMU2,Kern::Printf("Zone %d pages %x+%x base %08x",zone-iZones, offset, pageCount, base));
sl@0: ++zone;
sl@0: offset = 0;
sl@0: }
sl@0: return KErrNone;
sl@0: }
sl@0: 
sl@0: /** Update the allocated page counts for the zone that the page is allocated into.
sl@0: 
sl@0: @param aPageInfo The page information structure of the page.
sl@0: @param aOldType The type the page was allocated as
sl@0: @param aNewType The type the page is changing to
sl@0: */
sl@0: void DRamAllocator::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldType, TZonePageType aNewType)
sl@0: {
sl@0: 
sl@0: TInt offset;
sl@0: SZone* zone = GetZoneAndOffset(aPageInfo->PhysAddr(), offset);
sl@0: #ifdef _DEBUG
sl@0: // *********** System lock may be held while this is invoked so don't do********
sl@0: // *********** anything too slow and definitely don't call zone callback********
sl@0: M::RamAllocIsLocked();
sl@0: CHECK_PRECONDITIONS((MASK_THREAD_CRITICAL) & ~MASK_NO_FAST_MUTEX, "DRamAllocator::ChangePageType");
sl@0: 
sl@0: // Get zone page is in and on debug builds check that it is allocated
sl@0: if (zone == NULL || zone->iBma[KBmaAllPages]->NotAllocated(offset, 1))
sl@0: {
sl@0: Panic(EAllocRamPagesInconsistent);
sl@0: }
sl@0: 
sl@0: // Check if adjusting counts is valid, i.e. won't cause a roll over
sl@0: if (zone->iAllocPages[aOldType] - 1 > zone->iAllocPages[aOldType] ||
sl@0: zone->iAllocPages[aNewType] + 1 < zone->iAllocPages[aNewType])
sl@0: {
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("ChangePageType Alloc Unk %x Fx %x Mv %x Dis %x",zone->iAllocPages[EPageUnknown],
sl@0: zone->iAllocPages[EPageFixed], zone->iAllocPages[EPageMovable],zone->iAllocPages[EPageDiscard]));
sl@0: Panic(EZonesCountErr);
sl@0: }
sl@0: #endif
sl@0: 
sl@0: // Update the counts and bmas
sl@0: zone->iAllocPages[aOldType]--;
sl@0: zone->iBma[aOldType]->Free(offset);
sl@0: zone->iAllocPages[aNewType]++;
sl@0: zone->iBma[aNewType]->Alloc(offset, 1);
sl@0: 
sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("ChangePageType Alloc Unk %x Fx %x Mv %x Dis %x",zone->iAllocPages[EPageUnknown],
sl@0: zone->iAllocPages[EPageFixed], zone->iAllocPages[EPageMovable],zone->iAllocPages[EPageDiscard]));
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocChangePageType, aNewType, aPageInfo->PhysAddr());
sl@0: #endif
sl@0: }
sl@0: 
sl@0: /**
sl@0: Get the next page in this zone that is allocated after this one.
sl@0: 
sl@0: @param aZone The zone to find the next allocated page in.
sl@0: @param aOffset On entry this is the offset from which the next allocated
sl@0: page in the zone should be found, on return it will be the offset
sl@0: of the next allocated page.
sl@0: @return KErrNone if a next allocated page could be found, KErrNotFound if no more pages in
sl@0: the zone after aOffset are allocated, KErrArgument if aOffset is outside the zone.
sl@0: */
sl@0: TInt DRamAllocator::NextAllocatedPage(SZone* aZone, TUint& aOffset, TZonePageType aType) const
sl@0: {
sl@0: const TUint KWordAlignMask = KMaxTUint32 << 5;
sl@0: 
sl@0: M::RamAllocIsLocked();
sl@0: 
sl@0: __NK_ASSERT_DEBUG(aZone - iZones < (TInt)iNumZones);
sl@0: // Makes things simpler for bma selection.
sl@0: __NK_ASSERT_DEBUG(aType != EPageUnknown);
sl@0: 
sl@0: if (aOffset >= aZone->iPhysPages)
sl@0: {// Starting point is outside the zone
sl@0: return KErrArgument;
sl@0: }
sl@0: 
sl@0: TUint offset = aOffset;
sl@0: TUint endOffset = aZone->iPhysPages;
sl@0: TUint endOffsetAligned = endOffset & KWordAlignMask;
sl@0: 
sl@0: // Select the BMA to search.
sl@0: TUint bmaIndex = (aType == EPageTypes)? KBmaAllPages : aType;
sl@0: TUint32* map = &(aZone->iBma[bmaIndex]->iMap[offset >> 5]);
sl@0: TUint32 bits = *map++;
sl@0: 
sl@0: // Set bits for pages before 'offset' (i.e. ones we want to ignore)...
sl@0: bits |= ~(KMaxTUint32 >> (offset & ~KWordAlignMask));
sl@0: 
sl@0: // Find the first bit map word from aOffset in aZone with allocated pages
sl@0: while (bits == KMaxTUint32 && offset < endOffsetAligned)
sl@0: {
sl@0: bits = *map++;
sl@0: offset = (offset + 32) & KWordAlignMask;
sl@0: }
sl@0: 
sl@0: if (offset >= endOffsetAligned && endOffset != endOffsetAligned)
sl@0: {// Have reached the last bit mask word so set the bits that are
sl@0: // outside of the zone so that they are ignored.
sl@0: bits |= KMaxTUint32 >> (endOffset - endOffsetAligned);
sl@0: }
sl@0: 
sl@0: if (bits == KMaxTUint32)
sl@0: {// No allocated pages found after aOffset in aZone.
sl@0: return KErrNotFound;
sl@0: }
sl@0: 
sl@0: // Now we have a word with allocated pages in it so determine the exact
sl@0: // offset of the next allocated page
sl@0: TUint32 mask = 0x80000000 >> (offset & ~KWordAlignMask);
sl@0: while (bits & mask)
sl@0: {
sl@0: mask >>= 1;
sl@0: offset++;
sl@0: }
sl@0: 
sl@0: if (offset >= endOffset)
sl@0: {// Reached the end of the zone without finding an allocated page after aOffset
sl@0: return KErrNotFound;
sl@0: }
sl@0: 
sl@0: // Should definitely have found an allocated page within aZone's pages
sl@0: __NK_ASSERT_DEBUG(mask != 0 && !(bits & mask) && offset < aZone->iPhysPages);
sl@0: 
sl@0: aOffset = offset;
sl@0: return KErrNone;
sl@0: }
sl@0: 
sl@0: /**
sl@0: See if any of the least preferable RAM zones can be emptied. If they can then
sl@0: initialise the allocator for a general defragmentation operation.
sl@0: 
sl@0: Stage 0 of the general defrag is to ensure that there are enough free
sl@0: pages in the more preferable RAM zones to be in use after the general defrag
sl@0: for the movable page allocations. This is achieved by discarding the
sl@0: required amount of discardable pages from the more preferable RAM zones
sl@0: to be in use after the general defrag.
sl@0: 
sl@0: 
sl@0: @param aStage On return this will contain the stage the general
sl@0: defragmentation should begin at. I.e. if no RAM
sl@0: zones can be cleared then just perform the final
sl@0: tidying stage.
sl@0: @param aRequiredToBeDiscarded On return this will contain the number of
sl@0: discardable pages that need to be discarded
sl@0: from the RAM zones to be in use after the
sl@0: general defrag.
sl@0: @return Pointer to the RAM zone object that may potentially have pages
sl@0: discarded by the general defrag. This will be NULL if no suitable
sl@0: RAM zone could be found.
sl@0: */ sl@0: SZone* DRamAllocator::GeneralDefragStart0(TGenDefragStage& aStage, TUint& aRequiredToBeDiscarded) sl@0: { sl@0: #ifdef _DEBUG sl@0: if (!K::Initialising) sl@0: { sl@0: M::RamAllocIsLocked(); sl@0: #ifdef __VERIFY_LEASTMOVDIS sl@0: VerifyLeastPrefMovDis(); sl@0: #endif sl@0: } sl@0: // Any previous general defrag operation must have ended. sl@0: __NK_ASSERT_DEBUG(iZoneGeneralPrefLink == NULL); sl@0: __NK_ASSERT_DEBUG(iZoneGeneralTmpLink == NULL); sl@0: #endif sl@0: sl@0: if (iNumZones == 1) sl@0: { sl@0: // Only have one RAM zone so a defrag can't do anything. sl@0: return NULL; sl@0: } sl@0: sl@0: // Determine how many movable or discardable pages are required to be allocated. sl@0: TUint requiredPagesDis = 0; sl@0: TUint requiredPagesMov = 0; sl@0: TUint firstClearableInUseRank = 0; sl@0: SDblQueLink* link = iZoneLeastMovDis; sl@0: do sl@0: { sl@0: SZone& zone = *_LOFF(link, SZone, iPrefLink); sl@0: requiredPagesDis += zone.iAllocPages[EPageDiscard]; sl@0: requiredPagesMov += zone.iAllocPages[EPageMovable]; sl@0: sl@0: if (!firstClearableInUseRank && sl@0: (zone.iAllocPages[EPageMovable] || zone.iAllocPages[EPageDiscard]) && sl@0: !zone.iAllocPages[EPageFixed] && !zone.iAllocPages[EPageUnknown]) sl@0: {// This is the least preferable RAM zone that is has movable or sl@0: // discardable but may be clearable as it has no immovable pages. sl@0: firstClearableInUseRank = zone.iPrefRank; sl@0: } sl@0: sl@0: // Reset KRamZoneFlagGenDefrag flag bit for each RAM zone to be defraged. sl@0: zone.iFlags &= ~(KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock); sl@0: sl@0: link = link->iPrev; sl@0: } sl@0: while (link != &iZonePrefList.iA); sl@0: sl@0: // Adjust the number of discardable pages for those that are freeable. sl@0: // Dirty pages will be moved rather than discarded so they are not freeable sl@0: // and we must make sure that we have enough space in zones for these dirty sl@0: // paged pages. sl@0: __NK_ASSERT_DEBUG(requiredPagesDis >= (TUint)M::NumberOfFreeDpPages()); sl@0: requiredPagesDis -= M::NumberOfFreeDpPages(); sl@0: TUint totalDirtyPagesDis = M::NumberOfDirtyDpPages(); sl@0: if (requiredPagesDis < totalDirtyPagesDis) sl@0: requiredPagesDis = totalDirtyPagesDis; sl@0: sl@0: // Determine which is the least preferable RAM zone that needs to be sl@0: // in use for required number of movable and discardable pages. sl@0: TUint onlyPagesDis = 0; // Number of pages in RAM zones for discard only. sl@0: TUint onlyPagesMov = 0; // Number of pages in RAM zones for movable only. sl@0: TUint totalPagesDis = 0; // Total pages found so far for discardable pages. sl@0: TUint totalPagesMov = 0; // Total pages found so far for movable pages. sl@0: TUint totalCurrentDis = 0; // Number of allocated discardable pages found in sl@0: // RAM zones to be in use after the general defrag. sl@0: TUint totalCurrentMov = 0; // Number of allocated movable pages found in sl@0: // RAM zones to be in use after the general defrag. sl@0: TUint totalCurrentFree = 0; // The current number of free pages in the RAM zones sl@0: // to be in use after the general defrag. sl@0: iZoneGeneralPrefLink = &iZonePrefList.iA; sl@0: while (iZoneGeneralPrefLink != iZoneLeastMovDis && sl@0: (requiredPagesMov > totalPagesMov || sl@0: requiredPagesDis > totalPagesDis)) sl@0: { sl@0: iZoneGeneralPrefLink = iZoneGeneralPrefLink->iNext; sl@0: SZone& zone = *_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink); sl@0: // Update the current totals. 
sl@0: totalCurrentDis += zone.iAllocPages[EPageDiscard]; sl@0: totalCurrentMov += zone.iAllocPages[EPageMovable]; sl@0: totalCurrentFree += zone.iFreePages; sl@0: sl@0: TBool onlyAllocDis = NoAllocOfPageType(zone, EPageMovable); sl@0: TBool onlyAllocMov = NoAllocOfPageType(zone, EPageDiscard); sl@0: if (!onlyAllocMov || !onlyAllocDis) sl@0: {// Either movable, discardable or both can be allocated in this zone. sl@0: TUint zonePagesFree = zone.iFreePages; sl@0: TUint zonePagesDis = zone.iAllocPages[EPageDiscard]; sl@0: TUint zonePagesMov = zone.iAllocPages[EPageMovable]; sl@0: // Total pages in this RAM zone that can be used for either sl@0: // discardable or movable pages. sl@0: TUint zonePagesGen = zonePagesDis + zonePagesMov + zonePagesFree; sl@0: if (onlyAllocMov) sl@0: { sl@0: if (requiredPagesDis > totalPagesDis) sl@0: {// No further discardable pages can be allocated into sl@0: // this RAM zone but consider any that already are. sl@0: TUint usedPages = Min( (TInt)zonePagesDis, sl@0: requiredPagesDis - totalPagesDis); sl@0: totalPagesDis += usedPages; sl@0: zonePagesDis -= usedPages; sl@0: } sl@0: TUint zoneOnlyMov = zonePagesDis + zonePagesMov + zonePagesFree; sl@0: onlyPagesMov += zoneOnlyMov; sl@0: totalPagesMov += zoneOnlyMov; sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("onlyMov ID%x tot %x", sl@0: zone.iId, zoneOnlyMov)); sl@0: zonePagesGen = 0; // These pages aren't general purpose. sl@0: } sl@0: if (onlyAllocDis) sl@0: { sl@0: if (requiredPagesMov > totalPagesMov) sl@0: {// No further movable pages can be allocated into sl@0: // this RAM zone but consider any that already are. sl@0: TUint usedPages = Min( (TInt)zonePagesMov, sl@0: requiredPagesMov - totalPagesMov); sl@0: totalPagesMov += usedPages; sl@0: zonePagesMov -= usedPages; sl@0: } sl@0: TUint zoneOnlyDis = zonePagesDis + zonePagesMov + zonePagesFree; sl@0: onlyPagesDis += zoneOnlyDis; sl@0: totalPagesDis += zoneOnlyDis; sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("onlyDis ID%x tot %x", sl@0: zone.iId, zoneOnlyDis)); sl@0: zonePagesGen = 0; // These pages aren't general purpose. sl@0: } sl@0: sl@0: if (requiredPagesDis > totalPagesDis) sl@0: {// Need some discardable pages so first steal any spare sl@0: // movable pages for discardable allocations. sl@0: if (totalPagesMov > requiredPagesMov) sl@0: {// Use any spare movable pages that can also be sl@0: // used for discardable allocations for discardable. sl@0: __NK_ASSERT_DEBUG(onlyPagesMov); sl@0: TUint spareMovPages = Min((TInt)(totalPagesMov - onlyPagesMov), sl@0: totalPagesMov - requiredPagesMov); sl@0: totalPagesMov -= spareMovPages; sl@0: totalPagesDis += spareMovPages; sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("genDis Mov ID%x used%x", sl@0: zone.iId, spareMovPages)); sl@0: } sl@0: if (requiredPagesDis > totalPagesDis) sl@0: { sl@0: // Need more discardable pages but only grab those required. sl@0: TUint usedPages = Min( (TInt) zonePagesGen, sl@0: requiredPagesDis - totalPagesDis); sl@0: totalPagesDis += usedPages; sl@0: zonePagesGen -= usedPages; sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("genDis ID%x used%x", sl@0: zone.iId, usedPages)); sl@0: } sl@0: } sl@0: if (requiredPagesMov > totalPagesMov) sl@0: {// Need some movable pages so first steal any spare sl@0: // discardable pages for movable allocations. sl@0: if (totalPagesDis > requiredPagesDis) sl@0: {// Use any spare discardable pages that can also be sl@0: // used for movable allocations for movable. 
sl@0: __NK_ASSERT_DEBUG(onlyPagesDis);
sl@0: TUint spareDisPages = Min((TInt)(totalPagesDis - onlyPagesDis),
sl@0: totalPagesDis - requiredPagesDis);
sl@0: totalPagesDis -= spareDisPages;
sl@0: totalPagesMov += spareDisPages;
sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("genMov Dis ID%x used%x",
sl@0: zone.iId, spareDisPages));
sl@0: }
sl@0: if (requiredPagesMov > totalPagesMov)
sl@0: {// Still need some movable pages so grab them from this zone.
sl@0: // Just grab all of the general pages left as the discardable pages will
sl@0: // have already grabbed some if they were needed.
sl@0: totalPagesMov += zonePagesGen;
sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("genMov ID%x used%x",
sl@0: zone.iId, zonePagesGen));
sl@0: }
sl@0: }
sl@0: }
sl@0: }
sl@0: 
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("gen least in use ID 0x%x",
sl@0: (_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink))->iId));
sl@0: __NK_ASSERT_DEBUG(_LOFF(iZoneGeneralPrefLink, SZone, iPrefLink)->iPrefRank <=
sl@0: iZoneLeastMovDisRank);
sl@0: 
sl@0: if (iZoneGeneralPrefLink != iZoneLeastMovDis &&
sl@0: firstClearableInUseRank > _LOFF(iZoneGeneralPrefLink, SZone, iPrefLink)->iPrefRank)
sl@0: {// We can reduce the number of RAM zones in use so block all the RAM
sl@0: // zones not to be in use after the defrag from being allocated into
sl@0: // by the general defrag.
sl@0: link = iZoneLeastMovDis;
sl@0: while (link != iZoneGeneralPrefLink)
sl@0: {
sl@0: SZone& zone = *_LOFF(link, SZone, iPrefLink);
sl@0: zone.iFlags |= KRamZoneFlagGenDefragBlock;
sl@0: link = link->iPrev;
sl@0: }
sl@0: 
sl@0: // Determine how many pages will need to be discarded to allow the general
sl@0: // defrag to succeed in using the minimum RAM zones required.
sl@0: if (requiredPagesDis > totalCurrentDis)
sl@0: {// Need to replace some discardable pages in RAM zones to be
sl@0: // cleared with pages in the RAM zones to be in use after the
sl@0: // general defrag.
sl@0: __NK_ASSERT_DEBUG(totalCurrentFree >= requiredPagesDis - totalCurrentDis);
sl@0: totalCurrentFree -= requiredPagesDis - totalCurrentDis;
sl@0: }
sl@0: TUint totalForMov = totalCurrentFree + totalCurrentMov;
sl@0: if (requiredPagesMov > totalForMov)
sl@0: {// Need to discard some pages from the least preferable RAM zone to be
sl@0: // in use after the general defrag for the movable pages to be moved to.
sl@0: aRequiredToBeDiscarded = requiredPagesMov - totalForMov;
sl@0: __NK_ASSERT_DEBUG(aRequiredToBeDiscarded <= totalCurrentDis);
sl@0: __NK_ASSERT_DEBUG(totalCurrentDis - aRequiredToBeDiscarded >= requiredPagesDis);
sl@0: }
sl@0: 
sl@0: // This stage should discard pages from the least preferable RAM zones
sl@0: // to be in use after the general defrag to save the pages having to
sl@0: // be moved again by the final stage.
sl@0: iZoneGeneralStage = EGenDefragStage0;
sl@0: aStage = EGenDefragStage1; // Defrag::GeneralDefrag() requires this.
sl@0: iZoneGeneralTmpLink = iZoneGeneralPrefLink;
sl@0: return GeneralDefragNextZone0();
sl@0: }
sl@0: 
sl@0: // General defrag can't clear any RAM zones so jump to the tidying stage.
sl@0: aStage = EGenDefragStage2;
sl@0: iZoneGeneralStage = EGenDefragStage2;
sl@0: return NULL;
sl@0: }
sl@0: 
sl@0: 
sl@0: /**
sl@0: Find the next RAM zone that is suitable for stage 0 of a general defrag.
sl@0: This should only be called after a preceding call to
sl@0: DRamAllocator::GeneralDefragStart0().
sl@0: 
sl@0: This goes through the RAM zones from the least preferable to be in use
sl@0: after the general defrag to the most preferable RAM zone.
It will
sl@0: return each time it finds a RAM zone with discardable pages allocated into it.
sl@0: 
sl@0: @return Pointer to the RAM zone object that may potentially have pages
sl@0: discarded by the general defrag. This will be NULL if no suitable
sl@0: RAM zone could be found.
sl@0: */
sl@0: SZone* DRamAllocator::GeneralDefragNextZone0()
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: // A general defrag operation must currently be in progress.
sl@0: __NK_ASSERT_DEBUG(iZoneGeneralPrefLink != NULL);
sl@0: __NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
sl@0: __NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage0);
sl@0: 
sl@0: while (iZoneGeneralTmpLink != &iZonePrefList.iA)
sl@0: {
sl@0: SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);
sl@0: 
sl@0: // Save the RAM zone that is currently more preferable than this one
sl@0: // before any reordering.
sl@0: iZoneGeneralTmpLink = iZoneGeneralTmpLink->iPrev;
sl@0: 
sl@0: if (zone->iFlags & KRamZoneFlagGenDefrag)
sl@0: {// This zone has been selected for a general defrag already.
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext0 zone ID 0x%x already defraged",
sl@0: zone->iId));
sl@0: return NULL;
sl@0: }
sl@0: zone->iFlags |= KRamZoneFlagGenDefrag;
sl@0: if (zone->iAllocPages[EPageDiscard])
sl@0: {
sl@0: // A RAM zone that may have pages discarded by a general defrag has been found.
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext0 zone ID 0x%x", zone->iId));
sl@0: return zone;
sl@0: }
sl@0: }
sl@0: return NULL;
sl@0: }
sl@0: 
sl@0: 
sl@0: /**
sl@0: Initialise this stage of a general defrag operation which will attempt
sl@0: to clear all the RAM zones not to be in use once the general defrag
sl@0: has completed.
sl@0: 
sl@0: @return Pointer to the RAM zone object that may potentially be cleared
sl@0: by the general defrag. This will be NULL if no suitable
sl@0: RAM zone could be found.
sl@0: */
sl@0: SZone* DRamAllocator::GeneralDefragStart1()
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: __NK_ASSERT_DEBUG(iNumZones == 1 || iZoneGeneralPrefLink != NULL);
sl@0: 
sl@0: 
sl@0: if (iNumZones == 1)
sl@0: {// On a device with only one RAM zone a defrag can't do anything, so return NULL.
sl@0: return NULL;
sl@0: }
sl@0: 
sl@0: // Clear the general defrag flags of each RAM zone to be defraged.
sl@0: SDblQueLink* link = iZoneGeneralPrefLink;
sl@0: for (; link != &iZonePrefList.iA; link = link->iPrev)
sl@0: {
sl@0: SZone& zone = *_LOFF(link, SZone, iPrefLink);
sl@0: zone.iFlags &= ~KRamZoneFlagGenDefrag;
sl@0: }
sl@0: 
sl@0: // Flags cleared so now start this stage from the least preferable RAM zone
sl@0: // currently in use.
sl@0: iZoneGeneralTmpLink = iZoneLeastMovDis;
sl@0: iZoneGeneralStage = EGenDefragStage1;
sl@0: return GeneralDefragNextZone1();
sl@0: }
sl@0: 
sl@0: 
sl@0: /**
sl@0: Find the next RAM zone that is suitable for stage 1 of a general defrag.
sl@0: This should only be called after a preceding call to
sl@0: DRamAllocator::GeneralDefragStart1().
sl@0: 
sl@0: This goes through the RAM zones from the least preferable currently
sl@0: with movable or discardable pages allocated into it to the least
sl@0: preferable RAM zone that is to be in use after the general defrag.
sl@0: It will return each time it finds a RAM zone with movable and/or
sl@0: discardable pages allocated into it.
sl@0: 
sl@0: @return Pointer to the RAM zone object that may potentially be cleared by a
sl@0: general defrag. This will be NULL if no suitable zone could be found.
sl@0: */
sl@0: SZone* DRamAllocator::GeneralDefragNextZone1()
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: // A general defrag operation must currently be in progress.
sl@0: __NK_ASSERT_DEBUG(iZoneGeneralPrefLink != NULL);
sl@0: __NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL);
sl@0: __NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage1);
sl@0: 
sl@0: 
sl@0: // If we hit the target least preferable RAM zone to be in use once
sl@0: // the defrag has completed then stop this stage of the general defrag.
sl@0: 
sl@0: // Should never skip past iZoneGeneralPrefLink.
sl@0: __NK_ASSERT_DEBUG(iZoneGeneralTmpLink != &iZonePrefList.iA);
sl@0: 
sl@0: while (iZoneGeneralTmpLink != iZoneGeneralPrefLink)
sl@0: {
sl@0: SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink);
sl@0: 
sl@0: // Save the RAM zone that is currently more preferable than this one
sl@0: // before any reordering.
sl@0: iZoneGeneralTmpLink = iZoneGeneralTmpLink->iPrev;
sl@0: 
sl@0: if (zone->iFlags & KRamZoneFlagGenDefrag)
sl@0: {// This zone has been selected for a general defrag already.
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 zone ID 0x%x already defraged",
sl@0: zone->iId));
sl@0: return NULL;
sl@0: }
sl@0: zone->iFlags |= KRamZoneFlagGenDefrag;
sl@0: if (zone->iAllocPages[EPageMovable] || zone->iAllocPages[EPageDiscard])
sl@0: {
sl@0: // A RAM zone that may be cleared by a general defrag has been found.
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 zone ID 0x%x", zone->iId));
sl@0: return zone;
sl@0: }
sl@0: }
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext1 reached general target"));
sl@0: return NULL;
sl@0: }
sl@0: 
sl@0: 
sl@0: /**
sl@0: Initialise stage 2 of a general defrag operation.
sl@0: 
sl@0: Stage 2 creates room for fixed page allocations in the more preferable RAM
sl@0: zones in use by moving pages into the least preferable RAM zones in use.
sl@0: 
sl@0: @return Pointer to the RAM zone object that may potentially be cleared of
sl@0: movable and discardable pages by the general defrag. This will be
sl@0: NULL if no suitable zone could be found.
sl@0: */
sl@0: SZone* DRamAllocator::GeneralDefragStart2()
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: __NK_ASSERT_DEBUG(iNumZones == 1 || iZoneGeneralPrefLink != NULL);
sl@0: 
sl@0: 
sl@0: if (iNumZones == 1)
sl@0: {// On a device with only one RAM zone a defrag can't do anything, so return NULL.
sl@0: return NULL;
sl@0: }
sl@0: 
sl@0: // Clear the general defrag flags of each RAM zone to be defraged.
sl@0: SDblQueLink* link = iZoneLeastMovDis;
sl@0: for (; link != &iZonePrefList.iA; link = link->iPrev)
sl@0: {
sl@0: SZone& zone = *_LOFF(link, SZone, iPrefLink);
sl@0: zone.iFlags &= ~(KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock);
sl@0: }
sl@0: 
sl@0: // Flags cleared so now start the 2nd stage from the most preferable RAM zone.
sl@0: iZoneGeneralTmpLink = iZonePrefList.First();
sl@0: iZoneGeneralStage = EGenDefragStage2;
sl@0: return GeneralDefragNextZone2();
sl@0: }
sl@0: 
sl@0: 
sl@0: /**
sl@0: Find the next RAM zone that is suitable for this stage of general defrag.
sl@0: This should only be called after a preceding call to
sl@0: DRamAllocator::GeneralDefragStart2().
sl@0: 
sl@0: This goes through the RAM zones from the most preferable to the least
sl@0: preferable RAM zone that has movable and/or discardable pages allocated
sl@0: into it. It will return each time it finds a RAM zone with movable and/or
sl@0: discardable pages allocated into it.
sl@0: sl@0: @return Pointer to the RAM zone object that may potentially be cleared of sl@0: movable and discardable pages by the general defrag. This will be sl@0: NULL if no suitable zone could be found. sl@0: */ sl@0: SZone* DRamAllocator::GeneralDefragNextZone2() sl@0: { sl@0: M::RamAllocIsLocked(); sl@0: __NK_ASSERT_DEBUG(iZoneGeneralTmpLink != NULL); sl@0: __NK_ASSERT_DEBUG(iZoneGeneralStage == EGenDefragStage2); sl@0: sl@0: sl@0: while (iZoneGeneralTmpLink != iZoneLeastMovDis) sl@0: { sl@0: SZone* zone = _LOFF(iZoneGeneralTmpLink, SZone, iPrefLink); sl@0: sl@0: // Save the RAM zone that is currently less preferable than this one sl@0: // before any reordering. sl@0: iZoneGeneralTmpLink = iZoneGeneralTmpLink->iNext; sl@0: sl@0: if (zone->iFlags & KRamZoneFlagGenDefrag) sl@0: {// This zone has been selected for a general defrag already. sl@0: __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 zone ID 0x%x already defraged", zone->iId)); sl@0: return NULL; sl@0: } sl@0: zone->iFlags |= KRamZoneFlagGenDefrag | KRamZoneFlagGenDefragBlock; sl@0: if (zone->iAllocPages[EPageMovable] || zone->iAllocPages[EPageDiscard]) sl@0: {// A RAM zone that may be cleared by a general defrag has been found. sl@0: __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 zone ID 0x%x", zone->iId)); sl@0: return zone; sl@0: } sl@0: } sl@0: __KTRACE_OPT(KMMU, Kern::Printf("GenDefragNext2 reached general target")); sl@0: return NULL; sl@0: } sl@0: sl@0: /** sl@0: Inform the allocator that a general defragmentation operation has completed. sl@0: sl@0: */ sl@0: void DRamAllocator::GeneralDefragEnd() sl@0: { sl@0: #ifdef _DEBUG sl@0: if (!K::Initialising) sl@0: { sl@0: M::RamAllocIsLocked(); sl@0: #ifdef __VERIFY_LEASTMOVDIS sl@0: VerifyLeastPrefMovDis(); sl@0: #endif sl@0: } sl@0: #endif sl@0: // Reset the general defrag preference link as it is no longer required. sl@0: iZoneGeneralPrefLink = NULL; sl@0: iZoneGeneralTmpLink = NULL; sl@0: } sl@0: sl@0: sl@0: /** sl@0: Calculate the number of free pages in all the RAM zones to be in use sl@0: once the general defragmentation operation has completed. sl@0: sl@0: @param aType The type of free pages to find in the higher priority zones. sl@0: @return The number of free pages in the RAM zones intended to be in use sl@0: after the general defrag operation has completed. sl@0: */ sl@0: TUint DRamAllocator::GenDefragFreePages(TZonePageType aType) const sl@0: { sl@0: M::RamAllocIsLocked(); sl@0: sl@0: if (iZoneGeneralStage == EGenDefragStage2) sl@0: {// Second stage of general defrag where don't have to empty the RAM zone. sl@0: return KMaxTUint; sl@0: } sl@0: TUint totalFree = 0; sl@0: SDblQueLink* link = iZoneGeneralPrefLink; sl@0: for (; link != &iZonePrefList.iA; link = link->iPrev) sl@0: { sl@0: SZone& zone = *_LOFF(link, SZone, iPrefLink); sl@0: if (NoAllocOfPageType(zone, aType) || sl@0: zone.iFlags & KRamZoneFlagGenDefragBlock) sl@0: { sl@0: continue; sl@0: } sl@0: // This zone has free space for this type of page sl@0: totalFree += zone.iFreePages; sl@0: } sl@0: return totalFree; sl@0: } sl@0: sl@0: sl@0: /** Mark the RAM zone as being claimed to stop any further allocations. sl@0: @param aZone The zone to stop allocations to. sl@0: sl@0: @pre RamAlloc mutex held. sl@0: @post RamAlloc mutex held. 
sl@0: */
sl@0: void DRamAllocator::ZoneClaimStart(SZone& aZone)
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: __NK_ASSERT_DEBUG(!(aZone.iFlags & KRamZoneFlagClaiming));
sl@0: 
sl@0: aZone.iFlags |= KRamZoneFlagClaiming;
sl@0: 
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
sl@0: #endif
sl@0: }
sl@0: 
sl@0: 
sl@0: /** Mark the RAM zone as not being claimed to allow allocations.
sl@0: @param aZone The zone to allow allocations into.
sl@0: 
sl@0: @pre RamAlloc mutex held.
sl@0: @post RamAlloc mutex held.
sl@0: */
sl@0: void DRamAllocator::ZoneClaimEnd(SZone& aZone)
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: __NK_ASSERT_DEBUG(aZone.iFlags & KRamZoneFlagClaiming);
sl@0: 
sl@0: aZone.iFlags &= ~KRamZoneFlagClaiming;
sl@0: 
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
sl@0: #endif
sl@0: }
sl@0: 
sl@0: /** Mark the RAM zone so that any allocations or frees from it can be detected.
sl@0: Useful for defragging.
sl@0: @param aZone The zone to mark.
sl@0: @pre RamAlloc mutex held
sl@0: @post RamAlloc mutex held
sl@0: */
sl@0: void DRamAllocator::ZoneMark(SZone& aZone)
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: __NK_ASSERT_DEBUG(!(aZone.iFlags & KRamZoneFlagMark));
sl@0: 
sl@0: aZone.iFlags |= KRamZoneFlagMark;
sl@0: 
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
sl@0: #endif
sl@0: }
sl@0: 
sl@0: /** Unmark the RAM zone.
sl@0: Useful for defragging.
sl@0: @param aZone The zone to unmark.
sl@0: @return ETrue if the RAM zone is inactive, EFalse otherwise.
sl@0: @pre RamAlloc mutex held
sl@0: @post RamAlloc mutex held
sl@0: */
sl@0: TBool DRamAllocator::ZoneUnmark(SZone& aZone)
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: 
sl@0: TInt r = aZone.iFlags & KRamZoneFlagMark;
sl@0: aZone.iFlags &= ~KRamZoneFlagMark;
sl@0: 
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, aZone.iId, aZone.iFlags);
sl@0: #endif
sl@0: return r;
sl@0: }
sl@0: 
sl@0: /** Determine whether it is OK to allocate the specified page type
sl@0: to the RAM zone.
sl@0: 
sl@0: This should be used by all functions that search through the zones when
sl@0: attempting to allocate pages.
sl@0: 
sl@0: @return ETrue if this page type shouldn't be allocated into the RAM zone,
sl@0: EFalse if it is OK to allocate that page type into the RAM zone.
sl@0: */
sl@0: TBool DRamAllocator::NoAllocOfPageType(SZone& aZone, TZonePageType aType) const
sl@0: {
sl@0: TUint8 flagMask = 1 << (aType - KPageTypeAllocBase);
sl@0: return (aZone.iFlags & (KRamZoneFlagClaiming|KRamZoneFlagNoAlloc|KRamZoneFlagTmpBlockAlloc)) ||
sl@0: (aZone.iFlags & flagMask);
sl@0: }
sl@0: 
sl@0: 
sl@0: /** Updates the flags of the specified RAM zone.
sl@0: 
sl@0: @param aId The ID of the RAM zone to modify.
sl@0: @param aClearMask The bit flags to clear.
sl@0: @param aSetMask The bit flags to set.
sl@0: 
sl@0: @return KErrNone on success, KErrArgument if the RAM zone of aId is not found or
sl@0: aSetMask contains invalid flags.
sl@0: 
sl@0: @pre RamAlloc mutex held
sl@0: @post RamAlloc mutex held
sl@0: */
sl@0: TInt DRamAllocator::ModifyZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: 
sl@0: SZone* zone = ZoneFromId(aId);
sl@0: if (zone == NULL || (aSetMask & KRamZoneFlagInvalid))
sl@0: {// aId invalid or an invalid flag bit was requested to be set.
sl@0: return KErrArgument;
sl@0: }
sl@0: zone->iFlags &= ~aClearMask;
sl@0: zone->iFlags |= aSetMask;
sl@0: 
sl@0: __KTRACE_OPT(KMMU, Kern::Printf("Zone %x Flags %x", zone->iId, zone->iFlags));
sl@0: 
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocZoneFlagsModified, zone->iId, zone->iFlags);
sl@0: #endif
sl@0: return KErrNone;
sl@0: }
sl@0: 
sl@0: 
sl@0: /** Invoke the RAM zone callback function to inform the variant of the RAM zones
sl@0: in use so far by the system.
sl@0: This is designed to only be invoked once during boot in MmuBase::Init2()
sl@0: */
sl@0: void DRamAllocator::InitialCallback()
sl@0: {
sl@0: __NK_ASSERT_DEBUG(iZoneCallbackInitSent == EFalse);
sl@0: if (iZonePowerFunc)
sl@0: {
sl@0: TInt ret = (*iZonePowerFunc)(ERamZoneOp_Init, NULL, (TUint*)&iZonePwrState);
sl@0: if (ret != KErrNone && ret != KErrNotSupported)
sl@0: {
sl@0: Panic(EZonesCallbackErr);
sl@0: }
sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::InitialCallback");
sl@0: }
sl@0: iZoneCallbackInitSent = ETrue;
sl@0: }
sl@0: 
sl@0: 
sl@0: #ifdef BTRACE_RAM_ALLOCATOR
sl@0: /**
sl@0: Structure for outputting zone information to BTrace that couldn't fit into the first
sl@0: 2 words of the BTraceN call
sl@0: */
sl@0: struct TRamAllocBtraceZone
sl@0: {
sl@0: TUint32 iId;
sl@0: TUint8 iPref;
sl@0: TUint8 iFlags;
sl@0: TUint16 iReserved;
sl@0: };
sl@0: 
sl@0: /**
sl@0: This will be invoked when BTrace starts logging BTrace::ERamAllocator category
sl@0: traces.
sl@0: It outputs the zone configuration and the base addresses of any contiguous block
sl@0: of allocated pages.
sl@0: */
sl@0: void DRamAllocator::SendInitialBtraceLogs(void)
sl@0: {
sl@0: M::RamAllocIsLocked();
sl@0: CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "DRamAllocator::SendInitialBtraceLogs");
sl@0: 
sl@0: // Output the zone information
sl@0: TRamAllocBtraceZone bZone;
sl@0: BTrace4(BTrace::ERamAllocator, BTrace::ERamAllocZoneCount, iNumZones);
sl@0: const SZone* zone = iZones;
sl@0: const SZone* const endZone = iZones + iNumZones;
sl@0: for (; zone < endZone; zone++)
sl@0: {
sl@0: bZone.iId = zone->iId;
sl@0: bZone.iPref = zone->iPref;
sl@0: bZone.iFlags = zone->iFlags;
sl@0: BTraceN(BTrace::ERamAllocator, BTrace::ERamAllocZoneConfig, zone->iPhysPages,
sl@0: zone->iPhysBase, &bZone, sizeof(TRamAllocBtraceZone));
sl@0: }
sl@0: 
sl@0: // Search through zones and output each contiguous region of allocated pages
sl@0: for (zone = iZones; zone < endZone; zone++)
sl@0: {
sl@0: if (zone->iFreePages != zone->iPhysPages)
sl@0: {
sl@0: TInt pageCount = 0;
sl@0: TInt totalPages = 0;
sl@0: TUint32 runStart = 0;
sl@0: while ((TUint)totalPages != zone->iPhysPages - zone->iFreePages)
sl@0: {
sl@0: // find a set of contiguous pages that have been allocated
sl@0: // runStart will be set to the first page of the allocated run if one is found
sl@0: for (;runStart < zone->iPhysPages && zone->iBma[KBmaAllPages]->NotAllocated(runStart,1); runStart++);
sl@0: 
sl@0: // find the last allocated page of this run
sl@0: TUint32 runEnd = runStart + 1;
sl@0: for (;runEnd < zone->iPhysPages && zone->iBma[KBmaAllPages]->NotFree(runEnd,1); runEnd++);
sl@0: 
sl@0: pageCount = runEnd - runStart;
sl@0: if (pageCount > 0)
sl@0: {// have a run of allocated pages so output BTrace
sl@0: TPhysAddr baseAddr = (runStart << KPageShift) + zone->iPhysBase;
sl@0: __KTRACE_OPT(KMMU2, Kern::Printf("offset %x physBase %x pages %x baseAddr %08x",runStart, zone->iPhysBase, pageCount, baseAddr));
sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocBootAllocation, pageCount,
baseAddr); sl@0: runStart += pageCount; sl@0: totalPages += pageCount; sl@0: } sl@0: } sl@0: } sl@0: } sl@0: BTrace0(BTrace::ERamAllocator, BTrace::ERamAllocBootAllocationEnd); sl@0: } sl@0: #endif // BTRACE_RAM_ALLOCATOR sl@0: sl@0: TInt DRamAllocator::ClaimPhysicalRam(TPhysAddr aBase, TInt aSize) sl@0: { sl@0: TInt ret = SetPhysicalRamState(aBase,aSize,EFalse, EPageFixed); sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: if (ret == KErrNone) sl@0: { sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocClaimRam, aSize, aBase); sl@0: } sl@0: #endif sl@0: return ret; sl@0: } sl@0: sl@0: TInt DRamAllocator::FreePhysicalRam(TPhysAddr aBase, TInt aSize) sl@0: { sl@0: TInt ret = SetPhysicalRamState(aBase,aSize,ETrue, EPageFixed); sl@0: #ifdef BTRACE_RAM_ALLOCATOR sl@0: if (ret == KErrNone) sl@0: { sl@0: BTrace8(BTrace::ERamAllocator, BTrace::ERamAllocFreePhysical, aSize, aBase); sl@0: } sl@0: #endif sl@0: return ret; sl@0: } sl@0: sl@0: sl@0: TInt DRamAllocator::FreeRamInBytes() sl@0: { sl@0: return iTotalFreeRamPages<iNext; sl@0: while (tmpLink != &iZonePrefList.iA) sl@0: { sl@0: SZone& zone = *_LOFF(tmpLink, SZone, iPrefLink); sl@0: if (zone.iAllocPages[EPageMovable] != 0 || sl@0: zone.iAllocPages[EPageDiscard] != 0) sl@0: { sl@0: DebugDump(); sl@0: __NK_ASSERT_DEBUG(0); sl@0: } sl@0: tmpLink = tmpLink->iNext; sl@0: } sl@0: } sl@0: #endif