1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/flexible/mmu/mmu.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,2600 @@
1.4 +// Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +//
1.18 +
1.19 +#include "memmodel.h"
1.20 +#include "kernel/cache_maintenance.inl"
1.21 +#include <kernel/cache.h>
1.22 +#include <ramalloc.h>
1.23 +#include <defrag.h>
1.24 +#include "mm.h"
1.25 +#include "mmu.h"
1.26 +#include "mpager.h"
1.27 +#include "mmapping.h"
1.28 +#include "mobject.h"
1.29 +#include "mmanager.h"
1.30 +#include "mpagearray.h"
1.31 +
1.32 +
1.33 +//
1.34 +// SPageInfo
1.35 +//
1.36 +
1.37 +// check enough space for page infos...
1.38 +__ASSERT_COMPILE((KPageInfoLinearEnd-KPageInfoLinearBase)/sizeof(SPageInfo)==(1<<(32-KPageShift)));
1.39 +
1.40 +// check KPageInfoShift...
1.41 +__ASSERT_COMPILE(sizeof(SPageInfo)==(1<<KPageInfoShift));
1.42 +
1.43 +
1.44 +SPageInfo* SPageInfo::SafeFromPhysAddr(TPhysAddr aAddress)
1.45 + {
1.46 + __NK_ASSERT_DEBUG((aAddress&KPageMask)==0);
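+	// Each bit of KPageInfoMap covers one page's worth of SPageInfo entries, i.e.
+	// 1<<(KPageShift-KPageInfoShift) physical pages; the shift below converts the
+	// physical address into an index into that bitmap.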
1.47 + TUint index = aAddress>>(KPageShift+KPageShift-KPageInfoShift);
1.48 + TUint flags = ((TUint8*)KPageInfoMap)[index>>3];
1.49 + TUint mask = 1<<(index&7);
1.50 + if(!(flags&mask))
1.51 + return 0; // no SPageInfo for aAddress
1.52 + SPageInfo* info = FromPhysAddr(aAddress);
1.53 + if(info->iType==SPageInfo::EInvalid)
1.54 + return 0;
1.55 + return info;
1.56 + }
1.57 +
1.58 +
1.59 +#ifdef _DEBUG
1.60 +
1.61 +void SPageInfo::CheckAccess(const char* aMessage, TUint aFlags)
1.62 + {
1.63 + if(K::Initialising || NKern::Crashed())
1.64 + return;
1.65 +
1.66 + if((aFlags&ECheckNotAllocated) && (iType!=EUnknown))
1.67 + {
1.68 + Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
1.69 + __NK_ASSERT_DEBUG(0);
1.70 + goto fail;
1.71 + }
1.72 +
1.73 + if((aFlags&ECheckNotUnused) && (iType==EUnused))
1.74 + {
1.75 + Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
1.76 + __NK_ASSERT_DEBUG(0);
1.77 + goto fail;
1.78 + }
1.79 +
1.80 + if((aFlags&ECheckUnused) && (iType!=EUnused))
1.81 + {
1.82 + Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
1.83 + __NK_ASSERT_DEBUG(0);
1.84 + goto fail;
1.85 + }
1.86 +
1.87 + if((aFlags&ECheckNotPaged) && (iPagedState!=EUnpaged))
1.88 + {
1.89 + Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iPagedState=%d : %s",this,PhysAddr(),iPagedState,aMessage);
1.90 + __NK_ASSERT_DEBUG(0);
1.91 + goto fail;
1.92 + }
1.93 +
1.94 + if((aFlags&ECheckRamAllocLock) && !RamAllocLock::IsHeld())
1.95 + {
1.96 + Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x, iType==%d : %s",this,PhysAddr(),iType,aMessage);
1.97 + __NK_ASSERT_DEBUG(0);
1.98 + goto fail;
1.99 + }
1.100 +
1.101 + if((aFlags&ENoCheckMmuLock) || MmuLock::IsHeld())
1.102 + return;
1.103 +fail:
1.104 + Kern::Printf("SPageInfo[0x%08x]::CheckAccess failed, PhysAddr()=0x%08x : %s",this,PhysAddr(),aMessage);
1.105 + Mmu::Panic(Mmu::EUnsafePageInfoAccess);
1.106 + }
1.107 +
1.108 +
1.109 +void SPageInfo::Dump()
1.110 + {
1.111 + Kern::Printf("SPageInfo for page %x = %d,%d,%02x,0x%08x,0x%x,%d",PhysAddr(),iType,iPagedState,iFlags,iOwner,iIndex,iPinCount);
1.112 + }
1.113 +
1.114 +#endif
1.115 +
1.116 +
1.117 +
1.118 +//
1.119 +// SPageTableInfo
1.120 +//
1.121 +
1.122 +// check enough space for page table infos...
1.123 +__ASSERT_COMPILE((KPageTableInfoEnd-KPageTableInfoBase)/sizeof(SPageTableInfo)
1.124 + >=(KPageTableEnd-KPageTableBase)/KPageTableSize);
1.125 +
1.126 +// check KPtBlockShift...
1.127 +__ASSERT_COMPILE((sizeof(SPageTableInfo)<<KPtBlockShift)==KPageSize);
1.128 +
1.129 +
1.130 +#ifdef _DEBUG
1.131 +
1.132 +TBool SPageTableInfo::CheckPageCount()
1.133 + {
1.134 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.135 + TPte* pt = PageTable();
1.136 + TUint realCount = 0;
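+	// Count the non-empty PTEs in this page table; the loop stops once pt crosses a
+	// page table boundary (KPageTableMask rounded down to a whole number of PTEs).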
1.137 + do if(*pt++) ++realCount;
1.138 + while(TLinAddr(pt)&(KPageTableMask/sizeof(TPte)*sizeof(TPte)));
1.139 + if(iPageCount==realCount)
1.140 + return true;
1.141 + Kern::Printf("CheckPageCount Failed: pt=0x%08x count=%d realCount=%d",TLinAddr(pt)-KPageTableSize,iPageCount,realCount);
1.142 + return false;
1.143 + }
1.144 +
1.145 +
1.146 +void SPageTableInfo::CheckChangeUse(const char* aName)
1.147 + {
1.148 + if(K::Initialising)
1.149 + return;
1.150 + if(PageTablesLockIsHeld() && MmuLock::IsHeld())
1.151 + return;
1.152 + Kern::Printf("SPageTableInfo::CheckChangeUse failed : %s",aName);
1.153 + Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
1.154 + }
1.155 +
1.156 +
1.157 +void SPageTableInfo::CheckCheckUse(const char* aName)
1.158 + {
1.159 + if(K::Initialising)
1.160 + return;
1.161 + if(PageTablesLockIsHeld() || MmuLock::IsHeld())
1.162 + return;
1.163 + Kern::Printf("SPageTableInfo::CheckCheckUse failed : %s",aName);
1.164 + Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
1.165 + }
1.166 +
1.167 +
1.168 +void SPageTableInfo::CheckAccess(const char* aName)
1.169 + {
1.170 + if(K::Initialising)
1.171 + return;
1.172 + if(MmuLock::IsHeld())
1.173 + return;
1.174 + Kern::Printf("SPageTableInfo::CheckAccess failed : %s",aName);
1.175 + Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
1.176 + }
1.177 +
1.178 +
1.179 +void SPageTableInfo::CheckInit(const char* aName)
1.180 + {
1.181 + if(K::Initialising)
1.182 + return;
1.183 + if(PageTablesLockIsHeld() && iType==EUnused)
1.184 + return;
1.185 + Kern::Printf("SPageTableInfo::CheckInit failed : %s",aName);
1.186 + Mmu::Panic(Mmu::EUnsafePageTableInfoAccess);
1.187 + }
1.188 +
1.189 +#endif
1.190 +
1.191 +
1.192 +
1.193 +//
1.194 +// RamAllocLock
1.195 +//
1.196 +
1.197 +_LIT(KLitRamAlloc,"RamAlloc");
1.198 +_LIT(KLitPhysMemSync,"PhysMemSync");
1.199 +
1.200 +void RamAllocLock::Lock()
1.201 + {
1.202 + Mmu& m = TheMmu;
1.203 + Kern::MutexWait(*m.iRamAllocatorMutex);
1.204 + if(!m.iRamAllocLockCount++)
1.205 + {
1.206 + // first lock, so setup memory fail data...
1.207 + m.iRamAllocFailed = EFalse;
1.208 +		__NK_ASSERT_DEBUG(m.iRamAllocInitialFreePages==m.FreeRamInPages()); // free RAM shouldn't have changed whilst the lock wasn't held
1.209 + }
1.210 + }
1.211 +
1.212 +
1.213 +void RamAllocLock::Unlock()
1.214 + {
1.215 + Mmu& m = TheMmu;
1.216 + if(--m.iRamAllocLockCount)
1.217 + {
1.218 + Kern::MutexSignal(*m.iRamAllocatorMutex);
1.219 + return;
1.220 + }
1.221 + TBool failed = m.iRamAllocFailed;
1.222 + TUint initial = m.iRamAllocInitialFreePages;
1.223 + TUint final = m.FreeRamInPages();
1.224 + m.iRamAllocInitialFreePages = final; // new baseline value
1.225 + TUint changes = K::CheckFreeMemoryLevel(initial*KPageSize,final*KPageSize,failed);
1.226 + if(changes)
1.227 + {
1.228 + __KTRACE_OPT(KMMU,Kern::Printf("RamAllocLock::Unlock() changes=%x",changes));
1.229 + }
1.230 + Kern::MutexSignal(*m.iRamAllocatorMutex);
1.231 + }
1.232 +
1.233 +
1.234 +TBool RamAllocLock::Flash()
1.235 + {
1.236 + Unlock();
1.237 + Lock();
1.238 + return true; // lock was released
1.239 + }
1.240 +
1.241 +
1.242 +TBool RamAllocLock::IsHeld()
1.243 + {
1.244 + Mmu& m = TheMmu;
1.245 + return m.iRamAllocatorMutex->iCleanup.iThread == &Kern::CurrentThread() && m.iRamAllocLockCount;
1.246 + }
1.247 +
1.248 +
1.249 +
1.250 +//
1.251 +// MmuLock
1.252 +//
1.253 +
1.254 +#ifdef _DEBUG
1.255 +TUint MmuLock::UnlockGuardNest =0;
1.256 +TUint MmuLock::UnlockGuardFail =0;
1.257 +#endif
1.258 +
1.259 +NFastMutex MmuLock::iLock;
1.260 +
1.261 +void MmuLock::Lock()
1.262 + {
1.263 + NKern::FMWait(&iLock);
1.264 + }
1.265 +
1.266 +void MmuLock::Unlock()
1.267 + {
1.268 + UnlockGuardCheck();
1.269 + NKern::FMSignal(&iLock);
1.270 + }
1.271 +
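+// Flash() briefly releases and re-acquires the lock so that threads waiting on it can
+// run; it returns whether the lock was actually released (NKern::FMFlash may skip the
+// release when there is no contention).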
1.272 +TBool MmuLock::Flash()
1.273 + {
1.274 + UnlockGuardCheck();
1.275 + return NKern::FMFlash(&iLock);
1.276 + }
1.277 +
1.278 +TBool MmuLock::IsHeld()
1.279 + {
1.280 + NFastMutex& m = iLock;
1.281 + return m.HeldByCurrentThread();
1.282 + }
1.283 +
1.284 +
1.285 +
1.286 +//
1.287 +// Initialisation
1.288 +//
1.289 +
1.290 +Mmu TheMmu;
1.291 +
1.292 +void Mmu::Init1Common()
1.293 + {
1.294 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init1Common"));
1.295 +
1.296 + // Mmu data
1.297 + TUint pteType = PteType(ESupervisorReadWrite,true);
1.298 + iTempPteCached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalCached|EMemoryAttributeDefaultShareable),pteType);
1.299 + iTempPteUncached = BlankPte((TMemoryAttributes)(EMemoryAttributeNormalUncached|EMemoryAttributeDefaultShareable),pteType);
1.300 + iTempPteCacheMaintenance = BlankPte((TMemoryAttributes)(CacheMaintenance::TemporaryMapping()|EMemoryAttributeDefaultShareable),pteType);
1.301 +
1.302 + // other
1.303 + PP::MaxUserThreadStack=0x14000; // 80K - STDLIB asks for 64K for PosixServer!!!!
1.304 + PP::UserThreadStackGuard=0x2000; // 8K
1.305 + PP::MaxStackSpacePerProcess=0x200000; // 2Mb
1.306 + K::SupervisorThreadStackSize=0x1000; // 4K
1.307 + PP::SupervisorThreadStackGuard=0x1000; // 4K
1.308 + K::MachineConfig=(TMachineConfig*)KMachineConfigLinAddr;
1.309 + PP::RamDriveStartAddress=0;
1.310 + PP::RamDriveRange=0;
1.311 + PP::RamDriveMaxSize=0x20000000; // 512MB, probably will be reduced later
1.312 + K::MemModelAttributes=EMemModelTypeFlexible|EMemModelAttrNonExProt|EMemModelAttrKernProt|EMemModelAttrWriteProt|
1.313 + EMemModelAttrVA|EMemModelAttrProcessProt|EMemModelAttrSameVA|EMemModelAttrSvKernProt|
1.314 + EMemModelAttrIPCKernProt|EMemModelAttrRamCodeProt;
1.315 + }
1.316 +
1.317 +
1.318 +#if 0
1.319 +void Mmu::VerifyRam()
1.320 + {
1.321 + Kern::Printf("Mmu::VerifyRam() pass 1");
1.322 + RamAllocLock::Lock();
1.323 +
1.324 + TPhysAddr p = 0;
1.325 + do
1.326 + {
1.327 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p);
1.328 + if(pi)
1.329 + {
1.330 + Kern::Printf("%08x %d",p,pi->Type());
1.331 + if(pi->Type()==SPageInfo::EUnused)
1.332 + {
1.333 + volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0);
1.334 + b[0] = p;
1.335 + b[1] = ~p;
1.336 + __NK_ASSERT_DEBUG(b[0]==p);
1.337 + __NK_ASSERT_DEBUG(b[1]==~p);
1.338 + UnmapTemp();
1.339 + }
1.340 + }
1.341 + p += KPageSize;
1.342 + }
1.343 + while(p);
1.344 +
1.345 + TBool fail = false;
1.346 + Kern::Printf("Mmu::VerifyRam() pass 2");
1.347 + do
1.348 + {
1.349 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(p);
1.350 + if(pi)
1.351 + {
1.352 + if(pi->Type()==SPageInfo::EUnused)
1.353 + {
1.354 + volatile TPhysAddr* b = (volatile TPhysAddr*)MapTemp(p,0);
1.355 + if(b[0]!=p || b[1]!=~p)
1.356 + {
1.357 + fail = true;
1.358 +					Kern::Printf("%08x FAILED %x %x",p,b[0],b[1]);
1.359 + }
1.360 + UnmapTemp();
1.361 + }
1.362 + }
1.363 + p += KPageSize;
1.364 + }
1.365 + while(p);
1.366 +
1.367 + __NK_ASSERT_DEBUG(!fail);
1.368 + RamAllocLock::Unlock();
1.369 + }
1.370 +#endif
1.371 +
1.372 +
1.373 +void Mmu::Init2Common()
1.374 + {
1.375 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2Common"));
1.376 +
1.377 + // create allocator...
1.378 + const SRamInfo& info = *(const SRamInfo*)TheSuperPage().iRamBootData;
1.379 + iRamPageAllocator = DRamAllocator::New(info, iRamZones, iRamZoneCallback);
1.380 +
1.381 + // initialise all pages in banks as unused...
1.382 + const SRamBank* bank = info.iBanks;
1.383 + while(bank->iSize)
1.384 + {
1.385 + TUint32 base = bank->iBase;
1.386 + TUint32 size = bank->iSize;
1.387 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found RAM bank 0x%08x size %d",base,size));
1.388 + if(base+size<=base || ((base|size)&KPageMask))
1.389 + Panic(EInvalidRamBankAtBoot);
1.390 +
1.391 + SPageInfo* pi = SPageInfo::FromPhysAddr(base);
1.392 + SPageInfo* piEnd = pi+(size>>KPageShift);
1.393 + while(pi<piEnd)
1.394 + (pi++)->SetUnused();
1.395 + ++bank;
1.396 + }
1.397 + // step over the last bank to get to the reserved banks.
1.398 + ++bank;
1.399 + // mark any reserved regions as allocated...
1.400 + while(bank->iSize)
1.401 + {
1.402 + TUint32 base = bank->iBase;
1.403 + TUint32 size = bank->iSize;
1.404 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Found reserved bank 0x%08x size %d",base,size));
1.405 + if(base+size<=base || ((base|size)&KPageMask))
1.406 + Panic(EInvalidReservedBankAtBoot);
1.407 +
1.408 + SPageInfo* pi = SPageInfo::FromPhysAddr(base);
1.409 + SPageInfo* piEnd = pi+(size>>KPageShift);
1.410 + while(pi<piEnd)
1.411 + (pi++)->SetPhysAlloc();
1.412 + ++bank;
1.413 + }
1.414 +
1.415 +	// Clear the initial (and only so far) page table info page so all unused
1.416 +	// page table infos will be marked as unused.
1.417 + __ASSERT_COMPILE(SPageTableInfo::EUnused == 0);
1.418 + memclr((TAny*)KPageTableInfoBase, KPageSize);
1.419 +
1.420 + // look for page tables - assume first page table maps page tables
1.421 + TPte* pPte = (TPte*)KPageTableBase;
1.422 + TInt i;
1.423 + for(i=0; i<KChunkSize/KPageSize; ++i)
1.424 + {
1.425 + TPte pte = *pPte++;
1.426 + if(pte==KPteUnallocatedEntry) // after boot, page tables are contiguous
1.427 + break;
1.428 + TPhysAddr ptpgPhys = Mmu::PtePhysAddr(pte,i);
1.429 + __KTRACE_OPT(KBOOT,Kern::Printf("Page Table Group %08x -> Phys %08x", KPageTableBase+i*KPageSize, ptpgPhys));
1.430 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(ptpgPhys);
1.431 + __ASSERT_ALWAYS(pi, Panic(EInvalidPageTableAtBoot));
1.432 +		pi->SetFixed(i); // this also sets the SPageInfo::iIndex so that linear-to-physical works
1.433 + }
1.434 +
1.435 + // look for mapped pages
1.436 + TPde* pd = Mmu::PageDirectory(KKernelOsAsid);
1.437 + for(i=0; i<(1<<(32-KChunkShift)); ++i)
1.438 + {
1.439 + TPde pde = pd[i];
1.440 + if(pde==KPdeUnallocatedEntry)
1.441 + continue;
1.442 + TPhysAddr pdePhys = Mmu::PdePhysAddr(pde);
1.443 + TPte* pt = 0;
1.444 + if(pdePhys!=KPhysAddrInvalid)
1.445 + {
1.446 + __KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> Whole PDE Phys %08x", i<<KChunkShift, pdePhys));
1.447 + }
1.448 + else
1.449 + {
1.450 + pt = Mmu::PageTableFromPde(pde);
1.451 + __KTRACE_OPT(KBOOT,Kern::Printf("Addr %08x -> page table %08x", i<<KChunkShift, pt));
1.452 + __ASSERT_ALWAYS(pt,Panic(EInvalidPdeAtBoot)); // bad PDE
1.453 + }
1.454 +
1.455 + TInt j;
1.456 + TInt np = 0;
1.457 + for(j=0; j<KChunkSize/KPageSize; ++j)
1.458 + {
1.459 + TBool present = ETrue; // all pages present if whole PDE mapping
1.460 + TPte pte = 0;
1.461 + if(pt)
1.462 + {
1.463 + pte = pt[j];
1.464 + present = pte!=KPteUnallocatedEntry;
1.465 + }
1.466 + if(present)
1.467 + {
1.468 + ++np;
1.469 + TPhysAddr pa = pt ? Mmu::PtePhysAddr(pte,j) : (pdePhys + (j<<KPageShift));
1.470 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pa);
1.471 + __KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x PA=%08x",
1.472 + (i<<KChunkShift)+(j<<KPageShift), pa));
1.473 + if(pi) // ignore non-RAM mappings
1.474 + {
1.475 + TInt r = iRamPageAllocator->MarkPageAllocated(pa, EPageFixed);
1.476 + // allow KErrAlreadyExists since it's possible that a page is doubly mapped
1.477 + __ASSERT_ALWAYS(r==KErrNone || r==KErrAlreadyExists, Panic(EBadMappedPageAfterBoot));
1.478 + if(pi->Type()==SPageInfo::EUnused)
1.479 + pi->SetFixed();
1.480 + }
1.481 + }
1.482 + }
1.483 + __KTRACE_OPT(KBOOT,Kern::Printf("Addr: %08x #PTEs=%d",(i<<KChunkShift),np));
1.484 + if(pt)
1.485 + {
1.486 + SPageTableInfo* pti = SPageTableInfo::FromPtPtr(pt);
1.487 + pti->Boot(np);
1.488 + }
1.489 + }
1.490 +
1.491 + TInt r = K::MutexCreate(iRamAllocatorMutex, KLitRamAlloc, NULL, EFalse, KMutexOrdRamAlloc);
1.492 + if(r!=KErrNone)
1.493 + Panic(ERamAllocMutexCreateFailed);
1.494 + iRamAllocLockCount = 0;
1.495 + iRamAllocInitialFreePages = FreeRamInPages();
1.496 +
1.497 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::DoInit2"));
1.498 +
1.499 + for(i=0; i<KNumTempMappingSlots; ++i)
1.500 + iTempMap[i].Alloc(1);
1.501 +
1.502 + iPhysMemSyncTemp.Alloc(1);
1.503 + r = K::MutexCreate(iPhysMemSyncMutex, KLitPhysMemSync, NULL, EFalse, KMutexOrdSyncPhysMem);
1.504 + if(r!=KErrNone)
1.505 + Panic(EPhysMemSyncMutexCreateFailed);
1.506 +// VerifyRam();
1.507 + }
1.508 +
1.509 +
1.510 +void Mmu::Init2FinalCommon()
1.511 + {
1.512 + __KTRACE_OPT2(KBOOT,KMMU,Kern::Printf("Mmu::Init2FinalCommon"));
1.513 + // hack, reduce free memory to <2GB...
1.514 + while(FreeRamInPages()>=0x80000000/KPageSize)
1.515 + {
1.516 + TPhysAddr dummyPage;
1.517 + TInt r = iRamPageAllocator->AllocRamPages(&dummyPage,1, EPageFixed);
1.518 + __NK_ASSERT_ALWAYS(r==KErrNone);
1.519 + }
1.520 + // hack, reduce total RAM to <2GB...
1.521 + if(TheSuperPage().iTotalRamSize<0)
1.522 + TheSuperPage().iTotalRamSize = 0x80000000-KPageSize;
1.523 +
1.524 + // Save current free RAM size - there can never be more free RAM than this
1.525 + TUint maxFreePages = FreeRamInPages();
1.526 + K::MaxFreeRam = maxFreePages*KPageSize;
1.527 + if(maxFreePages < (TUint(PP::RamDriveMaxSize)>>KPageShift))
1.528 + PP::RamDriveMaxSize = maxFreePages*KPageSize;
1.529 +
1.530 + // update this to stop assert triggering in RamAllocLock::Lock()
1.531 + iRamAllocInitialFreePages = maxFreePages;
1.532 + }
1.533 +
1.534 +
1.535 +void Mmu::Init3()
1.536 + {
1.537 + iDefrag = new Defrag;
1.538 + if (!iDefrag)
1.539 + Panic(EDefragAllocFailed);
1.540 + iDefrag->Init3(TheMmu.iRamPageAllocator);
1.541 + }
1.542 +
1.543 +//
1.544 +// Utils
1.545 +//
1.546 +
1.547 +void Mmu::Panic(TPanic aPanic)
1.548 + {
1.549 + Kern::Fault("MMU",aPanic);
1.550 + }
1.551 +
1.552 +
1.553 +TUint Mmu::FreeRamInPages()
1.554 + {
1.555 + return iRamPageAllocator->FreeRamInPages()+ThePager.NumberOfFreePages();
1.556 + }
1.557 +
1.558 +
1.559 +TUint Mmu::TotalPhysicalRamPages()
1.560 + {
1.561 + return iRamPageAllocator->TotalPhysicalRamPages();
1.562 + }
1.563 +
1.564 +
1.565 +const SRamZone* Mmu::RamZoneConfig(TRamZoneCallback& aCallback) const
1.566 + {
1.567 + aCallback = iRamZoneCallback;
1.568 + return iRamZones;
1.569 + }
1.570 +
1.571 +
1.572 +void Mmu::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
1.573 + {
1.574 + iRamZones = aZones;
1.575 + iRamZoneCallback = aCallback;
1.576 + }
1.577 +
1.578 +
1.579 +TInt Mmu::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
1.580 + {
1.581 + return iRamPageAllocator->ModifyZoneFlags(aId, aClearMask, aSetMask);
1.582 + }
1.583 +
1.584 +
1.585 +TInt Mmu::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
1.586 + {
1.587 + return iRamPageAllocator->GetZonePageCount(aId, aPageData);
1.588 + }
1.589 +
1.590 +
1.591 +TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aBytes, TPhysAddr& aPhysAddr, TInt aAlign)
1.592 + {
1.593 +	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?,%d)", aZoneIdCount, aBytes, aAlign));
1.594 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.595 +
1.596 + TInt r = iRamPageAllocator->ZoneAllocContiguousRam(aZoneIdList, aZoneIdCount, aBytes, aPhysAddr, EPageFixed, aAlign);
1.597 + if(r!=KErrNone)
1.598 + iRamAllocFailed = ETrue;
1.599 + else
1.600 + {
1.601 + TUint pages = MM::RoundToPageCount(aBytes);
1.602 + AllocatedPhysicalRam(aPhysAddr, pages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
1.603 + }
1.604 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
1.605 + return r;
1.606 + }
1.607 +
1.608 +
1.609 +TInt Mmu::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
1.610 + {
1.611 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam(?,%d,%d,?)", aZoneIdCount, aNumPages));
1.612 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.613 +
1.614 + TInt r = iRamPageAllocator->ZoneAllocRamPages(aZoneIdList, aZoneIdCount, aPageList, aNumPages, EPageFixed);
1.615 + if(r!=KErrNone)
1.616 + iRamAllocFailed = ETrue;
1.617 + else
1.618 + {
1.619 + PagesAllocated(aPageList, aNumPages, (Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
1.620 +
1.621 + // update page infos...
1.622 + TUint flash = 0;
1.623 + TPhysAddr* pageEnd = aPageList + aNumPages;
1.624 + MmuLock::Lock();
1.625 + TPhysAddr* page = aPageList;
1.626 + while (page < pageEnd)
1.627 + {
1.628 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
1.629 + TPhysAddr pagePhys = *page++;
1.630 + __NK_ASSERT_DEBUG(pagePhys != KPhysAddrInvalid);
1.631 + SPageInfo::FromPhysAddr(pagePhys)->SetPhysAlloc();
1.632 + }
1.633 + MmuLock::Unlock();
1.634 + }
1.635 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ZoneAllocPhysicalRam returns %d",r));
1.636 + return r;
1.637 + }
1.638 +
1.639 +
1.640 +TInt Mmu::RamHalFunction(TInt aFunction, TAny* a1, TAny* a2)
1.641 + {
1.642 + // This function should only be registered with hal and therefore can only
1.643 + // be invoked after the ram allocator has been created.
1.644 + __NK_ASSERT_DEBUG(iRamPageAllocator);
1.645 + return iRamPageAllocator->HalFunction(aFunction, a1, a2);
1.646 + }
1.647 +
1.648 +
1.649 +void Mmu::ChangePageType(SPageInfo* aPageInfo, TZonePageType aOldPageType, TZonePageType aNewPageType)
1.650 + {
1.651 + iRamPageAllocator->ChangePageType(aPageInfo, aOldPageType, aNewPageType);
1.652 + }
1.653 +
1.654 +TInt Mmu::HandlePageFault(TLinAddr aPc, TLinAddr aFaultAddress, TUint aAccessPermissions, TAny* aExceptionInfo)
1.655 + {
1.656 + TRACE(("Mmu::HandlePageFault(0x%08x,0x%08x,%d)",aPc,aFaultAddress,aAccessPermissions));
1.657 +
1.658 + DMemModelThread* thread = (DMemModelThread*)TheCurrentThread;
1.659 + // Get the os asid of the process taking the fault, no need to open a reference
1.660 + // as it is the current thread's process so can't be freed.
1.661 + TUint faultOsAsid = ((DMemModelProcess*)thread->iNThread.iAddressSpace)->OsAsid();
1.662 +
1.663 + // check if any fast mutexes held...
1.664 + NFastMutex* fm = NKern::HeldFastMutex();
1.665 + TPagingExcTrap* trap = thread->iPagingExcTrap;
1.666 + if(fm)
1.667 + {
1.668 + // check there is an XTRAP_PAGING in effect...
1.669 + if(!trap)
1.670 + {
1.671 + // oops, kill system...
1.672 + __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with FM Held! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc));
1.673 + Exc::Fault(aExceptionInfo);
1.674 + }
1.675 +
1.676 + // release the fast mutex...
1.677 + NKern::FMSignal(fm);
1.678 + }
1.679 +
1.680 + NKern::ThreadEnterCS();
1.681 +
1.682 + // work out address space for aFaultAddress...
1.683 + TUint osAsid = faultOsAsid;
1.684 + TLinAddr addr = aFaultAddress;
1.685 + if(thread->iAliasLinAddr && TUint(addr - thread->iAliasLinAddr) < TUint(KPageSize))
1.686 + {
1.687 + // Address in aliased memory...
1.688 + addr = (addr - thread->iAliasLinAddr) + thread->iAliasTarget;
1.689 + // Get the os asid of the process thread is aliasing, no need to open
1.690 + // a reference on it as one was already opened when the alias was created.
1.691 + osAsid = thread->iAliasProcess->OsAsid();
1.692 + }
1.693 + else if(addr>=KGlobalMemoryBase)
1.694 + {
1.695 + // Address in global region, so look it up in kernel's address space...
1.696 + osAsid = KKernelOsAsid;
1.697 + }
1.698 +
1.699 + // NOTE, osAsid will remain valid for duration of this function because it is either
1.700 + // - The current thread's address space, which can't go away whilst the thread
1.701 + // is running.
1.702 + // - The address space of another thread which we are aliasing memory from,
1.703 + // and we would only do this if we have a reference on this other thread,
1.704 +	//    which has a reference on its process, which should own the address space!
1.705 +
1.706 +#ifdef __BROADCAST_CACHE_MAINTENANCE__
1.707 + TInt aliasAsid = -1;
1.708 + if (thread->iAliasLinAddr)
1.709 + {
1.710 +		// If an alias is in effect, then the thread will be locked to the current CPU,
1.711 +		// but we need to be able to migrate between CPUs for cache maintenance. This
1.712 + // must be dealt with by removing the alias and restoring it with a paging trap
1.713 + // handler.
1.714 + if(!trap)
1.715 + {
1.716 + // oops, kill system...
1.717 + __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("Fault with thread locked to current CPU! addr=0x%08x (%O pc=%x)",aFaultAddress,thread,aPc));
1.718 + Exc::Fault(aExceptionInfo);
1.719 + }
1.720 + // Open a reference on the aliased process's os asid before removing the alias
1.721 + // so that the address space can't be freed while we try to access its members.
1.722 + aliasAsid = thread->iAliasProcess->TryOpenOsAsid();
1.723 + // This should never fail as until we remove the alias there will
1.724 + // always be at least one reference on the os asid.
1.725 + __NK_ASSERT_DEBUG(aliasAsid >= 0);
1.726 + thread->RemoveAlias();
1.727 + }
1.728 +#endif
1.729 +
1.730 + // find mapping...
1.731 + TUint offsetInMapping;
1.732 + TUint mapInstanceCount;
1.733 + DMemoryMapping* mapping = MM::FindMappingInAddressSpace(osAsid, addr, 1, offsetInMapping, mapInstanceCount);
1.734 +// TRACE(("%O mapping=0x%08x",TheCurrentThread,mapping));
1.735 + TInt r = KErrNotFound;
1.736 +
1.737 + if(mapping)
1.738 + {
1.739 + MmuLock::Lock();
1.740 +
1.741 + // check if we need to process page fault...
1.742 + if(!Mmu::CheckPteTypePermissions(mapping->PteType(),aAccessPermissions) ||
1.743 + mapInstanceCount != mapping->MapInstanceCount())
1.744 + {
1.745 + // Invalid access to the page.
1.746 + MmuLock::Unlock();
1.747 + r = KErrAbort;
1.748 + }
1.749 + else
1.750 + {
1.751 + // Should not be able to take a fault on a pinned mapping if accessing it
1.752 + // with the correct permissions.
1.753 + __NK_ASSERT_DEBUG(!mapping->IsPinned());
1.754 +
1.755 +			// We do need to handle the fault, so determine whether it is a demand paging or page moving fault.
1.756 + DMemoryObject* memory = mapping->Memory();
1.757 + if(!memory)
1.758 + MmuLock::Unlock();
1.759 + else
1.760 + {
1.761 + TUint faultIndex = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
1.762 + memory->Open();
1.763 +
1.764 + // This is safe as we have the instance count so can detect the mapping
1.765 + // being reused and we have a reference to the memory object so it can't
1.766 + // be deleted.
1.767 + MmuLock::Unlock();
1.768 +
1.769 + if(memory->IsDemandPaged())
1.770 + {
1.771 + // Let the pager handle the fault...
1.772 + r = ThePager.HandlePageFault( aPc, aFaultAddress, faultOsAsid, faultIndex,
1.773 + aAccessPermissions, memory, mapping, mapInstanceCount,
1.774 + thread, aExceptionInfo);
1.775 + }
1.776 + else
1.777 + {// The page could be being moved so verify that with its manager.
1.778 + DMemoryManager* manager = memory->iManager;
1.779 + r = manager->HandleFault(memory, faultIndex, mapping, mapInstanceCount, aAccessPermissions);
1.780 + }
1.781 + if (r == KErrNone)
1.782 + {// alias PDE needs updating because page tables have changed...
1.783 + thread->RefreshAlias();
1.784 + }
1.785 + memory->Close();
1.786 + }
1.787 + }
1.788 + mapping->Close();
1.789 + }
1.790 +
1.791 + if (trap)
1.792 + {
1.793 + // restore address space (because the trap will bypass any code
1.794 + // which would have done this.)...
1.795 + DMemModelThread::RestoreAddressSpace();
1.796 + }
1.797 +
1.798 +#ifdef __BROADCAST_CACHE_MAINTENANCE__
1.799 + // Close any reference on the aliased process's os asid before we leave the
1.800 + // critical section.
1.801 + if (aliasAsid >= 0)
1.802 + {
1.803 + thread->iAliasProcess->CloseOsAsid();
1.804 + }
1.805 +#endif
1.806 +
1.807 + NKern::ThreadLeaveCS(); // thread will die now if CheckRealtimeThreadFault caused a panic
1.808 +
1.809 + // deal with XTRAP_PAGING...
1.810 + if(trap)
1.811 + {
1.812 + // re-acquire any fast mutex which was held before the page fault...
1.813 + if(fm)
1.814 + NKern::FMWait(fm);
1.815 + if (r == KErrNone)
1.816 + {
1.817 + trap->Exception(1); // return from exception trap with result '1' (value>0)
1.818 + // code doesn't continue beyond this point.
1.819 + __NK_ASSERT_DEBUG(0);
1.820 + }
1.821 + }
1.822 +
1.823 + return r;
1.824 + }
1.825 +
1.826 +
1.827 +//
1.828 +// Memory allocation
1.829 +//
1.830 +
1.831 +TInt Mmu::AllocRam( TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags, TZonePageType aZonePageType,
1.832 + TUint aBlockZoneId, TBool aBlockRest)
1.833 + {
1.834 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam(?,%d,%x)",aCount,aFlags));
1.835 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.836 +#ifdef _DEBUG
1.837 + if(K::CheckForSimulatedAllocFail())
1.838 + {
1.839 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns simulated OOM %d",KErrNoMemory));
1.840 + return KErrNoMemory;
1.841 + }
1.842 +#endif
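+	// Try the allocation; if it can't be satisfied and the caller allows pager reclaim,
+	// ask the pager to release free pages from the paging cache and retry once.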
1.843 + TInt missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
1.844 + if(missing && !(aFlags&EAllocNoPagerReclaim) && ThePager.GetFreePages(missing))
1.845 + missing = iRamPageAllocator->AllocRamPages(aPages, aCount, aZonePageType, aBlockZoneId, aBlockRest);
1.846 + TInt r = missing ? KErrNoMemory : KErrNone;
1.847 + if(r!=KErrNone)
1.848 + iRamAllocFailed = ETrue;
1.849 + else
1.850 + PagesAllocated(aPages,aCount,aFlags);
1.851 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocRam returns %d",r));
1.852 + return r;
1.853 + }
1.854 +
1.855 +
1.856 +void Mmu::FreeRam(TPhysAddr* aPages, TUint aCount, TZonePageType aZonePageType)
1.857 + {
1.858 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeRam(?,%d)",aCount));
1.859 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.860 +
1.861 + // update page infos...
1.862 + TPhysAddr* pages = aPages;
1.863 + TPhysAddr* pagesEnd = pages+aCount;
1.864 + TPhysAddr* pagesOut = aPages;
1.865 + MmuLock::Lock();
1.866 + TUint flash = 0;
1.867 + while(pages<pagesEnd)
1.868 + {
1.869 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
1.870 + TPhysAddr pagePhys = *pages++;
1.871 + __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
1.872 + SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
1.873 + PageFreed(pi);
1.874 +
1.875 + // If this is an old page of a page being moved that was previously pinned
1.876 + // then make sure it is freed as discardable otherwise despite DPager::DonatePages()
1.877 + // having marked it as discardable it would be freed as movable.
1.878 + __NK_ASSERT_DEBUG(pi->PagedState() != SPageInfo::EPagedPinnedMoved || aCount == 1);
1.879 + if (pi->PagedState() == SPageInfo::EPagedPinnedMoved)
1.880 + aZonePageType = EPageDiscard;
1.881 +
1.882 + if(ThePager.PageFreed(pi)==KErrNone)
1.883 + --aCount; // pager has dealt with this page, so one less for us
1.884 + else
1.885 + {
1.886 + // All paged pages should have been dealt with by the pager above.
1.887 + __NK_ASSERT_DEBUG(pi->PagedState() == SPageInfo::EUnpaged);
1.888 + *pagesOut++ = pagePhys; // store page address for freeing later
1.889 + }
1.890 + }
1.891 + MmuLock::Unlock();
1.892 +
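+	// Pages the pager reclaimed above have been removed from the list (aCount was
+	// decremented and the remaining addresses compacted at the start of aPages), so
+	// only the truly unused pages are handed back to the allocator here.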
1.893 + iRamPageAllocator->FreeRamPages(aPages, aCount, aZonePageType);
1.894 + }
1.895 +
1.896 +
1.897 +TInt Mmu::AllocContiguousRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
1.898 + {
1.899 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
1.900 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.901 +#ifdef _DEBUG
1.902 + if(K::CheckForSimulatedAllocFail())
1.903 + {
1.904 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns simulated OOM %d",KErrNoMemory));
1.905 + return KErrNoMemory;
1.906 + }
1.907 +	// Only the pager sets EAllocNoPagerReclaim and it shouldn't allocate contiguous ram.
1.908 + __NK_ASSERT_DEBUG(!(aFlags&EAllocNoPagerReclaim));
1.909 +#endif
1.910 + TInt r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
1.911 + if(r==KErrNoMemory && aCount > KMaxFreeableContiguousPages)
1.912 + {
1.913 + // flush paging cache and retry...
1.914 + ThePager.FlushAll();
1.915 + r = iRamPageAllocator->AllocContiguousRam(aCount, aPhysAddr, EPageFixed, aAlign+KPageShift);
1.916 + }
1.917 + if(r!=KErrNone)
1.918 + iRamAllocFailed = ETrue;
1.919 + else
1.920 + PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
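+		// Note: setting the low bit of aPhysAddr appears to mark the argument as a
+		// contiguous physical run rather than a list of page addresses; this relies on
+		// PagesAllocated() decoding that convention (see also ClaimPhysicalRam and
+		// AllocatedPhysicalRam, which do the same).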
1.921 +	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocContiguousRam returns %d and aPhysAddr=0x%08x",r,aPhysAddr));
1.922 + return r;
1.923 + }
1.924 +
1.925 +
1.926 +void Mmu::FreeContiguousRam(TPhysAddr aPhysAddr, TUint aCount)
1.927 + {
1.928 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreeContiguousRam(0x%08x,0x%x)",aPhysAddr,aCount));
1.929 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.930 + __NK_ASSERT_DEBUG((aPhysAddr&KPageMask)==0);
1.931 +
1.932 + TUint pageCount = aCount;
1.933 +
1.934 + // update page infos...
1.935 + SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1.936 + SPageInfo* piEnd = pi+pageCount;
1.937 + TUint flash = 0;
1.938 + MmuLock::Lock();
1.939 + while(pi<piEnd)
1.940 + {
1.941 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1.942 + PageFreed(pi++);
1.943 + }
1.944 + MmuLock::Unlock();
1.945 +
1.946 + // free pages...
1.947 + while(pageCount)
1.948 + {
1.949 + iRamPageAllocator->FreeRamPage(aPhysAddr, EPageFixed);
1.950 + aPhysAddr += KPageSize;
1.951 + --pageCount;
1.952 + }
1.953 + }
1.954 +
1.955 +
1.956 +TInt Mmu::AllocPhysicalRam(TPhysAddr* aPages, TUint aCount, TRamAllocFlags aFlags)
1.957 + {
1.958 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,%d,%x)",aCount,aFlags));
1.959 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.960 + // Allocate fixed pages as physically allocated pages aren't movable or discardable.
1.961 + TInt r = AllocRam(aPages, aCount, aFlags, EPageFixed);
1.962 + if (r!=KErrNone)
1.963 + return r;
1.964 +
1.965 + // update page infos...
1.966 + TPhysAddr* pages = aPages;
1.967 + TPhysAddr* pagesEnd = pages+aCount;
1.968 + MmuLock::Lock();
1.969 + TUint flash = 0;
1.970 + while(pages<pagesEnd)
1.971 + {
1.972 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
1.973 + TPhysAddr pagePhys = *pages++;
1.974 + __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
1.975 + SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
1.976 + pi->SetPhysAlloc();
1.977 + }
1.978 + MmuLock::Unlock();
1.979 +
1.980 + return KErrNone;
1.981 + }
1.982 +
1.983 +
1.984 +void Mmu::FreePhysicalRam(TPhysAddr* aPages, TUint aCount)
1.985 + {
1.986 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(?,%d)",aCount));
1.987 + __NK_ASSERT_DEBUG(RamAllocLock::IsHeld());
1.988 +
1.989 + // update page infos...
1.990 + TPhysAddr* pages = aPages;
1.991 + TPhysAddr* pagesEnd = pages+aCount;
1.992 + MmuLock::Lock();
1.993 + TUint flash = 0;
1.994 + while(pages<pagesEnd)
1.995 + {
1.996 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo/2);
1.997 + TPhysAddr pagePhys = *pages++;
1.998 + __NK_ASSERT_DEBUG(pagePhys!=KPhysAddrInvalid);
1.999 + SPageInfo* pi = SPageInfo::FromPhysAddr(pagePhys);
1.1000 + __ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
1.1001 + __ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
1.1002 + pi->SetUnused();
1.1003 + }
1.1004 + MmuLock::Unlock();
1.1005 +
1.1006 + iRamPageAllocator->FreeRamPages(aPages,aCount, EPageFixed);
1.1007 + }
1.1008 +
1.1009 +
1.1010 +TInt Mmu::AllocPhysicalRam(TPhysAddr& aPhysAddr, TUint aCount, TUint aAlign, TRamAllocFlags aFlags)
1.1011 + {
1.1012 +	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocPhysicalRam(?,0x%x,%d,%x)",aCount,aAlign,aFlags));
1.1013 + TInt r = AllocContiguousRam(aPhysAddr,aCount,aAlign,aFlags);
1.1014 + if (r!=KErrNone)
1.1015 + return r;
1.1016 +
1.1017 + // update page infos...
1.1018 + SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1.1019 + SPageInfo* piEnd = pi+aCount;
1.1020 + TUint flash = 0;
1.1021 + MmuLock::Lock();
1.1022 + while(pi<piEnd)
1.1023 + {
1.1024 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1.1025 + pi->SetPhysAlloc();
1.1026 + ++pi;
1.1027 + }
1.1028 + MmuLock::Unlock();
1.1029 +
1.1030 + return KErrNone;
1.1031 + }
1.1032 +
1.1033 +
1.1034 +void Mmu::FreePhysicalRam(TPhysAddr aPhysAddr, TUint aCount)
1.1035 + {
1.1036 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::FreePhysicalRam(0x%08x,0x%x)",aPhysAddr,aCount));
1.1037 +
1.1038 + // update page infos...
1.1039 + SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1.1040 + SPageInfo* piEnd = pi+aCount;
1.1041 + TUint flash = 0;
1.1042 + MmuLock::Lock();
1.1043 + while(pi<piEnd)
1.1044 + {
1.1045 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1.1046 + __ASSERT_ALWAYS(pi->Type()==SPageInfo::EPhysAlloc, Panic(EBadFreePhysicalRam));
1.1047 + __ASSERT_ALWAYS(!pi->UseCount(), Panic(EBadFreePhysicalRam));
1.1048 + pi->SetUnused();
1.1049 + ++pi;
1.1050 + }
1.1051 + MmuLock::Unlock();
1.1052 +
1.1053 + iRamPageAllocator->FreePhysicalRam(aPhysAddr, aCount << KPageShift);
1.1054 + }
1.1055 +
1.1056 +
1.1057 +TInt Mmu::ClaimPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
1.1058 + {
1.1059 + __KTRACE_OPT(KMMU,Kern::Printf("Mmu::ClaimPhysicalRam(0x%08x,0x%x,0x%08x)",aPhysAddr,aCount,aFlags));
1.1060 + aPhysAddr &= ~KPageMask;
1.1061 + TInt r = iRamPageAllocator->ClaimPhysicalRam(aPhysAddr,(aCount << KPageShift));
1.1062 + if(r!=KErrNone)
1.1063 + return r;
1.1064 +
1.1065 + PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
1.1066 +
1.1067 + // update page infos...
1.1068 + SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1.1069 + SPageInfo* piEnd = pi+aCount;
1.1070 + TUint flash = 0;
1.1071 + MmuLock::Lock();
1.1072 + while(pi<piEnd)
1.1073 + {
1.1074 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1.1075 + pi->SetPhysAlloc();
1.1076 + ++pi;
1.1077 + }
1.1078 + MmuLock::Unlock();
1.1079 +
1.1080 + return KErrNone;
1.1081 + }
1.1082 +
1.1083 +
1.1084 +void Mmu::AllocatedPhysicalRam(TPhysAddr aPhysAddr, TUint aCount, TRamAllocFlags aFlags)
1.1085 + {
1.1086 +	__KTRACE_OPT(KMMU,Kern::Printf("Mmu::AllocatedPhysicalRam(0x%08x,0x%x,%x)",aPhysAddr,aCount,aFlags));
1.1087 +
1.1088 + PagesAllocated((TPhysAddr*)(aPhysAddr|1), aCount, aFlags);
1.1089 +
1.1090 + // update page infos...
1.1091 + SPageInfo* pi = SPageInfo::FromPhysAddr(aPhysAddr);
1.1092 + SPageInfo* piEnd = pi+aCount;
1.1093 + TUint flash = 0;
1.1094 + MmuLock::Lock();
1.1095 + while(pi<piEnd)
1.1096 + {
1.1097 + MmuLock::Flash(flash,KMaxPageInfoUpdatesInOneGo);
1.1098 + pi->SetPhysAlloc();
1.1099 + ++pi;
1.1100 + }
1.1101 + MmuLock::Unlock();
1.1102 + }
1.1103 +
1.1104 +
1.1105 +//
1.1106 +// Misc
1.1107 +//
1.1108 +
1.1109 +#ifdef _DEBUG
1.1110 +/**
1.1111 +Perform a page table walk to return the physical address of
1.1112 +the memory mapped at virtual address \a aLinAddr in the
1.1113 +address space \a aOsAsid.
1.1114 +
1.1115 +If the page table used was not one allocated by the kernel
1.1116 +then the results are unpredictable and may cause a system fault.
1.1117 +
1.1118 +@pre #MmuLock held.
1.1119 +*/
1.1120 +TPhysAddr Mmu::LinearToPhysical(TLinAddr aLinAddr, TInt aOsAsid)
1.1121 + {
1.1122 + __NK_ASSERT_DEBUG(MmuLock::IsHeld() || K::Initialising);
1.1123 + return UncheckedLinearToPhysical(aLinAddr,aOsAsid);
1.1124 + }
1.1125 +#endif
1.1126 +
1.1127 +
1.1128 +/**
1.1129 +Next virtual address available for allocation by TTempMapping.
1.1130 +This is initialised to #KTempAddr and addresses may be allocated
1.1131 +until they reach #KTempAddrEnd.
1.1132 +*/
1.1133 +TLinAddr Mmu::TTempMapping::iNextLinAddr = KTempAddr;
1.1134 +
1.1135 +
1.1136 +/**
1.1137 +Allocate virtual address space required to map a given number of memory pages.
1.1138 +
1.1139 +The amount of virtual address space actually allocated needs to accommodate \a aNumPages
1.1140 +pages of any colour. For example: if \a aNumPages == 4 and #KPageColourCount == 4,
1.1141 +then at least 7 pages are required.
1.1142 +
1.1143 +@param aNumPages Maximum number of pages that can be mapped into this temporary mapping.
1.1144 +
1.1145 +@pre Called in single threaded context (boot) only.
1.1146 +
1.1147 +@pre #iNextLinAddr points to virtual page with zero colour.
1.1148 +@post #iNextLinAddr points to virtual page with zero colour.
1.1149 +*/
1.1150 +void Mmu::TTempMapping::Alloc(TUint aNumPages)
1.1151 + {
1.1152 + __NK_ASSERT_DEBUG(aNumPages<=(KTempAddrEnd-KTempAddr)/KPageSize);
1.1153 +
1.1154 + // This runs during the boot only (single threaded context) so the access to iNextLinAddr is not guarded by any mutex.
1.1155 + TLinAddr tempAddr = iNextLinAddr;
1.1156 + TUint numPages = (KPageColourMask+aNumPages+KPageColourMask)&~KPageColourMask;
1.1157 + iNextLinAddr = tempAddr+numPages*KPageSize;
1.1158 +
1.1159 + __NK_ASSERT_ALWAYS(iNextLinAddr<=KTempAddrEnd);
1.1160 +
1.1161 + __NK_ASSERT_DEBUG(iSize==0);
1.1162 + iLinAddr = tempAddr;
1.1163 + MmuLock::Lock();
1.1164 + iPtePtr = Mmu::PtePtrFromLinAddr(tempAddr,KKernelOsAsid);
1.1165 + __NK_ASSERT_DEBUG(iPtePtr);
1.1166 + MmuLock::Unlock();
1.1167 + iBlankPte = TheMmu.iTempPteCached;
1.1168 + iSize = aNumPages;
1.1169 + iCount = 0;
1.1170 +
1.1171 + TRACEB(("Mmu::TTempMapping::Alloc(%d) iLinAddr=0x%08x, iPtePtr=0x%08x",aNumPages,iLinAddr,iPtePtr));
1.1172 + }
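+// Illustrative only: with KPageColourCount == 4 (so KPageColourMask == 3), Alloc(4)
+// advances iNextLinAddr by (3+4+3)&~3 == 8 pages, which is enough virtual space to
+// map 4 pages of any colour from a zero-colour base address.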
1.1173 +
1.1174 +
1.1175 +/**
1.1176 +Map a single physical page into this temporary mapping.
1.1177 +
1.1178 +Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.
1.1179 +
1.1180 +@param aPage The physical page to map.
1.1181 +@param aColour The required colour for the mapping.
1.1182 +
1.1183 +@return The linear address at which the page is mapped.
1.1184 +*/
1.1185 +TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour)
1.1186 + {
1.1187 + __NK_ASSERT_DEBUG(iSize>=1);
1.1188 + __NK_ASSERT_DEBUG(iCount==0);
1.1189 +
1.1190 + TUint colour = aColour&KPageColourMask;
1.1191 + TLinAddr addr = iLinAddr+(colour<<KPageShift);
1.1192 + TPte* pPte = iPtePtr+colour;
1.1193 + iColour = colour;
1.1194 +
1.1195 + __ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
1.1196 + *pPte = (aPage&~KPageMask) | iBlankPte;
1.1197 + CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1.1198 + InvalidateTLBForPage(addr|KKernelOsAsid);
1.1199 +
1.1200 + iCount = 1;
1.1201 + return addr;
1.1202 + }
1.1203 +
1.1204 +/**
1.1205 +Map a single physical page into this temporary mapping using the given page table entry (PTE) value.
1.1206 +
1.1207 +@param aPage The physical page to map.
1.1208 +@param aColour The required colour for the mapping.
1.1209 +@param aBlankPte The PTE value to use for mapping the page,
1.1210 + with the physical address component equal to zero.
1.1211 +
1.1212 +@return The linear address at which the page is mapped.
1.1213 +*/
1.1214 +TLinAddr Mmu::TTempMapping::Map(TPhysAddr aPage, TUint aColour, TPte aBlankPte)
1.1215 + {
1.1216 + __NK_ASSERT_DEBUG(iSize>=1);
1.1217 + __NK_ASSERT_DEBUG(iCount==0);
1.1218 +
1.1219 + TUint colour = aColour&KPageColourMask;
1.1220 + TLinAddr addr = iLinAddr+(colour<<KPageShift);
1.1221 + TPte* pPte = iPtePtr+colour;
1.1222 + iColour = colour;
1.1223 +
1.1224 + __ASSERT_DEBUG(*pPte==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
1.1225 + *pPte = (aPage&~KPageMask) | aBlankPte;
1.1226 + CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1.1227 + InvalidateTLBForPage(addr|KKernelOsAsid);
1.1228 +
1.1229 + iCount = 1;
1.1230 + return addr;
1.1231 + }
1.1232 +
1.1233 +
1.1234 +/**
1.1235 +Map a number of physical pages into this temporary mapping.
1.1236 +
1.1237 +Supervisor read/write access and EMemoryAttributeStandard memory attributes apply.
1.1238 +
1.1239 +@param aPages The array of physical pages to map.
1.1240 +@param aCount The number of pages to map.
1.1241 +@param aColour The required colour for the first page.
1.1242 + Consecutive pages will be coloured accordingly.
1.1243 +
1.1244 +@return The linear address at which the first page is mapped.
1.1245 +*/
1.1246 +TLinAddr Mmu::TTempMapping::Map(TPhysAddr* aPages, TUint aCount, TUint aColour)
1.1247 + {
1.1248 + __NK_ASSERT_DEBUG(iSize>=aCount);
1.1249 + __NK_ASSERT_DEBUG(iCount==0);
1.1250 +
1.1251 + TUint colour = aColour&KPageColourMask;
1.1252 + TLinAddr addr = iLinAddr+(colour<<KPageShift);
1.1253 + TPte* pPte = iPtePtr+colour;
1.1254 + iColour = colour;
1.1255 +
1.1256 + for(TUint i=0; i<aCount; ++i)
1.1257 + {
1.1258 + __ASSERT_DEBUG(pPte[i]==KPteUnallocatedEntry,MM::Panic(MM::ETempMappingAlreadyInUse));
1.1259 + pPte[i] = (aPages[i]&~KPageMask) | iBlankPte;
1.1260 + CacheMaintenance::SinglePteUpdated((TLinAddr)&pPte[i]);
1.1261 + InvalidateTLBForPage((addr+i*KPageSize)|KKernelOsAsid);
1.1262 + }
1.1263 +
1.1264 + iCount = aCount;
1.1265 + return addr;
1.1266 + }
1.1267 +
1.1268 +
1.1269 +/**
1.1270 +Unmap all pages from this temporary mapping.
1.1271 +
1.1272 +@param aIMBRequired True if an instruction memory barrier (IMB) is required prior to unmapping.
1.1273 +*/
1.1274 +void Mmu::TTempMapping::Unmap(TBool aIMBRequired)
1.1275 + {
1.1276 + __NK_ASSERT_DEBUG(iSize>=1);
1.1277 + if(aIMBRequired)
1.1278 + CacheMaintenance::CodeChanged(iLinAddr+iColour*KPageSize,iCount*KPageSize);
1.1279 + Unmap();
1.1280 + }
1.1281 +
1.1282 +
1.1283 +/**
1.1284 +Unmap all pages from this temporary mapping.
1.1285 +*/
1.1286 +void Mmu::TTempMapping::Unmap()
1.1287 + {
1.1288 + __NK_ASSERT_DEBUG(iSize>=1);
1.1289 +
1.1290 + TUint colour = iColour;
1.1291 + TLinAddr addr = iLinAddr+(colour<<KPageShift);
1.1292 + TPte* pPte = iPtePtr+colour;
1.1293 + TUint count = iCount;
1.1294 +
1.1295 + while(count)
1.1296 + {
1.1297 + *pPte = KPteUnallocatedEntry;
1.1298 + CacheMaintenance::SinglePteUpdated((TLinAddr)pPte);
1.1299 + InvalidateTLBForPage(addr|KKernelOsAsid);
1.1300 + addr += KPageSize;
1.1301 + ++pPte;
1.1302 + --count;
1.1303 + }
1.1304 +
1.1305 + iCount = 0;
1.1306 + }
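+// Typical usage (sketch with hypothetical values, not part of the build):
+//   Mmu::TTempMapping mapping;
+//   mapping.Alloc(1);                              // boot-time, reserves virtual space
+//   TLinAddr va = mapping.Map(physAddr, colour);   // map one physical page
+//   // ... access the page via va ...
+//   mapping.Unmap();                               // restore the unallocated PTE and flush the TLB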
1.1307 +
1.1308 +#ifdef __SMP__
1.1309 +/**
1.1310 +Dummy IPI to be invoked when a thread's alias pde members are updated remotely
1.1311 +by another thread.
1.1312 +
1.1313 +@internalComponent
1.1314 +*/
1.1315 +class TAliasIPI : public TGenericIPI
1.1316 + {
1.1317 +public:
1.1318 + static void RefreshIsr(TGenericIPI*);
1.1319 + void RefreshAlias();
1.1320 + };
1.1321 +
1.1322 +
1.1323 +/**
1.1324 +Dummy isr method.
1.1325 +*/
1.1326 +void TAliasIPI::RefreshIsr(TGenericIPI*)
1.1327 + {
1.1328 + TRACE2(("TAliasIPI"));
1.1329 + }
1.1330 +
1.1331 +
1.1332 +/**
1.1333 +Queue the dummy IPI on all other processors. This ensures that DoProcessSwitch will
1.1334 +have completed updating iAliasPdePtr once this method returns.
1.1335 +*/
1.1336 +void TAliasIPI::RefreshAlias()
1.1337 + {
1.1338 + NKern::Lock();
1.1339 + QueueAllOther(&RefreshIsr);
1.1340 + NKern::Unlock();
1.1341 + WaitCompletion();
1.1342 + }
1.1343 +
1.1344 +
1.1345 +/**
1.1346 +Perform a dummy IPI on all the other processors to ensure that, if any of them are
1.1347 +executing DoProcessSwitch, they either see the new value of iAliasPde before they
1.1348 +update iAliasPdePtr, or finish updating iAliasPdePtr before we continue.
1.1349 +This works because DoProcessSwitch() has interrupts disabled while reading iAliasPde
1.1350 +and updating iAliasPdePtr.
1.1351 +*/
1.1352 +void BroadcastAliasRefresh()
1.1353 + {
1.1354 + TAliasIPI ipi;
1.1355 + ipi.RefreshAlias();
1.1356 + }
1.1357 +#endif //__SMP__
1.1358 +
1.1359 +/**
1.1360 +Remove any thread IPC aliases which use the specified page table.
1.1361 +This is used by the page table allocator when a page table is freed.
1.1362 +
1.1363 +@pre #PageTablesLockIsHeld
1.1364 +*/
1.1365 +void Mmu::RemoveAliasesForPageTable(TPhysAddr aPageTable)
1.1366 + {
1.1367 + __NK_ASSERT_DEBUG(PageTablesLockIsHeld());
1.1368 +
1.1369 + MmuLock::Lock();
1.1370 +
1.1371 + SDblQue checkedList;
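+	// Because MmuLock is flashed on every iteration, the walk of iAliasList must be
+	// restartable: each examined thread is moved onto checkedList and the whole list
+	// is merged back once the walk completes.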
1.1372 +
1.1373 + TUint ptId = aPageTable>>KPageTableShift;
1.1374 + while(!iAliasList.IsEmpty())
1.1375 + {
1.1376 + SDblQueLink* next = iAliasList.First()->Deque();
1.1377 + checkedList.Add(next);
1.1378 + DMemModelThread* thread = (DMemModelThread*)((TInt)next-_FOFF(DMemModelThread,iAliasLink));
1.1379 + if((thread->iAliasPde>>KPageTableShift)==ptId)
1.1380 + {
1.1381 + // the page table is being aliased by the thread, so remove it...
1.1382 +			TRACE2(("Thread %O RemoveAliasesForPageTable", thread));
1.1383 + thread->iAliasPde = KPdeUnallocatedEntry;
1.1384 +#ifdef __SMP__ // we need to also unmap the page table in case thread is running on another core...
1.1385 +
1.1386 + // Ensure other processors see the update to iAliasPde.
1.1387 + BroadcastAliasRefresh();
1.1388 +
1.1389 + *thread->iAliasPdePtr = KPdeUnallocatedEntry;
1.1390 +
1.1391 + SinglePdeUpdated(thread->iAliasPdePtr);
1.1392 + __NK_ASSERT_DEBUG((thread->iAliasLinAddr&KPageMask)==0);
1.1393 +			// Invalidate the TLB entry for the page using the os asid of the process that created
1.1394 +			// the alias. This is safe as the os asid will still be valid: the thread must be running,
1.1395 +			// otherwise the alias would already have been removed.
1.1396 + InvalidateTLBForPage(thread->iAliasLinAddr | ((DMemModelProcess*)thread->iOwningProcess)->OsAsid());
1.1397 +			// note, a race condition with 'thread' updating its iAliasLinAddr is
1.1398 +			// not a problem because 'thread' will not then be accessing the aliased
1.1399 +			// region and will take care of invalidating the TLB.
1.1400 +#endif
1.1401 + }
1.1402 + MmuLock::Flash();
1.1403 + }
1.1404 +
1.1405 + // copy checkedList back to iAliasList
1.1406 + iAliasList.MoveFrom(&checkedList);
1.1407 +
1.1408 + MmuLock::Unlock();
1.1409 + }
1.1410 +
1.1411 +
1.1412 +void DMemModelThread::RefreshAlias()
1.1413 + {
1.1414 + if(iAliasLinAddr)
1.1415 + {
1.1416 + TRACE2(("Thread %O RefreshAlias", this));
1.1417 + // Get the os asid, this is the current thread so no need to open a reference.
1.1418 + TUint thisAsid = ((DMemModelProcess*)iOwningProcess)->OsAsid();
1.1419 + MmuLock::Lock();
1.1420 + TInt osAsid = iAliasProcess->OsAsid();
1.1421 + TPde pde = *Mmu::PageDirectoryEntry(osAsid,iAliasTarget);
1.1422 + iAliasPde = pde;
1.1423 + *iAliasPdePtr = pde;
1.1424 + SinglePdeUpdated(iAliasPdePtr);
1.1425 + InvalidateTLBForPage(iAliasLinAddr|thisAsid);
1.1426 + MmuLock::Unlock();
1.1427 + }
1.1428 + }
1.1429 +
1.1430 +
1.1431 +
1.1432 +//
1.1433 +// Mapping/unmapping functions
1.1434 +//
1.1435 +
1.1436 +
1.1437 +/**
1.1438 +Modify page table entries (PTEs) so they map the given memory pages.
1.1439 +Entries are only updated if the current state of the corresponding page
1.1440 +is RPageArray::ECommitted.
1.1441 +
1.1442 +@param aPtePtr Pointer into a page table for the PTE of the first page.
1.1443 +@param aCount The number of pages to modify.
1.1444 +@param aPages Pointer to the entry for the first page in a memory object's #RPageArray.
1.1445 + Each entry contains the physical address of a page together with its
1.1446 + current state (RPageArray::TState).
1.1447 +@param aBlankPte The value to use for each PTE, with the physical address component equal
1.1448 + to zero.
1.1449 +
1.1450 +@return False, if the page table no longer maps any entries and may be freed.
1.1451 + True otherwise, to indicate that the page table is still needed.
1.1452 +
1.1453 +@pre #MmuLock held.
1.1454 +@post #MmuLock held and has not been released by this function.
1.1455 +*/
1.1456 +TBool Mmu::MapPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
1.1457 + {
1.1458 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1459 + __NK_ASSERT_DEBUG(aCount);
1.1460 + __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
1.1461 +
1.1462 + TUint count = 0;
1.1463 + if(aCount==1)
1.1464 + {
1.1465 + // get page to map...
1.1466 + TPhysAddr pagePhys = *aPages;
1.1467 + TPte pte = *aPtePtr;
1.1468 + if(!RPageArray::TargetStateIsCommitted(pagePhys))
1.1469 + goto done; // page no longer needs mapping
1.1470 +
1.1471 + // clear type flags...
1.1472 + pagePhys &= ~KPageMask;
1.1473 +
1.1474 + // check nobody has already mapped the page...
1.1475 + if(pte!=KPteUnallocatedEntry)
1.1476 + {
1.1477 + // already mapped...
1.1478 +#ifdef _DEBUG
1.1479 + if((pte^pagePhys)>=TPte(KPageSize))
1.1480 + {
1.1481 + // but different!
1.1482 + Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
1.1483 + __NK_ASSERT_DEBUG(0);
1.1484 + }
1.1485 +#endif
1.1486 +			return true; // return true to keep page table (it already had at least one page mapped)
1.1487 + }
1.1488 +
1.1489 + // map page...
1.1490 + pte = pagePhys|aBlankPte;
1.1491 + TRACE2(("!PTE %x=%x",aPtePtr,pte));
1.1492 + *aPtePtr = pte;
1.1493 + count = 1;
1.1494 +
1.1495 + // clean cache...
1.1496 + CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1.1497 + }
1.1498 + else
1.1499 + {
1.1500 + // check we are only updating a single page table...
1.1501 + __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1.1502 +
1.1503 + // map pages...
1.1504 + TPte* pPte = aPtePtr;
1.1505 + TPte* pPteEnd = aPtePtr+aCount;
1.1506 + do
1.1507 + {
1.1508 + // map page...
1.1509 + TPhysAddr pagePhys = *aPages++;
1.1510 + TPte pte = *pPte++;
1.1511 + if(RPageArray::TargetStateIsCommitted(pagePhys))
1.1512 + {
1.1513 + // clear type flags...
1.1514 + pagePhys &= ~KPageMask;
1.1515 +
1.1516 + // page not being freed, so try and map it...
1.1517 + if(pte!=KPteUnallocatedEntry)
1.1518 + {
1.1519 + // already mapped...
1.1520 +#ifdef _DEBUG
1.1521 + if((pte^pagePhys)>=TPte(KPageSize))
1.1522 + {
1.1523 + // but different!
1.1524 + Kern::Printf("Mmu::MapPages already mapped %x->%x",pagePhys,pte);
1.1525 + __NK_ASSERT_DEBUG(0);
1.1526 + }
1.1527 +#endif
1.1528 + }
1.1529 + else
1.1530 + {
1.1531 + // map page...
1.1532 + pte = pagePhys|aBlankPte;
1.1533 + TRACE2(("!PTE %x=%x",pPte-1,pte));
1.1534 + pPte[-1] = pte;
1.1535 + ++count;
1.1536 + }
1.1537 + }
1.1538 + }
1.1539 + while(pPte!=pPteEnd);
1.1540 +
1.1541 + // clean cache...
1.1542 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1.1543 + }
1.1544 +
1.1545 +done:
1.1546 + // update page counts...
1.1547 + SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
1.1548 + count = pti->IncPageCount(count);
1.1549 + TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
1.1550 + __NK_ASSERT_DEBUG(pti->CheckPageCount());
1.1551 +
1.1552 + // see if page table needs freeing...
1.1553 + TUint keepPt = count | pti->PermanenceCount();
1.1554 +
1.1555 + __NK_ASSERT_DEBUG(!pti->IsDemandPaged()); // check not demand paged page table
1.1556 +
1.1557 + return keepPt;
1.1558 + }
1.1559 +
1.1560 +
1.1561 +/**
1.1562 +Modify a page table entry (PTE) so it maps a new page.
1.1563 +The entry is only updated if the current state of the corresponding page
1.1564 +is RPageArray::ECommitted or RPageArray::EMoving.
1.1565 +
1.1566 +@param aPtePtr Pointer into a page table for the PTE of the page.
1.1567 +@param aPage Pointer to the entry for the page in a memory object's #RPageArray.
1.1568 + The entry contains the physical address of a page together with its
1.1569 + current state (RPageArray::TState).
1.1570 +@param aBlankPte The value to use for each PTE, with the physical address component equal
1.1571 + to zero.
1.1572 +
1.1573 +@pre #MmuLock held.
1.1574 +@post #MmuLock held and has not been released by this function.
1.1575 +*/
1.1576 +void Mmu::RemapPage(TPte* const aPtePtr, TPhysAddr& aPage, TPte aBlankPte)
1.1577 + {
1.1578 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1579 + __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
1.1580 +
1.1581 + // get page to remap...
1.1582 + TPhysAddr pagePhys = aPage;
1.1583 +
1.1584 + // Only remap the page if it is committed or it is being moved and
1.1585 + // no other operation has been performed on the page.
1.1586 + if(!RPageArray::TargetStateIsCommitted(pagePhys))
1.1587 + return; // page no longer needs mapping
1.1588 +
1.1589 +	// Only remap the page if it is currently mapped, i.e. doesn't have an unallocated pte.
1.1590 +	// An unallocated pte can only occur here if a new mapping is being added but it hasn't
1.1591 +	// yet updated all the ptes for the pages that it maps.
1.1592 + TPte pte = *aPtePtr;
1.1593 + if (pte == KPteUnallocatedEntry)
1.1594 + return;
1.1595 +
1.1596 + // clear type flags...
1.1597 + pagePhys &= ~KPageMask;
1.1598 +
1.1599 + SPageInfo* pi = SPageInfo::SafeFromPhysAddr(pagePhys);
1.1600 + if (pi)
1.1601 + {
1.1602 + SPageInfo::TPagedState pagedState = pi->PagedState();
1.1603 + if (pagedState != SPageInfo::EUnpaged)
1.1604 + {
1.1605 + // The page is demand paged. Only remap the page if it is pinned or is currently
1.1606 + // accessible but to the old physical page.
1.1607 + if (pagedState != SPageInfo::EPagedPinned &&
1.1608 + (Mmu::IsPteInaccessible(pte) || (pte^pagePhys) < TPte(KPageSize)))
1.1609 + return;
1.1610 + if (!pi->IsDirty())
1.1611 + {
1.1612 + // Ensure that the page is mapped as read only to prevent pages being marked dirty
1.1613 + // by page moving despite not having been written to
1.1614 + Mmu::MakePteInaccessible(aBlankPte, EFalse);
1.1615 + }
1.1616 + }
1.1617 + }
1.1618 +
1.1619 + // Map the page in the page array entry as this is always the physical
1.1620 + // page that the memory object's page should be mapped to.
1.1621 + pte = pagePhys|aBlankPte;
1.1622 + TRACE2(("!PTE %x=%x",aPtePtr,pte));
1.1623 + *aPtePtr = pte;
1.1624 +
1.1625 + // clean cache...
1.1626 + CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1.1627 + }
1.1628 +
1.1629 +
1.1630 +/**
1.1631 +Modify page table entries (PTEs) so they no longer map any memory pages.
1.1632 +
1.1633 +@param aPtePtr Pointer into a page table for the PTE of the first page.
1.1634 +@param aCount The number of pages to modify.
1.1635 +
1.1636 +@return False, if the page table no longer maps any entries and may be freed.
1.1637 + True otherwise, to indicate that the page table is still needed.
1.1638 +
1.1639 +@pre #MmuLock held.
1.1640 +@post #MmuLock held and has not been released by this function.
1.1641 +*/
1.1642 +TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount)
1.1643 + {
1.1644 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1645 + __NK_ASSERT_DEBUG(aCount);
1.1646 +
1.1647 + TUint count = 0;
1.1648 + if(aCount==1)
1.1649 + {
1.1650 + if(*aPtePtr==KPteUnallocatedEntry)
1.1651 + return true; // page already unmapped
1.1652 +
1.1653 + // unmap page...
1.1654 + ++count;
1.1655 + TPte pte = KPteUnallocatedEntry;
1.1656 + TRACE2(("!PTE %x=%x",aPtePtr,pte));
1.1657 + *aPtePtr = pte;
1.1658 +
1.1659 + // clean cache...
1.1660 + CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1.1661 + }
1.1662 + else
1.1663 + {
1.1664 + // check we are only updating a single page table...
1.1665 + __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1.1666 +
1.1667 + // unmap pages...
1.1668 + TPte* pPte = aPtePtr;
1.1669 + TPte* pPteEnd = aPtePtr+aCount;
1.1670 + do
1.1671 + {
1.1672 + if(*pPte!=KPteUnallocatedEntry)
1.1673 + {
1.1674 + // unmap page...
1.1675 + ++count;
1.1676 + TPte pte = KPteUnallocatedEntry;
1.1677 + TRACE2(("!PTE %x=%x",pPte,pte));
1.1678 + *pPte = pte;
1.1679 + }
1.1680 + }
1.1681 + while(++pPte<pPteEnd);
1.1682 +
1.1683 + if(!count)
1.1684 + return true; // no PTEs changed, so nothing more to do
1.1685 +
1.1686 + // clean cache...
1.1687 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1.1688 + }
1.1689 +
1.1690 + // update page table info...
1.1691 + SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
1.1692 + count = pti->DecPageCount(count);
1.1693 + TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
1.1694 + __NK_ASSERT_DEBUG(pti->CheckPageCount());
1.1695 +
1.1696 + // see if page table needs freeing...
1.1697 + TUint keepPt = count | pti->PermanenceCount();
1.1698 +
1.1699 + return keepPt;
1.1700 + }
1.1701 +
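+// Illustrative sketch (not part of the original source): callers of UnmapPages()
+// typically use the return value to decide whether the page table itself can be
+// reclaimed. 'ptePtr', 'pageCount' and FreePageTable() below are hypothetical
+// placeholders for the real caller's state and clean-up path.
+//
+//     MmuLock::Lock();
+//     TBool keep = TheMmu.UnmapPages(ptePtr, pageCount);
+//     MmuLock::Unlock();
+//     if(!keep)
+//         FreePageTable(ptePtr); // hypothetical helper; page table maps nothing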
1.1702 +
1.1703 +/**
1.1704 +Modify page table entries (PTEs) so they no longer map the given memory pages.
1.1705 +Entries are only updated if the current state of the corresponding page
1.1706 +is 'decommitted' i.e. RPageArray::TargetStateIsDecommitted returns true.
1.1707 +
1.1708 +@param aPtePtr Pointer into a page table for the PTE of the first page.
1.1709 +@param aCount The number of pages to modify.
1.1710 +@param aPages Pointer to the entry for the first page in a memory object's #RPageArray.
1.1711 + Each entry contains the physical address of a page together with its
1.1712 + current state (RPageArray::TState).
1.1713 +
1.1714 +@return False, if the page table no longer maps any entries and may be freed.
1.1715 + True otherwise, to indicate that the page table is still needed.
1.1716 +
1.1717 +@pre #MmuLock held.
1.1718 +@post #MmuLock held and has not been released by this function.
1.1719 +*/
1.1720 +TBool Mmu::UnmapPages(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
1.1721 + {
1.1722 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1723 + __NK_ASSERT_DEBUG(aCount);
1.1724 +
1.1725 + TUint count = 0;
1.1726 + if(aCount==1)
1.1727 + {
1.1728 + if(*aPtePtr==KPteUnallocatedEntry)
1.1729 + return true; // page already unmapped
1.1730 +
1.1731 + if(!RPageArray::TargetStateIsDecommitted(*aPages))
1.1732 + return true; // page has been reallocated
1.1733 +
1.1734 + // unmap page...
1.1735 + ++count;
1.1736 + TPte pte = KPteUnallocatedEntry;
1.1737 + TRACE2(("!PTE %x=%x",aPtePtr,pte));
1.1738 + *aPtePtr = pte;
1.1739 +
1.1740 + // clean cache...
1.1741 + CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1.1742 + }
1.1743 + else
1.1744 + {
1.1745 + // check we are only updating a single page table...
1.1746 + __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1.1747 +
1.1748 + // unmap pages...
1.1749 + TPte* pPte = aPtePtr;
1.1750 + TPte* pPteEnd = aPtePtr+aCount;
1.1751 + do
1.1752 + {
1.1753 + if(RPageArray::TargetStateIsDecommitted(*aPages++) && *pPte!=KPteUnallocatedEntry)
1.1754 + {
1.1755 + // unmap page...
1.1756 + ++count;
1.1757 + TPte pte = KPteUnallocatedEntry;
1.1758 + TRACE2(("!PTE %x=%x",pPte,pte));
1.1759 + *pPte = pte;
1.1760 + }
1.1761 + }
1.1762 + while(++pPte<pPteEnd);
1.1763 +
1.1764 + if(!count)
1.1765 + return true; // no PTEs changed, so nothing more to do
1.1766 +
1.1767 + // clean cache...
1.1768 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1.1769 + }
1.1770 +
1.1771 + // update page table info...
1.1772 + SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
1.1773 + count = pti->DecPageCount(count);
1.1774 + TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,count));
1.1775 + __NK_ASSERT_DEBUG(pti->CheckPageCount());
1.1776 +
1.1777 + // see if page table needs freeing...
1.1778 + TUint keepPt = count | pti->PermanenceCount();
1.1779 +
1.1780 + return keepPt;
1.1781 + }
1.1782 +
1.1783 +
1.1784 +/**
1.1785 +Modify page table entries (PTEs) so the given memory pages are not accessible.
1.1786 +Entries are only updated if the current state of the corresponding page
1.1787 +is RPageArray::ERestrictingNA (or, in the single page case, RPageArray::EMoving).
1.1788 +
1.1789 +@param aPtePtr Pointer into a page table for the PTE of the first page.
1.1790 +@param aCount The number of pages to modify.
1.1791 +@param aPages Pointer to the entry for the first page in a memory object's #RPageArray.
1.1792 + Each entry contains the physical address of a page together with its
1.1793 + current state (RPageArray::TState).
1.1794 +
1.1795 +@pre #MmuLock held.
1.1796 +@post #MmuLock held and has not been released by this function.
1.1797 +*/
1.1798 +void Mmu::RestrictPagesNA(TPte* const aPtePtr, TUint aCount, TPhysAddr* aPages)
1.1799 + {
1.1800 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1801 + __NK_ASSERT_DEBUG(aCount);
1.1802 +
1.1803 + if(aCount==1)
1.1804 + {
1.1805 + TPhysAddr page = *aPages;
1.1806 + TPte pte = *aPtePtr;
1.1807 + RPageArray::TState state = RPageArray::State(page);
1.1808 + if(state != RPageArray::ERestrictingNA && state != RPageArray::EMoving)
1.1809 + return; // page no longer needs restricting
1.1810 +
1.1811 + if(pte==KPteUnallocatedEntry)
1.1812 + return; // page gone
1.1813 +
1.1814 + // restrict page...
1.1815 + pte = Mmu::MakePteInaccessible(pte,false);
1.1816 + TRACE2(("!PTE %x=%x",aPtePtr,pte));
1.1817 + *aPtePtr = pte;
1.1818 +
1.1819 + // clean cache...
1.1820 + CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1.1821 + }
1.1822 + else
1.1823 + {
1.1824 + // check we are only updating a single page table...
1.1825 + __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1.1826 +
1.1827 + // restrict pages...
1.1828 + TPte* pPte = aPtePtr;
1.1829 + TPte* pPteEnd = aPtePtr+aCount;
1.1830 + do
1.1831 + {
1.1832 + TPhysAddr page = *aPages++;
1.1833 + TPte pte = *pPte++;
1.1834 + if(RPageArray::State(page)==RPageArray::ERestrictingNA && pte!=KPteUnallocatedEntry)
1.1835 + {
1.1836 + pte = Mmu::MakePteInaccessible(pte,false);
1.1837 + TRACE2(("!PTE %x=%x",pPte-1,pte));
1.1838 + pPte[-1] = pte;
1.1839 + }
1.1840 + }
1.1841 + while(pPte<pPteEnd);
1.1842 +
1.1843 + // clean cache...
1.1844 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1.1845 + }
1.1846 + }
1.1847 +
1.1848 +
1.1849 +/**
1.1850 +Modify page table entries (PTEs) so they map the given demand paged memory pages.
1.1851 +
1.1852 +Entries are only updated if the current state of the corresponding page
1.1853 +is RPageArray::ECommitted.
1.1854 +
1.1855 +This function is used for demand paged memory when handling a page fault or
1.1856 +memory pinning operation. It will widen the access permission of existing entries
1.1857 +if required to match \a aBlankPte and will 'rejuvenate' the page table.
1.1858 +
1.1859 +@param aPtePtr Pointer into a page table for the PTE of the first page.
1.1860 +@param aCount The number of pages to modify.
1.1861 +@param aPages Pointer to the entry for the first page in a memory object's #RPageArray.
1.1862 + Each entry contains the physical address of a page together with its
1.1863 + current state (RPageArray::TState).
1.1864 +@param aBlankPte The value to use for each PTE, with the physical address component equal
1.1865 + to zero.
1.1866 +
1.1867 +@return False, if the page table no longer maps any entries and may be freed.
1.1868 + True otherwise, to indicate that the page table is still needed.
1.1869 +
1.1870 +@pre #MmuLock held.
1.1871 +@post #MmuLock held (but it may have been released and reacquired by this function).
1.1872 +*/
1.1873 +TBool Mmu::PageInPages(TPte* const aPtePtr, const TUint aCount, TPhysAddr* aPages, TPte aBlankPte)
1.1874 + {
1.1875 + __NK_ASSERT_DEBUG(MmuLock::IsHeld());
1.1876 + __NK_ASSERT_DEBUG(aCount);
1.1877 + __NK_ASSERT_DEBUG(aBlankPte!=KPteUnallocatedEntry);
1.1878 +
1.1879 + TUint count = 0;
1.1880 +
1.1881 + if(aCount==1)
1.1882 + {
1.1883 + // get page to map...
1.1884 + TPhysAddr page = *aPages;
1.1885 + TPte pte = *aPtePtr;
1.1886 + if(!RPageArray::TargetStateIsCommitted(page))
1.1887 + goto done; // page no longer needs mapping
1.1888 +
1.1889 +#ifdef _DEBUG
1.1890 + if(pte!=KPteUnallocatedEntry)
1.1891 + {
1.1892 + if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
1.1893 + !Mmu::IsPteReadOnly(pte))
1.1894 + {
1.1895 + // Page has been mapped before but the physical address is different
1.1896 + // and the page hasn't been moved as it is not inaccessible.
1.1897 + Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
1.1898 + __NK_ASSERT_DEBUG(0);
1.1899 + }
1.1900 + }
1.1901 +#endif
1.1902 + if(!Mmu::IsPteMoreAccessible(aBlankPte,pte))
1.1903 +			return true; // return true to keep page table (it already had at least one page mapped)
1.1904 +
1.1905 + // remap page with new increased permissions...
1.1906 + if(pte==KPteUnallocatedEntry)
1.1907 + count = 1; // we'll be adding a new pte entry, count it
1.1908 + if(!Mmu::IsPteReadOnly(aBlankPte))
1.1909 + ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
1.1910 + pte = (page&~KPageMask)|aBlankPte;
1.1911 + TRACE2(("!PTE %x=%x",aPtePtr,pte));
1.1912 + *aPtePtr = pte;
1.1913 +
1.1914 + // clean cache...
1.1915 + CacheMaintenance::SinglePteUpdated((TLinAddr)aPtePtr);
1.1916 + }
1.1917 + else
1.1918 + {
1.1919 + // check we are only updating a single page table...
1.1920 + __NK_ASSERT_DEBUG(((TLinAddr(aPtePtr)^TLinAddr(aPtePtr+aCount-1))>>KPageTableShift)==0);
1.1921 +
1.1922 + // map pages...
1.1923 + TPte* pPte = aPtePtr;
1.1924 + TPte* pPteEnd = aPtePtr+aCount;
1.1925 + do
1.1926 + {
1.1927 + // map page...
1.1928 + TPhysAddr page = *aPages++;
1.1929 + TPte pte = *pPte++;
1.1930 + if(RPageArray::TargetStateIsCommitted(page))
1.1931 + {
1.1932 +#ifdef _DEBUG
1.1933 + if(pte!=KPteUnallocatedEntry)
1.1934 + {
1.1935 + if ((pte^page)>=TPte(KPageSize) && !Mmu::IsPteInaccessible(pte) &&
1.1936 + !Mmu::IsPteReadOnly(pte))
1.1937 + {
1.1938 + // Page has been mapped before but the physical address is different
1.1939 + // and the page hasn't been moved as it is not inaccessible.
1.1940 + Kern::Printf("Mmu::PageInPages already mapped %x->%x",page,pte);
1.1941 + __NK_ASSERT_DEBUG(0);
1.1942 + }
1.1943 + }
1.1944 +#endif
1.1945 + if(Mmu::IsPteMoreAccessible(aBlankPte,pte))
1.1946 + {
1.1947 + // remap page with new increased permissions...
1.1948 + if(pte==KPteUnallocatedEntry)
1.1949 + ++count; // we'll be adding a new pte entry, count it
1.1950 + if(!Mmu::IsPteReadOnly(aBlankPte))
1.1951 + ThePager.SetWritable(*SPageInfo::FromPhysAddr(page));
1.1952 + pte = (page&~KPageMask)|aBlankPte;
1.1953 + TRACE2(("!PTE %x=%x",pPte-1,pte));
1.1954 + pPte[-1] = pte;
1.1955 + }
1.1956 + }
1.1957 + }
1.1958 + while(pPte!=pPteEnd);
1.1959 +
1.1960 + // clean cache...
1.1961 + CacheMaintenance::MultiplePtesUpdated((TLinAddr)aPtePtr,(TLinAddr)pPte-(TLinAddr)aPtePtr);
1.1962 + }
1.1963 +
1.1964 +done:
1.1965 + // update page counts...
1.1966 + SPageTableInfo* pti = SPageTableInfo::FromPtPtr(aPtePtr);
1.1967 + count = pti->IncPageCount(count);
1.1968 + TRACE2(("pt %x page count=%d",TLinAddr(aPtePtr)&~KPageTableMask,pti->PageCount()));
1.1969 + __NK_ASSERT_DEBUG(pti->CheckPageCount());
1.1970 +
1.1971 + // see if page table needs freeing...
1.1972 + TUint keepPt = count | pti->PermanenceCount();
1.1973 +
1.1974 + // rejuvenate demand paged page tables...
1.1975 + ThePager.RejuvenatePageTable(aPtePtr);
1.1976 +
1.1977 + return keepPt;
1.1978 + }
1.1979 +
1.1980 +
1.1981 +//
1.1982 +// CodeModifier
1.1983 +//
1.1984 +
1.1985 +#ifdef __DEBUGGER_SUPPORT__
1.1986 +
1.1987 +void DoWriteCode(TUint32* aAddress, TUint32 aValue);
1.1988 +
1.1989 +#ifdef __SMP__
1.1990 +
1.1991 +extern "C" void __e32_instruction_barrier();
1.1992 +
1.1993 +class TCodeModifierBroadcast : public TGenericIPI
1.1994 + {
1.1995 +public:
1.1996 + TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue);
1.1997 + static void Isr(TGenericIPI*);
1.1998 + void Go();
1.1999 +public:
1.2000 + TUint32* iAddress;
1.2001 + TUint32 iValue;
1.2002 + volatile TInt iFlag;
1.2003 + };
1.2004 +
1.2005 +TCodeModifierBroadcast::TCodeModifierBroadcast(TUint32* aAddress, TUint32 aValue)
1.2006 + : iAddress(aAddress), iValue(aValue), iFlag(0)
1.2007 + {
1.2008 + }
1.2009 +
1.2010 +void TCodeModifierBroadcast::Isr(TGenericIPI* aPtr)
1.2011 + {
1.2012 + TCodeModifierBroadcast& a = *(TCodeModifierBroadcast*)aPtr;
1.2013 + while (!__e32_atomic_load_acq32(&a.iFlag))
1.2014 + __chill();
1.2015 +#ifdef __BROADCAST_CACHE_MAINTENANCE__
1.2016 + CacheMaintenance::CodeChanged((TLinAddr)a.iAddress, sizeof (TInt), CacheMaintenance::ECodeModifier); // need to do separate Clean-D, Purge-I on each core
1.2017 +#else
1.2018 + __e32_instruction_barrier(); // synchronize instruction execution
1.2019 +#endif
1.2020 + }
1.2021 +
1.2022 +void TCodeModifierBroadcast::Go()
1.2023 + {
1.2024 + NKern::Lock();
1.2025 + QueueAllOther(&Isr);
1.2026 + WaitEntry(); // wait for other cores to stop
1.2027 + DoWriteCode(iAddress, iValue);
1.2028 + iFlag = 1;
1.2029 + __e32_instruction_barrier(); // synchronize instruction execution
1.2030 + WaitCompletion(); // wait for other cores to resume
1.2031 + NKern::Unlock();
1.2032 + }
1.2033 +#endif
1.2034 +
1.2035 +/**
1.2036 +@pre Calling thread must be in critical section
1.2037 +@pre CodeSeg mutex held
1.2038 +*/
1.2039 +TInt CodeModifier::SafeWriteCode(DProcess* aProcess, TLinAddr aAddress, TInt aSize, TUint aValue, void* aOldValue)
1.2040 + {
1.2041 + __ASSERT_CRITICAL;
1.2042 + Mmu& m=TheMmu;
1.2043 + RamAllocLock::Lock();
1.2044 + MmuLock::Lock();
1.2045 + __UNLOCK_GUARD_START(MmuLock);
1.2046 +
1.2047 + // Check aProcess is still alive by opening a reference on its os asid.
1.2048 + TInt osAsid = ((DMemModelProcess*)aProcess)->TryOpenOsAsid();
1.2049 + if (osAsid < 0)
1.2050 + {
1.2051 + __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - zombie process"));
1.2052 + __UNLOCK_GUARD_END(MmuLock);
1.2053 + MmuLock::Unlock();
1.2054 + RamAllocLock::Unlock();
1.2055 + return KErrBadDescriptor;
1.2056 + }
1.2057 +
1.2058 +	// Find the physical address of the page the breakpoint belongs to.
1.2059 + TPhysAddr physAddr = Mmu::LinearToPhysical(aAddress, osAsid);
1.2060 + __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - PA:%x", physAddr));
1.2061 +
1.2062 +
1.2063 + if (physAddr==KPhysAddrInvalid)
1.2064 + {
1.2065 + __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - invalid VA"));
1.2066 + __UNLOCK_GUARD_END(MmuLock);
1.2067 + MmuLock::Unlock();
1.2068 + RamAllocLock::Unlock();
1.2069 + // The os asid is no longer required.
1.2070 + ((DMemModelProcess*)aProcess)->CloseOsAsid();
1.2071 + return KErrBadDescriptor;
1.2072 + }
1.2073 +
1.2074 +	// Temporarily map the physical page.
1.2075 + TLinAddr tempAddr = m.MapTemp(physAddr&~KPageMask, aAddress>>KPageShift);
1.2076 + tempAddr |= aAddress & KPageMask;
1.2077 + __KTRACE_OPT(KDEBUGGER,Kern::Printf("CodeModifier::SafeWriteCode - tempAddr:%x",tempAddr));
1.2078 +
1.2079 + TInt r = KErrBadDescriptor;
1.2080 + TUint32* ptr = (TUint32*)(tempAddr&~3);
1.2081 + TUint32 oldWord;
1.2082 +
1.2083 + if(Kern::SafeRead(ptr,&oldWord,sizeof(oldWord))==0 // safely read the original value...
1.2084 + && Kern::SafeWrite(ptr,&oldWord,sizeof(oldWord))==0 ) // and write it back
1.2085 + {
1.2086 + // We have successfully probed the memory by reading and writing to it
1.2087 + // so we assume it is now safe to access without generating exceptions.
1.2088 + // If this is wrong it will kill the system horribly.
1.2089 +
1.2090 + TUint32 newWord;
1.2091 + TUint badAlign;
1.2092 + TUint shift = (aAddress&3)*8;
1.2093 +
1.2094 + switch(aSize)
1.2095 + {
1.2096 + case 1: // 1 byte value
1.2097 + badAlign = 0;
1.2098 + *(TUint8*)aOldValue = oldWord>>shift;
1.2099 + newWord = (oldWord&~(0xff<<shift)) | ((aValue&0xff)<<shift);
1.2100 + break;
1.2101 +
1.2102 + case 2: // 2 byte value
1.2103 + badAlign = tempAddr&1;
1.2104 + if(!badAlign)
1.2105 + *(TUint16*)aOldValue = oldWord>>shift;
1.2106 + newWord = (oldWord&~(0xffff<<shift)) | ((aValue&0xffff)<<shift);
1.2107 + break;
1.2108 +
1.2109 + default: // 4 byte value
1.2110 + badAlign = tempAddr&3;
1.2111 + if(!badAlign)
1.2112 + *(TUint32*)aOldValue = oldWord;
1.2113 + newWord = aValue;
1.2114 + break;
1.2115 + }
1.2116 +
1.2117 + if(!badAlign)
1.2118 + {
1.2119 + // write the new value...
1.2120 +#ifdef __SMP__
1.2121 + TCodeModifierBroadcast b(ptr, newWord);
1.2122 + b.Go();
1.2123 +#else
1.2124 + DoWriteCode(ptr, newWord);
1.2125 +#endif
1.2126 + r = KErrNone;
1.2127 + }
1.2128 + }
1.2129 +
1.2130 + __UNLOCK_GUARD_END(MmuLock);
1.2131 + m.UnmapTemp();
1.2132 + MmuLock::Unlock();
1.2133 + RamAllocLock::Unlock();
1.2134 + // The os asid is no longer required.
1.2135 + ((DMemModelProcess*)aProcess)->CloseOsAsid();
1.2136 + return r;
1.2137 + }
1.2138 +
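+// Illustrative sketch (not part of the original source): a debug agent planting a
+// 4 byte breakpoint might use SafeWriteCode() roughly as follows. 'process',
+// 'codeAddr' and KBreakpointInstruction are hypothetical values supplied by the
+// debugger; the call must be made from a thread in a critical section with the
+// CodeSeg mutex held, as stated in the preconditions above.
+//
+//     TUint32 originalInstruction;
+//     TInt r = CodeModifier::SafeWriteCode(process, codeAddr, sizeof(TUint32),
+//                                          KBreakpointInstruction, &originalInstruction);
+//     // r==KErrNone on success, KErrBadDescriptor if the address or process is
+//     // no longer valid; restore later by writing originalInstruction back.
+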
1.2139 +/**
1.2140 +@pre Calling thread must be in critical section
1.2141 +@pre CodeSeg mutex held
1.2142 +*/
1.2143 +void DoWriteCode(TUint32* aAddress, TUint32 aValue)
1.2144 + {
1.2145 + // We do not want to be interrupted by e.g. ISR that will run altered code before IMB-Range.
1.2146 + // Therefore, copy data and clean/invalidate caches with interrupts disabled.
1.2147 + TInt irq = NKern::DisableAllInterrupts();
1.2148 + *aAddress = aValue;
1.2149 + CacheMaintenance::CodeChanged((TLinAddr)aAddress, sizeof(TUint32), CacheMaintenance::ECodeModifier);
1.2150 + NKern::RestoreInterrupts(irq);
1.2151 + }
1.2152 +
1.2153 +#endif //__DEBUGGER_SUPPORT__
1.2154 +
1.2155 +
1.2156 +
1.2157 +//
1.2158 +// Virtual pinning
1.2159 +//
1.2160 +
1.2161 +TInt M::CreateVirtualPinObject(TVirtualPinObject*& aPinObject)
1.2162 + {
1.2163 + aPinObject = (TVirtualPinObject*)new DVirtualPinMapping;
1.2164 + return aPinObject != NULL ? KErrNone : KErrNoMemory;
1.2165 + }
1.2166 +
1.2167 +TInt M::PinVirtualMemory(TVirtualPinObject* aPinObject, TLinAddr aStart, TUint aSize, DThread* aThread)
1.2168 + {
1.2169 + __ASSERT_CRITICAL;
1.2170 + TUint offsetInMapping;
1.2171 + TUint mapInstanceCount;
1.2172 + DMemoryMapping* mapping = MM::FindMappingInThread( (DMemModelThread*)aThread,
1.2173 + aStart,
1.2174 + aSize,
1.2175 + offsetInMapping,
1.2176 + mapInstanceCount);
1.2177 + TInt r = KErrBadDescriptor;
1.2178 + if (mapping)
1.2179 + {
1.2180 + TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
1.2181 + if(mapping->IsPinned())
1.2182 + {
1.2183 + // Mapping for specified virtual address is pinned so we don't need to
1.2184 + // do anything. Also, we can't safely pin the memory in this case
1.2185 + // anyway, as pinned mappings may move between memory objects
1.2186 + r = KErrNone;
1.2187 + }
1.2188 + else
1.2189 + {
1.2190 + MmuLock::Lock();
1.2191 + DMemoryObject* memory = mapping->Memory();
1.2192 + if (mapInstanceCount != mapping->MapInstanceCount() ||
1.2193 + !memory || !memory->IsDemandPaged())
1.2194 + {
1.2195 + // mapping has been reused, no memory, or it's not paged, so no need to pin...
1.2196 + MmuLock::Unlock();
1.2197 + r = KErrNone;
1.2198 + }
1.2199 + else
1.2200 + {
1.2201 + // paged memory needs pinning...
1.2202 + // Open a reference on the memory so it doesn't get deleted.
1.2203 + memory->Open();
1.2204 + MmuLock::Unlock();
1.2205 +
1.2206 + TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
1.2207 + r = ((DVirtualPinMapping*)aPinObject)->Pin( memory, startInMemory, count, mapping->Permissions(),
1.2208 + mapping, mapInstanceCount);
1.2209 + memory->Close();
1.2210 + }
1.2211 + }
1.2212 + mapping->Close();
1.2213 + }
1.2214 + return r;
1.2215 + }
1.2216 +
1.2217 +TInt M::CreateAndPinVirtualMemory(TVirtualPinObject*& aPinObject, TLinAddr aStart, TUint aSize)
1.2218 + {
1.2219 + __ASSERT_CRITICAL;
1.2220 + aPinObject = 0;
1.2221 + TUint offsetInMapping;
1.2222 + TUint mapInstanceCount;
1.2223 + DMemoryMapping* mapping = MM::FindMappingInThread( (DMemModelThread*)&Kern::CurrentThread(),
1.2224 + aStart,
1.2225 + aSize,
1.2226 + offsetInMapping,
1.2227 + mapInstanceCount);
1.2228 + TInt r = KErrBadDescriptor;
1.2229 + if (mapping)
1.2230 + {
1.2231 + TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
1.2232 + if(mapping->IsPinned())
1.2233 + {
1.2234 + // Mapping for specified virtual address is pinned so we don't need to
1.2235 + // do anything. Also, we can't safely pin the memory in this case
1.2236 + // anyway, as pinned mappings may move between memory objects
1.2237 + r = KErrNone;
1.2238 + }
1.2239 + else
1.2240 + {
1.2241 + MmuLock::Lock();
1.2242 + DMemoryObject* memory = mapping->Memory();
1.2243 + if (mapInstanceCount != mapping->MapInstanceCount() ||
1.2244 + !memory || !memory->IsDemandPaged())
1.2245 + {
1.2246 + // mapping has been reused, no memory, or it's not paged, so no need to pin...
1.2247 + MmuLock::Unlock();
1.2248 + r = KErrNone;
1.2249 + }
1.2250 + else
1.2251 + {// The memory is demand paged so create a pin object and pin it.
1.2252 + // Open a reference on the memory so it doesn't get deleted.
1.2253 + memory->Open();
1.2254 + MmuLock::Unlock();
1.2255 + r = CreateVirtualPinObject(aPinObject);
1.2256 + if (r == KErrNone)
1.2257 + {
1.2258 + TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
1.2259 + r = ((DVirtualPinMapping*)aPinObject)->Pin( memory, startInMemory, count, mapping->Permissions(),
1.2260 + mapping, mapInstanceCount);
1.2261 + if (r != KErrNone)
1.2262 + {// Failed to pin the memory so pin object is not required.
1.2263 + DestroyVirtualPinObject(aPinObject);
1.2264 + }
1.2265 + }
1.2266 + memory->Close();
1.2267 + }
1.2268 + }
1.2269 + mapping->Close();
1.2270 + }
1.2271 + return r;
1.2272 + }
1.2273 +
1.2274 +void M::UnpinVirtualMemory(TVirtualPinObject* aPinObject)
1.2275 + {
1.2276 + DVirtualPinMapping* mapping = (DVirtualPinMapping*)aPinObject;
1.2277 + if (mapping->IsAttached())
1.2278 + mapping->Unpin();
1.2279 + }
1.2280 +
1.2281 +void M::DestroyVirtualPinObject(TVirtualPinObject*& aPinObject)
1.2282 + {
1.2283 + DVirtualPinMapping* mapping = (DVirtualPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
1.2284 + if (mapping)
1.2285 + {
1.2286 + if (mapping->IsAttached())
1.2287 + mapping->Unpin();
1.2288 + mapping->AsyncClose();
1.2289 + }
1.2290 + }
1.2291 +
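+// Illustrative sketch (not part of the original source): the expected lifecycle of
+// a virtual pin object, as implied by the four functions above. 'addr', 'size' and
+// 'thread' are hypothetical values describing the client memory; the calls must be
+// made from a thread in a critical section.
+//
+//     TVirtualPinObject* pin = NULL;
+//     TInt r = M::CreateVirtualPinObject(pin);
+//     if(r==KErrNone)
+//         {
+//         r = M::PinVirtualMemory(pin, addr, size, thread);
+//         if(r==KErrNone)
+//             {
+//             // ... access the memory without it being paged out ...
+//             M::UnpinVirtualMemory(pin);
+//             }
+//         M::DestroyVirtualPinObject(pin);
+//         }
+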
1.2292 +//
1.2293 +// Physical pinning
1.2294 +//
1.2295 +
1.2296 +TInt M::CreatePhysicalPinObject(TPhysicalPinObject*& aPinObject)
1.2297 + {
1.2298 + aPinObject = (TPhysicalPinObject*)new DPhysicalPinMapping;
1.2299 + return aPinObject != NULL ? KErrNone : KErrNoMemory;
1.2300 + }
1.2301 +
1.2302 +TInt M::PinPhysicalMemory(TPhysicalPinObject* aPinObject, TLinAddr aStart, TUint aSize, TBool aReadOnly,
1.2303 + TPhysAddr& aAddress, TPhysAddr* aPages, TUint32& aMapAttr, TUint& aColour, DThread* aThread)
1.2304 + {
1.2305 + __ASSERT_CRITICAL;
1.2306 + TUint offsetInMapping;
1.2307 + TUint mapInstanceCount;
1.2308 + DMemoryMapping* mapping = MM::FindMappingInThread( (DMemModelThread*)aThread,
1.2309 + aStart,
1.2310 + aSize,
1.2311 + offsetInMapping,
1.2312 + mapInstanceCount);
1.2313 + TInt r = KErrBadDescriptor;
1.2314 + if (mapping)
1.2315 + {
1.2316 + TInt count = ((aStart & KPageMask) + aSize + KPageMask) >> KPageShift;
1.2317 +
1.2318 + MmuLock::Lock();
1.2319 + DMemoryObject* memory = mapping->Memory();
1.2320 + if (mapInstanceCount == mapping->MapInstanceCount() && memory)
1.2321 + {
1.2322 + memory->Open();
1.2323 + MmuLock::Unlock();
1.2324 +
1.2325 + TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
1.2326 + TMappingPermissions permissions = aReadOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
1.2327 + r = ((DPhysicalPinMapping*)aPinObject)->Pin(memory, startInMemory, count, permissions);
1.2328 + if (r == KErrNone)
1.2329 + {
1.2330 + r = ((DPhysicalPinMapping*)aPinObject)->PhysAddr(0, count, aAddress, aPages);
1.2331 + if (r>=KErrNone)
1.2332 + {
1.2333 + r = KErrNone; //Do not report discontiguous memory in return value.
1.2334 + const TMappingAttributes2& mapAttr2 =
1.2335 + MM::LegacyMappingAttributes(memory->Attributes(), mapping->Permissions());
1.2336 + *(TMappingAttributes2*)&aMapAttr = mapAttr2;
1.2337 + }
1.2338 + else
1.2339 + UnpinPhysicalMemory(aPinObject);
1.2340 + }
1.2341 + memory->Close();
1.2342 + }
1.2343 + else // mapping has been reused or no memory...
1.2344 + {
1.2345 + MmuLock::Unlock();
1.2346 + }
1.2347 + mapping->Close();
1.2348 + }
1.2349 +	aColour = (aStart >> KPageShift) & KPageColourMask;
1.2350 + return r;
1.2351 + }
1.2352 +
1.2353 +void M::UnpinPhysicalMemory(TPhysicalPinObject* aPinObject)
1.2354 + {
1.2355 + DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)aPinObject;
1.2356 + if (mapping->IsAttached())
1.2357 + mapping->Unpin();
1.2358 + }
1.2359 +
1.2360 +void M::DestroyPhysicalPinObject(TPhysicalPinObject*& aPinObject)
1.2361 + {
1.2362 + DPhysicalPinMapping* mapping = (DPhysicalPinMapping*)__e32_atomic_swp_ord_ptr(&aPinObject, 0);
1.2363 + if (mapping)
1.2364 + {
1.2365 + if (mapping->IsAttached())
1.2366 + mapping->Unpin();
1.2367 + mapping->AsyncClose();
1.2368 + }
1.2369 + }
1.2370 +
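+// Illustrative sketch (not part of the original source): the expected lifecycle of
+// a physical pin object, as implied by the functions above. 'addr', 'size', 'thread'
+// and 'pageList' (a TPhysAddr array with one entry per pinned page) are hypothetical.
+//
+//     TPhysicalPinObject* pin = NULL;
+//     TPhysAddr physAddr;
+//     TUint32 mapAttr;
+//     TUint colour;
+//     TInt r = M::CreatePhysicalPinObject(pin);
+//     if(r==KErrNone)
+//         {
+//         r = M::PinPhysicalMemory(pin, addr, size, EFalse /*writable*/, physAddr,
+//                                  pageList, mapAttr, colour, thread);
+//         if(r==KErrNone)
+//             {
+//             // ... program a DMA transfer using pageList / physAddr ...
+//             M::UnpinPhysicalMemory(pin);
+//             }
+//         M::DestroyPhysicalPinObject(pin);
+//         }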
1.2371 +
1.2372 +//
1.2373 +// Kernel map and pin.
1.2374 +//
1.2375 +
1.2376 +TInt M::CreateKernelMapObject(TKernelMapObject*& aMapObject, TUint aMaxReserveSize)
1.2377 + {
1.2378 + DKernelPinMapping* pinObject = new DKernelPinMapping();
1.2379 + aMapObject = (TKernelMapObject*) pinObject;
1.2380 + if (pinObject == NULL)
1.2381 + {
1.2382 + return KErrNoMemory;
1.2383 + }
1.2384 + // Ensure we reserve enough bytes for all possible alignments of the start and
1.2385 + // end of the region to map.
1.2386 + TUint reserveBytes = aMaxReserveSize? ((aMaxReserveSize + KPageMask) & ~KPageMask) + KPageSize : 0;
1.2387 + TInt r = pinObject->Construct(reserveBytes);
1.2388 + if (r != KErrNone)
1.2389 + {// Failed so delete the kernel mapping object.
1.2390 + pinObject->Close();
1.2391 + aMapObject = NULL;
1.2392 + }
1.2393 + return r;
1.2394 + }
1.2395 +
1.2396 +
1.2397 +TInt M::MapAndPinMemory(TKernelMapObject* aMapObject, DThread* aThread, TLinAddr aStart,
1.2398 + TUint aSize, TUint aMapAttributes, TLinAddr& aKernelAddr, TPhysAddr* aPages)
1.2399 + {
1.2400 + __ASSERT_CRITICAL;
1.2401 + TUint offsetInMapping;
1.2402 + TUint mapInstanceCount;
1.2403 + DMemoryMapping* mapping = MM::FindMappingInThread( (DMemModelThread*)aThread,
1.2404 + aStart,
1.2405 + aSize,
1.2406 + offsetInMapping,
1.2407 + mapInstanceCount);
1.2408 + TInt r = KErrBadDescriptor;
1.2409 + if (mapping)
1.2410 + {
1.2411 + DKernelPinMapping* kernelMap = (DKernelPinMapping*)aMapObject;
1.2412 + TInt count = (((aStart + aSize + KPageMask) & ~KPageMask) - (aStart & ~KPageMask)) >> KPageShift;
1.2413 + if (kernelMap->iReservePages && kernelMap->iReservePages < count)
1.2414 + {
1.2415 + mapping->Close();
1.2416 + return KErrArgument;
1.2417 + }
1.2418 +
1.2419 + MmuLock::Lock();
1.2420 + DMemoryObject* memory = mapping->Memory();
1.2421 + if (mapInstanceCount == mapping->MapInstanceCount() && memory)
1.2422 + {
1.2423 + memory->Open();
1.2424 + MmuLock::Unlock();
1.2425 +
1.2426 + TUint startInMemory = (offsetInMapping >> KPageShift) + mapping->iStartIndex;
1.2427 + TBool readOnly = aMapAttributes & Kern::EKernelMap_ReadOnly;
1.2428 + TMappingPermissions permissions = readOnly ? ESupervisorReadOnly : ESupervisorReadWrite;
1.2429 + r = kernelMap->MapAndPin(memory, startInMemory, count, permissions);
1.2430 + if (r == KErrNone)
1.2431 + {
1.2432 + __NK_ASSERT_DEBUG(!kernelMap->IsUserMapping());
1.2433 + aKernelAddr = kernelMap->Base();
1.2434 + TPhysAddr contigAddr; // Ignore this value as aPages will be populated
1.2435 + // whether the memory is contiguous or not.
1.2436 + r = kernelMap->PhysAddr(0, count, contigAddr, aPages);
1.2437 + if (r>=KErrNone)
1.2438 + {
1.2439 + r = KErrNone; //Do not report discontiguous memory in return value.
1.2440 + }
1.2441 + else
1.2442 + {
1.2443 + UnmapAndUnpinMemory((TKernelMapObject*)kernelMap);
1.2444 + }
1.2445 + }
1.2446 + memory->Close();
1.2447 + }
1.2448 + else // mapping has been reused or no memory...
1.2449 + {
1.2450 + MmuLock::Unlock();
1.2451 + }
1.2452 + mapping->Close();
1.2453 + }
1.2454 + return r;
1.2455 + }
1.2456 +
1.2457 +
1.2458 +void M::UnmapAndUnpinMemory(TKernelMapObject* aMapObject)
1.2459 + {
1.2460 + DKernelPinMapping* mapping = (DKernelPinMapping*)aMapObject;
1.2461 + if (mapping->IsAttached())
1.2462 + mapping->UnmapAndUnpin();
1.2463 + }
1.2464 +
1.2465 +
1.2466 +void M::DestroyKernelMapObject(TKernelMapObject*& aMapObject)
1.2467 + {
1.2468 + DKernelPinMapping* mapping = (DKernelPinMapping*)__e32_atomic_swp_ord_ptr(&aMapObject, 0);
1.2469 + if (mapping)
1.2470 + {
1.2471 + if (mapping->IsAttached())
1.2472 + mapping->UnmapAndUnpin();
1.2473 + mapping->AsyncClose();
1.2474 + }
1.2475 + }
1.2476 +
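+// Illustrative sketch (not part of the original source): the expected lifecycle of
+// a kernel map object, as implied by the functions above. 'maxSize', 'addr', 'size',
+// 'thread' and 'pageList' are hypothetical; passing 0 as the attributes requests a
+// writable mapping (Kern::EKernelMap_ReadOnly requests read-only).
+//
+//     TKernelMapObject* kernelMap = NULL;
+//     TLinAddr kernelAddr;
+//     TInt r = M::CreateKernelMapObject(kernelMap, maxSize);
+//     if(r==KErrNone)
+//         {
+//         r = M::MapAndPinMemory(kernelMap, thread, addr, size, 0, kernelAddr, pageList);
+//         if(r==KErrNone)
+//             {
+//             // ... access the client memory from kernel context via kernelAddr ...
+//             M::UnmapAndUnpinMemory(kernelMap);
+//             }
+//         M::DestroyKernelMapObject(kernelMap);
+//         }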
1.2477 +
1.2478 +//
1.2479 +// Cache sync operations
1.2480 +//
1.2481 +
1.2482 +//@pre As for MASK_THREAD_STANDARD
1.2483 +void Mmu::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
1.2484 + {
1.2485 +	//Jump over the pages we do not have to sync, advancing the mapping colour to match.
1.2486 +	aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
1.2487 +	aPages += aOffset>>KPageShift;
1.2488 +	aOffset &= KPageMask;
1.2489 +
1.2490 + //Calculate page table entry for the temporary mapping.
1.2491 + TUint pteType = PteType(ESupervisorReadWrite,true);
1.2492 + TMappingAttributes2 mapAttr2(aMapAttr);
1.2493 + TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
1.2494 +
1.2495 +	while (aSize) //A single pass of the loop operates within page boundaries.
1.2496 + {
1.2497 + TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
1.2498 +
1.2499 + NKern::ThreadEnterCS();
1.2500 + Kern::MutexWait(*iPhysMemSyncMutex);
1.2501 +
1.2502 + TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
1.2503 + CacheMaintenance::MakeCPUChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
1.2504 + iPhysMemSyncTemp.Unmap();
1.2505 +
1.2506 + Kern::MutexSignal(*iPhysMemSyncMutex);
1.2507 + NKern::ThreadLeaveCS();
1.2508 +
1.2509 + aSize-=sizeInLoopPass; // Remaining bytes to sync
1.2510 + aOffset=0; // In all the pages after the first, sync will always start with zero offset.
1.2511 + aPages++; // Point to the next page
1.2512 + aColour = (aColour+1) & KPageColourMask;
1.2513 + }
1.2514 + }
1.2515 +
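+// Worked example (not part of the original source) of how the loop above splits a
+// request across page boundaries, assuming 4K pages: with aOffset==0xE00 and
+// aSize==0x600, the first pass syncs Min(0x1000, 0xE00+0x600)-0xE00 = 0x200 bytes
+// at offset 0xE00 of the first page; the second pass then syncs the remaining
+// 0x400 bytes from offset 0 of the next page, with the colour advanced by one.
+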
1.2516 +//@pre As for MASK_THREAD_STANDARD
1.2517 +void Mmu::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
1.2518 + {
1.2519 +	//Jump over the pages we do not have to sync, advancing the mapping colour to match.
1.2520 +	aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
1.2521 +	aPages += aOffset>>KPageShift;
1.2522 +	aOffset &= KPageMask;
1.2523 +
1.2524 + //Calculate page table entry for the temporary mapping.
1.2525 + TUint pteType = PteType(ESupervisorReadWrite,true);
1.2526 + TMappingAttributes2 mapAttr2(aMapAttr);
1.2527 + TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
1.2528 +
1.2529 +	while (aSize) //A single pass of the loop operates within page boundaries.
1.2530 + {
1.2531 + TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
1.2532 +
1.2533 + NKern::ThreadEnterCS();
1.2534 + Kern::MutexWait(*iPhysMemSyncMutex);
1.2535 +
1.2536 + TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
1.2537 + CacheMaintenance::PrepareMemoryForExternalWrites(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
1.2538 + iPhysMemSyncTemp.Unmap();
1.2539 +
1.2540 + Kern::MutexSignal(*iPhysMemSyncMutex);
1.2541 + NKern::ThreadLeaveCS();
1.2542 +
1.2543 + aSize-=sizeInLoopPass; // Remaining bytes to sync
1.2544 + aOffset=0; // In all the pages after the first, sync will always start with zero offset.
1.2545 + aPages++; // Point to the next page
1.2546 + aColour = (aColour+1) & KPageColourMask;
1.2547 + }
1.2548 + }
1.2549 +
1.2550 +//@pre As for MASK_THREAD_STANDARD
1.2551 +void Mmu::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
1.2552 + {
1.2553 +	//Jump over the pages we do not have to sync, advancing the mapping colour to match.
1.2554 +	aColour = (aColour + (aOffset>>KPageShift)) & KPageColourMask;
1.2555 +	aPages += aOffset>>KPageShift;
1.2556 +	aOffset &= KPageMask;
1.2557 +
1.2558 + //Calculate page table entry for the temporary mapping.
1.2559 + TUint pteType = PteType(ESupervisorReadWrite,true);
1.2560 + TMappingAttributes2 mapAttr2(aMapAttr);
1.2561 + TPte pte = Mmu::BlankPte((TMemoryAttributes)mapAttr2.Type(), pteType);
1.2562 +
1.2563 +	while (aSize) //A single pass of the loop operates within page boundaries.
1.2564 + {
1.2565 + TUint sizeInLoopPass = Min (KPageSize, aOffset+aSize) - aOffset; //The size of the region in this pass.
1.2566 +
1.2567 + NKern::ThreadEnterCS();
1.2568 + Kern::MutexWait(*iPhysMemSyncMutex);
1.2569 +
1.2570 + TLinAddr linAddr = iPhysMemSyncTemp.Map(*aPages, aColour, pte);
1.2571 + CacheMaintenance::MakeExternalChangesVisible(linAddr+aOffset, sizeInLoopPass, aMapAttr, *aPages+aOffset);
1.2572 + iPhysMemSyncTemp.Unmap();
1.2573 +
1.2574 + Kern::MutexSignal(*iPhysMemSyncMutex);
1.2575 + NKern::ThreadLeaveCS();
1.2576 +
1.2577 + aSize-=sizeInLoopPass; // Remaining bytes to sync
1.2578 + aOffset=0; // In all the pages after the first, sync will always start with zero offset.
1.2579 + aPages++; // Point to the next page
1.2580 + aColour = (aColour+1) & KPageColourMask;
1.2581 + }
1.2582 + }
1.2583 +
1.2584 +EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaWrite(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
1.2585 + {
1.2586 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaWrite");
1.2587 + TheMmu.SyncPhysicalMemoryBeforeDmaWrite(aPages, aColour, aOffset, aSize, aMapAttr);
1.2588 + return KErrNone;
1.2589 + }
1.2590 +
1.2591 +EXPORT_C TInt Cache::SyncPhysicalMemoryBeforeDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
1.2592 + {
1.2593 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryBeforeDmaRead");
1.2594 + TheMmu.SyncPhysicalMemoryBeforeDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
1.2595 + return KErrNone;
1.2596 + }
1.2597 +
1.2598 +EXPORT_C TInt Cache::SyncPhysicalMemoryAfterDmaRead(TPhysAddr* aPages, TUint aColour, TUint aOffset, TUint aSize, TUint32 aMapAttr)
1.2599 + {
1.2600 + CHECK_PRECONDITIONS(MASK_THREAD_STANDARD,"Cache::SyncPhysicalMemoryAfterDmaRead");
1.2601 + TheMmu.SyncPhysicalMemoryAfterDmaRead(aPages, aColour, aOffset, aSize, aMapAttr);
1.2602 + return KErrNone;
1.2603 + }
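+
+// Illustrative sketch (not part of the original source): the intended calling
+// sequence for the exported DMA synchronisation APIs above, assuming 'pageList',
+// 'colour', 'offset', 'size' and 'mapAttr' describe a buffer that has already been
+// pinned (for example via M::PinPhysicalMemory). The direction glosses follow the
+// cache maintenance each function performs.
+//
+//     // Before a transfer in which the DMA engine reads memory (memory -> device):
+//     // makes CPU-side changes visible to the engine.
+//     Cache::SyncPhysicalMemoryBeforeDmaWrite(pageList, colour, offset, size, mapAttr);
+//
+//     // Before a transfer in which the DMA engine writes memory (device -> memory):
+//     Cache::SyncPhysicalMemoryBeforeDmaRead(pageList, colour, offset, size, mapAttr);
+//     // ... DMA transfer completes ...
+//     // Afterwards, make the engine's writes visible to the CPU:
+//     Cache::SyncPhysicalMemoryAfterDmaRead(pageList, colour, offset, size, mapAttr);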