--- /dev/null Thu Jan 01 00:00:00 1970 +0000
+++ b/os/kernelhwsrv/kernel/eka/memmodel/epoc/multiple/mdefrag.cpp Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,316 @@
+// Copyright (c) 2006-2009 Nokia Corporation and/or its subsidiary(-ies).
+// All rights reserved.
+// This component and the accompanying materials are made available
+// under the terms of the License "Eclipse Public License v1.0"
+// which accompanies this distribution, and is available
+// at the URL "http://www.eclipse.org/legal/epl-v10.html".
+//
+// Initial Contributors:
+// Nokia Corporation - initial contribution.
+//
+// Contributors:
+//
+// Description:
+// e32\memmodel\epoc\multiple\mdefrag.cpp
+//
+//
+#include <memmodel.h>
+#include <defrag.h>
+#include "mmboot.h"
+#include <ramalloc.h>
+#include "cache_maintenance.h"
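+
+// Each of the page-moving routines below follows the same basic sequence:
+//  1. allocate a replacement RAM page
+//  2. map the old and new pages at temporary kernel addresses
+//  3. copy the contents and substitute the new page in the page table(s)
+//  4. carry out whatever cache maintenance the old page needs before reuse
+//  5. free the old physical page
+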
+/*
+ * Move a kernel page from aOld to aNew, updating the page table in aChunk.
+ * Enter with system locked, exit with system unlocked (!!)
+ * Must hold RAM alloc mutex.
+ */
+TInt Mmu::MoveKernelPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("Defrag::MoveKernelPage() off=%08x old=%08x",aOffset,aOld));
+	Mmu& m=Mmu::Get();
+
+	// Release the system lock - the kernel chunks can't ever be freed
+	// and the ramalloc mutex protects us from decommit.
+	NKern::UnlockSystem();
+
+	// Allocate new page, map old and new
+	TPhysAddr newPage;
+	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
+		return KErrNoMemory;
+	TLinAddr vOld = m.MapTemp(aOld, aOffset);	// enough of address for page colour
+	TLinAddr vNew = m.MapSecondTemp(newPage, aOffset);
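+	// (The offset supplies the low address bits, so the temporary mappings share
+	// the cache colour of the page's real mapping on VIPT caches.)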
+
+	// With interrupts disabled, copy the page's contents and remap its PTE.
+	// The system lock is also required for Substitute().
+	NKern::LockSystem();
+	TInt irq = NKern::DisableAllInterrupts();
+	pagecpy((TAny*)vNew, (TAny*)vOld);
+	aChunk->Substitute(aOffset, aOld, newPage);
+	NKern::RestoreInterrupts(irq);
+	NKern::UnlockSystem();
+
+	// Before we sort out the cache for the old page, check whether the mapping
+	// attributes required for that operation are what we have at the moment.
+	if (CacheMaintenance::TemporaryMapping() != EMemAttNormalCached)
+		{
+		// Remove the temporary mapping and map the old page as required by CacheMaintenance
+		m.UnmapTemp();
+		vOld = m.MapTemp(aOld, aOffset, 1, CacheMaintenance::TemporaryMapping());
+		}
+
+	// Sort out cache for the memory not in use anymore.
+	CacheMaintenance::PageToReuse(vOld, EMemAttNormalCached, aOld);
+
+	// Unalias pages
+	m.UnmapTemp();
+	m.UnmapSecondTemp();
+
+	// Free old page
+#ifdef _DEBUG
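+	// Setting bit 0 marks the argument as a single contiguous physical address
+	// rather than a pointer to a list of page addresses.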
+	m.ClearPages(1, (TPhysAddr*)(aOld|1));
+#endif
+	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);
+
+	aNew = newPage;
+	return KErrNone;
+	}
+
+/*
+ * Move a code page from aOld to aNew, updating all page tables which refer
+ * to it.
+ * Enter with system locked, exit with system unlocked (!!)
+ * Must hold RAM alloc mutex.
+ */
+TInt Mmu::MoveCodeSegMemoryPage(DMemModelCodeSegMemory* aCodeSegMemory, TUint32 aOffset, TPhysAddr aOld,
+		TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("Defrag::MoveCodeSegMemoryPage() off=%08x old=%08x",aOffset,aOld));
+	Mmu& m=Mmu::Get();
+
+	// If the code seg has not finished loading yet, we can't move the page the easy
+	// way; also, if it is being unloaded, the codeseg pointer will already be gone.
+	DCodeSeg* codeseg = aCodeSegMemory->iCodeSeg;
+	if (!codeseg || !(codeseg->iMark & DCodeSeg::EMarkLoaded))
+		{
+		NKern::UnlockSystem();
+		return KErrInUse;
+		}
+
+	// Release system lock as page can't be decommitted while we hold ramalloc mutex
+	NKern::UnlockSystem();
+
+	// Allocate new page, map old and new
+	TPhysAddr newPage;
+	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
+		return KErrNoMemory;
+	TLinAddr vOld = m.MapTemp(aOld, aOffset);	// enough of address for page colour
+	TLinAddr vNew = m.MapSecondTemp(newPage, aOffset);
+
+	// Copy the page and remap it wherever it's still mapped.
+	// The new page needs to be cleaned so the instruction cache sees the new data.
+	pagecpy((TAny*)vNew, (TAny*)vOld);
+
+	// Sort out cache for the code that has just been altered.
+	CacheMaintenance::CodeChanged(vNew, KPageSize);
+
+	// Replace the old page in the mapping with the new one.
+	aCodeSegMemory->Substitute(aOffset, aOld, newPage);
+
+	// Before we sort out the cache for the old page, check whether the mapping
+	// attributes required for that operation are what we have at the moment.
+	if (CacheMaintenance::TemporaryMapping() != EMemAttNormalCached)
+		{
+		// Remove the temporary mapping and map the old page as required by CacheMaintenance
+		m.UnmapTemp();
+		vOld = m.MapTemp(aOld, aOffset, 1, CacheMaintenance::TemporaryMapping());
+		}
+
+	// Sort out cache for the memory not in use anymore.
+	CacheMaintenance::PageToReuse(vOld, EMemAttNormalCached, aOld);
+
+	// Unalias pages
+	m.UnmapTemp();
+	m.UnmapSecondTemp();
+
+	// Free old page
+#ifdef _DEBUG
+	m.ClearPages(1, (TPhysAddr*)(aOld|1));
+#endif
+	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);
+
+	aNew = newPage;
+	return KErrNone;
+	}
+
+/*
+ * Move a code chunk page from aOld to aNew, updating the page table in aChunk.
+ * Enter with system locked, exit with system unlocked (!!)
+ * Must hold RAM alloc mutex.
+ */
+TInt Mmu::MoveCodeChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("Defrag::MoveCodeChunkPage() off=%08x old=%08x",aOffset,aOld));
+	Mmu& m=Mmu::Get();
+
+	// Look up the code seg that corresponds to this page
+	TLinAddr lin = (TLinAddr)(aChunk->Base() + aOffset);
+	DMemModelCodeSeg* codeseg = (DMemModelCodeSeg*)DCodeSeg::CodeSegsByAddress.Find(lin);
+
+	// If the code seg has not finished loading yet, we can't move the page the easy way
+	if (!(codeseg->iMark & DCodeSeg::EMarkLoaded))
+		{
+		NKern::UnlockSystem();
+		return KErrInUse;
+		}
+
+	// Release system lock as page can't be decommitted while we hold ramalloc mutex
+	NKern::UnlockSystem();
+
+	// Allocate new page, map old and new
+	TPhysAddr newPage;
+	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
+		return KErrNoMemory;
+	TLinAddr vOld = m.MapTemp(aOld, aOffset);	// enough of address for page colour
+	TLinAddr vNew = m.MapSecondTemp(newPage, aOffset);
+
+	// Copy the page and remap it.
+	// The new page needs to be cleaned so the instruction cache sees the new data.
+	pagecpy((TAny*)vNew, (TAny*)vOld);
+
+	// Sort out cache for the code that has just been altered.
+	CacheMaintenance::CodeChanged(vNew, KPageSize);
+
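+	// Substitute() requires the system lock to be held.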
+	NKern::LockSystem();
+	aChunk->Substitute(aOffset, aOld, newPage);
+	NKern::UnlockSystem();
+
+	// Before we sort out the cache for the old page, check whether the mapping
+	// attributes required for that operation are what we have at the moment.
+	if (CacheMaintenance::TemporaryMapping() != EMemAttNormalCached)
+		{
+		// Remove the temporary mapping and map the old page as required by CacheMaintenance
+		m.UnmapTemp();
+		vOld = m.MapTemp(aOld, aOffset, 1, CacheMaintenance::TemporaryMapping());
+		}
+
+	// Sort out cache for the memory not in use anymore.
+	CacheMaintenance::PageToReuse(vOld, EMemAttNormalCached, aOld);
+
+	// Unalias pages
+	m.UnmapTemp();
+	m.UnmapSecondTemp();
+
+	// Free old page
+#ifdef _DEBUG
+	m.ClearPages(1, (TPhysAddr*)(aOld|1));
+#endif
+	m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);
+
+	aNew = newPage;
+	return KErrNone;
+	}
+
+/*
+ * Move a data chunk page from aOld to aNew, updating the page table in aChunk.
+ * Enter with system locked, exit with system unlocked (!!)
+ * Must hold RAM alloc mutex.
+ */
+TInt Mmu::MoveDataChunkPage(DChunk* aChunk, TUint32 aOffset, TPhysAddr aOld, TPhysAddr& aNew, TUint aBlockZoneId, TBool aBlockRest)
+	{
+	__KTRACE_OPT(KMMU,Kern::Printf("Defrag::MoveDataChunkPage() off=%08x old=%08x",aOffset,aOld));
+	Mmu& m=Mmu::Get();
+	TInt r;
+
+	// Release system lock as page can't be decommitted while we hold ramalloc mutex
+	NKern::UnlockSystem();
+
+	// Allocate new page, map old and new
+	TPhysAddr newPage;
+	if (m.AllocRamPages(&newPage, 1, EPageMovable, aBlockZoneId, aBlockRest) != KErrNone)
+		return KErrNoMemory;
+	TLinAddr vOld = m.MapTemp(aOld, aOffset);	// enough of address for page colour
+	TLinAddr vNew = m.MapSecondTemp(newPage, aOffset);
+
+	// Mark the PTE as read-only to avoid the data being overwritten while we copy
+	DisablePageModification((DMemModelChunk*)aChunk, aOffset);
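+	// If another thread writes to the page during the copy, the write fault
+	// handler restores the original PTE and clears iDisabledPte; that condition
+	// is checked for below.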
+
+	// Copy the page's contents and remap its PTE
+	pagecpy((TAny*)vNew, (TAny*)vOld);
+	if (aChunk->iChunkType == EUserSelfModCode)	// sort out cache for the code that has just been altered
+		CacheMaintenance::CodeChanged(vNew, KPageSize);
+
+	NKern::LockSystem();
+	if (iDisabledPte != NULL)
+		{
+		// Access wasn't re-enabled, so we can continue
+		aChunk->Substitute(aOffset, aOld, newPage);
+		iDisabledAddr = 0;
+		iDisabledAddrAsid = -1;
+		iDisabledPte = NULL;
+		iDisabledOldVal = 0;
+		r = KErrNone;
+		}
+	else
+		r = KErrInUse;
+	NKern::UnlockSystem();
+
+
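+	// Assume the old page is the one going out of use; if the substitution
+	// failed, it is the new page that must be cleaned up instead.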
+	TLinAddr vUnused = vOld;
+	TPhysAddr pUnused = aOld;
+
+	if (r != KErrNone)
+		{
+		// Substitute failed, so sort out the cache for the new page, not the old one.
+		vUnused = vNew;
+		pUnused = newPage;
+		}
+	// Before we sort out the cache for the unused page, check whether the mapping
+	// attributes required for that operation are what we have at the moment.
+	if (CacheMaintenance::TemporaryMapping() != EMemAttNormalCached)
+		{
+		// Remove the temporary mapping and map the page as required by CacheMaintenance
+		m.UnmapTemp();
+		vUnused = m.MapTemp(pUnused, aOffset, 1, CacheMaintenance::TemporaryMapping());
+		}
+
+	// Sort out cache for the memory not in use anymore.
+	CacheMaintenance::PageToReuse(vUnused, EMemAttNormalCached, pUnused);
+
+	// Unalias pages
+	m.UnmapTemp();
+	m.UnmapSecondTemp();
+
+	if (r == KErrNone)
+		{
+		// Free old page
+#ifdef _DEBUG
+		m.ClearPages(1, (TPhysAddr*)(aOld|1));
+#endif
+		m.iRamPageAllocator->FreeRamPage(aOld, EPageMovable);
+		aNew = newPage;
+		}
+	else
+		{
+		// Free new page
+		m.iRamPageAllocator->FreeRamPage(newPage, EPageMovable);
+		}
+
+	return r;
+	}