// First public contribution.
1 // Copyright (c) 1998-2009 Nokia Corporation and/or its subsidiary(-ies).
2 // All rights reserved.
3 // This component and the accompanying materials are made available
4 // under the terms of the License "Eclipse Public License v1.0"
5 // which accompanies this distribution, and is available
6 // at the URL "http://www.eclipse.org/legal/epl-v10.html".
8 // Initial Contributors:
9 // Nokia Corporation - initial contribution.
22 /** Returns the amount of free RAM currently available.
24 @return The number of bytes of free RAM currently available.
// Returns the amount of free RAM in bytes: the MMU's free page count
// scaled by KPageSize, clipped so the product fits in a signed TInt.
27 EXPORT_C TInt Kern::FreeRamInBytes()
29 TUint numPages = TheMmu.FreeRamInPages();
30 // hack, clip free RAM to fit into a signed integer...
31 if(numPages>(KMaxTInt>>KPageShift))
// NOTE(review): the clamping statement executed when the check above
// fires (original line 32) is elided from this excerpt.
33 return numPages*KPageSize;
37 /** Rounds up the argument to the size of a MMU page.
39 To find out the size of a MMU page:
41 size = Kern::RoundToPageSize(1);
44 @param aSize Value to round up
// Rounds aSize up to the next multiple of the MMU page size by adding
// the page mask and clearing the low-order bits; RoundToPageSize(1)
// therefore yields the page size itself.
47 EXPORT_C TUint32 Kern::RoundToPageSize(TUint32 aSize)
49 return (aSize+KPageMask)&~KPageMask;
53 /** Rounds up the argument to the amount of memory mapped by a MMU page
56 Chunks occupy one or more consecutive page directory entries (PDE) and
57 therefore the amount of linear and physical memory allocated to a chunk is
58 always a multiple of the amount of memory mapped by a page directory entry.
// Rounds aSize up to the next multiple of the chunk size — the amount
// of memory mapped by one page directory entry — using KChunkMask.
60 EXPORT_C TUint32 Kern::RoundToChunkSize(TUint32 aSize)
62 return (aSize+KChunkMask)&~KChunkMask;
69 #ifdef BTRACE_KERNEL_MEMORY
// Counters exported for kernel-memory BTrace instrumentation, both
// zero-initialised at boot. (Units are not visible in this excerpt —
// presumably bytes and pages respectively; confirm against the header.)
70 TInt Epoc::DriverAllocdPhysRam = 0;
71 TInt Epoc::KernelMiscPages = 0;
76 Allows the variant to specify the details of the RAM zones. This should be invoked
77 by the variant in its implementation of the pure virtual function Asic::Init1().
79 There are some limitations to how the RAM zones can be specified:
80 - Each RAM zone's address space must be distinct and not overlap with any
81 other RAM zone's address space
82 - Each RAM zone's address space must have a size that is multiples of the
83 ASIC's MMU small page size and be aligned to the ASIC's MMU small page size,
84 usually 4KB on ARM MMUs.
85 - When taken together all of the RAM zones must cover the whole of the physical RAM
86 address space as specified by the bootstrap in the SuperPage members iTotalRamSize
88 - There can be no more than KMaxRamZones RAM zones specified by the base port
90 Note the verification of the RAM zone data is not performed here but by the ram
91 allocator later in the boot up sequence. This is because it is only possible to
92 verify the zone data once the physical RAM configuration has been read from
93 the super page. Any verification errors result in a "RAM-ALLOC" panic
94 faulting the kernel during initialisation.
96 @param aZones Pointer to an array of SRamZone structs containing the details for all
97 the zones. The end of the array is specified by an element with an iSize of zero. The array must
98 remain in memory at least until the kernel has successfully booted.
100 @param aCallback Pointer to a call back function that the kernel may invoke to request
101 one of the operations specified by TRamZoneOp.
103 @return KErrNone if successful, otherwise one of the system wide error codes
107 @see TRamZoneCallback
// Registers the variant-supplied RAM zone array and callback with the
// MMU. Faults the kernel if called outside kernel initialisation or if
// a zone configuration has already been registered.
109 EXPORT_C TInt Epoc::SetRamZoneConfig(const SRamZone* aZones, TRamZoneCallback aCallback)
111 TRamZoneCallback dummy;
112 // Ensure this is only called once and only while we are initialising the kernel
113 if (!K::Initialising || TheMmu.RamZoneConfig(dummy) != NULL)
114 {// fault kernel, won't return
115 K::Fault(K::EBadSetRamZoneConfig);
// Hand the zone array and callback to the MMU; verification happens
// later in boot (see the doc comment above this function).
122 TheMmu.SetRamZoneConfig(aZones, aCallback);
// NOTE(review): the return statement (KErrNone per the @return doc) is
// elided from this excerpt.
128 Modify the specified RAM zone's flags.
130 This allows the BSP or device driver to configure which type of pages, if any,
131 can be allocated into a RAM zone by the system.
133 Note: updating a RAM zone's flags can result in
134 1 - memory allocations failing despite there being enough free RAM in the system.
135 2 - the methods TRamDefragRequest::EmptyRamZone(), TRamDefragRequest::ClaimRamZone()
136 or TRamDefragRequest::DefragRam() never succeeding.
138 The flag masks KRamZoneFlagDiscardOnly, KRamZoneFlagMovAndDisOnly and KRamZoneFlagNoAlloc
139 are intended to be used with this method.
141 @param aId The ID of the RAM zone to modify.
142 @param aClearMask The bit mask to clear, each flag of which must already be set on the RAM zone.
143 @param aSetMask The bit mask to set.
145 @return KErrNone on success, KErrArgument if the RAM zone of aId not found or if
146 aSetMask contains invalid flag bits.
148 @see TRamDefragRequest::EmptyRamZone()
149 @see TRamDefragRequest::ClaimRamZone()
150 @see TRamDefragRequest::DefragRam()
152 @see KRamZoneFlagDiscardOnly
153 @see KRamZoneFlagMovAndDisOnly
154 @see KRamZoneFlagNoAlloc
// Clears then sets flag bits on the RAM zone identified by aId, under
// the RAM allocator lock so the change cannot race with allocations.
156 EXPORT_C TInt Epoc::ModifyRamZoneFlags(TUint aId, TUint aClearMask, TUint aSetMask)
158 RamAllocLock::Lock();
159 TInt r = TheMmu.ModifyRamZoneFlags(aId, aClearMask, aSetMask);
160 RamAllocLock::Unlock();
// NOTE(review): the `return r;` line is elided from this excerpt.
166 Gets the current count of a particular RAM zone's pages by type.
168 @param aId The ID of the RAM zone to enquire about
169 @param aPageData If successful, on return this contains the page count
171 @return KErrNone if successful, KErrArgument if a RAM zone of aId is not found or
172 one of the system wide error codes
174 @pre Calling thread must be in a critical section.
175 @pre Interrupts must be enabled.
176 @pre Kernel must be unlocked.
177 @pre No fast mutex can be held.
178 @pre Call in a thread context.
180 @see SRamZonePageCount
// Reads the page counts of RAM zone aId into aPageData under the RAM
// allocator lock. Precondition checks match the @pre doc above.
182 EXPORT_C TInt Epoc::GetRamZonePageCount(TUint aId, SRamZonePageCount& aPageData)
184 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::GetRamZonePageCount");
185 RamAllocLock::Lock();
186 TInt r = TheMmu.GetRamZonePageCount(aId, aPageData);
187 RamAllocLock::Unlock();
// NOTE(review): the `return r;` line is elided from this excerpt.
193 Allocate a block of physically contiguous RAM with a physical address aligned
194 to a specified power of 2 boundary.
195 When the RAM is no longer required it should be freed using
196 Epoc::FreePhysicalRam()
198 @param aSize The size in bytes of the required block. The specified size
199 is rounded up to the page size, since only whole pages of
200 physical RAM can be allocated.
201 @param aPhysAddr Receives the physical address of the base of the block on
202 successful allocation.
203 @param aAlign Specifies the number of least significant bits of the
204 physical address which are required to be zero. If a value
205 less than log2(page size) is specified, page alignment is
206 assumed. Pass 0 for aAlign if there are no special alignment
207 constraints (other than page alignment).
208 @return KErrNone if the allocation was successful.
209 KErrNoMemory if a sufficiently large physically contiguous block of free
210 RAM with the specified alignment could not be found.
211 @pre Calling thread must be in a critical section.
212 @pre Interrupts must be enabled.
213 @pre Kernel must be unlocked.
214 @pre No fast mutex can be held.
215 @pre Call in a thread context.
216 @pre Can be used in a device driver.
// Allocates a physically contiguous, aligned block: size is converted
// to whole pages and the alignment to a page shift before calling into
// the MMU. Memory is allocated strongly-ordered.
218 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
220 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::AllocPhysicalRam");
221 RamAllocLock::Lock();
222 TInt r = TheMmu.AllocPhysicalRam
// NOTE(review): the opening of the argument list (original lines
// 223-224, presumably `(aPhysAddr,`) is elided from this excerpt.
225 MM::RoundToPageCount(aSize),
226 MM::RoundToPageShift(aAlign),
227 (Mmu::TRamAllocFlags)EMemAttStronglyOrdered
229 RamAllocLock::Unlock();
// NOTE(review): the trailing `return r;` is elided from this excerpt.
235 Allocate a block of physically contiguous RAM with a physical address aligned
236 to a specified power of 2 boundary from the specified zone.
237 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
239 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
240 to allocate regardless of whether the other flags are set for the specified RAM zones
243 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
245 @param aZoneId The ID of the zone to attempt to allocate from.
246 @param aSize The size in bytes of the required block. The specified size
247 is rounded up to the page size, since only whole pages of
248 physical RAM can be allocated.
249 @param aPhysAddr Receives the physical address of the base of the block on
250 successful allocation.
251 @param aAlign Specifies the number of least significant bits of the
252 physical address which are required to be zero. If a value
253 less than log2(page size) is specified, page alignment is
254 assumed. Pass 0 for aAlign if there are no special alignment
255 constraints (other than page alignment).
256 @return KErrNone if the allocation was successful.
257 KErrNoMemory if a sufficiently large physically contiguous block of free
258 RAM with the specified alignment could not be found within the specified
260 KErrArgument if a RAM zone of the specified ID can't be found or if the
261 RAM zone has a total number of physical pages which is less than those
262 requested for the allocation.
264 @pre Calling thread must be in a critical section.
265 @pre Interrupts must be enabled.
266 @pre Kernel must be unlocked.
267 @pre No fast mutex can be held.
268 @pre Call in a thread context.
269 @pre Can be used in a device driver.
// Single-zone convenience overload: delegates to the zone-list overload
// with a one-element ID list built from aZoneId.
271 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
273 return ZoneAllocPhysicalRam(&aZoneId, 1, aSize, aPhysAddr, aAlign);
278 Allocate a block of physically contiguous RAM with a physical address aligned
279 to a specified power of 2 boundary from the specified RAM zones.
280 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
282 RAM will be allocated into the RAM zones in the order they are specified in the
283 aZoneIdList parameter. If the contiguous allocations are intended to span RAM zones
284 when required then aZoneIdList should be listed with the RAM zones in ascending
285 physical address order.
287 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
288 to allocate regardless of whether the other flags are set for the specified RAM zones
291 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
293 @param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to
294 attempt to allocate from.
295 @param aZoneIdCount The number of RAM zone IDs contained in aZoneIdList.
296 @param aSize The size in bytes of the required block. The specified size
297 is rounded up to the page size, since only whole pages of
298 physical RAM can be allocated.
299 @param aPhysAddr Receives the physical address of the base of the block on
300 successful allocation.
301 @param aAlign Specifies the number of least significant bits of the
302 physical address which are required to be zero. If a value
303 less than log2(page size) is specified, page alignment is
304 assumed. Pass 0 for aAlign if there are no special alignment
305 constraints (other than page alignment).
306 @return KErrNone if the allocation was successful.
307 KErrNoMemory if a sufficiently large physically contiguous block of free
308 RAM with the specified alignment could not be found within the specified
310 KErrArgument if a RAM zone of a specified ID can't be found or if the
311 RAM zones have a total number of physical pages which is less than those
312 requested for the allocation.
314 @pre Calling thread must be in a critical section.
315 @pre Interrupts must be enabled.
316 @pre Kernel must be unlocked.
317 @pre No fast mutex can be held.
318 @pre Call in a thread context.
319 @pre Can be used in a device driver.
// Contiguous, aligned allocation restricted to the listed RAM zones,
// attempted in list order, under the RAM allocator lock.
321 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aSize, TPhysAddr& aPhysAddr, TInt aAlign)
323 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ZoneAllocPhysicalRam");
324 RamAllocLock::Lock();
325 TInt r = TheMmu.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aSize, aPhysAddr, aAlign);
326 RamAllocLock::Unlock();
// NOTE(review): the `return r;` line is elided from this excerpt.
332 Attempt to allocate discontiguous RAM pages.
334 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
336 @param aNumPages The number of discontiguous pages required to be allocated
337 @param aPageList This should be a pointer to a previously allocated array of
338 aNumPages TPhysAddr elements. On a successful allocation it
339 will receive the physical addresses of each page allocated.
341 @return KErrNone if the allocation was successful.
342 KErrNoMemory if the requested number of pages can't be allocated
344 @pre Calling thread must be in a critical section.
345 @pre Interrupts must be enabled.
346 @pre Kernel must be unlocked.
347 @pre No fast mutex can be held.
348 @pre Call in a thread context.
349 @pre Can be used in a device driver.
// Allocates aNumPages discontiguous pages, writing each page's physical
// address into aPageList. Pages are allocated strongly-ordered.
351 EXPORT_C TInt Epoc::AllocPhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
353 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::AllocPhysicalRam");
354 RamAllocLock::Lock();
355 TInt r = TheMmu.AllocPhysicalRam(aPageList,aNumPages,(Mmu::TRamAllocFlags)EMemAttStronglyOrdered);
356 RamAllocLock::Unlock();
// NOTE(review): the `return r;` line is elided from this excerpt.
362 Attempt to allocate discontiguous RAM pages from the specified zone.
364 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
365 to allocate regardless of whether the other flags are set for the specified RAM zones
368 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
370 @param aZoneId The ID of the zone to attempt to allocate from.
371 @param aNumPages The number of discontiguous pages required to be allocated
372 from the specified zone.
373 @param aPageList This should be a pointer to a previously allocated array of
374 aNumPages TPhysAddr elements. On a successful
375 allocation it will receive the physical addresses of each
377 @return KErrNone if the allocation was successful.
378 KErrNoMemory if the requested number of pages can't be allocated from the
380 KErrArgument if a RAM zone of the specified ID can't be found or if the
381 RAM zone has a total number of physical pages which is less than those
382 requested for the allocation.
384 @pre Calling thread must be in a critical section.
385 @pre Interrupts must be enabled.
386 @pre Kernel must be unlocked.
387 @pre No fast mutex can be held.
388 @pre Call in a thread context.
389 @pre Can be used in a device driver.
// Single-zone convenience overload for discontiguous page allocation:
// delegates to the zone-list overload with a one-element ID list.
391 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint aZoneId, TInt aNumPages, TPhysAddr* aPageList)
393 return ZoneAllocPhysicalRam(&aZoneId, 1, aNumPages, aPageList);
398 Attempt to allocate discontiguous RAM pages from the specified RAM zones.
399 The RAM pages will be allocated into the RAM zones in the order that they are specified
400 in the aZoneIdList parameter, the RAM zone preferences will be ignored.
402 Note that this method only respects the KRamZoneFlagNoAlloc flag and will always attempt
403 to allocate regardless of whether the other flags are set for the specified RAM zones
406 When the RAM is no longer required it should be freed using Epoc::FreePhysicalRam().
408 @param aZoneIdList A pointer to an array of RAM zone IDs of the RAM zones to
409 attempt to allocate from.
410 @param aZoneIdCount The number of RAM zone IDs pointed to by aZoneIdList.
411 @param aNumPages The number of discontiguous pages required to be allocated
412 from the specified zone.
413 @param aPageList This should be a pointer to a previously allocated array of
414 aNumPages TPhysAddr elements. On a successful
415 allocation it will receive the physical addresses of each
417 @return KErrNone if the allocation was successful.
418 KErrNoMemory if the requested number of pages can't be allocated from the
420 KErrArgument if a RAM zone of a specified ID can't be found or if the
421 RAM zones have a total number of physical pages which is less than those
422 requested for the allocation.
424 @pre Calling thread must be in a critical section.
425 @pre Interrupts must be enabled.
426 @pre Kernel must be unlocked.
427 @pre No fast mutex can be held.
428 @pre Call in a thread context.
429 @pre Can be used in a device driver.
// Discontiguous page allocation restricted to the listed RAM zones,
// attempted in list order, under the RAM allocator lock.
431 EXPORT_C TInt Epoc::ZoneAllocPhysicalRam(TUint* aZoneIdList, TUint aZoneIdCount, TInt aNumPages, TPhysAddr* aPageList)
433 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL, "Epoc::ZoneAllocPhysicalRam");
434 RamAllocLock::Lock();
435 TInt r = TheMmu.ZoneAllocPhysicalRam(aZoneIdList, aZoneIdCount, aNumPages, aPageList);
436 RamAllocLock::Unlock();
// NOTE(review): the `return r;` line is elided from this excerpt.
442 Free a previously-allocated block of physically contiguous RAM.
444 Specifying one of the following may cause the system to panic:
445 a) an invalid physical RAM address.
446 b) valid physical RAM addresses where some had not been previously allocated.
447 c) an address not aligned to a page boundary.
449 @param aPhysAddr The physical address of the base of the block to be freed.
450 This must be the address returned by a previous call to
451 Epoc::AllocPhysicalRam(), Epoc::ZoneAllocPhysicalRam(),
452 Epoc::ClaimPhysicalRam() or Epoc::ClaimRamZone().
453 @param aSize The size in bytes of the required block. The specified size
454 is rounded up to the page size, since only whole pages of
455 physical RAM can be allocated.
456 @return KErrNone if the operation was successful.
460 @pre Calling thread must be in a critical section.
461 @pre Interrupts must be enabled.
462 @pre Kernel must be unlocked.
463 @pre No fast mutex can be held.
464 @pre Call in a thread context.
465 @pre Can be used in a device driver.
// Frees a previously allocated contiguous block; the byte size is
// converted to a whole-page count before calling into the MMU.
467 EXPORT_C TInt Epoc::FreePhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
469 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
470 RamAllocLock::Lock();
471 TheMmu.FreePhysicalRam(aPhysAddr,MM::RoundToPageCount(aSize));
472 RamAllocLock::Unlock();
// NOTE(review): the return statement (KErrNone per the @return doc) is
// elided from this excerpt.
478 Free a number of physical RAM pages that were previously allocated using
479 Epoc::AllocPhysicalRam() or Epoc::ZoneAllocPhysicalRam().
481 Specifying one of the following may cause the system to panic:
482 a) an invalid physical RAM address.
483 b) valid physical RAM addresses where some had not been previously allocated.
484 c) an address not aligned to a page boundary.
486 @param aNumPages The number of pages to be freed.
487 @param aPageList An array of aNumPages TPhysAddr elements. Where each element
488 should contain the physical address of each page to be freed.
489 This must be the same set of addresses as those returned by a
490 previous call to Epoc::AllocPhysicalRam() or
491 Epoc::ZoneAllocPhysicalRam().
492 @return KErrNone if the operation was successful.
494 @pre Calling thread must be in a critical section.
495 @pre Interrupts must be enabled.
496 @pre Kernel must be unlocked.
497 @pre No fast mutex can be held.
498 @pre Call in a thread context.
499 @pre Can be used in a device driver.
// Frees aNumPages discontiguous pages whose physical addresses are
// given in aPageList, under the RAM allocator lock.
502 EXPORT_C TInt Epoc::FreePhysicalRam(TInt aNumPages, TPhysAddr* aPageList)
504 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::FreePhysicalRam");
505 RamAllocLock::Lock();
506 TheMmu.FreePhysicalRam(aPageList,aNumPages);
507 RamAllocLock::Unlock();
// NOTE(review): the return statement (KErrNone per the @return doc) is
// elided from this excerpt.
513 Allocate a specific block of physically contiguous RAM, specified by physical
514 base address and size.
515 If and when the RAM is no longer required it should be freed using
516 Epoc::FreePhysicalRam()
518 @param aPhysAddr The physical address of the base of the required block.
519 @param aSize The size in bytes of the required block. The specified size
520 is rounded up to the page size, since only whole pages of
521 physical RAM can be allocated.
522 @return KErrNone if the operation was successful.
523 KErrArgument if the range of physical addresses specified included some
524 which are not valid physical RAM addresses.
525 KErrInUse if the range of physical addresses specified are all valid
526 physical RAM addresses but some of them have already been
527 allocated for other purposes.
528 @pre Calling thread must be in a critical section.
529 @pre Interrupts must be enabled.
530 @pre Kernel must be unlocked.
531 @pre No fast mutex can be held.
532 @pre Call in a thread context.
533 @pre Can be used in a device driver.
// Claims a specific physical range for exclusive use; the byte size is
// converted to a page count and the claim is made strongly-ordered.
535 EXPORT_C TInt Epoc::ClaimPhysicalRam(TPhysAddr aPhysAddr, TInt aSize)
537 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"Epoc::ClaimPhysicalRam");
538 RamAllocLock::Lock();
539 TInt r = TheMmu.ClaimPhysicalRam
// NOTE(review): the opening of the argument list (original lines
// 540-541, presumably `(aPhysAddr,`) is elided from this excerpt.
542 MM::RoundToPageCount(aSize),
543 (Mmu::TRamAllocFlags)EMemAttStronglyOrdered
545 RamAllocLock::Unlock();
// NOTE(review): the trailing `return r;` is elided from this excerpt.
551 Translate a virtual address to the corresponding physical address.
553 @param aLinAddr The virtual address to be translated.
554 @return The physical address corresponding to the given virtual address, or
555 KPhysAddrInvalid if the specified virtual address is unmapped.
556 @pre Interrupts must be enabled.
557 @pre Kernel must be unlocked.
558 @pre Call in a thread context.
559 @pre Can be used in a device driver.
// Translates a virtual address to a physical address. For addresses
// below KGlobalMemoryBase the current process's address-space ID is
// used; global addresses use the kernel ASID so no DThread context is
// required.
561 EXPORT_C TPhysAddr Epoc::LinearToPhysical(TLinAddr aLinAddr)
563 // This precondition is violated by various parts of the system under some conditions,
564 // e.g. when __FLUSH_PT_INTO_RAM__ is defined. This function might also be called by
565 // a higher-level RTOS for which these conditions are meaningless. Thus, it's been
567 // CHECK_PRECONDITIONS(MASK_KERNEL_UNLOCKED|MASK_INTERRUPTS_ENABLED|MASK_NOT_ISR|MASK_NOT_IDFC,"Epoc::LinearToPhysical");
569 // When called by a higher-level OS we may not be in a DThread context, so avoid looking up the
570 // current process in the DThread for a global address
571 TInt osAsid = KKernelOsAsid;
572 if (aLinAddr < KGlobalMemoryBase)
574 // Get the os asid of current thread's process so no need to open a reference on it.
575 DMemModelProcess* pP=(DMemModelProcess*)TheCurrentThread->iOwningProcess;
576 osAsid = pP->OsAsid();
// NOTE(review): original lines 577-579 and 581-582 are elided; the two
// return paths below likely sit in different conditional-compilation or
// branch arms in the full source.
580 return Mmu::UncheckedLinearToPhysical(aLinAddr, osAsid);
583 TPhysAddr addr = Mmu::LinearToPhysical(aLinAddr, osAsid);
// Returns the maximum internal RAM drive size in bytes: current drive
// size plus currently free RAM, capped at the platform limit
// PP::RamDriveMaxSize. All arithmetic is done in whole pages.
594 EXPORT_C TInt TInternalRamDrive::MaxSize()
596 TUint maxPages = (TUint(TheSuperPage().iRamDriveSize)>>KPageShift)+TheMmu.FreeRamInPages(); // current size plus spare memory
597 TUint maxPages2 = TUint(PP::RamDriveMaxSize)>>KPageShift;
598 if(maxPages>maxPages2)
599 maxPages = maxPages2;
600 return maxPages*KPageSize;
// Returns the MMU page size in bytes.
// NOTE(review): the function body (original lines 605 onward) is elided
// from this excerpt.
604 TInt M::PageSizeInBytes()
610 #ifdef BTRACE_KERNEL_MEMORY
// Emits initial BTrace records for the given trace category.
// NOTE(review): the function body (original lines 612 onward) is elided
// from this excerpt.
611 void M::BTracePrime(TUint aCategory)
624 Create a hardware chunk object, optionally mapping a specified block of physical
625 addresses with specified access permissions and cache policy.
627 When the mapping is no longer required, close the chunk using chunk->Close(0);
628 Note that closing a chunk does not free any RAM pages which were mapped by the
629 chunk - these must be freed separately using Epoc::FreePhysicalRam().
631 @param aChunk Upon successful completion this parameter receives a pointer to
632 the newly created chunk. Upon unsuccessful completion it is
633 written with a NULL pointer. The virtual address of the mapping
634 can subsequently be discovered using the LinearAddress()
635 function on the chunk.
636 @param aAddr The base address of the physical region to be mapped. This will
637 be rounded down to a multiple of the hardware page size before
639 @param aSize The size of the physical address region to be mapped. This will
640 be rounded up to a multiple of the hardware page size before
641 being used; the rounding is such that the entire range from
642 aAddr to aAddr+aSize-1 inclusive is mapped. For example if
643 aAddr=0xB0001FFF, aSize=2 and the hardware page size is 4KB, an
644 8KB range of physical addresses from 0xB0001000 to 0xB0002FFF
645 inclusive will be mapped.
646 @param aMapAttr Mapping attributes required for the mapping. This is formed
647 by ORing together values from the TMappingAttributes enumeration
648 to specify the access permissions and caching policy.
650 @pre Calling thread must be in a critical section.
651 @pre Interrupts must be enabled.
652 @pre Kernel must be unlocked.
653 @pre No fast mutex can be held.
654 @pre Call in a thread context.
655 @pre Can be used in a device driver.
656 @see TMappingAttributes
// Creates a hardware chunk mapping the physical range [aAddr,
// aAddr+aSize-1] with the permissions and cache policy encoded in
// aMapAttr. On success the chunk exposes the kernel linear address of
// the mapping. Error-path checks between steps are elided from this
// excerpt (see NOTE comments below).
658 EXPORT_C TInt DPlatChunkHw::New(DPlatChunkHw*& aChunk, TPhysAddr aAddr, TInt aSize, TUint aMapAttr)
660 CHECK_PRECONDITIONS(MASK_THREAD_CRITICAL,"DPlatChunkHw::New");
661 __KTRACE_OPT(KMMU,Kern::Printf("DPlatChunkHw::New phys=%08x, size=%x, attribs=%x",aAddr,aSize,aMapAttr));
// Compute the inclusive end address and the page count covering the
// whole requested range (rounding aAddr down and the end up).
668 TPhysAddr end = aAddr+aSize-1;
669 if(end<aAddr) // overflow?
672 TUint pageCount = (end>>KPageShift)-(aAddr>>KPageShift)+1;
674 // check attributes...
675 TMappingPermissions perm;
676 TInt r = MM::MappingPermissions(perm,*(TMappingAttributes2*)&aMapAttr);
// NOTE(review): the error check on `r` (original lines 677-678) is
// elided; the same applies after each MM:: call below.
679 TMemoryAttributes attr;
680 r = MM::MemoryAttributes(attr,*(TMappingAttributes2*)&aMapAttr);
684 // construct a hardware chunk...
685 DMemModelChunkHw* pC = new DMemModelChunkHw;
689 // set the executable flags based on the specified mapping permissions...
690 TMemoryCreateFlags flags = EMemoryCreateDefault;
// NOTE(review): the condition guarding this (original line 691,
// presumably testing execute permission in `perm`) is elided.
692 flags = (TMemoryCreateFlags)(flags|EMemoryCreateAllowExecution);
// Create the memory object, attach the contiguous physical range, and
// map it into the kernel address space.
694 r = MM::MemoryNew(pC->iMemoryObject, EMemoryObjectHardware, pageCount, flags, attr);
697 r = MM::MemoryAddContiguous(pC->iMemoryObject,0,pageCount,aAddr);
700 r = MM::MappingNew(pC->iKernelMapping,pC->iMemoryObject,perm,KKernelOsAsid);
// Publish the chunk's physical base, kernel linear base, and size.
703 pC->iPhysAddr = aAddr;
704 pC->iLinAddr = MM::MappingBase(pC->iKernelMapping);
705 pC->iSize = pageCount<<KPageShift;
706 const TMappingAttributes2& lma = MM::LegacyMappingAttributes(attr,perm); // not needed, but keep in case someone uses this internal member
707 *(TMappingAttributes2*)&pC->iAttribs = lma;
// Closes the hardware chunk: tears down the kernel mapping and the
// backing memory object. Note (per the class doc above) this does NOT
// free physical RAM mapped by the chunk — callers must use
// Epoc::FreePhysicalRam() separately.
720 TInt DMemModelChunkHw::Close(TAny*)
722 __KTRACE_OPT2(KOBJECT,KMMU,Kern::Printf("DMemModelChunkHw::Close %d %O",AccessCount(),this));
// NOTE(review): the access-count decrement/check (original lines
// 723-725) is elided from this excerpt.
726 MM::MappingDestroy(iKernelMapping);
727 MM::MemoryDestroy(iMemoryObject);
// Assembler hook backing ASM_ASSERT_PAGING_SAFE: verifies the current
// context may take a paging fault for [aStartAddres, +aLength), and
// asserts (fatally) if not, logging the faulting pc/lr.
740 extern "C" void ASMCheckPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength)
742 if(M::CheckPagingSafe(EFalse, aStartAddres, aLength))
// NOTE(review): original line 743 is elided — presumably an early
// `return;` on success, making the lines below the failure path.
744 Kern::Printf("ASM_ASSERT_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR);
745 __NK_ASSERT_ALWAYS(0);
// Assembler hook backing ASM_ASSERT_DATA_PAGING_SAFE: same check as
// ASMCheckPagingSafe but for data paging (first argument ETrue); the
// failure path only emits a warning trace rather than asserting.
748 extern "C" void ASMCheckDataPagingSafe(TLinAddr aPC, TLinAddr aLR, TLinAddr aStartAddres, TUint aLength)
750 if(M::CheckPagingSafe(ETrue, aStartAddres, aLength))
// NOTE(review): original line 751 is elided — presumably an early
// `return;` on success, making the trace below the failure path.
752 __KTRACE_OPT(KDATAPAGEWARN,Kern::Printf("Data paging: ASM_ASSERT_DATA_PAGING_SAFE FAILED: pc=%x lr=%x",aPC,aLR));
// Inspects the current thread's held-mutex list and checks the order
// of the first (most recently acquired) mutex against KMutexOrdPageOut.
// NOTE(review): the surrounding control flow and both return paths
// (original lines 758-759, 761-762, 765 onward) are elided from this
// excerpt, so the exact success/failure convention cannot be confirmed
// here.
757 DMutex* CheckMutexOrder()
760 SDblQue& ml = TheCurrentThread->iMutexList;
763 DMutex* mm = _LOFF(ml.First(), DMutex, iOrderLink);
764 if (KMutexOrdPageOut >= mm->iOrder)
// Determines whether the current context may safely take a paging
// fault for the range [aStartAddr, +aLength). Returns ETrue early for
// pre-boot contexts, kernel addresses (never paged), and unpaged ROM.
// Otherwise checks for held fast mutexes and mutex-order violations;
// in those failure cases the result is ETrue only when data paging is
// disabled. Several guard/brace lines are elided from this excerpt —
// see NOTE comments.
771 TBool M::CheckPagingSafe(TBool aDataPaging, TLinAddr aStartAddr, TUint aLength)
776 NThread* nt = NCurrentThread();
// NOTE(review): the condition guarding this early return (original
// line 777, presumably a null-thread check) is elided.
778 return ETrue; // We've not booted properly yet!
780 if(aStartAddr>=KUserMemoryLimit)
781 return ETrue; // kernel memory can't be paged
783 if(IsUnpagedRom(aStartAddr,aLength))
// NOTE(review): the `return ETrue;` for the unpaged-ROM case (original
// lines 784-785) is elided.
786 TBool dataPagingEnabled = K::MemModelAttributes&EMemModelAttrDataPaging;
788 DThread* thread = _LOFF(nt,DThread,iNThread);
789 NFastMutex* fm = NKern::HeldFastMutex();
// A held fast mutex (other than the scheduler lock with an active
// paging exception trap) makes paging unsafe.
792 if(!thread->iPagingExcTrap || fm!=&TheScheduler.iLock)
796 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: CheckPagingSafe FAILED - FM Held"));
801 __KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: CheckPagingSafe FAILED - FM Held"));
802 return !dataPagingEnabled;
// A mutex-order fault (mutex held at or below the page-out order) also
// makes paging unsafe.
807 DMutex* m = CheckMutexOrder();
812 __KTRACE_OPT2(KPAGING,KPANIC,Kern::Printf("DP: Mutex Order Fault %O",m));
817 __KTRACE_OPT(KDATAPAGEWARN, Kern::Printf("Data paging: Mutex Order Fault %O mem=%x+%x",m,aStartAddr,aLength));
818 return !dataPagingEnabled;
// Notification from the paging device that it has gone idle.
// NOTE(review): the body (original lines 828 onward) is elided from
// this excerpt.
827 EXPORT_C void DPagingDevice::NotifyIdle()
// Notification from the paging device that it has become busy.
// NOTE(review): the body (original lines 832 onward) is elided from
// this excerpt.
831 EXPORT_C void DPagingDevice::NotifyBusy()