1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/kernelhwsrv/kernel/eka/drivers/locmedia/dmasupport.cpp Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,820 @@
1.4 +// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
1.5 +// All rights reserved.
1.6 +// This component and the accompanying materials are made available
1.7 +// under the terms of the License "Eclipse Public License v1.0"
1.8 +// which accompanies this distribution, and is available
1.9 +// at the URL "http://www.eclipse.org/legal/epl-v10.html".
1.10 +//
1.11 +// Initial Contributors:
1.12 +// Nokia Corporation - initial contribution.
1.13 +//
1.14 +// Contributors:
1.15 +//
1.16 +// Description:
1.17 +// e32\drivers\locmedia\dmasupport.cpp
1.18 +//
1.19 +//
1.20 +
1.21 +#include <kernel/kernel.h>
1.22 +#include <kernel/cache.h>
1.23 +#include "locmedia.h"
1.24 +#include "dmasupport.h"
1.25 +#include "dmasupport.inl"
1.26 +
1.27 +#include "OstTraceDefinitions.h"
1.28 +#ifdef OST_TRACE_COMPILER_IN_USE
1.29 +#include "locmedia_ost.h"
1.30 +#ifdef __VC32__
1.31 +#pragma warning(disable: 4127) // disabling warning "conditional expression is constant"
1.32 +#endif
1.33 +#include "dmasupportTraces.h"
1.34 +#endif
1.35 +
1.36 +#define PHYSADDR_FAULT() Kern::Fault("TLOCDRV-PHYS-ADDR",__LINE__)
1.37 +
1.38 +//#define __DEBUG_DMASUP__
1.39 +#ifdef __DEBUG_DMASUP__
1.40 +#define __KTRACE_DMA(p) {p;}
1.41 +#else
1.42 +#define __KTRACE_DMA(p)
1.43 +#endif
1.44 +
1.45 +TInt DDmaHelper::iPageSize;
1.46 +TInt DDmaHelper::iPageSizeLog2;
1.47 +TInt DDmaHelper::iPageSizeMsk;
1.48 +
1.49 +/******************************************************************************
1.50 + DDmaHelper
1.51 + ******************************************************************************/
1.52 +const TPhysAddr KPhysMemFragmented = KPhysAddrInvalid;
1.53 +
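+// Returns the bit position of the most significant set bit of aVal, i.e. floor(log2(aVal)),
+// located by a five-step binary search over the 32-bit value; for example Log2(0x1000) == 12.
+// (aVal is expected to be non-zero; Log2(0) degenerates to 0.)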
1.54 +TUint32 Log2(TUint32 aVal)
1.55 + {
1.56 + __ASSERT_COMPILE(sizeof(TUint32) == 4);
1.57 +
1.58 + TUint32 bitPos=31;
1.59 +
1.60 + if(!(aVal >> 16)) {bitPos-=16; aVal<<=16;}
1.61 + if(!(aVal >> 24)) {bitPos-=8; aVal<<=8 ;}
1.62 + if(!(aVal >> 28)) {bitPos-=4; aVal<<=4 ;}
1.63 + if(!(aVal >> 30)) {bitPos-=2; aVal<<=2 ;}
1.64 + if(!(aVal >> 31)) {bitPos-=1;}
1.65 +
1.66 + return bitPos;
1.67 + }
1.68 +
1.69 +TBool IsPowerOfTwo(TInt aNum)
1.70 +//
1.71 +// Returns ETrue if aNum is a power of two
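+// ((aNum & -aNum) isolates the least significant set bit, so it equals aNum
+//  only when exactly one bit is set)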
1.72 +//
1.73 + {
1.74 + return (aNum != 0 && (aNum & -aNum) == aNum);
1.75 + }
1.76 +
1.77 +void DDmaHelper::ResetPageLists()
1.78 + {
1.79 + iFragLen = 0;
1.80 + iFragLenRemaining = 0;
1.81 + }
1.82 +
1.83 +DDmaHelper::DDmaHelper()
1.84 + {
1.85 + OstTraceFunctionEntry0( DDMAHELPER_DDMAHELPER_ENTRY );
1.86 + iPageSize = Kern::RoundToPageSize(1);
1.87 + __ASSERT_ALWAYS(IsPowerOfTwo(iPageSize), PHYSADDR_FAULT());
1.88 + iPageSizeLog2 = Log2(iPageSize);
1.89 + iPageSizeMsk = iPageSize-1;
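+	// e.g. with a 4K MMU page size: iPageSizeLog2 == 12 and iPageSizeMsk == 0xFFF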
1.90 + OstTraceFunctionExit0( DDMAHELPER_DDMAHELPER_EXIT );
1.91 + }
1.92 +
1.93 +DDmaHelper::~DDmaHelper()
1.94 + {
1.95 + OstTraceFunctionEntry0( DESTRUCTOR_DDMAHELPER_ENTRY );
1.96 + delete [] iPageArray;
1.97 + delete [] iPageList;
1.98 + if (iPhysicalPinObject)
1.99 + {
1.100 + NKern::ThreadEnterCS();
1.101 + Kern::DestroyPhysicalPinObject(iPhysicalPinObject);
1.102 + NKern::ThreadLeaveCS();
1.103 + }
1.104 + OstTraceFunctionExit0( DESTRUCTOR_DDMAHELPER_EXIT );
1.105 + }
1.106 +
1.107 +/**
1.108 +Constructs the DDmaHelper object
1.109 +
1.110 +@param aLength The maximum length of data mapped by this object.
1.111 + Should be a multiple of the page size
1.112 +@param aMediaBlockSize The minimum amount of data that the media can transfer in read / write operations
1.113 +@param aDmaAlignment The memory alignment required by the media device's DMA controller (e.g. word aligned = 2)
1.114 +
1.115 +@return KErrNone, if successful;
1.116 +        KErrNoMemory, if unable to create the page arrays.
1.117 +*/
1.118 +TInt DDmaHelper::Construct(TInt aLength, TInt aMediaBlockSize, TInt aDmaAlignment)
1.119 + {
1.120 + OstTraceFunctionEntry1( DDMAHELPER_CONSTRUCT_ENTRY, this );
1.121 + __ASSERT_ALWAYS(aMediaBlockSize > 0, PHYSADDR_FAULT());
1.122 + __ASSERT_ALWAYS(IsPowerOfTwo(aMediaBlockSize), PHYSADDR_FAULT());
1.123 + __ASSERT_ALWAYS(aLength > 0, PHYSADDR_FAULT());
1.124 + __ASSERT_ALWAYS(aLength > iPageSize, PHYSADDR_FAULT());
1.125 +
1.126 + // This code assumes that the media block size (normally 512) is >= the processor's
1.127 + // cache-line size (typically 32 bytes). This may not be true for future processors.
1.128 + // If the cache-line size was 1024, for example, reading 512 bytes into a client's
1.129 + // buffer & then calling Cache::SyncMemoryAfterDmaRead would invalidate an entire 1024
1.130 + // bytes in the user's address space.
1.131 + TUint cacheLineSize = Cache::DmaBufferAlignment();
1.132 + __ASSERT_ALWAYS(IsPowerOfTwo(cacheLineSize), PHYSADDR_FAULT());
1.133 + if (cacheLineSize > (TUint) aMediaBlockSize)
1.134 + {
1.135 + OstTraceFunctionExitExt( DDMAHELPER_CONSTRUCT_EXIT1, this, KErrNotSupported );
1.136 + return KErrNotSupported;
1.137 + }
1.138 +
1.139 + //Check whether Kernel supports physical memory pinning:
1.140 + TInt mm = Kern::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, 0, 0) & EMemModelTypeMask;
1.141 + if (mm >= EMemModelTypeFlexible)
1.142 + {
1.143 + // Flexible memory model supports physical pinning for user (and Kernel) memory that
1.144 + // is the subject of DMA transfer.
1.145 + // Physical memory pinning ensures that:
1.146 + // - physical memory is not moved by RAM defragmentation.
1.147 +		//  - it is safe to DMA against it or to sync the cache (using the new interface) even if/when
1.148 +		//    the owner of the memory (e.g. an untrusted user application) decommits the memory or panics.
1.149 + // For details @see Kern::PinPhysicalMemory.
1.150 + // Cache Sync of physically pinned memory on flexible memory model is done by:
1.151 + // - Cache::SyncPhysicalMemoryBeforeDmaWrite
1.152 + // - Cache::SyncPhysicalMemoryBeforeDmaRead
1.153 + // - Cache::SyncPhysicalMemoryAfterDmaRead
1.154 + iPhysPinningAvailable = ETrue;
1.155 + __KTRACE_DMA(Kern::Printf("Memory model (%d) supports physical pinning\n",mm));
1.156 + NKern::ThreadEnterCS();
1.157 + TInt r=Kern::CreatePhysicalPinObject(iPhysicalPinObject);
1.158 + OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_CONSTRUCT1, "Memory model=%d supports physical pinning; created Physical Pin Object with return value=%d",mm, r);
1.159 + NKern::ThreadLeaveCS();
1.160 + if (r) return r;
1.161 + }
1.162 + else
1.163 + {
1.164 + // Memory models before flexible do not support memory pinning.
1.165 + // The driver has to use PrepareMemoryForDMA/ReleaseMemoryFromDMA Kernel interface
1.166 + // that ensures that physical memory won't be moved by RAM defragmentation module.
1.167 +		// However, the Kernel relies on the assumption that the user memory won't disappear (e.g. because
1.168 +		// the user client closes the chunk or panics), as that would lead to a Kernel crash.
1.169 +		// For that reason, the only use case for DMA transfer into user memory is the File System's
1.170 +		// read/write buffer - as the File System is assumed to be a trusted component.
1.171 +		// To mark its buffer(s) for DMA transfer, the File System must call UserSvr::RegisterTrustedChunk
1.172 + // before DMA transfer starts.
1.173 + // Cache sync. operations before/after DMA transfer must be done by using the old Cache interface:
1.174 + // - Cache::SyncMemoryBeforeDmaWrite
1.175 + // - Cache::SyncMemoryBeforeDmaRead
1.176 + // - Cache::SyncMemoryAfterDmaRead
1.177 + // As they all require linear address as input, these methods also rely on File System buffers
1.178 + // to be in valid state during sync calls.
1.179 + iPhysPinningAvailable = EFalse;
1.180 +		__KTRACE_DMA(Kern::Printf("Memory model (%d) doesn't support physical pinning",mm));
1.181 + OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_CONSTRUCT2, "Memory model=%d doesn't support physical pinning",mm);
1.182 + iPhysicalPinObject = NULL;
1.183 + }
1.184 +
1.185 + iMaxPages = (aLength >> iPageSizeLog2)-1;
1.186 +
1.187 + // 2 Additional pages for page straddling
1.188 + iPageArray = new TPhysAddr[iMaxPages+2];
1.189 + if (iPageArray != NULL)
1.190 + {
1.191 + iPageList = new TPageList[iMaxPages];
1.192 + if (iPageList != NULL)
1.193 + {
1.194 + iMediaBlockSize = aMediaBlockSize;
1.195 + iMediaBlockSizeMask = TInt64(iMediaBlockSize - 1);
1.196 +
1.197 + iDmaAlignment = aDmaAlignment;
1.198 + __KTRACE_DMA(Kern::Printf("-PHYSADDR: Construct iMaxPages(%d), MediaBlocks(%d), DMAalign(%d)",iMaxPages,iMediaBlockSize,iDmaAlignment));
1.199 + OstTraceExt3(TRACE_FLOW, DDMAHELPER_CONSTRUCT_EXIT2, "< KErrNone PHYSADDR: Construct iMaxPages %d MediaBlocks %d DMAalign %d", iMaxPages,iMediaBlockSize,iDmaAlignment );
1.200 + return KErrNone;
1.201 + }
1.202 + delete [] iPageArray; iPageArray = NULL;
1.203 + }
1.204 +
1.205 + iMaxPages = 0;
1.206 + OstTraceFunctionExitExt( DDMAHELPER_CONSTRUCT_EXIT3, this, KErrNoMemory );
1.207 + return KErrNoMemory;
1.208 + }
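+
+// Illustrative usage sketch: a media driver would typically create the helper once and size it
+// for its largest transfer, along the lines of
+//
+//   DDmaHelper* helper = new DDmaHelper;
+//   TInt r = helper ? helper->Construct(256 * 1024, // maximum transfer length (assumed value)
+//                                       512,        // media block size (assumed value)
+//                                       2)          // DMA address alignment (assumed value)
+//                   : KErrNoMemory;
+//
+// The numeric arguments above are hardware-specific assumptions, not values required by this class.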
1.209 +
1.210 +/**
1.211 + * Each Read/Write request is examined to determine if the descriptor that
1.212 + * is referenced is mapped to a physical memory object;
1.213 + * if so it prepares the memory, updates the request with physical memory information
1.214 + * and issues the request.
1.215 + * If a request does not make use of physical memory or is not configured correctly the
1.216 + * request is passed through without modification.
1.217 + */
1.218 +TInt DDmaHelper::SendReceive(TLocDrvRequest& aReq, TLinAddr aLinAddress)
1.219 + {
1.220 + OstTraceFunctionEntry0( DDMAHELPER_SENDRECEIVE_ENTRY );
1.221 + DPrimaryMediaBase& primaryMedia = *aReq.Drive()->iPrimaryMedia;
1.222 +
1.223 + TInt reqId = aReq.Id();
1.224 + if (reqId != DLocalDrive::ERead && reqId != DLocalDrive::EWrite)
1.225 + {
1.226 + OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT1, "< Request is not ERead or EWrite, cannot perform Direct Memory Access");
1.227 + return aReq.SendReceive(&primaryMedia.iMsgQ);
1.228 + }
1.229 +
1.230 + if ((I64HIGH(aReq.Length()) > 0) || (aReq.Length() < iMediaBlockSize))
1.231 + {
1.232 + OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT2, "< Invalid request length, cannot perform Direct Memory Access");
1.233 + return aReq.SendReceive(&primaryMedia.iMsgQ);
1.234 + }
1.235 +
1.236 + // If more than one user thread tries to access the drive, then bail out as there is
1.237 + // only one DDmaHelper object per TLocDrv. Normally this shouldn't ever happen unless
1.238 +	// a client app accesses the drive directly using TBusLocalDrive or the file system is
1.239 + // asynchronous (i.e. there is a separate drive thread) but the file server message is
1.240 + // flagged as synchronous - e.g. EFsDrive
1.241 + if (TInt(__e32_atomic_add_ord32(&iLockCount, 1)) > 0) // busy ?
1.242 + {
1.243 + __KTRACE_DMA(Kern::Printf("-PHYSADDR: BUSY"));
1.244 + __e32_atomic_add_ord32(&iLockCount, TUint32(-1));
1.245 + OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT3, "< DMA Busy");
1.246 + return aReq.SendReceive(&primaryMedia.iMsgQ);
1.247 + }
1.248 +
1.249 + // make a copy of the request
1.250 + iMemoryType = EUnknown;
1.251 + iReq = &aReq;
1.252 + iReqId = reqId;
1.253 +
1.254 + iReqPosClient = iReq->Pos();
1.255 +
1.256 + iReqLenClient = I64LOW(iReq->Length());
1.257 +
1.258 + iReqRemoteDesOffset = iReq->RemoteDesOffset();
1.259 + iReqFlags = iReq->Flags();
1.260 +
1.261 + iRemoteThread = iReq->RemoteThread();
1.262 + iCurrentThread = &Kern::CurrentThread();
1.263 + iOwningThread = iRemoteThread ? iRemoteThread : iCurrentThread;
1.264 +
1.265 + iChunk = NULL;
1.266 + iChunkOffset = 0;
1.267 + iLinAddressUser = NULL;
1.268 + iLenConsumed = 0;
1.269 +
1.270 + // point to the start of the descriptor
1.271 + iLinAddressUser = aLinAddress - iReqRemoteDesOffset;
1.272 +
1.273 + // Need to check descriptors from both direct Clients (i.e. file cache, RemoteThread == NULL )
1.274 +	// and Remote Server Clients (file server clients, RemoteThread != NULL).
1.275 + // Shared Memory can potentially be used by both remote server and direct clients
1.276 + NKern::ThreadEnterCS();
1.277 + iChunk = Kern::OpenSharedChunk(iOwningThread, (const TAny*) iLinAddressUser, ETrue, iChunkOffset);
1.278 + NKern::ThreadLeaveCS();
1.279 +
1.280 + TInt fragments = 0;
1.281 + TInt r;
1.282 + do
1.283 + {
1.284 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:SendReceive() iReqLen %d; iLenConsumed %d; fragments %d",iReqLen, iLenConsumed, fragments));
1.285 + OstTraceExt2( TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE1, "PHYSADDR:SendReceive() iLenConsumed=%d; fragments=%d", iLenConsumed, fragments);
1.286 + r = RequestStart();
1.287 + if (r != KErrNone)
1.288 + {
1.289 + if (iChunk)
1.290 + {
1.291 + NKern::ThreadEnterCS();
1.292 + Kern::ChunkClose(iChunk);
1.293 + iChunk = NULL;
1.294 + NKern::ThreadLeaveCS();
1.295 + }
1.296 + __KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()- r:%d",r));
1.297 + OstTrace1( TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT4, "< PHYSADDR:SendReceive() Return code %d",r);
1.298 + iMemoryType = EUnknown;
1.299 + __e32_atomic_add_ord32(&iLockCount, TUint32(-1));
1.300 + return fragments ? r : iReq->SendReceive(&primaryMedia.iMsgQ);
1.301 + }
1.302 + else
1.303 + {
1.304 + iReq->Flags() |= TLocDrvRequest::EPhysAddr;
1.305 + }
1.306 +
1.307 + __KTRACE_DMA(Kern::Printf("-PHYSADDR:SendReceive() rThread %08X pos %08lX, len %d addr %08X off %08X",
1.308 + iRemoteThread, iReq->Pos(), I64LOW(iReq->Length()), iLinAddressUser, iReqRemoteDesOffset));
1.309 + OstTraceExt4(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE2, "PHYSADDR:SendReceive() position=%Ld; length=%d; address=0x%x; offset=0x%x", iReq->Pos(), (TInt) I64LOW(iReq->Length()), (TUint) iLinAddressUser, (TUint) iReqRemoteDesOffset );
1.310 +
1.311 + __ASSERT_DEBUG(iReq->Length() == FragLength(), PHYSADDR_FAULT());
1.312 + __ASSERT_DEBUG(iReq->Length() != 0, PHYSADDR_FAULT());
1.313 +
1.314 + // reinstate iValue in case overwritten by DMediaPagingDevice::CompleteRequest()
1.315 + iReq->iValue = iReqId;
1.316 +
1.317 + OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE3, "Dma SendReceive Start iReq=%d", iReq);
1.318 + r = iReq->SendReceive(&primaryMedia.iMsgQ);
1.319 + OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE4, "Dma SendReceive Return iReq=%d", iReq);
1.320 +
1.321 + // The media driver could potentially choose to deal with the request
1.322 + // without accessing physical memory (e.g. if the data is already cached).
1.323 + iLenConsumed += iFragLenRemaining;
1.324 +
1.325 + RequestEnd();
1.326 +
1.327 + ResetPageLists();
1.328 +
1.329 + fragments++;
1.330 +
1.331 + }
1.332 + while(r == KErrNone && LengthRemaining() > 0);
1.333 +
1.334 + if (iChunk)
1.335 + {
1.336 + NKern::ThreadEnterCS();
1.337 + Kern::ChunkClose(iChunk);
1.338 + iChunk = NULL;
1.339 + NKern::ThreadLeaveCS();
1.340 + }
1.341 +
1.342 + // Set remote descriptor length to iReqLenClient
1.343 + if (iReqId == DLocalDrive::ERead && r == KErrNone)
1.344 + r = UpdateRemoteDescriptorLength(iReqLenClient);
1.345 +
1.346 + __KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()"));
1.347 +
1.348 + iMemoryType = EUnknown;
1.349 +
1.350 + __e32_atomic_add_ord32(&iLockCount, TUint32(-1));
1.351 + OstTraceFunctionExit0( DDMAHELPER_SENDRECEIVE_EXIT5 );
1.352 + return r;
1.353 + }
1.354 +
1.355 +
1.356 +/**
1.357 + * Each read/write request is split into one or more DMA "fragments".
1.358 + * The maximum size of each fragment depends on the size of iPageArray[].
1.359 + * Subsequent calls to RequestStart may be required to complete a request.
1.360 + *
1.361 + * The physical address is checked for DMA alignment, or for the possibility of
1.362 + * eventual alignment due to mis-aligned start/end media blocks.
1.363 + *
1.364 + * A DMA "fragment" can be split over a number of pages as follows :
1.365 + * ----------------------------------------------------------
1.366 + * | 4K | 4K | 4K | 4K |
1.367 + * ----------------------------------------------------------
1.368 + * ******************************** : region to be read
1.369 + * <----------- iFragLen ----------->
1.370 + *
1.371 + * The pages may not be physically contiguous; if they are not,
1.372 + * then they are supplied to the media driver one contiguous
1.373 + * segment at a time by GetPhysicalAddress().
1.374 + **/
1.375 +TInt DDmaHelper::RequestStart()
1.376 + {
1.377 + OstTraceFunctionEntry1( DDMAHELPER_REQUESTSTART_ENTRY, this );
1.378 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestStart()"));
1.379 +
1.380 + iIndex = 0;
1.381 +
1.382 + TLinAddr startAddr = LinAddress();
1.383 + TInt64 startPos = iReqPosClient + iLenConsumed;
1.384 + TInt mediaBlockOffset = BlockOffset(startPos);
1.385 + TInt addrBlockOffset = BlockOffset(startAddr);
1.386 + TInt length = Min(LengthRemaining(), MaxFragLength());
1.387 +
1.388 + iPageArrayCount = iPageListCount = 0;
1.389 +
1.390 + TLinAddr firstPageStart = PageAlign(startAddr);
1.391 + TLinAddr lastPageStart = PageAlign(startAddr + length + iPageSize - 1);
1.392 + iPageArrayCount = (lastPageStart - firstPageStart + 1) >> iPageSizeLog2;
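+	// For example, with 4K pages, startAddr == 0x1F00 and length == 0x2200 give
+	// firstPageStart == 0x1000 and lastPageStart == 0x5000, so iPageArrayCount == 4
+	// (the transfer touches the pages at 0x1000, 0x2000, 0x3000 and 0x4000).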
1.393 +
1.394 + iMemoryType = EUnknown;
1.395 + iPhysAddr = KPhysMemFragmented; // Default - Mark memory as fragmented
1.396 +
1.397 + //*************************************
1.398 + // Check Physical Page Alignment!!
1.399 + //*************************************
1.400 + if (!IsBlockAligned(startPos))
1.401 + {
1.402 +		// Will the transfer become DMA-aligned at the next media block boundary, such that DMA can still be used?
1.403 +		TInt offset = I64LOW((startPos + iMediaBlockSize) & (iMediaBlockSize-1));
1.404 +		offset = iMediaBlockSize - offset;
1.405 +
1.406 + if (!IsDmaAligned(startAddr))
1.407 + {
1.408 + __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned pos 0x%x addr 0x%x)",I64LOW(startPos), startAddr));
1.409 + OstTraceExt2( TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT1, "< KErrNotSupported Not DMA Aligned startPos %x startAddr %x", I64LOW(startPos), startAddr );
1.410 + return KErrNotSupported;
1.411 + }
1.412 + }
1.413 + else
1.414 + { //block aligned!
1.415 + if (!IsDmaAligned(startAddr))
1.416 + {
1.417 + __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned (0x%x)",startAddr));
1.418 + OstTrace1(TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT2, "< KErrNotSupported Not DMA Aligned startAddr %x", startAddr);
1.419 + return KErrNotSupported;
1.420 + }
1.421 + }
1.422 +
1.423 + //************************************************
1.424 +	// Check for possible straddling of media blocks across RAM pages,
1.425 +	// i.e. media blocks which may straddle 2 non-contiguous pages.
1.426 + //************************************************
1.427 + if (mediaBlockOffset != addrBlockOffset)
1.428 + {
1.429 + __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - Frag / not block aligned: pos 0x%x addr 0x%x", I64LOW(startPos), startAddr));
1.430 + OstTraceExt2(TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT3, "< KErrNotSupported Frag / not block aligned: startPos 0x%x startAddr 0x%x", I64LOW(startPos), startAddr );
1.431 + return KErrNotSupported;
1.432 + }
1.433 +
1.434 + //************************************************
1.435 +	// Is it a File Server Cache request?
1.436 + //************************************************
1.437 + if (iChunk == NULL && // Not Shared memory
1.438 + iRemoteThread == NULL && // Direct Client Request
1.439 + IsPageAligned(startAddr) &&
1.440 + IsBlockAligned(startPos) &&
1.441 + (iPageArrayCount > 0) )
1.442 + {
1.443 + TLinAddr firstPageAddr = PageAlign(startAddr); //ensure that it is page aligned.
1.444 +
1.445 + TInt r = KErrNone;
1.446 + if (iPhysPinningAvailable)
1.447 + {
1.448 + TBool readOnlyMem = (iReqId == DLocalDrive::EWrite);
1.449 + r = Kern::PinPhysicalMemory(iPhysicalPinObject, firstPageAddr, iPageArrayCount << iPageSizeLog2,
1.450 + readOnlyMem, iPhysAddr, iPageArray, iMapAttr, iPageColour, iCurrentThread);
1.451 + }
1.452 + else
1.453 + {
1.454 + NKern::ThreadEnterCS();
1.455 + r = Kern::PrepareMemoryForDMA(iCurrentThread, (void*)firstPageAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
1.456 + NKern::ThreadLeaveCS();
1.457 + }
1.458 + if (r != KErrNone)
1.459 + {
1.460 + OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT4, this, r );
1.461 + return r;
1.462 + }
1.463 +
1.464 + iMemoryType = EFileServerChunk;
1.465 +
1.466 + __KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - EFileServerChunk"));
1.467 + OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART1, "EFileServerChunk");
1.468 + }
1.469 + //****************************
1.470 +	// Is it a shared chunk?
1.471 + //****************************
1.472 + else if (iChunk)
1.473 + {
1.474 + // calculate chunk offset of start of first page
1.475 + TInt offset = iChunkOffset + iReqRemoteDesOffset+ iLenConsumed;
1.476 +
1.477 + TInt r = Kern::ChunkPhysicalAddress(iChunk, offset, length, iLinAddressKernel, iMapAttr, iPhysAddr, iPageArray);
1.478 +
1.479 + if (r < KErrNone)
1.480 + {
1.481 + OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT5, this, r );
1.482 +			return r;	// error; on success ChunkPhysicalAddress returns 0 (contiguous memory) or 1 (fragmented/dis-contiguous memory)
1.483 + }
1.484 +
1.485 + iMemoryType = ESharedChunk;
1.486 +
1.487 + __KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - ESharedChunk"));
1.488 + OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART2, "ESharedChunk");
1.489 + }
1.490 + else
1.491 + {
1.492 + __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - EUnknown"));
1.493 + OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT6, this, KErrNotFound );
1.494 + return KErrNotFound;
1.495 + }
1.496 +
1.497 + SetFragLength(length);
1.498 +
1.499 + //************************************************
1.500 + // Build Contiguous Page list
1.501 + //************************************************
1.502 + BuildPageList();
1.503 +
1.504 + //************************************************
1.505 + // Set up request parameters for this fragment
1.506 + //************************************************
1.507 + iReq->Length() = MAKE_TINT64(0, length);
1.508 + iReq->Pos() = iReqPosClient + iLenConsumed;
1.509 + iReq->RemoteDesOffset() = iReqRemoteDesOffset + iLenConsumed;
1.510 + // restore EAdjusted flag to ensure iReq->Pos() is adjusted correctly
1.511 + iReq->Flags()&= ~TLocDrvRequest::EAdjusted;
1.512 + iReq->Flags()|= (iReqFlags & TLocDrvRequest::EAdjusted);
1.513 +
1.514 + //************************************************
1.515 + // Sync memory
1.516 + //************************************************
1.517 + __KTRACE_DMA(Kern::Printf(">SYNC-PHYSADDR:addr 0x%x len %d", startAddr, length));
1.518 + OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART3, "startAddr=0x%x length=%d", (TUint) startAddr, length );
1.519 +
1.520 + // Only sync whole blocks: it is assumed that the media driver will transfer
1.521 + // partial start and end blocks without DMA
1.522 +
1.523 + TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
1.524 + TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);
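+	// For example, with 512-byte blocks, a 4096-byte fragment starting at media position 0x1300
+	// gives startBlockPartialLen == 256 and blockLen == 3584: only the 7 whole blocks are synced
+	// here; the partial first and last blocks are left for the media driver to transfer without DMA.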
1.525 +
1.526 + if (iReqId == DLocalDrive::EWrite)
1.527 + {
1.528 + if (iMemoryType == ESharedChunk)
1.529 + {
1.530 + Cache::SyncMemoryBeforeDmaWrite(iLinAddressKernel+startBlockPartialLen, blockLen, iMapAttr);
1.531 + }
1.532 + else // (iMemoryType == EFileServerChunk)
1.533 + {
1.534 + if (iPhysPinningAvailable)
1.535 + Cache::SyncPhysicalMemoryBeforeDmaWrite(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
1.536 + else
1.537 + Cache::SyncMemoryBeforeDmaWrite(startAddr+startBlockPartialLen, blockLen);
1.538 + }
1.539 + }
1.540 + else
1.541 + {
1.542 + if (iMemoryType == ESharedChunk)
1.543 + Cache::SyncMemoryBeforeDmaRead(iLinAddressKernel, length, iMapAttr);
1.544 + else // (iMemoryType == EFileServerChunk)
1.545 + {
1.546 + if (iPhysPinningAvailable)
1.547 + Cache::SyncPhysicalMemoryBeforeDmaRead(iPageArray, iPageColour, 0, length, iMapAttr);
1.548 + else
1.549 + Cache::SyncMemoryBeforeDmaRead(startAddr, length);
1.550 + }
1.551 + }
1.552 +
1.553 + __KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart()"));
1.554 +
1.555 + OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT7, this, KErrNone );
1.556 + return KErrNone;
1.557 + }
1.558 +
1.559 +/**
1.560 + * After read requests this method synchronises the current physical memory in use.
1.561 + */
1.562 +void DDmaHelper::RequestEnd()
1.563 + {
1.564 + OstTraceFunctionEntry0( DDMAHELPER_REQUESTEND_ENTRY );
1.565 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestEnd()"));
1.566 +
1.567 +
1.568 + __ASSERT_DEBUG(iReqId == DLocalDrive::ERead || iReqId == DLocalDrive::EWrite, PHYSADDR_FAULT());
1.569 + __ASSERT_DEBUG(iMemoryType == ESharedChunk || iMemoryType == EFileServerChunk, PHYSADDR_FAULT());
1.570 +
1.571 + TInt length = FragLength(); // len of data just transferred
1.572 + TLinAddr startAddr = LinAddress() - length;
1.573 +
1.574 + // Sync the memory : but not if the media driver has decided to transfer ALL the data using IPC rather than DMA.
1.575 + // It is assumed that the media driver will transfer partial start & end blocks using IPC, but it may also choose
1.576 + // to use IPC for the ENTIRE fragment when read/writing at the end of the media (see medmmc.cpp)
1.577 + if (iFragLenRemaining < length && iReqId == DLocalDrive::ERead)
1.578 + {
1.579 + TInt64 startPos = iReq->Pos();
1.580 + TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
1.581 + TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);
1.582 +
1.583 + if (iMemoryType == ESharedChunk)
1.584 + {
1.585 + Cache::SyncMemoryAfterDmaRead(iLinAddressKernel + startBlockPartialLen, blockLen);
1.586 + }
1.587 + else // (iMemoryType == EFileServerChunk)
1.588 + {
1.589 + if (iPhysPinningAvailable)
1.590 + Cache::SyncPhysicalMemoryAfterDmaRead(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
1.591 + else
1.592 + Cache::SyncMemoryAfterDmaRead(startAddr + startBlockPartialLen, blockLen);
1.593 + }
1.594 +
1.595 + }
1.596 + ReleasePages(PageAlign(startAddr));
1.597 + OstTraceFunctionExit0( DDMAHELPER_REQUESTEND_EXIT );
1.598 + }
1.599 +
1.600 +/**
1.601 + * For File Server chunks this method releases the current physical memory in use.
1.602 + *
1.603 + * @see Kern::ReleaseMemoryFromDMA()
1.604 + */
1.605 +void DDmaHelper::ReleasePages(TLinAddr aAddr)
1.606 + {
1.607 + OstTraceFunctionEntry1( DDMAHELPER_RELEASEPAGES_ENTRY, this );
1.608 + if (iMemoryType == EFileServerChunk)
1.609 + {
1.610 + __KTRACE_DMA(Kern::Printf(">PHYSADDR():ReleasePages thread (0x%x) aAddr(0x%08x) size(%d) iPageArray(0x%x)",iCurrentThread, aAddr, (iPageArrayCount << iPageSizeLog2), iPageArray));
1.611 + OstTraceExt3( TRACE_DMASUPPORT, DDMAHELPER_RELEASEPAGES, "ReleasePages aAddr=0x%x; size=%d; iPageArray-0x%x", (TUint) aAddr, (iPageArrayCount << iPageSizeLog2), (TUint) iPageArray);
1.612 +
1.613 + TInt r;
1.614 + if (iPhysPinningAvailable)
1.615 + {
1.616 + r = Kern::UnpinPhysicalMemory(iPhysicalPinObject);
1.617 + }
1.618 + else
1.619 + {
1.620 + NKern::ThreadEnterCS();
1.621 + r = Kern::ReleaseMemoryFromDMA(iCurrentThread, (void*) aAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
1.622 + NKern::ThreadLeaveCS();
1.623 + }
1.624 + __ASSERT_ALWAYS(r == KErrNone, PHYSADDR_FAULT());
1.625 + }
1.626 + OstTraceFunctionExit1( DDMAHELPER_RELEASEPAGES_EXIT, this );
1.627 + }
1.628 +
1.629 +/**
1.630 + * Utility method which examines the page array, compiling adjacent pages into contiguous fragments
1.631 + * and populating iPageList with said fragments.
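 * For example, physical pages at 0x10000000, 0x10001000 and 0x10003000 (4K pages) are compiled
 * into two fragments: one covering the first two adjacent pages and one for the third page.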
1.632 + */
1.633 +void DDmaHelper::BuildPageList()
1.634 + {
1.635 + OstTraceFunctionEntry1( DDMAHELPER_BUILDPAGELIST_ENTRY, this );
1.636 + iPageListCount = 0;
1.637 +
1.638 + if (iPhysAddr != KPhysMemFragmented)
1.639 + {
1.640 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Contiguous Memory"));
1.641 + OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_BUILDPAGELIST1, "Contiguous Memory");
1.642 + // Only one entry required.
1.643 + iPageList[0].iAddress = iPhysAddr;
1.644 + iPageList[0].iLength = FragLength();
1.645 + iPageListCount = 1;
1.646 + }
1.647 + else
1.648 + {
1.649 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Dis-Contiguous Memory"));
1.650 + OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_BUILDPAGELIST2, "Dis-Contiguous Memory");
1.651 + TInt offset;
1.652 +
1.653 + offset = PageOffset(iChunkOffset + iReqRemoteDesOffset+ iLenConsumed);
1.654 + iPageList[0].iAddress = iPageArray[0]+offset;
1.655 + iPageList[0].iLength = iPageSize-offset;
1.656 +
1.657 + TInt lengthRemaining = FragLength() - iPageList[0].iLength;
1.658 +
1.659 + TInt i =1;
1.660 + for( ; i < iPageArrayCount; i++)
1.661 + {
1.662 + //Check if RAM pages are physically adjacent
1.663 + if ((iPageArray[i-1] + PageSize()) == iPageArray[i])
1.664 + {
1.665 + // Adjacent pages - just add length
1.666 + iPageList[iPageListCount].iLength += PageSize();
1.667 + }
1.668 + else
1.669 + {
1.670 + // Not Adjacent, start new Memory fragment
1.671 + iPageListCount++;
1.672 + iPageList[iPageListCount].iAddress = iPageArray[i];
1.673 + iPageList[iPageListCount].iLength = iPageSize;
1.674 + }
1.675 +
1.676 + lengthRemaining -= PageSize();
1.677 + if (lengthRemaining < 0)
1.678 + {
1.679 + // Last page, re-adjust length for odd remainder.
1.680 + iPageList[iPageListCount].iLength += lengthRemaining;
1.681 + break;
1.682 + }
1.683 + }
1.684 +
1.685 + iPageListCount++;
1.686 + }
1.687 +
1.688 +//#ifdef __DEBUG_DMASUP__
1.689 +// for (TInt m=0; m<iPageListCount; m++)
1.690 +// __KTRACE_DMA(Kern::Printf("-PHYSADDR:BuildPageList() [%d]: %08X l:%d", m, iPageList[m].iAddress, iPageList[m].iLength));
1.691 +//#endif
1.692 + OstTraceFunctionExit1( DDMAHELPER_BUILDPAGELIST_EXIT, this );
1.693 + }
1.694 +
1.695 +
1.696 +/**
1.697 + * Returns Address and Length of next contiguous Physical memory fragment
1.698 + *
1.699 + * @param aAddr On success, populated with the Physical Address of the next fragment.
1.700 + * @param aLen On success, populated with the length in bytes of the next fragment.
1.701 + *
1.702 + * @return KErrNone, if successful;
1.703 + *         KErrGeneral, if no more memory fragments are left.
1.704 + */
1.705 +TInt DDmaHelper::GetPhysicalAddress(TPhysAddr& aAddr, TInt& aLen)
1.706 + {
1.707 + OstTraceFunctionEntry1( DUP1_DDMAHELPER_GETPHYSICALADDRESS_ENTRY, this );
1.708 + if (iIndex >= iPageListCount)
1.709 + {
1.710 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d], PageListCount:%d", iIndex, iPageListCount));
1.711 + OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_GETPHYSICALADDRESS1, "GetPhysD() [%d]; iPageCountList=%d", iIndex, iPageListCount );
1.712 + aAddr = 0;
1.713 + aLen = 0;
1.714 + OstTraceFunctionExitExt( DUP1_DDMAHELPER_GETPHYSICALADDRESS_EXIT1, this, KErrGeneral );
1.715 + return KErrGeneral;
1.716 + }
1.717 +
1.718 + aAddr = iPageList[iIndex].iAddress;
1.719 + aLen = iPageList[iIndex].iLength;
1.720 + iLenConsumed+= aLen;
1.721 + iFragLenRemaining-= aLen;
1.722 +
1.723 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d] addr:0x%08X, l:%d; Used:%d, Left:%d", iIndex, aAddr, aLen, iLenConsumed, iFragLenRemaining));
1.724 + OstTraceExt5(TRACE_DMASUPPORT, DDMAHELPER_GETPHYSICALADDRESS2, "GetPhysD() [%d]; address=0x%x; length=%d; iLenConsumed=%d; iFragLenRemaining=%d", iIndex, (TUint) aAddr, aLen, iLenConsumed, iFragLenRemaining);
1.725 + __ASSERT_DEBUG(aLen >= 0, PHYSADDR_FAULT());
1.726 +
1.727 + iIndex++; //Move index to next page
1.728 +
1.729 + OstTraceFunctionExitExt( DDMAHELPER_GETPHYSICALADDRESS_EXIT2, this, KErrNone );
1.730 + return KErrNone;
1.731 + }
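+
+// Illustrative consumption sketch (the exact call path is media-driver specific): during a
+// fragment transfer the media driver requests each contiguous run in turn, e.g.
+//
+//   TPhysAddr physAddr;
+//   TInt physLen;
+//   while (bytesLeft > 0 && dmaHelper->GetPhysicalAddress(physAddr, physLen) == KErrNone)
+//       {
+//       // program the DMA controller with physAddr / physLen and wait for completion
+//       bytesLeft -= physLen;
+//       }
+//
+// where 'bytesLeft', 'dmaHelper' and the DMA programming step are illustrative assumptions.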
1.732 +
1.733 +
1.734 +#ifdef __DEMAND_PAGING__
1.735 +/**
1.736 + * Returns Address and Length of next contiguous Physical memory.
1.737 + * Static function specifically for Demand Paging support
1.738 + *
1.739 + * @param aReq       The TLocDrvRequest from which the physical address is obtained.
1.740 + * @param aAddr Populated with the Physical Address of the Request aReq.
1.741 + * @param aLen Populated with the length in bytes of the memory.
1.742 + *
1.743 + * @return KErrNone
1.744 + */
1.745 +TInt DDmaHelper::GetPhysicalAddress(TLocDrvRequest& aReq, TPhysAddr& aAddr, TInt& aLen)
1.746 + {
1.747 + OstTraceFunctionEntry0( DDMAHELPER_GETPHYSICALADDRESS_ENTRY );
1.748 + __ASSERT_DEBUG( (aReq.Flags() & TLocDrvRequest::ETClientBuffer) == 0, PHYSADDR_FAULT());
1.749 + TLinAddr linAddr = (TLinAddr) aReq.RemoteDes();
1.750 + TInt& offset = aReq.RemoteDesOffset();
1.751 + TLinAddr currLinAddr = linAddr + offset;
1.752 + TInt reqLen = I64LOW(aReq.Length());
1.753 + __ASSERT_DEBUG(I64HIGH(aReq.Length()) == 0, PHYSADDR_FAULT());
1.754 +
1.755 + aAddr = Epoc::LinearToPhysical(currLinAddr);
1.756 +
1.757 + // Set the initial length to be the length remaining in this page or the request length (whichever is shorter).
1.758 + // If there are subsequent pages, we then need to determine whether they are contiguous
1.759 + aLen = Min( (TInt) (PageAlign(currLinAddr+iPageSize) - currLinAddr), reqLen - offset);
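+	// e.g. with 4K pages and currLinAddr == 0x1F00 this is Min(0x100, reqLen - offset),
+	// i.e. at most the 256 bytes remaining before the next page boundary.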
1.760 +
1.761 + __ASSERT_DEBUG(aLen > 0, PHYSADDR_FAULT());
1.762 +
1.763 + TPhysAddr currPhysPageAddr = PageAlign((TLinAddr) aAddr);
1.764 +
1.765 + offset+= aLen;
1.766 +
1.767 +
1.768 + while (offset < reqLen)
1.769 + {
1.770 + TPhysAddr nextPhysPageAddr = Epoc::LinearToPhysical(linAddr + offset);
1.771 + __ASSERT_DEBUG(PageOffset((TLinAddr) nextPhysPageAddr) == 0, PHYSADDR_FAULT());
1.772 +
1.773 + if (nextPhysPageAddr != currPhysPageAddr + iPageSize)
1.774 + break;
1.775 +
1.776 + currPhysPageAddr = nextPhysPageAddr;
1.777 +
1.778 + TInt len = Min(iPageSize, reqLen - offset);
1.779 + offset+= len;
1.780 + aLen+= len;
1.781 + }
1.782 +
1.783 +
1.784 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:DP:GetPhysS(), linAddr %08X, physAddr %08X, len %x reqLen %x", linAddr + offset, aAddr, aLen, reqLen));
1.785 + OstTraceExt4(TRACE_DEMANDPAGING, DDMAHELPER_GETPHYSICALADDRESS_DP, "linAddr=0x%x; physAddr=0x%x; length=0x%x; reqLen=0x%x", linAddr + offset, aAddr, aLen, reqLen);
1.786 + OstTraceFunctionExit0( DDMAHELPER_GETPHYSICALADDRESS_EXIT );
1.787 + return KErrNone;
1.788 + }
1.789 +#endif // (__DEMAND_PAGING__)
1.790 +
1.791 +
1.792 +/**
1.793 + * Modifies the current request's remote descriptor length
1.794 + *
1.795 + * @param aLength Length in bytes to which the descriptor is to be set.
1.796 + *
1.797 + * @return KErrNone, if successful;
1.798 + * KErrBadDescriptor, if descriptor is corrupted;
1.799 + * otherwise one of the other system wide error codes.
1.800 + */
1.801 +
1.802 +TInt DDmaHelper::UpdateRemoteDescriptorLength(TInt aLength)
1.803 + {
1.804 + OstTraceFunctionEntryExt( DDMAHELPER_UPDATEREMOTEDESCRIPTORLENGTH_ENTRY, this );
1.805 + __KTRACE_DMA(Kern::Printf(">PHYSADDR:UpDesLen(%d)",aLength));
1.806 +
1.807 + // Restore request Id (overwritten by KErrNone return code) to stop ASSERT in WriteRemote
1.808 + iReq->Id() = DLocalDrive::ERead;
1.809 +
1.810 + // restore caller's descriptor offset
1.811 + iReq->RemoteDesOffset() = iReqRemoteDesOffset;
1.812 +
1.813 +	// Write a zero length descriptor at the end such that the descriptor's length is correctly updated.
1.814 + TPtrC8 zeroDes(NULL, 0);
1.815 + TInt r = iReq->WriteRemote(&zeroDes, aLength);
1.816 +
1.817 + // restore return code
1.818 + iReq->iValue = KErrNone;
1.819 +
1.820 + OstTraceFunctionExitExt( DDMAHELPER_UPDATEREMOTEDESCRIPTORLENGTH_EXIT, this, r );
1.821 + return r;
1.822 + }
1.823 +