// os/kernelhwsrv/kernel/eka/drivers/locmedia/dmasupport.cpp
// author sl@SLION-WIN7.fritz.box
// Fri, 15 Jun 2012 03:10:57 +0200
// changeset 0 bde4ae8d615e
// permissions -rw-r--r--
// First public contribution.
//
// Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\drivers\locmedia\dmasupport.cpp
// 
//
// Kernel-side headers: core kernel API, cache maintenance, and the local
// media subsystem this DMA helper belongs to.
#include <kernel/kernel.h>
#include <kernel/cache.h>
#include "locmedia.h"
#include "dmasupport.h"
#include "dmasupport.inl"

// Open System Trace (OST) instrumentation; compiled in only when the trace
// compiler is in use.
#include "OstTraceDefinitions.h"
#ifdef OST_TRACE_COMPILER_IN_USE
#include "locmedia_ost.h"
#ifdef __VC32__
#pragma warning(disable: 4127) // disabling warning "conditional expression is constant"
#endif
#include "dmasupportTraces.h"
#endif
sl@0
    32
sl@0
    33
// Halt the kernel with a locatable fault ("TLOCDRV-PHYS-ADDR", line number)
// when an invariant of this translation unit is violated.
#define PHYSADDR_FAULT()	Kern::Fault("TLOCDRV-PHYS-ADDR",__LINE__)

// Debug tracing for this file: enable by defining __DEBUG_DMASUP__, in which
// case __KTRACE_DMA(p) executes p (typically a Kern::Printf call); otherwise
// the macro expands to nothing.
//#define __DEBUG_DMASUP__
#ifdef __DEBUG_DMASUP__
#define __KTRACE_DMA(p) {p;}
#else
#define __KTRACE_DMA(p)
#endif

// Cached MMU page geometry, initialised once in the DDmaHelper constructor.
TInt DDmaHelper::iPageSize;      // page size in bytes (Kern::RoundToPageSize(1))
TInt DDmaHelper::iPageSizeLog2;  // log2 of the page size
TInt DDmaHelper::iPageSizeMsk;   // iPageSize - 1: mask of the in-page offset bits

/******************************************************************************
 DDmaHelper
 ******************************************************************************/
// Sentinel stored in iPhysAddr meaning "pages are not physically contiguous".
const TPhysAddr KPhysMemFragmented = KPhysAddrInvalid;
sl@0
    50
sl@0
    51
TUint32 Log2(TUint32 aVal)
//
// Returns the bit index of the most significant set bit of aVal,
// i.e. floor(log2(aVal)); an input of 0 yields 0.
//
	{
	__ASSERT_COMPILE(sizeof(TUint32) == 4);

	// Binary search over the bit ranges: repeatedly test the upper half of
	// the remaining bits, accumulating the index and shifting down.
	TUint32 pos = 0;
	if (aVal >> 16) { pos += 16; aVal >>= 16; }
	if (aVal >> 8)  { pos += 8;  aVal >>= 8;  }
	if (aVal >> 4)  { pos += 4;  aVal >>= 4;  }
	if (aVal >> 2)  { pos += 2;  aVal >>= 2;  }
	if (aVal >> 1)  { pos += 1; }
	return pos;
	}
sl@0
    65
sl@0
    66
TBool IsPowerOfTwo(TInt aNum)
//
// Returns ETrue if aNum is a power of two
//
	{
	if (aNum == 0)
		return EFalse;
	// A power of two has exactly one bit set, so clearing the lowest set
	// bit must leave zero.
	return (aNum & (aNum - 1)) == 0;
	}
sl@0
    73
sl@0
    74
void DDmaHelper::ResetPageLists()
sl@0
    75
	{
sl@0
    76
	iFragLen = 0;
sl@0
    77
	iFragLenRemaining = 0;
sl@0
    78
	}
sl@0
    79
sl@0
    80
/**
 * Constructor: caches the MMU page geometry in the static members used by
 * the page-alignment/arithmetic helpers throughout this file.
 */
DDmaHelper::DDmaHelper()
	{
	OstTraceFunctionEntry0( DDMAHELPER_DDMAHELPER_ENTRY );
	// RoundToPageSize(1) yields the MMU page size in bytes.
	iPageSize = Kern::RoundToPageSize(1);
	__ASSERT_ALWAYS(IsPowerOfTwo(iPageSize), PHYSADDR_FAULT());
	iPageSizeLog2 = Log2(iPageSize);
	iPageSizeMsk = iPageSize-1;
	OstTraceFunctionExit0( DDMAHELPER_DDMAHELPER_EXIT );
	}
sl@0
    89
sl@0
    90
/**
 * Destructor: frees the page arrays and, if one was created in Construct(),
 * destroys the physical pin object.
 */
DDmaHelper::~DDmaHelper()
	{
	OstTraceFunctionEntry0( DESTRUCTOR_DDMAHELPER_ENTRY );
	delete [] iPageArray;
	delete [] iPageList;
	if (iPhysicalPinObject)
		{
		// Pin object destruction must be performed inside a critical section.
		NKern::ThreadEnterCS();
		Kern::DestroyPhysicalPinObject(iPhysicalPinObject);
		NKern::ThreadLeaveCS();
		}
	OstTraceFunctionExit0( DESTRUCTOR_DDMAHELPER_EXIT );
	}
sl@0
   103
sl@0
   104
/**
Constructs the DDmaHelper object 

@param aLength The maximum length of data mapped by this object.
			   Should be a multiple of the page size 
@param aMediaBlockSize The minimum amount data that the media can transfer in read / write operations
@param aDmaAlignment The memory alignment required by the media devices DMA controller. (i.e. word aligned = 2)

@return KErrNone,if successful;
		KErrNotSupported, if the CPU cache line size is larger than aMediaBlockSize;
		KErrNoMemory, if unable to create Page Array's;
		otherwise an error from Kern::CreatePhysicalPinObject().
*/
TInt DDmaHelper::Construct(TInt aLength, TInt aMediaBlockSize, TInt aDmaAlignment)
	{
	OstTraceFunctionEntry1( DDMAHELPER_CONSTRUCT_ENTRY, this );
	// Parameter invariants: a positive power-of-two block size, and a
	// mapped length strictly larger than one page.
	__ASSERT_ALWAYS(aMediaBlockSize > 0, PHYSADDR_FAULT());
	__ASSERT_ALWAYS(IsPowerOfTwo(aMediaBlockSize), PHYSADDR_FAULT());
	__ASSERT_ALWAYS(aLength > 0, PHYSADDR_FAULT());
	__ASSERT_ALWAYS(aLength > iPageSize, PHYSADDR_FAULT());

	// This code assumes that the media block size (normally 512) is >= the processor's 
	// cache-line size (typically 32 bytes). This may not be true for future processors.
	// If the cache-line size was 1024, for example,  reading 512 bytes into a client's 
	// buffer & then calling Cache::SyncMemoryAfterDmaRead would invalidate an entire 1024 
	// bytes in the user's address space.
	TUint cacheLineSize = Cache::DmaBufferAlignment();
	__ASSERT_ALWAYS(IsPowerOfTwo(cacheLineSize), PHYSADDR_FAULT());
	if (cacheLineSize > (TUint) aMediaBlockSize)
	    {
		OstTraceFunctionExitExt( DDMAHELPER_CONSTRUCT_EXIT1, this, KErrNotSupported );
		return KErrNotSupported;
	    }
	
	//Check whether Kernel supports physical memory pinning:
	TInt mm = Kern::HalFunction(EHalGroupKernel, EKernelHalMemModelInfo, 0, 0) & EMemModelTypeMask;
	if (mm >= EMemModelTypeFlexible)
		{
		// Flexible memory model supports physical pinning for user (and Kernel) memory that
		// is the subject of DMA transfer.
		// Physical memory pinning ensures that:
		// - physical memory is not moved by RAM defragmentation.
		// - it is safe to to DMA against it or do sync cache (using new interface) even if/when
		// the owner of the memory (e.g. untrusted user aplication) decomits memory or panics.
		// For details @see Kern::PinPhysicalMemory.
		// Cache Sync of physically pinned memory on flexible memory model is done by:
		//  - Cache::SyncPhysicalMemoryBeforeDmaWrite
		//  - Cache::SyncPhysicalMemoryBeforeDmaRead
		//  - Cache::SyncPhysicalMemoryAfterDmaRead
		iPhysPinningAvailable = ETrue;
		__KTRACE_DMA(Kern::Printf("Memory model (%d) supports physical pinning\n",mm));
		NKern::ThreadEnterCS();
		TInt r=Kern::CreatePhysicalPinObject(iPhysicalPinObject);
		OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_CONSTRUCT1, "Memory model=%d supports physical pinning; created Physical Pin Object with return value=%d",mm, r);
		NKern::ThreadLeaveCS();
		if (r) return r;
		}
	else
		{
		// Memory models before flexible do not support memory pinning.
		// The driver has to use  PrepareMemoryForDMA/ReleaseMemoryFromDMA Kernel interface 
		// that ensures that physical memory won't be moved by RAM defragmentation module.
		// However, Kernel relies on assumption that the user memory won't dissapear (e.g. by
		// user client closing the chunk or panics), as it would lead to Kernel crash.
		// For that reason, the only use case for DMA transfer into user memory is File System's
		// read/write buffer - as it is assumed that File System is trusted component.
		// To mark its buffers(s) for DMA transfer, File Sytem must call UserSvr::RegisterTrustedChunk
		// before DMA transfer starts.
		// Cache sync. operations before/after DMA transfer must be done by using the old Cache interface:
		//  - Cache::SyncMemoryBeforeDmaWrite
		//  - Cache::SyncMemoryBeforeDmaRead
		//  - Cache::SyncMemoryAfterDmaRead
		// As they all require linear address as input, these methods also rely on File System buffers
		// to be in valid state during sync calls.
		iPhysPinningAvailable = EFalse;
		__KTRACE_DMA(Kern::Printf("Memory model (%d) doesn't support physical pining",mm));
		OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_CONSTRUCT2, "Memory model=%d doesn't support physical pinning",mm);
		iPhysicalPinObject = NULL;
		}
	
	iMaxPages = (aLength >> iPageSizeLog2)-1;
	
	// 2 Additional pages for page straddling
	iPageArray = new TPhysAddr[iMaxPages+2];
	if (iPageArray != NULL)
		{
		iPageList = new TPageList[iMaxPages];
		if (iPageList != NULL)
			{
			iMediaBlockSize = aMediaBlockSize;
			iMediaBlockSizeMask = TInt64(iMediaBlockSize - 1);

			iDmaAlignment = aDmaAlignment;
			__KTRACE_DMA(Kern::Printf("-PHYSADDR: Construct iMaxPages(%d), MediaBlocks(%d), DMAalign(%d)",iMaxPages,iMediaBlockSize,iDmaAlignment));
			OstTraceExt3(TRACE_FLOW, DDMAHELPER_CONSTRUCT_EXIT2, "< KErrNone PHYSADDR: Construct iMaxPages %d MediaBlocks %d DMAalign %d", iMaxPages,iMediaBlockSize,iDmaAlignment );
			return KErrNone;
			}
		// Second allocation failed: release the first so the object stays
		// in a consistent (empty) state.
		delete [] iPageArray; iPageArray = NULL;
		}
	
	iMaxPages = 0;
	OstTraceFunctionExitExt( DDMAHELPER_CONSTRUCT_EXIT3, this, KErrNoMemory );
	return KErrNoMemory;
	}
sl@0
   206
sl@0
   207
/**
 * Each Read/Write request is examined to determine if the descriptor that 
 * is referenced is mapped to a physical memory object; 
 * if so it prepares the memory, updates the request with physical memory information
 * and issues the request.
 * If a request does not make use of physical memory or is not configured correctly the
 * request is passed through without modification.
 *
 * @param aReq        The read/write request to service.
 * @param aLinAddress Linear address of the client descriptor data.
 * @return The result of the (possibly fragmented) request.
 */
TInt DDmaHelper::SendReceive(TLocDrvRequest& aReq, TLinAddr aLinAddress)
	{
	OstTraceFunctionEntry0( DDMAHELPER_SENDRECEIVE_ENTRY );
	DPrimaryMediaBase& primaryMedia = *aReq.Drive()->iPrimaryMedia;
	
	// Only reads and writes can be converted to physical-address transfers;
	// everything else is passed straight through to the media thread.
	TInt reqId = aReq.Id();
	if (reqId != DLocalDrive::ERead && reqId != DLocalDrive::EWrite)
	    {
	    OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT1, "< Request is not ERead or EWrite, cannot perform Direct Memory Access");
		return aReq.SendReceive(&primaryMedia.iMsgQ);
	    }
		
	// Pass through requests longer than 32 bits or shorter than one media block.
	if ((I64HIGH(aReq.Length()) > 0) || (aReq.Length() < iMediaBlockSize))
	    {
	    OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT2, "< Invalid request length, cannot perform Direct Memory Access");
		return aReq.SendReceive(&primaryMedia.iMsgQ);
	    }
	
	// If more than one user thread tries to access the drive, then bail out as there is 
	// only one DDmaHelper object per TLocDrv. Normally this shouldn't ever happen unless
	// a client app accesses the drive directly using TBusLOcalDrive or the file system is 
	// asynchronous (i.e. there is a separate drive thread) but the file server message is 
	// flagged as synchronous - e.g. EFsDrive
	if (TInt(__e32_atomic_add_ord32(&iLockCount, 1)) > 0)	// busy ?
		{
		__KTRACE_DMA(Kern::Printf("-PHYSADDR: BUSY"));
		__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
		OstTrace0(TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT3, "< DMA Busy");
		return aReq.SendReceive(&primaryMedia.iMsgQ);
		}

	// make a copy of the request 
	iMemoryType = EUnknown;
	iReq = &aReq;
	iReqId = reqId;
		
	iReqPosClient = iReq->Pos();

	iReqLenClient = I64LOW(iReq->Length());

	iReqRemoteDesOffset = iReq->RemoteDesOffset();
	iReqFlags = iReq->Flags();

	// For remote-server clients the data belongs to the remote thread;
	// direct clients own the descriptor themselves.
	iRemoteThread = iReq->RemoteThread();
	iCurrentThread = &Kern::CurrentThread();
	iOwningThread = iRemoteThread ? iRemoteThread : iCurrentThread;

	iChunk = NULL;
	iChunkOffset = 0;
	iLinAddressUser = NULL;
	iLenConsumed = 0;

	// point to the start of the descriptor
	iLinAddressUser = aLinAddress - iReqRemoteDesOffset;
	
	// Need to check descriptors from both direct Clients (i.e. file cache, RemoteThread == NULL )
	// and Remote Server Clients (file server clients, RemoteThread != NULL)
	// Shared Memory can potentially be used by both remote server and direct clients
	NKern::ThreadEnterCS();
	iChunk = Kern::OpenSharedChunk(iOwningThread, (const TAny*) iLinAddressUser, ETrue, iChunkOffset);
	NKern::ThreadLeaveCS();
	
	// Issue the request one DMA fragment at a time until it is fully
	// consumed or an error occurs.
	TInt fragments = 0;
	TInt r;
	do
		{
		// NOTE(review): this debug-only trace references iReqLen, which is not
		// assigned in this function - verify it still builds with __DEBUG_DMASUP__.
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:SendReceive() iReqLen %d; iLenConsumed %d; fragments %d",iReqLen, iLenConsumed, fragments));
		OstTraceExt2( TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE1, "PHYSADDR:SendReceive() iLenConsumed=%d; fragments=%d", iLenConsumed, fragments);
		r = RequestStart();
		if (r != KErrNone)
			{
			if (iChunk)
				{
				NKern::ThreadEnterCS();
				Kern::ChunkClose(iChunk);
				iChunk = NULL;
				NKern::ThreadLeaveCS();
				}
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()- r:%d",r));
			OstTrace1( TRACE_FLOW, DDMAHELPER_SENDRECEIVE_EXIT4, "< PHYSADDR:SendReceive() Return code %d",r);
			iMemoryType = EUnknown;
			__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
			// If no fragment has been issued yet, fall back to the normal
			// (non-DMA) path; otherwise report the error.
			return fragments ? r : iReq->SendReceive(&primaryMedia.iMsgQ);
			}
		else
			{
			// Tell the media driver the request now carries physical addresses.
			iReq->Flags() |= TLocDrvRequest::EPhysAddr;
			}

		__KTRACE_DMA(Kern::Printf("-PHYSADDR:SendReceive() rThread %08X pos %08lX, len %d addr %08X off %08X", 
				iRemoteThread, iReq->Pos(), I64LOW(iReq->Length()), iLinAddressUser, iReqRemoteDesOffset));
		OstTraceExt4(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE2, "PHYSADDR:SendReceive() position=%Ld; length=%d; address=0x%x; offset=0x%x", iReq->Pos(), (TInt) I64LOW(iReq->Length()), (TUint) iLinAddressUser, (TUint) iReqRemoteDesOffset );
		
		__ASSERT_DEBUG(iReq->Length() == FragLength(), PHYSADDR_FAULT());
		__ASSERT_DEBUG(iReq->Length() != 0, PHYSADDR_FAULT());

		// reinstate iValue in case overwritten by DMediaPagingDevice::CompleteRequest()
		iReq->iValue = iReqId;
		
		OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE3, "Dma SendReceive Start iReq=%d", iReq);
		r = iReq->SendReceive(&primaryMedia.iMsgQ);
		OstTrace1(TRACE_DMASUPPORT, DDMAHELPER_SENDRECEIVE4, "Dma SendReceive Return iReq=%d", iReq);
		
		// The media driver could potentially choose to deal with the request 
		// without accessing physical memory (e.g. if the data is already cached).
		iLenConsumed += iFragLenRemaining;
				
		RequestEnd();
		
		ResetPageLists();

		fragments++;
		
		}
	while(r == KErrNone && LengthRemaining() > 0);

	if (iChunk)		
		{
		NKern::ThreadEnterCS();
		Kern::ChunkClose(iChunk);
		iChunk = NULL;
		NKern::ThreadLeaveCS();
		}
	
	// Set remote descriptor length to iReqLenClient
	if (iReqId == DLocalDrive::ERead && r == KErrNone)
		r = UpdateRemoteDescriptorLength(iReqLenClient);

	__KTRACE_DMA(Kern::Printf("<PHYSADDR:SendReceive()"));

	iMemoryType = EUnknown;

	// Release the single-client lock taken at the top of this function.
	__e32_atomic_add_ord32(&iLockCount, TUint32(-1));
	OstTraceFunctionExit0( DDMAHELPER_SENDRECEIVE_EXIT5 );
	return r;
	}
sl@0
   351
sl@0
   352
sl@0
   353
/**
sl@0
   354
 * Each read/write request is split into one or more DMA "fragments".
sl@0
   355
 * The maximum size of each fragment depends on the size of iPageArray[].
sl@0
   356
 * Subsquent calls to RequestStart maybe required to complete a request.
sl@0
   357
 * 
sl@0
   358
 * The physical address is checked for DMA alignment or the possibility of 
sl@0
   359
 * eventually alignment due to mis-aligned start/end media blocks.
sl@0
   360
 * 
sl@0
   361
 * A DMA "fragment" can be split over a number of pages as follows :
sl@0
   362
 * ----------------------------------------------------------
sl@0
   363
 * |    4K    |    4K    |    4K    |    4K    |
sl@0
   364
 * ----------------------------------------------------------
sl@0
   365
 *      ********************************		: region to be read
sl@0
   366
 * <----------- iFragLen ----------->
sl@0
   367
 * 
sl@0
   368
 * The pages may not be physically contiguous; if they are not, 
sl@0
   369
 * then they are supplied to the media driver one contiguous 
sl@0
   370
 * sequent at a time by GetPhysicalAddress()
sl@0
   371
 **/
sl@0
   372
TInt DDmaHelper::RequestStart()
sl@0
   373
	{
sl@0
   374
	OstTraceFunctionEntry1( DDMAHELPER_REQUESTSTART_ENTRY, this );
sl@0
   375
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestStart()"));
sl@0
   376
sl@0
   377
	iIndex = 0;
sl@0
   378
sl@0
   379
	TLinAddr startAddr = LinAddress();
sl@0
   380
	TInt64 startPos = iReqPosClient + iLenConsumed;
sl@0
   381
	TInt mediaBlockOffset = BlockOffset(startPos);
sl@0
   382
	TInt addrBlockOffset = BlockOffset(startAddr);
sl@0
   383
	TInt length = Min(LengthRemaining(), MaxFragLength());
sl@0
   384
sl@0
   385
	iPageArrayCount = iPageListCount = 0;
sl@0
   386
sl@0
   387
	TLinAddr firstPageStart = PageAlign(startAddr);
sl@0
   388
	TLinAddr lastPageStart = PageAlign(startAddr + length + iPageSize - 1);
sl@0
   389
	iPageArrayCount = (lastPageStart - firstPageStart + 1) >> iPageSizeLog2;
sl@0
   390
sl@0
   391
	iMemoryType = EUnknown;
sl@0
   392
	iPhysAddr = KPhysMemFragmented; // Default - Mark memory as fragmented
sl@0
   393
sl@0
   394
	//*************************************
sl@0
   395
	// Check Physical Page Alignment!!	
sl@0
   396
	//*************************************
sl@0
   397
	if (!IsBlockAligned(startPos))
sl@0
   398
		{
sl@0
   399
		// Will DMA align at next block alignment? such that DMA can be used
sl@0
   400
		TInt ofset = I64LOW((startPos + iMediaBlockSize) & (iMediaBlockSize-1));
sl@0
   401
		ofset = iMediaBlockSize - ofset;
sl@0
   402
sl@0
   403
		if (!IsDmaAligned(startAddr))
sl@0
   404
			{			
sl@0
   405
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned pos 0x%x addr 0x%x)",I64LOW(startPos), startAddr));
sl@0
   406
			OstTraceExt2( TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT1, "< KErrNotSupported Not DMA Aligned startPos %x startAddr %x", I64LOW(startPos), startAddr );
sl@0
   407
			return KErrNotSupported;
sl@0
   408
			}
sl@0
   409
		}
sl@0
   410
	else 
sl@0
   411
		{ //block aligned!
sl@0
   412
		if (!IsDmaAligned(startAddr))
sl@0
   413
			{
sl@0
   414
			__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - not DMA Aligned (0x%x)",startAddr));
sl@0
   415
			OstTrace1(TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT2, "< KErrNotSupported Not DMA Aligned startAddr %x", startAddr);
sl@0
   416
			return KErrNotSupported;
sl@0
   417
			}
sl@0
   418
		}
sl@0
   419
sl@0
   420
	//************************************************
sl@0
   421
	// Check for possible striping of RAM pages vs Media blocks
sl@0
   422
	// i.e. Media blocks which may straddle 2 non contiguous pages. 
sl@0
   423
	//************************************************
sl@0
   424
	if (mediaBlockOffset != addrBlockOffset)
sl@0
   425
		{
sl@0
   426
		__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - Frag / not block aligned: pos 0x%x addr 0x%x", I64LOW(startPos), startAddr));
sl@0
   427
		OstTraceExt2(TRACE_FLOW, DDMAHELPER_REQUESTSTART_EXIT3, "< KErrNotSupported Frag / not block aligned: startPos 0x%x startAddr 0x%x", I64LOW(startPos), startAddr );
sl@0
   428
		return KErrNotSupported;
sl@0
   429
		}
sl@0
   430
sl@0
   431
	//************************************************
sl@0
   432
	// Is it File Server Cache request ?
sl@0
   433
	//************************************************
sl@0
   434
	if (iChunk == NULL &&				// Not Shared memory
sl@0
   435
		iRemoteThread == NULL &&		// Direct Client Request
sl@0
   436
		IsPageAligned(startAddr) &&
sl@0
   437
		IsBlockAligned(startPos) &&
sl@0
   438
		(iPageArrayCount > 0) )
sl@0
   439
		{
sl@0
   440
		TLinAddr firstPageAddr = PageAlign(startAddr); //ensure that it is page aligned.
sl@0
   441
		
sl@0
   442
		TInt r = KErrNone;
sl@0
   443
		if (iPhysPinningAvailable)
sl@0
   444
			{
sl@0
   445
			TBool readOnlyMem = (iReqId == DLocalDrive::EWrite); 
sl@0
   446
			r =  Kern::PinPhysicalMemory(iPhysicalPinObject,  firstPageAddr, iPageArrayCount << iPageSizeLog2,
sl@0
   447
					readOnlyMem, iPhysAddr, iPageArray, iMapAttr, iPageColour, iCurrentThread);
sl@0
   448
			}
sl@0
   449
		else
sl@0
   450
			{
sl@0
   451
			NKern::ThreadEnterCS();
sl@0
   452
			r = Kern::PrepareMemoryForDMA(iCurrentThread, (void*)firstPageAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
sl@0
   453
			NKern::ThreadLeaveCS();
sl@0
   454
			}
sl@0
   455
		if (r != KErrNone) 
sl@0
   456
		    {
sl@0
   457
			OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT4, this, r );
sl@0
   458
			return r;
sl@0
   459
		    }
sl@0
   460
sl@0
   461
		iMemoryType = EFileServerChunk;
sl@0
   462
		
sl@0
   463
		__KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - EFileServerChunk"));
sl@0
   464
		OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART1, "EFileServerChunk");
sl@0
   465
		}
sl@0
   466
	//****************************
sl@0
   467
	// Is it shared chunk ?
sl@0
   468
	//****************************
sl@0
   469
	else if (iChunk)
sl@0
   470
		{
sl@0
   471
		// calculate chunk offset of start of first page
sl@0
   472
		TInt offset = iChunkOffset + iReqRemoteDesOffset+ iLenConsumed;
sl@0
   473
				
sl@0
   474
		TInt r = Kern::ChunkPhysicalAddress(iChunk, offset, length, iLinAddressKernel, iMapAttr, iPhysAddr, iPageArray);
sl@0
   475
		
sl@0
   476
		if (r < KErrNone)
sl@0
   477
		    {
sl@0
   478
			OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT5, this, r );
sl@0
   479
			return r;  // 0 = Contiguous Memory, 1 = Fragmented/Dis-Contiguous Memory
sl@0
   480
		    }
sl@0
   481
			
sl@0
   482
		iMemoryType = ESharedChunk;
sl@0
   483
		
sl@0
   484
		__KTRACE_DMA(Kern::Printf("-PHYSADDR:RequestStart() - ESharedChunk"));
sl@0
   485
		OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART2, "ESharedChunk");
sl@0
   486
		}
sl@0
   487
	else
sl@0
   488
		{
sl@0
   489
		__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart() - EUnknown"));
sl@0
   490
		OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT6, this, KErrNotFound );
sl@0
   491
		return KErrNotFound;
sl@0
   492
		}
sl@0
   493
sl@0
   494
	SetFragLength(length);
sl@0
   495
	
sl@0
   496
	//************************************************
sl@0
   497
	// Build Contiguous Page list
sl@0
   498
	//************************************************
sl@0
   499
	BuildPageList();
sl@0
   500
	
sl@0
   501
	//************************************************
sl@0
   502
	// Set up request parameters for this fragment 
sl@0
   503
	//************************************************
sl@0
   504
	iReq->Length() = MAKE_TINT64(0, length);
sl@0
   505
	iReq->Pos() = iReqPosClient + iLenConsumed;
sl@0
   506
	iReq->RemoteDesOffset() = iReqRemoteDesOffset + iLenConsumed;
sl@0
   507
	// restore EAdjusted flag to ensure iReq->Pos() is adjusted correctly
sl@0
   508
	iReq->Flags()&= ~TLocDrvRequest::EAdjusted;
sl@0
   509
	iReq->Flags()|= (iReqFlags & TLocDrvRequest::EAdjusted);
sl@0
   510
sl@0
   511
	//************************************************
sl@0
   512
	// Sync memory
sl@0
   513
	//************************************************
sl@0
   514
	__KTRACE_DMA(Kern::Printf(">SYNC-PHYSADDR:addr 0x%x len %d", startAddr, length));
sl@0
   515
	OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_REQUESTSTART3, "startAddr=0x%x length=%d", (TUint) startAddr, length );
sl@0
   516
sl@0
   517
	// Only sync whole blocks: it is assumed that the media driver will transfer 
sl@0
   518
	// partial start and end blocks without DMA
sl@0
   519
sl@0
   520
	TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
sl@0
   521
	TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);
sl@0
   522
sl@0
   523
	if (iReqId == DLocalDrive::EWrite)
sl@0
   524
		{
sl@0
   525
		if (iMemoryType == ESharedChunk)
sl@0
   526
			{
sl@0
   527
			Cache::SyncMemoryBeforeDmaWrite(iLinAddressKernel+startBlockPartialLen, blockLen, iMapAttr);
sl@0
   528
			}
sl@0
   529
		else // (iMemoryType == EFileServerChunk)
sl@0
   530
			{
sl@0
   531
			if (iPhysPinningAvailable)
sl@0
   532
				Cache::SyncPhysicalMemoryBeforeDmaWrite(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
sl@0
   533
			else
sl@0
   534
				Cache::SyncMemoryBeforeDmaWrite(startAddr+startBlockPartialLen, blockLen);
sl@0
   535
			}
sl@0
   536
		}
sl@0
   537
	else
sl@0
   538
		{
sl@0
   539
		if (iMemoryType == ESharedChunk)
sl@0
   540
			Cache::SyncMemoryBeforeDmaRead(iLinAddressKernel, length, iMapAttr);
sl@0
   541
		else // (iMemoryType == EFileServerChunk)
sl@0
   542
			{
sl@0
   543
			if (iPhysPinningAvailable)
sl@0
   544
				Cache::SyncPhysicalMemoryBeforeDmaRead(iPageArray, iPageColour, 0, length, iMapAttr);
sl@0
   545
			else
sl@0
   546
				Cache::SyncMemoryBeforeDmaRead(startAddr, length);
sl@0
   547
			}
sl@0
   548
		}
sl@0
   549
sl@0
   550
	__KTRACE_DMA(Kern::Printf("<PHYSADDR:RequestStart()"));
sl@0
   551
sl@0
   552
	OstTraceFunctionExitExt( DDMAHELPER_REQUESTSTART_EXIT7, this, KErrNone );
sl@0
   553
	return KErrNone;
sl@0
   554
	}
sl@0
   555
sl@0
   556
/**
 * After read requests this method synchronises the current physical memory
 * in use, then releases the pages acquired by RequestStart().
 */
void DDmaHelper::RequestEnd()
	{
	OstTraceFunctionEntry0( DDMAHELPER_REQUESTEND_ENTRY );
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:RequestEnd()"));


	__ASSERT_DEBUG(iReqId == DLocalDrive::ERead || iReqId == DLocalDrive::EWrite, PHYSADDR_FAULT());
	__ASSERT_DEBUG(iMemoryType == ESharedChunk || iMemoryType == EFileServerChunk, PHYSADDR_FAULT());

	TInt length = FragLength();	// len of data just transferred
	TLinAddr startAddr = LinAddress() - length;

	// Sync the memory : but not if the media driver has decided to transfer ALL the data using IPC rather than DMA.
	// It is assumed that the media driver will transfer partial start & end blocks using IPC, but it may also choose 
	// to use IPC for the ENTIRE fragment when read/writing at the end of the media (see medmmc.cpp)
	if (iFragLenRemaining < length && iReqId == DLocalDrive::ERead)
		{
		// Only invalidate the whole-block region actually transferred by DMA.
		TInt64 startPos = iReq->Pos();
		TInt startBlockPartialLen = IsBlockAligned(startPos) ? 0 : iMediaBlockSize - BlockOffset(startPos);
		TInt blockLen = (TInt) BlockAlign(length - startBlockPartialLen);

		if (iMemoryType == ESharedChunk)
			{
			Cache::SyncMemoryAfterDmaRead(iLinAddressKernel + startBlockPartialLen, blockLen);
			}
		else // (iMemoryType == EFileServerChunk)
			{
			if (iPhysPinningAvailable)
				Cache::SyncPhysicalMemoryAfterDmaRead(iPageArray, iPageColour, startBlockPartialLen, blockLen, iMapAttr);
			else
				Cache::SyncMemoryAfterDmaRead(startAddr + startBlockPartialLen, blockLen);
			}

		}
	ReleasePages(PageAlign(startAddr));
	OstTraceFunctionExit0( DDMAHELPER_REQUESTEND_EXIT );
	}
sl@0
   596
sl@0
   597
/**
sl@0
   598
 * For File Server chunks this method releases the current physical memory in use.
sl@0
   599
 * 
sl@0
   600
 * @see Kern::ReleaseMemoryFromDMA()
sl@0
   601
 */
sl@0
   602
void DDmaHelper::ReleasePages(TLinAddr aAddr)
sl@0
   603
	{
sl@0
   604
	OstTraceFunctionEntry1( DDMAHELPER_RELEASEPAGES_ENTRY, this );
sl@0
   605
	if (iMemoryType == EFileServerChunk)
sl@0
   606
		{
sl@0
   607
		__KTRACE_DMA(Kern::Printf(">PHYSADDR():ReleasePages thread (0x%x) aAddr(0x%08x) size(%d) iPageArray(0x%x)",iCurrentThread, aAddr, (iPageArrayCount << iPageSizeLog2), iPageArray));
sl@0
   608
		OstTraceExt3( TRACE_DMASUPPORT, DDMAHELPER_RELEASEPAGES, "ReleasePages aAddr=0x%x; size=%d; iPageArray-0x%x", (TUint) aAddr, (iPageArrayCount << iPageSizeLog2), (TUint) iPageArray);
sl@0
   609
sl@0
   610
		TInt r;
sl@0
   611
		if (iPhysPinningAvailable)
sl@0
   612
			{
sl@0
   613
			r = Kern::UnpinPhysicalMemory(iPhysicalPinObject);
sl@0
   614
			}
sl@0
   615
		else
sl@0
   616
			{
sl@0
   617
			NKern::ThreadEnterCS();
sl@0
   618
			r = Kern::ReleaseMemoryFromDMA(iCurrentThread, (void*) aAddr, iPageArrayCount << iPageSizeLog2, iPageArray);
sl@0
   619
			NKern::ThreadLeaveCS();
sl@0
   620
			}
sl@0
   621
		__ASSERT_ALWAYS(r == KErrNone, PHYSADDR_FAULT());
sl@0
   622
		}		
sl@0
   623
	OstTraceFunctionExit1( DDMAHELPER_RELEASEPAGES_EXIT, this );
sl@0
   624
	}
sl@0
   625
sl@0
   626
/**
 * Utility method which examines the page array, compiling adjacent pages into contiguous fragments
 * and populating iPageList with said fragments.
 *
 * On exit iPageListCount holds the number of valid entries in iPageList.
 */
void DDmaHelper::BuildPageList()
	{
	OstTraceFunctionEntry1( DDMAHELPER_BUILDPAGELIST_ENTRY, this );
	iPageListCount = 0;
	
	if (iPhysAddr != KPhysMemFragmented)
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Contiguous Memory"));
		OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_BUILDPAGELIST1, "Contiguous Memory");
		// Only one entry required.
		iPageList[0].iAddress = iPhysAddr;
		iPageList[0].iLength = FragLength();
		iPageListCount = 1;
		}
	else
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:BuildPageList() - Dis-Contiguous Memory"));
		OstTrace0( TRACE_DMASUPPORT, DDMAHELPER_BUILDPAGELIST2, "Dis-Contiguous Memory");
		TInt offset;
		
		// The first fragment may start part-way into the first page, so its
		// length is only the remainder of that page.
		offset = PageOffset(iChunkOffset + iReqRemoteDesOffset+ iLenConsumed);
		iPageList[0].iAddress = iPageArray[0]+offset;
		iPageList[0].iLength  = iPageSize-offset;
		
		// Bytes of the fragment still to be accounted for after the partial first page.
		TInt lengthRemaining = FragLength() - iPageList[0].iLength;
		
		TInt i =1;
        for( ; i < iPageArrayCount; i++)
            {
            //Check if RAM pages are physically adjacent
            if ((iPageArray[i-1] + PageSize()) == iPageArray[i])
                {
                // Adjacent pages - just add length
                iPageList[iPageListCount].iLength += PageSize();             
                }
            else     	
                {
                // Not Adjacent, start new Memory fragment
                iPageListCount++;
                iPageList[iPageListCount].iAddress = iPageArray[i];
                iPageList[iPageListCount].iLength  = iPageSize;
                }
            
            lengthRemaining -= PageSize();
            if (lengthRemaining < 0)
            	{
            	// Last page, re-adjust length for odd remainder.            	
            	iPageList[iPageListCount].iLength += lengthRemaining;
            	break;
            	}
            }
        
        // iPageListCount was used as an index above; convert it to a count.
        iPageListCount++;
		}

//#ifdef __DEBUG_DMASUP__
//	for (TInt m=0; m<iPageListCount; m++)
//		__KTRACE_DMA(Kern::Printf("-PHYSADDR:BuildPageList() [%d]: %08X l:%d", m, iPageList[m].iAddress, iPageList[m].iLength));
//#endif
	OstTraceFunctionExit1( DDMAHELPER_BUILDPAGELIST_EXIT, this );
	}
sl@0
   691
sl@0
   692
sl@0
   693
/**
 * Returns Address and Length of next contiguous Physical memory fragment
 * 
 * @param aAddr On success, populated with the Physical Address of the next fragment.
 * @param aLen  On success, populated with the length in bytes of the next fragment.
 * 
 * @return KErrNone, if successful;
 * 		   KErrGeneral, if no more memory fragments left.
 */
TInt DDmaHelper::GetPhysicalAddress(TPhysAddr& aAddr, TInt& aLen)
	{
	OstTraceFunctionEntry1( DUP1_DDMAHELPER_GETPHYSICALADDRESS_ENTRY, this );
	// Past the end of the fragment list - report exhaustion with zeroed outputs.
	if (iIndex >= iPageListCount)
		{
		__KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d], PageListCount:%d", iIndex, iPageListCount));
		OstTraceExt2(TRACE_DMASUPPORT, DDMAHELPER_GETPHYSICALADDRESS1, "GetPhysD() [%d]; iPageCountList=%d", iIndex, iPageListCount );
		aAddr = 0;
		aLen = 0;
		OstTraceFunctionExitExt( DUP1_DDMAHELPER_GETPHYSICALADDRESS_EXIT1, this, KErrGeneral );
		return KErrGeneral;
		}
	
	aAddr = iPageList[iIndex].iAddress;
	aLen = iPageList[iIndex].iLength;
	// Track overall progress: bytes handed out so far and bytes left in this fragment.
	iLenConsumed+= aLen;
	iFragLenRemaining-= aLen;
	
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:GetPhysD() [%d] addr:0x%08X, l:%d; Used:%d, Left:%d", iIndex, aAddr, aLen, iLenConsumed, iFragLenRemaining));
	OstTraceExt5(TRACE_DMASUPPORT, DDMAHELPER_GETPHYSICALADDRESS2, "GetPhysD() [%d]; address=0x%x; length=%d; iLenConsumed=%d; iFragLenRemaining=%d", iIndex, (TUint) aAddr, aLen, iLenConsumed, iFragLenRemaining);
	__ASSERT_DEBUG(aLen >= 0, PHYSADDR_FAULT());

	iIndex++;  //Move index to next page

	OstTraceFunctionExitExt( DDMAHELPER_GETPHYSICALADDRESS_EXIT2, this, KErrNone );
	return KErrNone;
	}
sl@0
   729
sl@0
   730
sl@0
   731
#ifdef __DEMAND_PAGING__
sl@0
   732
/**
 * Returns Address and Length of next contiguous Physical memory. 
 * Static function specifically for Demand Paging support
 * 
 * @param aReq  TLocDrvRequest from which physical 
 * @param aAddr Populated with the Physical Address of the Request aReq.
 * @param aLen  Populated with the length in bytes of the memory.
 * 
 * @return KErrNone 
 */
TInt DDmaHelper::GetPhysicalAddress(TLocDrvRequest& aReq, TPhysAddr& aAddr, TInt& aLen)
	{
	OstTraceFunctionEntry0( DDMAHELPER_GETPHYSICALADDRESS_ENTRY );
	// Client-buffer requests are not supported on this path.
	__ASSERT_DEBUG( (aReq.Flags() & TLocDrvRequest::ETClientBuffer) == 0,  PHYSADDR_FAULT());
	TLinAddr linAddr = (TLinAddr) aReq.RemoteDes();
	// NB: offset is a reference - advancing it below updates the request's
	// own descriptor offset, so the next call resumes where this one stopped.
	TInt& offset = aReq.RemoteDesOffset();
	TLinAddr currLinAddr = linAddr + offset;
	TInt reqLen = I64LOW(aReq.Length());
	__ASSERT_DEBUG(I64HIGH(aReq.Length()) == 0,  PHYSADDR_FAULT());

	aAddr = Epoc::LinearToPhysical(currLinAddr);

	// Set the initial length to be the length remaining in this page or the request length (whichever is shorter).
	// If there are subsequent pages, we then need to determine whether they are contiguous
	aLen = Min( (TInt) (PageAlign(currLinAddr+iPageSize) - currLinAddr), reqLen - offset);

	__ASSERT_DEBUG(aLen > 0,  PHYSADDR_FAULT());
	
	TPhysAddr currPhysPageAddr = PageAlign((TLinAddr) aAddr);

	offset+= aLen;

	// Extend the run page by page for as long as the physical pages are adjacent.
	while (offset < reqLen)
		{
		TPhysAddr nextPhysPageAddr = Epoc::LinearToPhysical(linAddr + offset);
		__ASSERT_DEBUG(PageOffset((TLinAddr) nextPhysPageAddr) == 0,  PHYSADDR_FAULT());

		// Stop at the first physical discontinuity; the caller gets the rest
		// on a subsequent call (offset already records our progress).
		if (nextPhysPageAddr != currPhysPageAddr + iPageSize)
			break;
		
		currPhysPageAddr = nextPhysPageAddr;

		// Last page of the request may be partial.
		TInt len = Min(iPageSize, reqLen - offset);
		offset+= len;
		aLen+= len;
		}

	__KTRACE_DMA(Kern::Printf(">PHYSADDR:DP:GetPhysS(), linAddr %08X, physAddr %08X, len %x reqLen %x", linAddr + offset, aAddr, aLen, reqLen));
	OstTraceExt4(TRACE_DEMANDPAGING, DDMAHELPER_GETPHYSICALADDRESS_DP, "linAddr=0x%x; physAddr=0x%x; length=0x%x; reqLen=0x%x", linAddr + offset, aAddr, aLen, reqLen);
	OstTraceFunctionExit0( DDMAHELPER_GETPHYSICALADDRESS_EXIT );
	return KErrNone;
	}
sl@0
   786
#endif	// (__DEMAND_PAGING__)
sl@0
   787
sl@0
   788
sl@0
   789
/**
sl@0
   790
 * Modifies the current requests remote descriptor length
sl@0
   791
 * 
sl@0
   792
 * @param aLength Length in bytes to which the descriptor is to be set.
sl@0
   793
 * 
sl@0
   794
 * @return KErrNone, if successful;
sl@0
   795
 * 		   KErrBadDescriptor, if descriptor is corrupted;
sl@0
   796
 * 		   otherwise one of the other system wide error codes.
sl@0
   797
 */
sl@0
   798
sl@0
   799
TInt DDmaHelper::UpdateRemoteDescriptorLength(TInt aLength)
sl@0
   800
	{
sl@0
   801
	OstTraceFunctionEntryExt( DDMAHELPER_UPDATEREMOTEDESCRIPTORLENGTH_ENTRY, this );
sl@0
   802
	__KTRACE_DMA(Kern::Printf(">PHYSADDR:UpDesLen(%d)",aLength));
sl@0
   803
sl@0
   804
	// Restore request Id (overwritten by KErrNone return code) to stop ASSERT in WriteRemote
sl@0
   805
	iReq->Id() = DLocalDrive::ERead;
sl@0
   806
sl@0
   807
	// restore caller's descriptor offset
sl@0
   808
	iReq->RemoteDesOffset() = iReqRemoteDesOffset;
sl@0
   809
sl@0
   810
	// Write a zero length descriptor at the end such that the descriptors length is correctly updated.
sl@0
   811
	TPtrC8 zeroDes(NULL, 0);
sl@0
   812
	TInt r = iReq->WriteRemote(&zeroDes, aLength);
sl@0
   813
sl@0
   814
	// restore return code	
sl@0
   815
	iReq->iValue = KErrNone;
sl@0
   816
sl@0
   817
	OstTraceFunctionExitExt( DDMAHELPER_UPDATEREMOTEDESCRIPTORLENGTH_EXIT, this, r );
sl@0
   818
	return r;
sl@0
   819
	}
sl@0
   820