// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\klib\x86\cumem.cia
//
//

#include <x86.h>

extern "C" {

__NAKED__ void CopyInterSeg()
//
// Copy ECX bytes from DS:ESI to ES:EDI
// Modifies EAX, EBX, ECX, EDX, ESI, EDI
//
	{
	asm("pushfd");
	asm("cld");				// assume forward copy initially
	asm("test ecx,ecx");	//
	asm("jz short memcopy0");// if length=0, nothing to do
	asm("xor edx,edx");		//
	asm("cmp edi,esi");		// compare source and dest addresses
	asm("jc short memcopy1");// if dest<source, must go forwards
	asm("std");				// else go backwards
	asm("add esi,ecx");		// and start at end of block
	asm("add edi,ecx");		//
	asm("inc edx");			// edx=1 if backwards, 0 if forwards
	asm("memcopy1:");
	asm("cmp ecx,16");		// if length<16 don't bother with alignment check
	asm("jc short memcopy2");//
	asm("mov ebx,edi");		// ebx = destination address
	asm("and ebx,3");		// ebx bottom 2 bits = alignment of destination wrt hardware bus
	asm("jz short memcopy3");// if aligned, proceed with block move
	asm("or edx,edx");		// check direction of move
	asm("jnz short memcopy4");// if backwards, ebx = number of byte moves to align destination
	asm("neg ebx");			// else number of byte moves = 4-ebx
	asm("add ebx,4");		//
	asm("memcopy4:");
	asm("sub ecx,ebx");		// subtract number of bytes from length
	asm("xchg ecx,ebx");	// temporarily put length in ebx
	asm("sub edi,edx");		// adjust if backwards move
	asm("sub esi,edx");		//
	asm("rep movsb");		// move bytes to align destination
	asm("add edi,edx");		// adjust if backwards move
	asm("add esi,edx");		//
	asm("mov ecx,ebx");		// length back into ecx
	asm("memcopy3:");
	asm("mov ebx,ecx");		// save length in ebx
	asm("shl edx,2");		// adjustment 4 for backwards move
	asm("shr ecx,2");		// number of dwords to move into ecx
	asm("sub edi,edx");		// adjust if backwards move
	asm("sub esi,edx");		//
	asm("rep movsd");		// perform DWORD block move
	asm("add edi,edx");		// adjust if backwards move
	asm("add esi,edx");		//
	asm("mov ecx,ebx");		// length back into ecx
	asm("and ecx,3");		// number of remaining bytes to move
	asm("jz short memcopy0");// if zero, we are finished
	asm("shr edx,2");		// adjustment 1 for backwards move
	asm("memcopy2:");		// *** come here for small move
	asm("sub edi,edx");		// adjust if backwards move
	asm("sub esi,edx");		//
	asm("rep movsb");		// move remaining bytes
	asm("memcopy0:");
	asm("popfd");
	asm("ret");				// finished - return value in EAX
	}
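
// Editorial note (not from the original source): a plain C model of the
// strategy CopyInterSeg implements - pick the copy direction from the
// address order so overlapping moves are safe, byte-copy until the
// destination is dword aligned, move the bulk with dword stores, then copy
// the tail bytes. The backward path is simplified to a byte copy here,
// whereas the assembler above dword-aligns that case as well. All names
// are illustrative.
//
//	static void CopyModel(TUint8* aDst, const TUint8* aSrc, TUint aLen)
//		{
//		if (aDst < aSrc)
//			{
//			while (aLen && ((TUint)aDst & 3))
//				{ *aDst++ = *aSrc++; --aLen; }			// align destination
//			for (; aLen >= 4; aLen -= 4, aDst += 4, aSrc += 4)
//				*(TUint32*)aDst = *(const TUint32*)aSrc;	// dword bulk move
//			while (aLen--)
//				*aDst++ = *aSrc++;						// tail bytes
//			}
//		else
//			{											// backward, overlap-safe
//			aDst += aLen;
//			aSrc += aLen;
//			while (aLen--)
//				*--aDst = *--aSrc;
//			}
//		}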

__NAKED__ void CopyInterSeg32()
//
// Copy ECX bytes from DS:ESI to ES:EDI
// ECX, ESI and EDI are all multiples of 4
// Modifies EAX, EBX, ECX, EDX, ESI, EDI
//
	{
	asm("pushfd");
	asm("cld");				//
	asm("test ecx,ecx");	//
	asm("jz short memmove0");// if length=0, nothing to do
	asm("cmp edi,esi");		// compare source and dest addresses
	asm("jc short memmove1");// if dest<source, must go forwards
	asm("std");				// else go backwards
	asm("lea esi,[esi+ecx-4]");	// and start at end of block - 4
	asm("lea edi,[edi+ecx-4]");	//
	asm("memmove1:");
	asm("shr ecx,2");		// ecx now contains number of dwords to move
	asm("rep movsd");		// do dword block move
	asm("memmove0:");
	asm("popfd");
	asm("ret");				// finished - return value in EAX
	}

__NAKED__ void FillInterSeg()
//
// Fill ECX bytes at ES:EDI with AL
// Modifies EAX, ECX, EDX, EDI
//
	{
	asm("pushfd");
	asm("cld");				// go forwards through array
	asm("test ecx,ecx");	//
	asm("jz short memfill0");// if length zero, nothing to do
	asm("cmp ecx,8");		// if array very small, just do byte fills
	asm("jb short memfill1");

	asm("mov ah,al");		// repeat al in all bytes of eax
	asm("movzx edx,ax");	//
	asm("shl eax,16");		//
	asm("or eax,edx");		//
	asm("mov edx,ecx");		// length into edx
	// ecx = number of byte fills to align = 4-(edi mod 4)
	asm("mov ecx,4");
	asm("sub ecx,edi");		//
	asm("and ecx,3");		//
	asm("jz short memfill2");// if already aligned, proceed to dword fill
	asm("sub edx,ecx");		// subtract alignment bytes from length
	asm("rep stosb");		// do byte fills to align
	asm("memfill2:");
	asm("mov ecx,edx");		// length remaining into ecx
	asm("shr ecx,2");		// number of dwords to fill into ecx
	asm("rep stosd");		// perform dword fill
	asm("mov ecx,edx");		// calculate number of leftover bytes
	asm("and ecx,3");		// in ecx
	asm("jz short memfill0");// if none left, exit
	asm("memfill1:");
	asm("rep stosb");		// do byte fills to make up correct length
	asm("memfill0:");
	asm("popfd");
	asm("ret");
	}
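
// Editorial note (not from the original source): a plain C model of
// FillInterSeg - replicate the fill byte into all four bytes of a dword,
// byte-fill up to the first dword boundary, fill the bulk as dwords, then
// byte-fill the remainder. Names are illustrative.
//
//	static void FillModel(TUint8* aDst, TUint8 aValue, TUint aLen)
//		{
//		TUint32 v = aValue;
//		v |= v << 8;
//		v |= v << 16;						// aValue in all 4 bytes
//		while (aLen && ((TUint)aDst & 3))
//			{ *aDst++ = aValue; --aLen; }	// align to dword boundary
//		for (; aLen >= 4; aLen -= 4, aDst += 4)
//			*(TUint32*)aDst = v;			// dword bulk fill
//		while (aLen--)
//			*aDst++ = aValue;				// leftover bytes
//		}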

/**	Reads the current thread's memory space with appropriate permissions.

Performs a memcpy(aKernAddr, aAddr, aLength).
The reads are performed using the requestor privilege level from GS, i.e. equal
to the privilege level of the caller of the Exec:: function.
Note that the source and destination areas may not overlap.

@param	aKernAddr	Destination address in kernel memory.
@param	aAddr		Source address in kernel or user memory.
@param	aLength		Number of bytes to copy.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void kumemget(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push ds");
	asm("mov edi, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov ecx, [esp+28]");
	asm("mov ax, gs");
	asm("mov ds, ax");
	asm("call %a0": : "i"(&CopyInterSeg));
	asm("pop ds");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}
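
// Editorial note (not from the original source): a hedged sketch of calling
// kumemget from an Exec handler under an XTRAP harness, so that a faulting
// user address is caught and returned as an error instead of taking down
// the kernel. The handler name and buffer size are hypothetical.
//
//	TInt ExecReadUserBlock(const TAny* aUserPtr)
//		{
//		TUint8 local[32];
//		TInt r = KErrNone;
//		XTRAP(r, XT_DEFAULT, kumemget(local, aUserPtr, sizeof(local)));
//		return r;						// KErrNone or exception code
//		}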

/**	Reads the current thread's memory space with user permissions.

Performs a memcpy(aKernAddr, aUserAddr, aLength).
The reads are performed with ring 3 RPL.
Note that the source and destination areas may not overlap.

@param	aKernAddr	Destination address in kernel memory.
@param	aUserAddr	Source address in user memory.
@param	aLength		Number of bytes to copy.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void umemget(TAny* /*aKernAddr*/, const TAny* /*aUserAddr*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push ds");
	asm("mov edi, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov ecx, [esp+28]");
	asm("mov eax, %0": : "i"(RING3_DS));
	asm("mov ds, ax");
	asm("call %a0": : "i"(&CopyInterSeg));
	asm("pop ds");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}

/**	Does a word-aligned read of the current thread's memory space with appropriate permissions.

Performs a memcpy(aKernAddr, aAddr, aLength).
The reads are performed using the requestor privilege level from GS, i.e. equal
to the privilege level of the caller of the Exec:: function.
Note that the source and destination areas may not overlap.

@param	aKernAddr	Destination address in kernel memory, must be 4-byte aligned.
@param	aAddr		Source address in kernel or user memory, must be 4-byte aligned.
@param	aLength		Number of bytes to copy, must be a multiple of 4.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void kumemget32(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push ds");
	asm("mov edi, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov ecx, [esp+28]");
	asm("mov ax, gs");
	asm("mov ds, ax");
	asm("call %a0": : "i"(&CopyInterSeg32));
	asm("pop ds");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}

/**	Does a word-aligned read of the current thread's memory space with user permissions.

Performs a memcpy(aKernAddr, aUserAddr, aLength).
The reads are performed with ring 3 RPL.
Note that the source and destination areas may not overlap.

@param	aKernAddr	Destination address in kernel memory, must be 4-byte aligned.
@param	aUserAddr	Source address in user memory, must be 4-byte aligned.
@param	aLength		Number of bytes to copy, must be a multiple of 4.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void umemget32(TAny* /*aKernAddr*/, const TAny* /*aUserAddr*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push ds");
	asm("mov edi, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov ecx, [esp+28]");
	asm("mov eax, %0": : "i"(RING3_DS));
	asm("mov ds, ax");
	asm("call %a0": : "i"(&CopyInterSeg32));
	asm("pop ds");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}
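
// Editorial note (not from the original source): the 32-bit variants demand
// 4-byte-aligned addresses and a length that is a multiple of 4, matching
// CopyInterSeg32 above. A hedged sketch with hypothetical names:
//
//	TInt ExecReadWords(const TUint32* aUserArray)
//		{
//		TUint32 words[8];					// naturally 4-byte aligned
//		TInt r = KErrNone;
//		XTRAP(r, XT_DEFAULT, umemget32(words, aUserArray, sizeof(words)));
//		return r;
//		}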

/**	Writes to the current thread's memory space with appropriate permissions.

Performs a memcpy(aAddr, aKernAddr, aLength).
The writes are performed using the requestor privilege level from GS, i.e. equal
to the privilege level of the caller of the Exec:: function.
Note that the source and destination areas may not overlap.

@param	aAddr		Destination address in kernel or user memory.
@param	aKernAddr	Source address in kernel memory.
@param	aLength		Number of bytes to copy.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void kumemput(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push es");
	asm("mov edi, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov ecx, [esp+28]");
	asm("mov ax, gs");
	asm("mov es, ax");
	asm("call %a0": : "i"(&CopyInterSeg));
	asm("pop es");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}

/**	Writes to the current thread's memory space with user permissions.

Performs a memcpy(aUserAddr, aKernAddr, aLength).
The writes are performed with ring 3 RPL.
Note that the source and destination areas may not overlap.

@param	aUserAddr	Destination address in user memory.
@param	aKernAddr	Source address in kernel memory.
@param	aLength		Number of bytes to copy.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void umemput(TAny* /*aUserAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push es");
	asm("mov edi, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov ecx, [esp+28]");
	asm("mov eax, %0": : "i"(RING3_DS));
	asm("mov es, ax");
	asm("call %a0": : "i"(&CopyInterSeg));
	asm("pop es");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}
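
// Editorial note (not from the original source): a hedged sketch of writing
// a result back to a caller-supplied user address with umemput, again under
// XTRAP. SResult and the handler name are hypothetical.
//
//	TInt ExecWriteResult(TAny* aUserPtr, const SResult& aResult)
//		{
//		TInt r = KErrNone;
//		XTRAP(r, XT_DEFAULT, umemput(aUserPtr, &aResult, sizeof(aResult)));
//		return r;
//		}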

/**	Does a word-aligned write to the current thread's memory space with appropriate permissions.

Performs a memcpy(aAddr, aKernAddr, aLength).
The writes are performed using the requestor privilege level from GS, i.e. equal
to the privilege level of the caller of the Exec:: function.
Note that the source and destination areas may not overlap.

@param	aAddr		Destination address in kernel or user memory, must be 4-byte aligned.
@param	aKernAddr	Source address in kernel memory, must be 4-byte aligned.
@param	aLength		Number of bytes to copy, must be a multiple of 4.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void kumemput32(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push es");
	asm("mov edi, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov ecx, [esp+28]");
	asm("mov ax, gs");
	asm("mov es, ax");
	asm("call %a0": : "i"(&CopyInterSeg32));
	asm("pop es");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}

/**	Does a word-aligned write to the current thread's memory space with user permissions.

Performs a memcpy(aUserAddr, aKernAddr, aLength).
The writes are performed with ring 3 RPL.
Note that the source and destination areas may not overlap.

@param	aUserAddr	Destination address in user memory, must be 4-byte aligned.
@param	aKernAddr	Source address in kernel memory, must be 4-byte aligned.
@param	aLength		Number of bytes to copy, must be a multiple of 4.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void umemput32(TAny* /*aUserAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push es");
	asm("mov edi, [esp+20]");
	asm("mov esi, [esp+24]");
	asm("mov ecx, [esp+28]");
	asm("mov eax, %0": : "i"(RING3_DS));
	asm("mov es, ax");
	asm("call %a0": : "i"(&CopyInterSeg32));
	asm("pop es");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}

/**	Fills the current thread's memory space with appropriate permissions.

Performs a memset(aAddr, aValue, aLength).
The writes are performed using the requestor privilege level from GS, i.e. equal
to the privilege level of the caller of the Exec:: function.

@param	aAddr		Destination address in kernel or user memory.
@param	aValue		Value to write to each byte.
@param	aLength		Number of bytes to fill.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void kumemset(TAny* /*aAddr*/, const TUint8 /*aValue*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push es");
	asm("mov edi, [esp+12]");
	asm("mov eax, [esp+16]");
	asm("mov ecx, [esp+20]");
	asm("mov dx, gs");
	asm("mov es, dx");
	asm("call %a0": :"i"(&FillInterSeg));
	asm("pop es");
	asm("pop edi");
	asm("ret");
	}

/**	Fills the current thread's memory space with user permissions.

Performs a memset(aUserAddr, aValue, aLength).
The writes are performed with ring 3 RPL.

@param	aUserAddr	Destination address in user memory.
@param	aValue		Value to write to each byte.
@param	aLength		Number of bytes to fill.

@pre    Call in a thread context.
@pre    Kernel must be unlocked.
@pre	Must be called under an XTRAP harness, or the calling thread must not be
in a critical section.
*/
EXPORT_C __NAKED__ void umemset(TAny* /*aUserAddr*/, const TUint8 /*aValue*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push es");
	asm("mov edi, [esp+12]");
	asm("mov eax, [esp+16]");
	asm("mov ecx, [esp+20]");
	asm("mov edx, %0": : "i"(RING3_DS));
	asm("mov es, dx");
	asm("call %a0": :"i"(&FillInterSeg));
	asm("pop es");
	asm("pop edi");
	asm("ret");
	}
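
// Editorial note (not from the original source): a hedged sketch of
// zero-filling a user-supplied buffer with umemset under XTRAP. Names are
// hypothetical.
//
//	TInt ExecZeroUserBuffer(TAny* aUserPtr, TInt aLen)
//		{
//		TInt r = KErrNone;
//		XTRAP(r, XT_DEFAULT, umemset(aUserPtr, 0, aLen));
//		return r;
//		}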

/**	Does a word-aligned copy within the current thread's memory space, with
both the reads and the writes performed at the requestor privilege level
taken from GS.

@param	aUserDst	Destination address, must be 4-byte aligned.
@param	aUserSrc	Source address, must be 4-byte aligned.
@param	aLength		Number of bytes to copy, must be a multiple of 4.
*/
__NAKED__ void uumemcpy32(TAny* /*aUserDst*/, const TAny* /*aUserSrc*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push ds");
	asm("push es");
	asm("mov edi, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov ecx, [esp+32]");
	asm("mov ax, gs");
	asm("mov ds, ax");
	asm("mov es, ax");
	asm("call %a0": : "i"(&CopyInterSeg32));
	asm("pop es");
	asm("pop ds");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}

/**	Copies within the current thread's memory space, with both the reads and
the writes performed at the requestor privilege level taken from GS.

@param	aUserDst	Destination address.
@param	aUserSrc	Source address.
@param	aLength		Number of bytes to copy.
*/
__NAKED__ void uumemcpy(TAny* /*aUserDst*/, const TAny* /*aUserSrc*/, TInt /*aLength*/)
	{
	asm("push edi");
	asm("push esi");
	asm("push ebx");
	asm("push ds");
	asm("push es");
	asm("mov edi, [esp+24]");
	asm("mov esi, [esp+28]");
	asm("mov ecx, [esp+32]");
	asm("mov ax, gs");
	asm("mov ds, ax");
	asm("mov es, ax");
	asm("call %a0": : "i"(&CopyInterSeg));
	asm("pop es");
	asm("pop ds");
	asm("pop ebx");
	asm("pop esi");
	asm("pop edi");
	asm("ret");
	}
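
// Editorial note (not from the original source): uumemcpy moves data between
// two addresses that are both accessed at the requestor privilege level, for
// example when copying directly from one user-side buffer to another. A
// hedged sketch with hypothetical names:
//
//	TInt CopyUserToUser(TAny* aDst, const TAny* aSrc, TInt aLen)
//		{
//		TInt r = KErrNone;
//		XTRAP(r, XT_DEFAULT, uumemcpy(aDst, aSrc, aLen));
//		return r;
//		}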

}