// First public contribution.
// Copyright (c) 2007-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// e32\klib\x86\cumem.cia
22 __NAKED__ void CopyInterSeg()
24 // Copy ECX bytes from DS:ESI to ES:EDI
25 // Modifies EAX, EBX, ECX, EDX, ESI, EDI
// Overlap-safe (memmove-like): copies backwards when dest >= source.
// EDX acts as a direction word: 0 = forwards, 1 = backwards; it is scaled
// to 4 for the dword phase and used to pre/post-adjust ESI/EDI because with
// DF=1 the string ops read at [ESI]/[EDI] while the pointers sit one element
// past the data.
// NOTE(review): labels memcopy0, memcopy1, memcopy3 and memcopy4 are
// referenced below but defined on lines elided from this view (presumably
// memcopy1 = forward entry, memcopy3 = aligned block move, memcopy4 =
// backward alignment path, memcopy0 = common exit) -- confirm against the
// full source.
29 asm("cld"); // assume forward copy initially (DF=0)
30 asm("test ecx,ecx"); // length == 0?
31 asm("jz short memcopy0");// if length=0, nothing to do
32 asm("xor edx,edx"); // edx = direction word: 0 = forwards
33 asm("cmp edi,esi"); // compare source and dest addresses
34 asm("jc short memcopy1");// if dest<source, must go forwards
35 asm("std"); // else go backwards (DF=1) so an overlap is handled
36 asm("add esi,ecx"); // and start at end of block
37 asm("add edi,ecx"); // (pointers now one byte past the end; edx compensates)
38 asm("inc edx"); // edx=1 if backwards, 0 if forwards
40 asm("cmp ecx,16"); // if length<16 don't bother with alignment check
41 asm("jc short memcopy2");// small move: plain byte copy
42 asm("mov ebx,edi"); // ebx = destination address
43 asm("and ebx,3"); // ebx bottom 2 bits = alignment of destination wrt hardware bus
44 asm("jz short memcopy3");// if aligned, proceed with block move
45 asm("or edx,edx"); // check direction of move
46 asm("jnz short memcopy4");// if backwards, ebx = number of byte moves to align destination
47 asm("neg ebx"); // else number of byte moves = 4-ebx (the +4 is presumably on an elided line)
50 asm("sub ecx,ebx"); // subtract number of alignment bytes from length
51 asm("xchg ecx,ebx"); // ecx = alignment byte count, ebx = remaining length
52 asm("sub edi,edx"); // adjust if backwards move (step back 1 so movsb hits the last byte)
53 asm("sub esi,edx"); //
54 asm("rep movsb"); // move bytes to align destination
55 asm("add edi,edx"); // undo the backwards-move adjustment
56 asm("add esi,edx"); //
57 asm("mov ecx,ebx"); // length back into ecx
59 asm("mov ebx,ecx"); // save length in ebx
60 asm("shl edx,2"); // adjustment becomes 4 for backwards move (dword steps)
61 asm("shr ecx,2"); // number of dwords to move into ecx
62 asm("sub edi,edx"); // adjust if backwards move
63 asm("sub esi,edx"); //
64 asm("rep movsd"); // perform DWORD block move
65 asm("add edi,edx"); // undo the backwards-move adjustment
66 asm("add esi,edx"); //
67 asm("mov ecx,ebx"); // length back into ecx
68 asm("and ecx,3"); // number of remaining bytes to move
69 asm("jz short memcopy0");// if zero, we are finished
70 asm("shr edx,2"); // adjustment back to 1 for backwards move (byte steps)
71 asm("memcopy2:"); // *** come here for small move
72 asm("sub edi,edx"); // adjust if backwards move
73 asm("sub esi,edx"); //
74 asm("rep movsb"); // move remaining bytes
77 asm("ret"); // finished - return value in EAX
// NOTE(review): no "cld" is visible before this ret in this view; the DF
// restore presumably happens on an elided line -- confirm against full source.
80 __NAKED__ void CopyInterSeg32()
82 // Copy ECX bytes from DS:ESI to ES:EDI
83 // ECX, ESI and EDI are all multiples of 4
84 // Modifies EAX, EBX, ECX, EDX, ESI, EDI
// Word-aligned variant: no alignment fix-up needed, pure dword block move.
// Overlap-safe: copies backwards (DF=1) when dest >= source.
// NOTE(review): labels memmove0 and memmove1 are defined on lines elided
// from this view (presumably memmove1 = forward entry, memmove0 = exit);
// the DF restore (cld) before the ret is also not visible -- confirm.
89 asm("test ecx,ecx"); // length == 0?
90 asm("jz short memmove0");// if length=0, nothing to do
91 asm("cmp edi,esi"); // compare source and dest addresses
92 asm("jc short memmove1");// if dest<source, must go forwards
93 asm("std"); // else go backwards (DF=1) so an overlap is handled
94 asm("lea esi,[esi+ecx-4]"); // and start at end of block - 4 (address of last dword)
95 asm("lea edi,[edi+ecx-4]"); //
97 asm("shr ecx,2"); // ecx now contains number of dwords to move
98 asm("rep movsd"); // do dword block move
101 asm("ret"); // finished - return value in EAX
104 __NAKED__ void FillInterSeg()
106 // Fill ECX bytes at ES:EDI with AL
107 // Modifies EAX, ECX, EDX, EDI
// memset-like fill: replicates AL into all four bytes of EAX, byte-fills to
// 4-byte-align EDI, dword-fills the bulk, then byte-fills the tail.
// NOTE(review): labels memfill0, memfill1, memfill2 and the final ret are on
// lines elided from this view -- confirm against the full source.
111 asm("cld"); // go forwards through array
112 asm("test ecx,ecx"); // length == 0?
113 asm("jz short memfill0");// if length zero, nothing to do
114 asm("cmp ecx,8"); // if array very small, just do byte fills
115 asm("jb short memfill1"); // (memfill1 = plain byte-fill path, elided from this view)
117 asm("mov ah,al"); // repeat al in all bytes of eax
118 asm("movzx edx,ax"); // edx = fill pattern in low 16 bits
119 asm("shl eax,16"); //
120 asm("or eax,edx"); // eax = fill byte replicated into all 4 bytes
121 asm("mov edx,ecx"); // length into edx
122 // ecx = number of byte fills to align = 4-(edi mod 4)
124 asm("sub ecx,edi"); // NOTE(review): ecx preload (presumably mov ecx,4) and the mask (and ecx,3) are on elided lines
126 asm("jz short memfill2");// if already aligned, proceed to dword fill
127 asm("sub edx,ecx"); // subtract alignment bytes from length
128 asm("rep stosb"); // do byte fills to align
130 asm("mov ecx,edx"); // length remaining into ecx
131 asm("shr ecx,2"); // number of dwords to fill into ecx
132 asm("rep stosd"); // perform dword fill
133 asm("mov ecx,edx"); // calculate number of leftover bytes
134 asm("and ecx,3"); // in ecx
135 asm("jz short memfill0");// if none left, exit
137 asm("rep stosb"); // do byte fills to make up correct length
144 /** Reads the current thread's memory space with appropriate permissions.
146 Performs a memcpy(aKernAddr, aAddr, aLength).
147 The reads are performed using requestor privilege level from GS, ie equal
148 to the privilege level of the caller of the Exec:: function.
149 Note that source and destination areas may not overlap.
151 @param aKernAddr Destination address in kernel memory.
152 @param aAddr Source address in kernel or user memory.
153 @param aLength Number of bytes to copy.
155 @pre Call in a thread context.
156 @pre Kernel must be unlocked.
157 @pre Must be called under an XTRAP harness, or calling thread must not be
158 in a critical section.
160 EXPORT_C __NAKED__ void kumemget(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
// NOTE(review): the register saves and the DS load from GS (caller privilege)
// are on lines elided from this view; the [esp+20] base offset implies four
// dwords pushed after the return address -- confirm against full source.
166 asm("mov edi, [esp+20]"); // edi = aKernAddr (destination)
167 asm("mov esi, [esp+24]"); // esi = aAddr (source)
168 asm("mov ecx, [esp+28]"); // ecx = aLength in bytes
171 asm("call %a0": : "i"(&CopyInterSeg)); // copy DS:ESI -> ES:EDI ("%a" = constant address operand)
180 /** Reads the current thread's memory space with user permissions.
182 Performs a memcpy(aKernAddr, aUserAddr, aLength).
183 The reads are performed with ring 3 RPL.
184 Note that source and destination areas may not overlap.
186 @param aKernAddr Destination address in kernel memory.
187 @param aUserAddr Source address in user memory.
188 @param aLength Number of bytes to copy.
190 @pre Call in a thread context.
191 @pre Kernel must be unlocked.
192 @pre Must be called under an XTRAP harness, or calling thread must not be
193 in a critical section.
195 EXPORT_C __NAKED__ void umemget(TAny* /*aKernAddr*/, const TAny* /*aUserAddr*/, TInt /*aLength*/)
// NOTE(review): register saves and the segment-register load consuming EAX are
// on lines elided from this view ([esp+20] implies four dwords pushed) -- confirm.
201 asm("mov edi, [esp+20]"); // edi = aKernAddr (destination)
202 asm("mov esi, [esp+24]"); // esi = aUserAddr (source)
203 asm("mov ecx, [esp+28]"); // ecx = aLength in bytes
204 asm("mov eax, %0": : "i"(RING3_DS)); // eax = ring-3 data selector; presumably moved into DS on an elided line
206 asm("call %a0": : "i"(&CopyInterSeg)); // copy DS:ESI -> ES:EDI with user-RPL source segment
215 /** Does a word-aligned read of the current thread's memory space with appropriate permissions.
217 Performs a memcpy(aKernAddr, aAddr, aLength).
218 The reads are performed using requestor privilege level from GS, ie equal
219 to the privilege level of the caller of the Exec:: function.
220 Note that source and destination areas may not overlap.
222 @param aKernAddr Destination address in kernel memory, must be 4-byte aligned.
223 @param aAddr Source address in kernel or user memory, must be 4-byte aligned.
224 @param aLength Number of bytes to copy, must be a multiple of 4.
226 @pre Call in a thread context.
227 @pre Kernel must be unlocked.
228 @pre Must be called under an XTRAP harness, or calling thread must not be
229 in a critical section.
231 EXPORT_C __NAKED__ void kumemget32(TAny* /*aKernAddr*/, const TAny* /*aAddr*/, TInt /*aLength*/)
// NOTE(review): register saves and the DS load from GS are on lines elided
// from this view ([esp+20] implies four dwords pushed) -- confirm.
237 asm("mov edi, [esp+20]"); // edi = aKernAddr (destination, 4-byte aligned)
238 asm("mov esi, [esp+24]"); // esi = aAddr (source, 4-byte aligned)
239 asm("mov ecx, [esp+28]"); // ecx = aLength in bytes (multiple of 4)
242 asm("call %a0": : "i"(&CopyInterSeg32)); // dword-wise copy DS:ESI -> ES:EDI
251 /** Does a word-aligned read of the current thread's memory space with user permissions.
253 Performs a memcpy(aKernAddr, aUserAddr, aLength).
254 The reads are performed with ring 3 RPL.
255 Note that source and destination areas may not overlap.
257 @param aKernAddr Destination address in kernel memory, must be 4-byte aligned.
258 @param aUserAddr Source address in user memory, must be 4-byte aligned.
259 @param aLength Number of bytes to copy, must be a multiple of 4.
261 @pre Call in a thread context.
262 @pre Kernel must be unlocked.
263 @pre Must be called under an XTRAP harness, or calling thread must not be
264 in a critical section.
266 EXPORT_C __NAKED__ void umemget32(TAny* /*aKernAddr*/, const TAny* /*aUserAddr*/, TInt /*aLength*/)
// NOTE(review): register saves and the segment-register load consuming EAX are
// on lines elided from this view ([esp+20] implies four dwords pushed) -- confirm.
272 asm("mov edi, [esp+20]"); // edi = aKernAddr (destination, 4-byte aligned)
273 asm("mov esi, [esp+24]"); // esi = aUserAddr (source, 4-byte aligned)
274 asm("mov ecx, [esp+28]"); // ecx = aLength in bytes (multiple of 4)
275 asm("mov eax, %0": : "i"(RING3_DS)); // eax = ring-3 data selector; presumably moved into DS on an elided line
277 asm("call %a0": : "i"(&CopyInterSeg32)); // dword-wise copy with user-RPL source segment
286 /** Writes to the current thread's memory space with appropriate permissions.
288 Performs a memcpy(aAddr, aKernAddr, aLength).
289 The writes are performed using requestor privilege level from GS, ie equal
290 to the privilege level of the caller of the Exec:: function.
291 Note that source and destination areas may not overlap.
293 @param aAddr Destination address in kernel or user memory.
294 @param aKernAddr Source address in kernel memory.
295 @param aLength Number of bytes to copy.
297 @pre Call in a thread context.
298 @pre Kernel must be unlocked.
299 @pre Must be called under an XTRAP harness, or calling thread must not be
300 in a critical section.
302 EXPORT_C __NAKED__ void kumemput(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
// NOTE(review): register saves and the ES load from GS (destination written at
// caller privilege) are on lines elided from this view ([esp+20] implies four
// dwords pushed) -- confirm against full source.
308 asm("mov edi, [esp+20]"); // edi = aAddr (destination)
309 asm("mov esi, [esp+24]"); // esi = aKernAddr (source)
310 asm("mov ecx, [esp+28]"); // ecx = aLength in bytes
313 asm("call %a0": : "i"(&CopyInterSeg)); // copy DS:ESI -> ES:EDI
322 /** Writes to the current thread's memory space with user permissions.
324 Performs a memcpy(aAddr, aKernAddr, aLength).
325 The writes are performed with ring 3 RPL.
326 Note that source and destination areas may not overlap.
328 @param aUserAddr Destination address in user memory.
329 @param aKernAddr Source address in kernel memory.
330 @param aLength Number of bytes to copy.
332 @pre Call in a thread context.
333 @pre Kernel must be unlocked.
334 @pre Must be called under an XTRAP harness, or calling thread must not be
335 in a critical section.
337 EXPORT_C __NAKED__ void umemput(TAny* /*aUserAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
// NOTE(review): register saves and the segment-register load consuming EAX
// (presumably into ES, since the destination is the user-side operand) are on
// lines elided from this view ([esp+20] implies four dwords pushed) -- confirm.
343 asm("mov edi, [esp+20]"); // edi = aUserAddr (destination)
344 asm("mov esi, [esp+24]"); // esi = aKernAddr (source)
345 asm("mov ecx, [esp+28]"); // ecx = aLength in bytes
346 asm("mov eax, %0": : "i"(RING3_DS)); // eax = ring-3 data selector for the user-side segment
348 asm("call %a0": : "i"(&CopyInterSeg)); // copy DS:ESI -> ES:EDI with user-RPL destination segment
357 /** Does a word-aligned write to the current thread's memory space with appropriate permissions.
359 Performs a memcpy(aAddr, aKernAddr, aLength).
360 The writes are performed using requestor privilege level from GS, ie equal
361 to the privilege level of the caller of the Exec:: function.
362 Note that source and destination areas may not overlap.
364 @param aAddr Destination address in kernel or user memory, must be 4-byte aligned.
365 @param aKernAddr Source address in kernel memory, must be 4-byte aligned.
366 @param aLength Number of bytes to copy, must be a multiple of 4.
368 @pre Call in a thread context.
369 @pre Kernel must be unlocked.
370 @pre Must be called under an XTRAP harness, or calling thread must not be
371 in a critical section.
373 EXPORT_C __NAKED__ void kumemput32(TAny* /*aAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
// NOTE(review): register saves and the ES load from GS are on lines elided
// from this view ([esp+20] implies four dwords pushed) -- confirm.
379 asm("mov edi, [esp+20]"); // edi = aAddr (destination, 4-byte aligned)
380 asm("mov esi, [esp+24]"); // esi = aKernAddr (source, 4-byte aligned)
381 asm("mov ecx, [esp+28]"); // ecx = aLength in bytes (multiple of 4)
384 asm("call %a0": : "i"(&CopyInterSeg32)); // dword-wise copy DS:ESI -> ES:EDI
393 /** Does a word-aligned write to the current thread's memory space with user permissions.
395 Performs a memcpy(aAddr, aKernAddr, aLength).
396 The writes are performed with ring 3 RPL.
397 Note that source and destination areas may not overlap.
399 @param aUserAddr Destination address in user memory, must be 4-byte aligned.
400 @param aKernAddr Source address in kernel memory, must be 4-byte aligned.
401 @param aLength Number of bytes to copy, must be a multiple of 4.
403 @pre Call in a thread context.
404 @pre Kernel must be unlocked.
405 @pre Must be called under an XTRAP harness, or calling thread must not be
406 in a critical section.
408 EXPORT_C __NAKED__ void umemput32(TAny* /*aUserAddr*/, const TAny* /*aKernAddr*/, TInt /*aLength*/)
// NOTE(review): register saves and the segment-register load consuming EAX
// (presumably into ES for the user-side destination) are on lines elided from
// this view ([esp+20] implies four dwords pushed) -- confirm.
414 asm("mov edi, [esp+20]"); // edi = aUserAddr (destination, 4-byte aligned)
415 asm("mov esi, [esp+24]"); // esi = aKernAddr (source, 4-byte aligned)
416 asm("mov ecx, [esp+28]"); // ecx = aLength in bytes (multiple of 4)
417 asm("mov eax, %0": : "i"(RING3_DS)); // eax = ring-3 data selector for the user-side segment
419 asm("call %a0": : "i"(&CopyInterSeg32)); // dword-wise copy with user-RPL destination segment
428 /** Fills the current thread's memory space with appropriate permissions.
430 Performs a memset(aAddr, aValue, aLength).
431 The writes are performed using requestor privilege level from GS, ie equal
432 to the privilege level of the caller of the Exec:: function.
434 @param aAddr Destination address in kernel or user memory.
435 @param aValue Value to write to each byte.
436 @param aLength Number of bytes to fill.
438 @pre Call in a thread context.
439 @pre Kernel must be unlocked.
440 @pre Must be called under an XTRAP harness, or calling thread must not be
441 in a critical section.
443 EXPORT_C __NAKED__ void kumemset(TAny* /*aAddr*/, const TUint8 /*aValue*/, TInt /*aLength*/)
// NOTE(review): register saves and the ES load from GS are on lines elided
// from this view; the [esp+12] base offset implies two dwords pushed after
// the return address -- confirm against full source.
447 asm("mov edi, [esp+12]"); // edi = aAddr (destination)
448 asm("mov eax, [esp+16]"); // eax = aValue (only AL is used by FillInterSeg)
449 asm("mov ecx, [esp+20]"); // ecx = aLength in bytes
452 asm("call %a0": :"i"(&FillInterSeg)); // fill ES:EDI with AL
459 /** Fills the current thread's memory space with user permissions.
461 Performs a memset(aUserAddr, aValue, aLength).
462 The writes are performed with ring 3 RPL.
464 @param aUserAddr Destination address in user memory.
465 @param aValue Value to write to each byte.
466 @param aLength Number of bytes to fill.
468 @pre Call in a thread context.
469 @pre Kernel must be unlocked.
470 @pre Must be called under an XTRAP harness, or calling thread must not be
471 in a critical section.
473 EXPORT_C __NAKED__ void umemset(TAny* /*aUserAddr*/, const TUint8 /*aValue*/, TInt /*aLength*/)
// NOTE(review): register saves and the segment-register load consuming EDX are
// on lines elided from this view ([esp+12] implies two dwords pushed) -- confirm.
477 asm("mov edi, [esp+12]"); // edi = aUserAddr (destination)
478 asm("mov eax, [esp+16]"); // eax = aValue (only AL is used by FillInterSeg)
479 asm("mov ecx, [esp+20]"); // ecx = aLength in bytes
480 asm("mov edx, %0": : "i"(RING3_DS)); // edx = ring-3 data selector (EDX, since EAX holds the fill value); presumably moved into ES on an elided line
482 asm("call %a0": :"i"(&FillInterSeg)); // fill ES:EDI with AL using user-RPL segment
488 __NAKED__ void uumemcpy32(TAny* /*aUserDst*/, const TAny* /*aUserSrc*/, TInt /*aLength*/)
// Word-aligned user-to-user copy: both source and destination are in the
// current thread's user memory space.
// NOTE(review): register saves and the segment-register setup for both
// user-side operands are on lines elided from this view; the [esp+24] base
// offset implies five dwords pushed after the return address -- confirm.
495 asm("mov edi, [esp+24]"); // edi = aUserDst (destination, 4-byte aligned per CopyInterSeg32 contract)
496 asm("mov esi, [esp+28]"); // esi = aUserSrc (source, 4-byte aligned)
497 asm("mov ecx, [esp+32]"); // ecx = aLength in bytes (multiple of 4)
501 asm("call %a0": : "i"(&CopyInterSeg32)); // dword-wise copy DS:ESI -> ES:EDI
510 __NAKED__ void uumemcpy(TAny* /*aUserDst*/, const TAny* /*aUserSrc*/, TInt /*aLength*/)
// User-to-user copy: both source and destination are in the current thread's
// user memory space; arbitrary alignment and length.
// NOTE(review): register saves and the segment-register setup for both
// user-side operands are on lines elided from this view; the [esp+24] base
// offset implies five dwords pushed after the return address -- confirm.
517 asm("mov edi, [esp+24]"); // edi = aUserDst (destination)
518 asm("mov esi, [esp+28]"); // esi = aUserSrc (source)
519 asm("mov ecx, [esp+32]"); // ecx = aLength in bytes
523 asm("call %a0": : "i"(&CopyInterSeg)); // copy DS:ESI -> ES:EDI