// Copyright (c) 1997-2009 Nokia Corporation and/or its subsidiary(-ies).
// All rights reserved.
// This component and the accompanying materials are made available
// under the terms of the License "Eclipse Public License v1.0"
// which accompanies this distribution, and is available
// at the URL "http://www.eclipse.org/legal/epl-v10.html".
//
// Initial Contributors:
// Nokia Corporation - initial contribution.
//
// Contributors:
//
// Description:
// e32\common\arm\cdes8.cia
//
//

// NOTE(review): this file was recovered from VCS-annotated text ("sl@0:"
// line prefixes) and re-flowed.  The header name of the first #include was
// lost in extraction (an angle-bracket include stripped as markup) --
// restore it from the original source tree before building.
#include
#include "../common.h"

#define __FIXED_POINT_64BIT_DIV10__

#if defined(__DES8_MACHINE_CODED__) || defined(__EABI__)

GLREF_C void Des8PanicBadDesType();
GLREF_C void Des8PanicPosOutOfRange();

#endif

#ifdef __DES8_MACHINE_CODED__

GLREF_C void Des8PanicLengthNegative();
GLREF_C void Des8PanicMaxLengthNegative();
GLREF_C void Des8PanicLengthOutOfRange();
GLREF_C void Des8PanicDesOverflow();
GLREF_C void Des8PanicDesIndexOutOfRange();

// Descriptor word layout assumed by every routine below (as the code shows):
//   word 0 = type in the top 4 bits + length in the low 28 bits
//            (type constants used here: 0x1=EPtrC, 0x2=EPtr, 0x3=EBuf,
//             0x4=EBufCPtr; 0x0=EBufC is implied),
//   word 1 = max length (TDes8 types only),
//   then either the data (EBufC/EBuf) or a pointer to it (EPtrC/EPtr/EBufCPtr).
// "cmp rX, #0x50000000 / bcs <panic>" rejects any type > 4.
//
// The recurring idiom
//   eor rX, rX, rX, lsr #1   // fold the type nibble
//   msr cpsr_flg, rX         // load it into the N,Z,C,V flags
//   addcs rP, rP, #4         // skip the max-length word (EPtr/EBuf/EBufCPtr)
//   ldrle rP, [rP]           // indirect through the stored pointer
//                            //   (EPtrC/EPtr/EBufCPtr)
//   addeq rP, rP, #4         // skip the TBufC length word (EBufCPtr)
// computes Ptr() for all five descriptor types without a branch, using
// ARM conditional execution.

__NAKED__ EXPORT_C const TUint8 *TDesC8::Ptr() const
//
// Return a pointer to the buffer.
//
	{
	asm("ldr r1, [r0], #4 ");               // r1=type/length, r0->word 1
	asm("cmp r1, #0x50000000 ");            // validate descriptor type <= 4
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r1, r1, r1, lsr #1 ");         // type-nibble -> flags dispatch
	asm("msr cpsr_flg, r1 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0 = Ptr() for all types
	__JUMP(,lr);
	}

#ifndef __EABI_CTORS__
__NAKED__ EXPORT_C TPtrC8::TPtrC8()
//
// Default constructor
//
	{
	asm("mov r1, #0x10000000 ");            // type=EPtrC, length=0
	asm("mov r2, #0 ");                     // ptr=NULL
	asm("stmia r0, {r1,r2} ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8::TPtrC8(const TDesC8& /*aDes*/)
//
// Constructor
//
	{
	asm("ldr r2, [r1], #4 ");               // r2 = type/length
	asm("bic r3, r2, #0xF0000000");         // r3 = length
	asm("orr r3, r3, #0x10000000");         // r3 = EPtrC + length
	asm("cmp r2, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r2, r2, r2, lsr #1 ");
	asm("msr cpsr_flg, r2 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1 = aDes.Ptr()
	asm("str r3, [r0] ");
	asm("str r1, [r0, #4] ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8::TPtrC8(const TUint8* /*aString*/)
//
// Constructor
//
	{
	asm("mov r2, r1 ");                     // save aString pointer
	asm("1: ");
	asm("ldrb r3, [r1], #1 ");
	asm("cmp r3, #0 ");
	asm("bne 1b ");                         // loop until we reach zero terminator
	asm("sub r1, r1, r2 ");                 // r1 = length + 1
	asm("sub r1, r1, #1 ");                 // r1 = length
	asm("orr r1, r1, #0x10000000 ");        // r1=EPtrC + length
	asm("stmia r0, {r1, r2} ");             // store type/length and ptr fields
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8::TPtrC8(const TUint8* /*aBuf*/,TInt /*aLength*/)
//
// Constructor
//
	{
	asm("orrs r2, r2, #0x10000000 ");       // r2=EPtrC+length; N set if aLength<0
	asm("strpl r2, [r0] ");
	asm("strpl r1, [r0, #4] ");
	__JUMP(pl,lr);
	asm("b " CSM_Z23Des8PanicLengthNegativev);
	}

__NAKED__ EXPORT_C TPtr8::TPtr8(TUint8* /*aBuf*/,TInt /*aMaxLength*/)
//
// Constructor
//
	{
	asm("cmp r2, #0 ");                     // aMaxLength must be >= 0
	asm("movpl r3, r1 ");
	asm("movpl r1, #0x20000000 ");          // length=0, EPtr
	asm("stmplia r0, {r1,r2,r3} ");
	__JUMP(pl,lr);
	asm("b " CSM_Z26Des8PanicMaxLengthNegativev);
	}

__NAKED__ EXPORT_C TPtr8::TPtr8(TUint8* /*aBuf*/,TInt /*aLength*/,TInt /*aMaxLength*/)
//
// Constructor
//
	{
	asm("cmp r2, #0 ");                     // check length>=0
	asm("cmpge r3, r2 ");                   // if so, check maxlength>=length
	asm("movge r12, r1 ");
	asm("orrge r2, r2, #0x20000000 ");      // r2 = length + EPtr
	asm("stmgeia r0, {r2,r3,r12} ");
	__JUMP(ge,lr);
	asm("cmp r2, #0 ");                     // failed: diagnose which panic to raise
	asm("bmi " CSM_Z23Des8PanicLengthNegativev);
	asm("cmp r3, #0 ");
	asm("bmi " CSM_Z26Des8PanicMaxLengthNegativev);
	asm("b " CSM_Z25Des8PanicLengthOutOfRangev);
	}

__NAKED__ EXPORT_C TPtr8::TPtr8(TBufCBase8& /*aLcb*/,TInt /*aMaxLength*/)
//
// Constructor
//
	{
	asm("mov r3, r1 ");
	asm("ldr r1, [r3] ");
	asm("bic r1, r1, #0xF0000000 ");        // r1=aLcb.Length()
	asm("cmp r1, r2 ");                     // check against maxlength
	asm("orrle r1, r1, #0x40000000 ");      // r1=aLcb.Length() + EBufCPtr
	asm("stmleia r0, {r1,r2,r3} ");
	__JUMP(le,lr);
	asm("b " CSM_Z25Des8PanicLengthOutOfRangev);
	}

__NAKED__ EXPORT_C TBufCBase8::TBufCBase8()
//
// Constructor
//
	{
	asm("mov r1, #0 ");                     // type=EBufC, length=0
	asm("str r1, [r0] ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBufCBase8::TBufCBase8(const TUint8* /*aString*/,TInt /*aMaxLength*/)
//
// Constructor
//
	{
	asm("mov r3, r1 ");                     // save aString pointer
	asm("1: ");
	asm("ldrb r12, [r3], #1 ");
	asm("cmp r12, #0 ");
	asm("bne 1b ");                         // loop until we reach zero terminator
	asm("sub r3, r3, r1 ");                 // r3 = length + 1
	asm("sub r3, r3, #1 ");                 // r3 = length (+EBufC)
	asm("cmp r3, r2 ");                     // check against max length
	asm("bgt " CSM_Z25Des8PanicLengthOutOfRangev);
	asm("stmfd sp!, {r0,lr} ");             // save registers for function call
	asm("str r3, [r0], #4 ");               // save length/type field, r0->buffer
	asm("mov r2, r3 ");                     // length into r2 for function call
	asm("bl memmove ");                     // memmove(buffer, aString, length)
	__POPRET("r0,");
	}

__NAKED__ EXPORT_C TBufCBase8::TBufCBase8(const TDesC8& /*aDes*/,TInt /*aMaxLength*/)
//
// Constructor
//
	{
	asm("ldr r3, [r1], #4 ");               // r3 = type/length
	asm("bic r12, r3, #0xF0000000");        // r12 = length
	asm("cmp r12, r2 ");                    // compare with maxlength
	asm("bgt " CSM_Z25Des8PanicLengthOutOfRangev);
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1 = aDes.Ptr()
	asm("stmfd sp!, {r0,lr} ");             // save registers for function call
	asm("str r12, [r0], #4 ");              // store length/type, r0->buffer
	asm("mov r2, r12 ");                    // length into r2 for function call
	asm("bl memmove ");
	__POPRET("r0,");
	}
#endif

__NAKED__ EXPORT_C void TBufCBase8::Copy(const TUint8* /*aString*/,TInt /*aMaxLength*/)
//
// Copy from a string.
//
	{
	asm("mov r3, r1 ");                     // save aString pointer
	asm("1: ");
	asm("ldrb r12, [r3], #1 ");
	asm("cmp r12, #0 ");
	asm("bne 1b ");                         // loop until we reach zero terminator
	asm("sub r3, r3, r1 ");                 // r3 = length + 1
	asm("sub r3, r3, #1 ");                 // r3 = length (+EBufC)
	asm("cmp r3, r2 ");                     // check against max length
	asm("bgt " CSM_Z25Des8PanicLengthOutOfRangev);
	asm("str r3, [r0], #4 ");               // save length/type field, r0->buffer
	asm("mov r2, r3 ");                     // length into r2 for function call
	asm("b memmove ");                      // tail-call: memmove returns to caller
	}

__NAKED__ EXPORT_C void TBufCBase8::Copy(const TDesC8& /*aDes*/,TInt /*aMaxLength*/)
//
// Copy from a descriptor.
//
	{
	asm("ldr r3, [r1], #4 ");               // r3 = type/length
	asm("bic r12, r3, #0xF0000000");        // r12 = length
	asm("cmp r12, r2 ");                    // compare with maxlength
	asm("bgt " CSM_Z20Des8PanicDesOverflowv);
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1 = aDes.Ptr()
	asm("str r12, [r0], #4 ");              // store length/type, r0->buffer
	asm("mov r2, r12 ");                    // length into r2 for function call
	asm("b memmove ");                      // tail-call: memmove returns to caller
	}

#ifndef __EABI_CTORS__
__NAKED__ EXPORT_C TBufBase8::TBufBase8(TInt /*aMaxLength*/)
	{
	asm("mov r2, #0x30000000 ");            // EBuf + zero length
	asm("str r2, [r0] ");
	asm("str r1, [r0, #4] ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TBufBase8::TBufBase8(TInt /*aLength*/, TInt /*aMaxLength*/)
	{
	asm("cmp r1, #0 ");                     // check length>=0
	asm("cmpge r2, r1 ");                   // if so, check maxlength>=length
	asm("orrge r1, r1, #0x30000000 ");      // r1=length + EBuf
	asm("stmgeia r0, {r1,r2} ");            // store length/type and maxlength fields
	__JUMP(ge,lr);
	asm("cmp r2, #0 ");                     // failed: diagnose which panic to raise
	asm("bmi " CSM_Z26Des8PanicMaxLengthNegativev);
	asm("b " CSM_Z25Des8PanicLengthOutOfRangev);
	}

__NAKED__ EXPORT_C TBufBase8::TBufBase8(const TUint8* /*aString*/, TInt /*aMaxLength*/)
	{
	asm("mov r12, r1 ");                    // save aString pointer
	asm("1: ");
	asm("ldrb r3, [r1], #1 ");
	asm("cmp r3, #0 ");
	asm("bne 1b ");                         // loop until we reach zero terminator
	asm("sub r1, r1, r12 ");                // r1 = length + 1
	asm("sub r1, r1, #1 ");                 // r1 = length
	asm("cmp r1, r2 ");                     // compare to max length
	asm("bgt " CSM_Z25Des8PanicLengthOutOfRangev);  // length too big, so panic
	asm("orr r1, r1, #0x30000000 ");        // if length<=max, r1=EBuf + length
	asm("stmfd sp!, {r0,lr} ");             // save registers for function call
	asm("stmia r0!, {r1, r2} ");            // store type/length and max length fields, r0->buffer
	asm("bic r2, r1, #0xf0000000 ");        // r2=length
	asm("mov r1, r12 ");                    // r12=aString
	asm("bl memmove ");
	__POPRET("r0,");
	}

__NAKED__ EXPORT_C TBufBase8::TBufBase8(const TDesC8& /*aDes*/, TInt /*aMaxLength*/)
	{
	asm("ldr r3, [r1], #4 ");               // r3 = type/length
	asm("bic r12, r3, #0xF0000000");        // r12 = length
	asm("cmp r12, r2 ");                    // compare with maxlength
	asm("bgt " CSM_Z25Des8PanicLengthOutOfRangev);
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1 = aDes.Ptr()
	asm("stmfd sp!, {r0,lr} ");             // save registers for function call
	asm("orr r12, r12, #0x30000000 ");      // add EBuf type field
	asm("str r12, [r0], #4 ");              // store length/type, r0->max length
	asm("str r2, [r0], #4 ");               // store max length, r0->buffer
	asm("bic r2, r12, #0xf0000000 ");       // length into r2 for function call
	asm("bl memmove ");
	__POPRET("r0,");
	}
#endif

__NAKED__ EXPORT_C void TDes8::SetLength(TInt /*aLength*/)
//
// Set the length of the descriptor, checking the length is O.K.
//
	{
	asm("ldmia r0, {r2,r3} ");              // r2=length/type, r3=maxlength
	asm("cmp r1, r3 ");                     // check aLength against maxlength and for -ve values
	asm("bhi " CSM_Z20Des8PanicDesOverflowv);  // unsigned: catches aLength<0 too
	asm("and r2, r2, #0xF0000000 ");        // r2=type field
	asm("cmp r2, #0x40000000 ");            // check for EBufCPtr
	asm("orr r2, r2, r1 ");                 // r2=type + new length
	asm("str r2, [r0] ");                   // store new length
	__JUMP(ne,lr);
	asm("ldr r2, [r0, #8] ");               // r2=pointer to TBufCBase
	asm("str r1, [r2] ");                   // update length of TBufCBase
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C void TDes8::SetMax()
//
// Set the length to MaxLength().
//
	{
	asm("ldmia r0, {r1,r2} ");              // r1=length/type, r2=maxlength
	asm("and r1, r1, #0xF0000000 ");        // r1=type field
	asm("cmp r1, #0x40000000 ");            // check for EBufCPtr
	asm("orr r1, r1, r2 ");                 // r1=type field + maxlength
	asm("str r1, [r0] ");                   // store new length
	__JUMP(ne,lr);
	asm("ldr r1, [r0, #8] ");               // r1 = pointer to TBufCBase
	asm("str r2, [r1] ");                   // update length of TBufCBase
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C void TDes8::Copy(const TUint8* /*aString*/)
//
// Copy a string to this descriptor.
//
	{
	asm("mov r2, r1 ");                     // r2=aString
	asm("1: ");
	asm("ldrb r3, [r2], #1 ");
	asm("cmp r3, #0 ");
	asm("bne 1b ");                         // loop until zero terminator reached
	asm("sub r2, r2, r1 ");
	asm("sub r2, r2, #1 ");                 // r2=length of string
	asm("ldmia r0, {r3,r12} ");             // r3=type/length of this, r12=maxlength
	asm("cmp r2, r12 ");                    // compare new length against maxlength
	asm("bgt " CSM_Z20Des8PanicDesOverflowv);
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("and r3, r3, #0xF0000000 ");        // r3=type of this
	asm("orr r3, r3, r2 ");                 // r3=new type/length
	asm("str r3, [r0], #4 ");               // store it
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("streq r2, [r0], #4 ");             // if EBufCPtr, update length of TBufCBase, r0=Ptr()
	asm("b memmove ");                      // tail-call: memmove returns to caller
	}

__NAKED__ EXPORT_C void TDes8::Copy(const TUint8* /*aBuf*/,TInt /*aLength*/)
//
// Copy the aLength characters to the descriptor.
//
	{
	asm("ldmia r0, {r3,r12} ");             // r3=type/length of this, r12=maxlength
	asm("cmp r2, r12 ");                    // compare new length against maxlength
	asm("bhi " CSM_Z20Des8PanicDesOverflowv);  // Des8Panic if >MaxLength or -ve
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("and r3, r3, #0xF0000000 ");        // r3=type of this
	asm("orr r3, r3, r2 ");                 // r3=new type/length
	asm("str r3, [r0], #4 ");               // store it
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("streq r2, [r0], #4 ");             // if EBufCPtr, update length of TBufCBase, r0=Ptr()
	asm("b memmove ");
	}

__NAKED__ EXPORT_C void TDes8::Copy(const TDesC8& /*aDes*/)
//
// Copy a descriptor to this descriptor.
//
	{
	asm("ldr r3, [r1], #4 ");               // r3 = type/length of aDes
	asm("bic r12, r3, #0xF0000000");        // r12 = aDes.length
	asm("ldr r2, [r0, #4] ");               // r2=this.maxlength
	asm("cmp r12, r2 ");                    // compare with maxlength
	asm("bgt " CSM_Z20Des8PanicDesOverflowv);
	asm("ldr r2, [r0] ");                   // get type of this
	asm("cmp r2, #0x50000000 ");            // check both descriptor types
	asm("cmpcc r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("and r2, r2, #0xF0000000 ");
	asm("orr r2, r12, r2 ");                // r2=new type/length of this
	asm("str r2, [r0], #4 ");               // store it
	asm("eor r3, r3, r3, lsr #1 ");         // first flag-dispatch: source Ptr()
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1 = aDes.Ptr()
	asm("eor r2, r2, r2, lsr #1 ");         // second flag-dispatch: dest Ptr()
	asm("msr cpsr_flg, r2 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("streq r12, [r0], #4 ");            // if EBufCPtr, update length of TBufCBase, r0=Ptr()
	asm("mov r2, r12 ");                    // length into r2 for function call
	asm("b memmove ");
	}

#ifndef __KERNEL_MODE__
__NAKED__ EXPORT_C TPtr8 TDes8::LeftTPtr(TInt /*aLength*/) const
//
// Extract the left portion of the descriptor.
//
	{
	// On entry r0=return store ptr, r1=this, r2=aLength
	// Return TPtr8 ([r0]=length/type,[r0,#4]=maxLength,[r0,#8]=Ptr)
	asm("ldr r3, [r1], #4 ");               // r3=this.length/type
	asm("cmp r2, #0 ");                     // check aLength>=0
	asm("blt Des8PanicPosOutOfRange__Fv "); // if not panic
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r2, r12 ");                    // limit aLength to Length()
	asm("movgt r2, r12 ");
	asm("cmp r3, #0x50000000 ");            // check type() <= 4
	asm("bcs Des8PanicBadDesType__Fv ");    // if not, panic
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1=this.Ptr()
	asm("mov r3, r1 ");                     // r3=this.Ptr()
	asm("orr r1, r2, #0x20000000 ");        // r1=aLength + EPtr
	asm("stmia r0, {r1-r3} ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtr8 TDes8::RightTPtr(TInt /*aLength*/) const
//
// Extract the right portion of the descriptor.
//
	{
	// On entry r0=return store ptr, r1=this, r2=aLength
	// Return TPtr8 ([r0]=length/type,[r0,#4]=maxLength,[r0,#8]=Ptr)
	asm("ldr r3, [r1], #4 ");               // r3=this.length/type
	asm("cmp r2, #0 ");                     // check aLength>=0
	asm("blt Des8PanicPosOutOfRange__Fv "); // if not, panic
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r2, r12 ");                    // limit aLength to Length()
	asm("movgt r2, r12 ");
	asm("cmp r3, #0x50000000 ");            // check type() <= 4
	asm("bcs Des8PanicBadDesType__Fv ");
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1=this.Ptr()
	asm("add r3, r1, r12 ");                // r3=this.Ptr()+Length()
	asm("orr r1, r2, #0x20000000 ");        // r1=aLength + EPtr
	asm("sub r3, r3, r2 ");                 // r3=Ptr()+Length()-aLength
	asm("stmia r0, {r1-r3} ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtr8 TDes8::MidTPtr(TInt /*aPos*/) const
//
// Extract the middle portion of the descriptor.
//
	{
	// On entry r0=return store ptr, r1=this, r2=aPos
	// Return TPtr8 ([r0]=length/type,[r0,#4]=maxLength,[r0,#8]=Ptr)
	asm("ldr r3, [r1], #4 ");               // r3=this.length/type
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r2, #0 ");                     // check aPos>=0
	asm("cmpge r12, r2 ");                  // if so check Length()>=aPos
	asm("blt Des8PanicPosOutOfRange__Fv ");
	asm("cmp r3, #0x50000000 ");            // check type() <= 4
	asm("bcs Des8PanicBadDesType__Fv ");
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1=this.Ptr()
	asm("add r3, r1, r2 ");                 // r3=this.Ptr()+aPos
	asm("sub r2, r12, r2 ");                // r2=Length()-aPos (=newMaxLen)
	asm("orr r1, r2, #0x20000000 ");        // r1=Length()-aPos + EPtr (=newLen/Type)
	asm("stmia r0, {r1-r3} ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtr8 TDes8::MidTPtr(TInt /*aPos*/,TInt /*aLength*/) const
//
// Extract the middle portion of the descriptor.
//
	{
	// On entry r0=return store ptr, r1=this, r2=aPos, r3=aLength
	// Return TPtr8 ([r0]=length/type,[r0,#4]=maxLength,[r0,#8]=Ptr)
	asm("str r4, [sp, #-4]! ");             // save r4 (needed as extra scratch)
	asm("ldr r12, [r1], #4 ");              // r12=this.length/type
	asm("mov r4, r1 ");
	asm("cmp r12, #0x50000000 ");           // check valid descriptor type
	asm("bcs Des8PanicBadDesType__Fv ");
	asm("eor r12, r12, r12, lsr #1 ");
	asm("msr cpsr_flg, r12 ");
	asm("ldr r12, [r1, #-4] ");             // reload raw length/type
	asm("addcs r4, r4, #4 ");
	asm("ldrle r4, [r4] ");
	asm("bic r12, r12, #0xF0000000 ");      // r12=Length()
	asm("addeq r4, r4, #4 ");               // r4=this.Ptr()
	asm("cmp r2, #0 ");                     // check aPos>=0
	asm("subge r12, r12, r2 ");             // if so, r12=Length()-aPos
	asm("cmpge r12, r3 ");                  // and check Length()-aPos>=aLength
	asm("orrge r1, r3, #0x20000000 ");      // if so, r1=aLength + EPtr
	asm("addge r3, r4, r2 ");               // and r3=this.Ptr()+aPos
	asm("bicge r2, r1, #0xF0000000 ");      // and r2=aLength
	asm("stmgeia r0, {r1-r3} ");
	asm("ldrge r4, [sp], #4 ");             // restore r4 on success path
	__JUMP(ge,lr);
	asm("b Des8PanicPosOutOfRange__Fv ");   // panic path: r4 left on stack (no return)
	}
#endif

__NAKED__ EXPORT_C const TUint8 &TDesC8::AtC(TInt /*anIndex*/) const
//
// Return a reference to the character in the buffer.
//
	{
	asm("ldr r2, [r0], #4 ");               // r2=length/type
	asm("bic r3, r2, #0xF0000000 ");        // r3=length
	asm("cmp r2, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r2, r2, r2, lsr #1 ");
	asm("msr cpsr_flg, r2 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=this.Ptr()
	asm("cmp r1, #0 ");                     // check index>=0
	asm("cmpge r3, r1 ");                   // if so, check Length()>index
	asm("addgt r0, r0, r1 ");               // return value = this.Ptr()+index
	__JUMP(gt,lr);
	asm("b " CSM_Z27Des8PanicDesIndexOutOfRangev);
	}

__NAKED__ EXPORT_C TInt TDesC8::Locate(TChar /*aChar*/) const
//
// Locate character aChar in the descriptor.
//
	{
	asm("ldr r2, [r0], #4 ");               // r2=length/type
	asm("cmp r2, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("bics r3, r2, #0xF0000000 ");       // r3=length
	asm("mvneq r0, #0 ");                   // if length=0, not found: return -1
	__JUMP(eq,lr);
	asm("eor r2, r2, r2, lsr #1 ");
	asm("msr cpsr_flg, r2 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=this.Ptr()
	asm("add r3, r3, r0 ");                 // r3=ptr+length
	asm("add r12, r0, #1 ");                // r12=ptr+1
	asm("1: ");
	asm("ldrb r2, [r0], #1 ");              // r2=*r0++
	asm("cmp r1, r2 ");                     // is r1=match char?
	asm("cmpne r0, r3 ");                   // if not, is r0=r3 (end pointer)
	asm("bne 1b ");
	asm("cmp r1, r2 ");                     // did we find char?
	asm("subeq r0, r0, r12 ");              // if we did, return value = r0-ptr-1
	asm("mvnne r0, #0 ");                   // else return value =-1
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TInt TDesC8::LocateReverse(TChar /*aChar*/) const
//
// Locate character aChar in the descriptor in reverse.
//
	{
	asm("ldr r2, [r0], #4 ");               // r2=length/type
	asm("cmp r2, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("bics r3, r2, #0xF0000000 ");       // r3=length
	asm("mvneq r0, #0 ");                   // if length=0, not found: return -1
	__JUMP(eq,lr);
	asm("eor r2, r2, r2, lsr #1 ");
	asm("msr cpsr_flg, r2 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=this.Ptr()
	asm("sub r0, r0, #1 ");                 // r0=Ptr-1, so [r0,r3] = Ptr[r3-1]
	asm("1: ");
	asm("ldrb r2, [r0, r3] ");              // r2=Ptr[r3-1]
	asm("cmp r1, r2 ");                     // is r1=match char?
	asm("subnes r3, r3, #1 ");              // if not, decrement char count
	asm("bne 1b ");
	asm("cmp r1, r2 ");                     // did we find match char?
	asm("subeq r0, r3, #1 ");               // if we did, return value = r3-1
	asm("mvnne r0, #0 ");                   // else return value =-1
	__JUMP(,lr);
	}

#ifndef __KERNEL_MODE__
__NAKED__ EXPORT_C TInt TDesC8::CompareF(const TDesC8& /*aDes*/) const
//
// Compare a descriptor to this descriptor folded.
//
	{
	asm("ldr r12, 1f ");                    // r12 = &Mem::CompareF, then share comparebody
	asm("b comparebody ");
	asm("1: ");
	asm(".word " CSM_ZN3Mem8CompareFEPKhiS0_i);
	}

__NAKED__ EXPORT_C TInt TDesC8::CompareC(const TDesC8& /*aDes*/) const
//
// Compare a descriptor to this descriptor collated.
//
	{
	asm("ldr r12, 1f ");                    // r12 = &Mem::CompareC, then share comparebody
	asm("b comparebody ");
	asm("1: ");
	asm(".word " CSM_ZN3Mem8CompareCEPKhiS0_i);
	}
#endif

__NAKED__ EXPORT_C TInt TDesC8::Compare(const TDesC8& /*aDes*/) const
//
// Compare a descriptor to this descriptor.
//
	{
	asm("ldr r12, 1f ");                    // r12 = &memcompare
	asm("comparebody: ");                   // shared tail: unpack both descriptors,
	asm("mov r2, r1 ");                     //   then jump to comparator in r12; r2=&aDes
	asm("ldr r3, [r0], #4 ");               // r3=this.length/type
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("bic r1, r3, #0xF0000000 ");        // r1=this.Length()
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=this.Ptr()
	asm("ldr r3, [r2], #4 ");               // r3=aDes.length/type
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("ldr r3, [r2, #-4] ");
	asm("bic r3, r3, #0xF0000000 ");        // r3=aDes.Length()
	asm("addcs r2, r2, #4 ");
	asm("ldrle r2, [r2] ");
	asm("addeq r2, r2, #4 ");               // r2=aDes.Ptr()
	__JUMP(,r12);

	asm("1: ");
	asm(".word memcompare ");
	}
#endif  // __DES8_MACHINE_CODED__

#if defined(__DES8_MACHINE_CODED__) && !defined(__EABI__)
__NAKED__ EXPORT_C TPtrC8 TDesC8::Left(TInt /*aLength*/) const
//
// Extract the left portion of the descriptor.
//
	{
	// On entry r0=this, r1=aLength
	// Return TPtrC8 in r0,r1
	asm("ldr r3, [r0], #4 ");               // r3=this.length/type
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r1, #0 ");                     // check aLength>=0
	asm("blt " CSM_Z22Des8PanicPosOutOfRangev);  // if not panic
	asm("cmp r1, r12 ");                    // else limit aLength to Length()
	asm("movgt r1, r12 ");
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=this.Ptr()
	asm("orr r2, r1, #0x10000000 ");        // r2=aLength + EPtrC
	asm("mov r1, r0 ");                     // r1=result ptr
	asm("mov r0, r2 ");                     // r0=result type/length
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8 TDesC8::Right(TInt /*aLength*/) const
//
// Extract the right portion of the descriptor.
//
	{
	// On entry r0=this, r1=aLength
	// Return TPtrC8 in r0,r1
	asm("ldr r3, [r0], #4 ");               // r3=this.length/type
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r1, #0 ");                     // check aLength>=0
	asm("blt " CSM_Z22Des8PanicPosOutOfRangev);  // if not, panic
	asm("cmp r1, r12 ");                    // else limit aLength to Length()
	asm("movgt r1, r12 ");
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=this.Ptr()
	asm("add r3, r0, r12 ");                // r3=this.Ptr()+len
	asm("orr r0, r1, #0x10000000 ");        // r0=aLength + EPtrC
	asm("sub r1, r3, r1 ");                 // r1=Ptr()+len-aLength
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8 TDesC8::Mid(TInt /*aPos*/) const
//
// Extract the middle portion of the descriptor.
//
	{
	// On entry r0=this, r1=aPos
	// Return TPtrC8 in r0,r1
	asm("ldr r3, [r0], #4 ");               // r3=this.length/type
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r1, #0 ");                     // check aPos>=0
	asm("cmpge r12, r1 ");                  // if so check Length()>=aPos
	asm("blt " CSM_Z22Des8PanicPosOutOfRangev);
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=this.Ptr()
	asm("sub r2, r12, r1 ");                // r2=len-aPos
	asm("add r1, r0, r1 ");                 // r1=this.Ptr()+aPos
	asm("orr r0, r2, #0x10000000 ");        // r0=aLength + EPtrC
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8 TDesC8::Mid(TInt /*aPos*/,TInt /*aLength*/) const
//
// Extract the middle portion of the descriptor.
//
	{
	// On entry r0=this, r1=aPos, r2=aLength
	// Return TPtrC8 in r0,r1
	asm("ldr r12, [r0], #4 ");              // r12=this.length/type
	asm("cmp r12, #0x50000000 ");           // check valid descriptor type
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("eor r12, r12, r12, lsr #1 ");
	asm("msr cpsr_flg, r12 ");
	asm("ldr r12, [r0, #-4] ");             // reload raw length/type
	asm("bic r12, r12, #0xF0000000 ");      // r12=Length()
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=this.Ptr()
	asm("cmp r1, #0 ");                     // check aPos>=0
	asm("subge r12, r12, r1 ");             // if so, r12=Length()-aPos
	asm("cmpge r12, r2 ");                  // and check Length()-aPos>=aLength
	asm("addge r1, r1, r0 ");               // if so, r1=Ptr()+aPos
	asm("orrge r0, r2, #0x10000000 ");      // and r0 = aLength + EPtrC
	__JUMP(ge,lr);
	asm("b " CSM_Z22Des8PanicPosOutOfRangev);
	}
#endif  // defined(__DES8_MACHINE_CODED__) && !defined(__EABI__)

#ifdef __DES8_MACHINE_CODED__

// Here are the __EABI__ compliant versions of the above
#ifdef __EABI__

__NAKED__ EXPORT_C TPtrC8 TDesC8::Left(TInt /*aLength*/) const
//
// Extract the left portion of the descriptor.
//
	{
	// On entry r0=return store ptr, r1=this, r2=aLength
	// Return TPtrC8 ([r0]=length/type,[r0,#4]=Ptr)
	asm("ldr r3, [r1], #4 ");               // r3=this.length/type
	asm("cmp r2, #0 ");                     // check aLength>=0
	asm("blt Des8PanicPosOutOfRange__Fv "); // if not panic
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r2, r12 ");                    // limit aLength to Length()
	asm("movgt r2, r12 ");
	asm("cmp r3, #0x50000000 ");            // check type() <= 4
	asm("bcs Des8PanicBadDesType__Fv ");    // if not, panic
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1=this.Ptr()
	asm("mov r3, r1");                      // r3=this.Ptr()
	asm("orr r1, r2, #0x10000000 ");        // r1=aLength + EPtrC
	asm("stmia r0, {r1,r3} ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8 TDesC8::Right(TInt /*aLength*/) const
//
// Extract the right portion of the descriptor.
//
	{
	// On entry r0=return store ptr, r1=this, r2=aLength
	// Return TPtrC8 ([r0]=length/type,[r0,#4]=Ptr)
	asm("ldr r3, [r1], #4 ");               // r3=this.length/type
	asm("cmp r2, #0 ");                     // check aLength>=0
	asm("blt Des8PanicPosOutOfRange__Fv "); // if not, panic
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r2, r12 ");                    // limit aLength to Length()
	asm("movgt r2, r12 ");
	asm("cmp r3, #0x50000000 ");            // check type() <= 4
	asm("bcs Des8PanicBadDesType__Fv ");
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1=this.Ptr()
	asm("add r3, r1, r12 ");                // r3=this.Ptr()+Length()
	asm("orr r1, r2, #0x10000000 ");        // r1=aLength + EPtrC
	asm("sub r3, r3, r2 ");                 // r3=Ptr()+Length()-aLength
	asm("stmia r0, {r1,r3} ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8 TDesC8::Mid(TInt /*aPos*/) const
//
// Extract the middle portion of the descriptor.
//
	{
	// On entry r0=return store ptr, r1=this, r2=aPos
	// Return TPtrC8 ([r0]=length/type,[r0,#4]=Ptr)
	asm("ldr r3, [r1], #4 ");               // r3=this.length/type
	asm("bic r12, r3, #0xF0000000 ");       // r12=this.Length()
	asm("cmp r2, #0 ");                     // check aPos>=0
	asm("cmpge r12, r2 ");                  // if so check Length()>=aPos
	asm("blt Des8PanicPosOutOfRange__Fv ");
	asm("cmp r3, #0x50000000 ");            // check type() <= 4
	asm("bcs Des8PanicBadDesType__Fv ");
	asm("eor r3, r3, r3, lsr #1 ");
	asm("msr cpsr_flg, r3 ");
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("addeq r1, r1, #4 ");               // r1=this.Ptr()
	asm("add r3, r1, r2 ");                 // r3=this.Ptr()+aPos
	asm("sub r2, r12, r2 ");                // r2=Length()-aPos (=newMaxLen)
	asm("orr r1, r2, #0x10000000 ");        // r1=Length()-aPos + EPtrC (=newLen/Type)
	asm("stmia r0, {r1,r3} ");
	__JUMP(,lr);
	}

__NAKED__ EXPORT_C TPtrC8 TDesC8::Mid(TInt /*aPos*/,TInt /*aLength*/) const
//
// Extract the middle portion of the descriptor.
//
	{
	// On entry r0=return store ptr, r1=this, r2=aPos, r3=aLength
	// Return TPtrC8 ([r0]=length/type,[r0,#4]=Ptr)
	asm("ldr r12, [r1], #4 ");              // r12=this.length/type
	asm("cmp r12, #0x50000000 ");           // check valid descriptor type
	asm("bcs Des8PanicBadDesType__Fv ");
	asm("eor r12, r12, r12, lsr #1 ");
	asm("msr cpsr_flg, r12 ");
	asm("ldr r12, [r1, #-4] ");             // reload raw length/type
	asm("addcs r1, r1, #4 ");
	asm("ldrle r1, [r1] ");
	asm("bic r12, r12, #0xF0000000 ");      // r12=Length()
	asm("addeq r1, r1, #4 ");               // r1=this.Ptr()
	asm("cmp r2, #0 ");                     // check aPos>=0
	asm("subge r12, r12, r2 ");             // if so, r12=Length()-aPos
	asm("cmpge r12, r3 ");                  // and check Length()-aPos>=aLength
	asm("addge r2, r1, r2 ");               // if so r2=this.Ptr()+aPos
	asm("orrge r1, r3, #0x10000000 ");      // and r1=aLength + EPtrC
	asm("stmgeia r0, {r1,r2} ");
	__JUMP(ge,lr);
	asm("b Des8PanicPosOutOfRange__Fv ");
	}
#endif

__NAKED__ EXPORT_C void TDes8::Zero()
//
// Zero the buffer.
//
	{
	asm("ldr r1, [r0] ");                   // r1=length/type
	asm("and r1, r1, #0xF0000000 ");        // r1=type field, zero length
	asm("cmp r1, #0x40000000 ");            // check for EBufCPtr
	asm("str r1, [r0] ");                   // store zero length
	__JUMP(ne,lr);
	asm("ldr r2, [r0, #8] ");               // r2 = pointer to TBufCBase
	asm("mov r1, #0 ");
	asm("str r1, [r2] ");                   // update length of TBufCBase
	__JUMP(,lr);
	}

#ifndef __KERNEL_MODE__
__NAKED__ EXPORT_C void TDes8::ZeroTerminate()
//
// Zero terminate at Length().
//
	{
	// Fall through to PtrZ below...
	}

__NAKED__ EXPORT_C const TUint8 *TDes8::PtrZ()
//
// Return a pointer to a 0 terminated string.
//
	{
	// NOTE(review): this span is visibly damaged by the text extraction --
	// the tail of PtrZ and the signature/entry checks of the following
	// Append routine appear to have been dropped, leaving PtrZ's opening
	// fused onto the shared "appendbody" tail.  The lines below are
	// reproduced exactly as recovered; reconcile against the original
	// source tree before relying on them.
	asm("ldmia r0, {r1,r2} ");              // r1=length/type, r2=maxlength
	asm("bic r3, r1, #0xF0000000 ");        // r3=Length();
	asm("cmp r3, r2 ");                     // check Length()=0 (comment as recovered; likely garbled)
	__JUMP(eq,lr);
	asm("blt " CSM_Z23Des8PanicLengthNegativev);
	asm("appendbody: ");                    // shared append tail: r1=src ptr, r2=append length
	asm("ldmia r0, {r3,r12} ");             // r3=type/length, r12=maxlength
	asm("cmp r3, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("bic r3, r3, #0xF0000000 ");        // r3=Length()
	asm("sub r12, r12, r3 ");               // r12=MaxLength-Length
	asm("cmp r2, r12 ");                    // check aLength<=(MaxLength-Length)
	asm("bgt " CSM_Z20Des8PanicDesOverflowv);
	asm("ldr r12, [r0] ");
	asm("add r12, r12, r2 ");               // new length/type field
	asm("str r12, [r0], #4 ");              // store it
	asm("eor r12, r12, r12, lsr #1 ");
	asm("msr cpsr_flg, r12 ");
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("ldreq r12, [r0] ");                // fetch length from TBufCBase
	asm("addeq r12, r12, r2 ");             // add aLength
	asm("streq r12, [r0], #4 ");            // update length of TBufCBase, r0=Ptr()
	asm("add r0, r0, r3 ");                 // r0=Ptr()+Length()
	asm("b memmove ");                      // tail-call: copy appended data
	}

__NAKED__ EXPORT_C void TDes8::FillZ()
//
// Fill the descriptor with 0.
//
	{
	asm("ldr r2, [r0] ");                   // r2=length/type
	asm("cmp r2, #0x50000000 ");
	asm("bcs " CSM_Z19Des8PanicBadDesTypev);
	asm("bic r1, r2, #0xF0000000 ");        // r1=Length()
	asm("eor r2, r2, r2, lsr #1 ");
	asm("msr cpsr_flg, r2 ");
	asm("add r0, r0, #4 ");                 // step past length/type word
	asm("addcs r0, r0, #4 ");
	asm("ldrle r0, [r0] ");
	asm("addeq r0, r0, #4 ");               // r0=Ptr()
	asm("b memclr ");                       // tail-call: memclr(Ptr(), Length())
	}

__NAKED__ EXPORT_C void TDes8::Fill(TChar /*aChar*/)
//
// Fill the descriptor with aChar.
sl@0: // sl@0: { sl@0: asm("ldr r3, [r0] "); // r3=length/type sl@0: asm("cmp r3, #0x50000000 "); sl@0: asm("bcs " CSM_Z19Des8PanicBadDesTypev); sl@0: asm("bic r2, r3, #0xF0000000 "); // r2=Length() sl@0: asm("eor r3, r3, r3, lsr #1 "); sl@0: asm("msr cpsr_flg, r3 "); sl@0: asm("add r0, r0, #4 "); sl@0: asm("addcs r0, r0, #4 "); sl@0: asm("ldrle r0, [r0] "); sl@0: asm("addeq r0, r0, #4 "); // r0=Ptr() sl@0: asm("b memset "); // memset(Ptr(),aChar,Length()) sl@0: } sl@0: #endif //__DES8_MACHINE_CODED__ sl@0: sl@0: #ifdef __DES_MACHINE_CODED__ sl@0: __NAKED__ GLDEF_C TInt __DoConvertNum(TUint /*aVal*/, TRadix /*aRadix*/, TUint /*aA*/, TUint8*& /*aDest*/) sl@0: { sl@0: asm("ldr ip, [r3] "); // ip = aDest sl@0: asm("cmp r1, #16 "); sl@0: asm("beq do_convert_hex "); sl@0: asm("cmp r1, #10 "); sl@0: asm("beq do_convert_dec "); sl@0: #ifdef __KERNEL_MODE__ sl@0: asm("mov r0, #%a0" : : "i" ((TInt)EInvalidRadix)); sl@0: asm("b " CSM_Z5Panic9TCdtPanic); sl@0: #else // !__KERNEL_MODE__ sl@0: asm("cmp r1, #2 "); sl@0: asm("beq do_convert_bin "); sl@0: asm("cmp r1, #8 "); sl@0: asm("beq do_convert_oct "); sl@0: asm("do_convert_any: "); sl@0: asm("stmfd sp!, {r4-r8,lr} "); sl@0: asm("mov r5, r1 "); // save radix sl@0: asm("and r6, r2, #0xff "); sl@0: asm("sub r6, r6, #10 "); // save aA - 10 sl@0: asm("mov r4, ip "); // r4 = aDest sl@0: asm("mov r7, r3 "); // save &aDest sl@0: asm("mov r8, r2"); // save aA sl@0: asm("1: "); sl@0: asm("mov r1, r5 "); // r1 = radix sl@0: #ifndef __EABI__ sl@0: asm(".extern __umodsi3 "); sl@0: asm("bl __umodsi3 "); // do division, quotient->r3, rem->r0 sl@0: asm("mov r1, r0"); // move to make regs same as EABI function sl@0: asm("mov r0, r3"); sl@0: #else //__EABI__ sl@0: asm(".extern __aeabi_uidivmod "); sl@0: asm("bl __aeabi_uidivmod "); // do division, quotient->r0, rem->r1 sl@0: #endif //__EABI__ sl@0: asm("cmp r1, #9 "); sl@0: asm("addhi r1, r1, r6 "); // r1 = (r1 > 9) ? 
(r1 + (aA - 10)) : r1 + '0' sl@0: asm("addls r1, r1, #0x30 "); sl@0: asm("tst r8, #0x100 "); sl@0: asm("strneh r1, [r4, #-2]! "); // *--r4 = (TUint16)r1 sl@0: asm("streqb r1, [r4, #-1]! "); // *--r4 = (TUint8)r1 sl@0: asm("movs r1, r0 "); // new aVal into r1 sl@0: asm("bne 1b "); sl@0: asm("ldr ip, [r7] "); // ip can be scratched by __aeabi_uidivmod sl@0: asm("str r4, [r7] "); // aDest = r4 sl@0: asm("sub r0, ip, r4 "); // return aDest - r4 sl@0: __POPRET("r4-r8,"); sl@0: sl@0: asm("do_convert_bin: "); sl@0: asm("mov r2, r2, lsl #22 "); // aA bit 8 into bit 30 sl@0: asm("adds r2, r2, r2 "); // set V flag = aA bit 8 sl@0: asm("1: "); sl@0: asm("movs r0, r0, lsr #1 "); // aVal>>=1, bit 0 into carry, V unaffected sl@0: asm("mov r1, #0x30 "); // r1 = '0' sl@0: asm("adc r1, r1, #0 "); // add in carry sl@0: asm("strvsh r1, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r1, [ip, #-1]! "); // else store 8 bits sl@0: asm("bne 1b "); // loop back if aVal still nonzero sl@0: asm("ldr r1, [r3] "); // r1 = original aDest sl@0: asm("str ip, [r3] "); // aDest = ip sl@0: asm("sub r0, r1, ip "); // return aDest - ip sl@0: __JUMP(,lr); sl@0: sl@0: asm("do_convert_oct: "); sl@0: asm("mov r2, r2, lsl #22 "); // aA bit 8 into bit 30 sl@0: asm("adds r2, r2, r2 "); // set V flag = aA bit 8 sl@0: asm("1: "); sl@0: asm("and r1, r0, #7 "); // r1 = aVal & 7 sl@0: asm("movs r0, r0, lsr #3 "); // aVal>>=3, set Z accordingly, V unaffected sl@0: asm("add r1, r1, #0x30 "); // r1 += '0' sl@0: asm("strvsh r1, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r1, [ip, #-1]! 
"); // else store 8 bits sl@0: asm("bne 1b "); // loop back if aVal still nonzero sl@0: asm("ldr r1, [r3] "); // r1 = original aDest sl@0: asm("str ip, [r3] "); // aDest = ip sl@0: asm("sub r0, r1, ip "); // return aDest - ip sl@0: __JUMP(,lr); sl@0: #endif //__KERNEL_MODE__ sl@0: sl@0: asm("do_convert_hex: "); sl@0: asm("mov r1, r2, lsl #22 "); // aA bit 8 into r1 bit 30 sl@0: asm("adds r1, r1, r1 "); // set V flag = aA bit 8 sl@0: asm("and r2, r2, #0xff "); sl@0: asm("sub r2, r2, #0x40 "); sl@0: asm("1: "); sl@0: asm("and r1, r0, #15 "); // r1 = aVal & 15 sl@0: asm("add r1, r1, #0x36 "); // r1 += '0'+6 sl@0: asm("tst r1, #0x40 "); sl@0: asm("subeq r1, r1, #6 "); sl@0: asm("addne r1, r1, r2 "); sl@0: asm("movs r0, r0, lsr #4 "); // aVal>>=4, V unaffected sl@0: asm("strvsh r1, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r1, [ip, #-1]! "); // else store 8 bits sl@0: asm("bne 1b "); // loop back if aVal still nonzero sl@0: asm("ldr r1, [r3] "); // r1 = original aDest sl@0: asm("str ip, [r3] "); // aDest = ip sl@0: asm("sub r0, r1, ip "); // return aDest - ip sl@0: __JUMP(,lr); sl@0: sl@0: asm("do_convert_dec: "); sl@0: asm("and r2, r2, #0x100 "); // mask all bits of aA except bit 8 sl@0: asm("orr r3, r3, r2, lsr #8 "); // r3 bit 0 = aA bit 8 sl@0: asm("1: "); sl@0: asm("sub r1, r0, #10 "); // divide aVal by 10, r0=quotient, r1=remainder sl@0: asm("sub r0, r0, r0, lsr #2 "); sl@0: asm("add r0, r0, r0, lsr #4 "); sl@0: asm("add r0, r0, r0, lsr #8 "); sl@0: asm("add r0, r0, r0, lsr #16 "); sl@0: asm("mov r0, r0, lsr #3 "); sl@0: asm("add r2, r0, r0, lsl #2 "); sl@0: asm("subs r1, r1, r2, lsl #1 "); sl@0: asm("addpl r0, r0, #1 "); sl@0: asm("addmi r1, r1, #10 "); sl@0: asm("add r1, r1, #0x30 "); // add '0' to remainder sl@0: asm("tst r3, #1 "); // test aA bit 8 sl@0: asm("strneh r1, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("streqb r1, [ip, #-1]! 
"); // else store 8 bits sl@0: asm("teq r0, #0 "); sl@0: asm("bne 1b "); // loop back if aVal still nonzero sl@0: asm("bic r3, r3, #1 "); sl@0: asm("ldr r1, [r3] "); // r1 = original aDest sl@0: asm("str ip, [r3] "); // aDest = ip sl@0: asm("sub r0, r1, ip "); // return aDest - ip sl@0: __JUMP(,lr); sl@0: } sl@0: sl@0: __NAKED__ GLDEF_C TInt __DoConvertNum(TUint64 /*aVal*/, TRadix /*aRadix*/, TUint /*aA*/, TUint8*& /*aDest*/) sl@0: { sl@0: asm("ldr ip, [sp] "); // ip = &aDest sl@0: asm("cmp r2, #16 "); sl@0: asm("beq do_convert_hex64 "); sl@0: asm("cmp r2, #10 "); sl@0: asm("beq do_convert_dec64 "); sl@0: #ifdef __KERNEL_MODE__ sl@0: asm("mov r0, #%a0" : : "i" ((TInt)EInvalidRadix)); sl@0: asm("b " CSM_Z5Panic9TCdtPanic); sl@0: #else // !__KERNEL_MODE__ sl@0: asm("cmp r2, #2 "); sl@0: asm("beq do_convert_bin64 "); sl@0: asm("cmp r2, #8 "); sl@0: asm("beq do_convert_oct64 "); sl@0: asm("stmfd sp!, {r4-r8,lr} "); sl@0: asm("ldr r4, [ip] "); // r4 = aDest sl@0: asm("mov r5, r2 "); // save radix sl@0: asm("and r6, r3, #0xff "); // r6 = aA & 0xff sl@0: asm("sub r6, r6, #10 "); // save a - 10 sl@0: asm("mov r7, ip "); // save &aDest sl@0: asm("mov r8, r3"); // save aA sl@0: asm("teq r1, #0 "); // aVal > 0xffffffffu sl@0: asm("beq 2f "); sl@0: asm("1: "); sl@0: asm("mov r3, #0 "); sl@0: asm("mov r2, r5 "); // r3:r2 = radix sl@0: #ifndef __EABI__ sl@0: asm("stmfd sp!, {r4-r6} "); // push the registers that are scratched by UDiv01 sl@0: asm(".extern UDiv01 "); sl@0: asm("bl UDiv01 "); // do division, quotient->r5:r4, rem->r6:r3 sl@0: asm("mov r0, r4"); // move to make regs same as EABI function sl@0: asm("mov r1, r5"); sl@0: asm("mov r2, r3"); sl@0: asm("mov r3, r6"); sl@0: asm("ldmfd sp!, {r4-r6} "); // pop the registers that are scratched by UDiv01 sl@0: #else //__EABI__ sl@0: asm(".extern __aeabi_uldivmod "); sl@0: asm("bl __aeabi_uldivmod "); // do division, quotient->r1:r0, rem->r3:r2 sl@0: #endif //__EABI__ sl@0: asm("cmp r2, #9 "); sl@0: asm("addhi r2, r2, r6 "); // 
r2 = (r2>9) ? r2+aA-10 : r2+'0' sl@0: asm("addls r2, r2, #0x30 "); sl@0: asm("tst r8, #0x100 "); sl@0: asm("strneh r2, [r4, #-2]! "); // *--r4 = (TUint16)r2 sl@0: asm("streqb r2, [r4, #-1]! "); // *--r4 = (TUint8)r2 sl@0: asm("teq r1, #0 "); // new aVal > 0xffffffffu sl@0: asm("bne 1b "); sl@0: asm("2: "); sl@0: asm("mov r2, r8 "); // r2 = aA sl@0: asm("ldr r1, [r7] "); // r1 = aDest sl@0: asm("mov r3, r7 "); // r3 = &aDest sl@0: asm("sub r6, r1, r4 "); // r6 = aDest - r4 sl@0: asm("mov r1, r5 "); // r1 = radix sl@0: asm("str r4, [r3] "); // aDest = r4 sl@0: asm("mov ip, r4 "); // ip = aDest sl@0: asm("bl do_convert_any "); sl@0: asm("add r0, r0, r6 "); // r0 += r6 sl@0: __POPRET("r4-r8,"); sl@0: sl@0: asm("do_convert_bin64: "); sl@0: asm("mov r2, ip "); // r2 = &aDest sl@0: asm("ldr ip, [ip] "); // ip = aDest sl@0: asm("mov r3, r3, lsl #22 "); // aA bit 8 into bit 30 sl@0: asm("adds r3, r3, r3 "); // set V flag = aA bit 8 sl@0: asm("teq r1, #0 "); // if (aVal <= 0xffffffffu) sl@0: asm("moveq r1, ip "); // r1 = orig. aDest in second half sl@0: asm("beq 2f "); // branch to second half of the loop sl@0: asm("1: "); sl@0: asm("movs r0, r0, lsr #1 "); // aVal >>= 1, bit 0 into carry, V unaffected sl@0: asm("mov r3, #0x30 "); // r3 = '0' sl@0: asm("adc r3, r3, #0 "); // add in carry sl@0: asm("strvsh r3, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r3, [ip, #-1]! "); // else store 8 bits sl@0: asm("bne 1b "); // loop back if (aVal & 0xffffffffu) still nonzero sl@0: asm("ldr r0, [r2] "); // r0 = original aDest sl@0: asm("subvc r0, r0, #32 "); // r0 = end of buffer for first word (8-bit descriptor) sl@0: asm("subvs r0, r0, #64 "); // r0 = end of buffer for first word (16-bit descriptor) sl@0: asm("teq ip, r0 "); // don't pad if we have already emitted 32 bits sl@0: asm("beq finish_bin_zero_pad "); sl@0: asm("bin_zero_pad: "); sl@0: asm("mov r3, #0x30 "); // r3 = '0' sl@0: asm("bin_zero_pad_loop: "); sl@0: asm("strvsh r3, [ip, #-2]! 
"); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r3, [ip, #-1]! "); // else store 8 bits sl@0: asm("teq ip, r0 "); sl@0: asm("bne bin_zero_pad_loop "); // loop back if padding not complete sl@0: asm("finish_bin_zero_pad: "); sl@0: asm("addvc r0, r0, #32 "); // r0 = original aDest sl@0: asm("addvs r0, r0, #64 "); // r0 = original aDest sl@0: asm("mov r3, r1 "); sl@0: asm("mov r1, r0 "); sl@0: asm("mov r0, r3 "); // r1 = orig aDest, r0 = aVal >> 32 sl@0: asm("2: "); sl@0: asm("movs r0, r0, lsr #1 "); // aVal >>= 1, bit 0 into carry, V unaffected sl@0: asm("mov r3, #0x30 "); // r3 = '0' sl@0: asm("adc r3, r3, #0 "); // add in carry sl@0: asm("strvsh r3, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r3, [ip, #-1]! "); // else store 8 bits sl@0: asm("bne 2b "); // loop back if aVal still nonzero sl@0: asm("str ip, [r2] "); // aDest = ip sl@0: asm("sub r0, r1, ip "); // return aDest - ip sl@0: __JUMP(,lr); sl@0: sl@0: asm("do_convert_oct64: "); // Convert as 31 + 33 bits when > 32 bits sl@0: asm("mov r2, ip "); // r2 = &aDest sl@0: asm("ldr ip, [ip] "); // ip = aDest sl@0: asm("mov r3, r3, lsl #22 "); // aA bit 8 into bit 30 sl@0: asm("adds r3, r3, r3 "); // set V flag = aA bit 8 sl@0: asm("teq r1, #0 "); // if (aVal <= 0xffffffffu) sl@0: asm("moveq r1, ip "); // r1 = orig. aDest in second half sl@0: asm("beq 3f "); // branch to second half of the loop sl@0: asm("and r3, r0, #7 "); // r3 = aVal & 7 sl@0: asm("mov r0, r0, lsr #3 "); // aVal>>=3, set Z accordingly, V unaffected sl@0: asm("orr r0, r0, r1, lsl #29 "); // note we now have the bottom 33 bits in r0:r3[2:0] sl@0: asm("ands r0, r0, #0x3fffffff "); // and the top 31 bits in r1 sl@0: asm("mov r1, r1, lsr #1 "); sl@0: asm("add r3, r3, #0x30 "); // r3 += '0' sl@0: asm("strvsh r3, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r3, [ip, #-1]! 
"); // else store 8 bits sl@0: asm("beq 2f "); sl@0: asm("1: "); sl@0: asm("and r3, r0, #7 "); // r3 = aVal & 7 sl@0: asm("movs r0, r0, lsr #3 "); // aVal>>=3, set Z accordingly, V unaffected sl@0: asm("add r3, r3, #0x30 "); // r3 += '0' sl@0: asm("strvsh r3, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r3, [ip, #-1]! "); // else store 8 bits sl@0: asm("bne 1b "); // loop back if (aVal & 0xffffffff) still nonzero sl@0: asm("2: "); sl@0: asm("ldr r0, [r2] "); // r0 = original aDest sl@0: asm("subvc r0, r0, #11 "); // r0 = end of buffer for first word (8-bit descriptor) sl@0: asm("subvs r0, r0, #22 "); // r0 = end of buffer for first word (16-bit descriptor) sl@0: asm("teq ip, r0 "); // don't pad if we have already emitted 32 bits sl@0: asm("beq finish_oct_zero_pad "); sl@0: asm("oct_zero_pad: "); sl@0: asm("mov r3, #0x30 "); // r3 = '0' sl@0: asm("oct_zero_pad_loop: "); sl@0: asm("strvsh r3, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r3, [ip, #-1]! "); // else store 8 bits sl@0: asm("teq ip, r0 "); sl@0: asm("bne oct_zero_pad_loop "); // loop back if padding not complete sl@0: asm("finish_oct_zero_pad: "); sl@0: asm("addvc r0, r0, #11 "); // r0 = original aDest sl@0: asm("addvs r0, r0, #22 "); // r0 = original aDest sl@0: asm("mov r3, r0 "); sl@0: asm("mov r0, r1 "); sl@0: asm("mov r1, r3 "); // r1 = orig aDest, r0 = aVal >> 32 sl@0: asm("3: "); sl@0: asm("and r3, r0, #7 "); // r3 = aVal & 7 sl@0: asm("movs r0, r0, lsr #3 "); // aVal>>=3, set Z accordingly, V unaffected sl@0: asm("add r3, r3, #0x30 "); // r3 += '0' sl@0: asm("strvsh r3, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r3, [ip, #-1]! 
"); // else store 8 bits sl@0: asm("bne 3b "); // loop back if aVal still nonzero sl@0: asm("str ip, [r2] "); // aDest = ip sl@0: asm("sub r0, r1, ip "); // return aDest - ip sl@0: __JUMP(,lr); sl@0: sl@0: #endif //__KERNEL_MODE__ sl@0: sl@0: asm("do_convert_hex64: "); sl@0: asm("ldr ip, [ip] "); // ip = aDest sl@0: asm("mov r2, r3, lsl #22 "); // aA bit 8 into r2 bit 30 sl@0: asm("adds r2, r2, r2 "); // set V flag = aA bit 8 sl@0: asm("and r3, r3, #0xff "); // aA = aA & 0xff sl@0: asm("teq r1, #0 "); // if (aVal <= 0xffffffffu) sl@0: asm("sub r3, r3, #0x40 "); // bias by ('0' + 16) either way => r3 = aA - ('0' + 16) sl@0: asm("moveq r1, ip "); // r1 = orig. aDest in second half sl@0: asm("beq 2f "); // branch to second half of the loop sl@0: asm("1: "); sl@0: asm("and r2, r0, #0xf "); // r2 = aVal & 0xf sl@0: asm("add r2, r2, #0x36 "); // r2 += ('0' + 6) sl@0: asm("tst r2, #0x40 "); // if ((aVal & 0xf) > 9) sl@0: asm("addne r2, r2, r3 "); // r2 = (aVal & 0xf) + (aA - 10) sl@0: asm("subeq r2, r2, #6 "); // r2 = (aVal & 0xf) + '0' sl@0: asm("movs r0, r0, lsr #4 "); // aVal>>=4, V unaffected sl@0: asm("strvsh r2, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r2, [ip, #-1]! "); // else store 8 bits sl@0: asm("bne 1b "); // loop back if (aVal & 0xffffffffu) still nonzero sl@0: asm("ldr r0, [sp] "); // r0 = &aDest sl@0: asm("ldr r2, [r0] "); // r2 = original aDest sl@0: asm("subvc r0, r2, #8 "); // r0 = end of buffer for first word (8-bit descriptor) sl@0: asm("subvs r0, r2, #16 "); // r0 = end of buffer for first word (16-bit descriptor) sl@0: asm("teq ip, r0 "); // don't pad if we have already emitted 32 bits sl@0: asm("beq finish_hex_zero_pad "); sl@0: asm("hex_zero_pad: "); sl@0: asm("mov r2, #0x30 "); // r3 = '0' sl@0: asm("hex_zero_pad_loop: "); sl@0: asm("strvsh r2, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r2, [ip, #-1]! 
"); // else store 8 bits sl@0: asm("teq ip, r0 "); sl@0: asm("bne hex_zero_pad_loop "); // loop back if padding not complete sl@0: asm("finish_hex_zero_pad: "); sl@0: asm("addvc r0, r0, #8 "); // r0 = original aDest sl@0: asm("addvs r0, r0, #16 "); // r0 = original aDest sl@0: asm("mov r2, r0 "); sl@0: asm("mov r0, r1 "); sl@0: asm("mov r1, r2 "); // r1 = orig aDest, r0 = aVal >> 32 sl@0: asm("2: "); sl@0: asm("and r2, r0, #0xf "); // r2 = aVal & 0xf sl@0: asm("add r2, r2, #0x36 "); // r2 += '0'+6 sl@0: asm("tst r2, #0x40 "); sl@0: asm("subeq r2, r2, #6 "); // r2 = (aVal & 0xf) + '0' sl@0: asm("addne r2, r2, r3 "); // r2 = (aVal & 0xf) + (aA - 10) sl@0: asm("movs r0, r0, lsr #4 "); // aVal>>=4, V unaffected sl@0: asm("strvsh r2, [ip, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("strvcb r2, [ip, #-1]! "); // else store 8 bits sl@0: asm("bne 2b "); // loop back if aVal still nonzero sl@0: asm("ldr r2, [sp] "); // r2 = &aDest sl@0: asm("sub r0, r1, ip "); // return aDest - ip sl@0: asm("str ip, [r2] "); // aDest = ip sl@0: __JUMP(,lr); sl@0: sl@0: #ifndef __FIXED_POINT_64BIT_DIV10__ sl@0: sl@0: asm("do_convert_dec64: "); // As generic case, except can save one reg as radix known sl@0: asm("stmfd sp!, {r4-r6,lr} "); sl@0: asm("ldr r4, [ip] "); // r4 = aDest sl@0: asm("mov r5, ip "); // save &aDest sl@0: asm("mov r6, r3"); // save aA sl@0: asm("teq r1, #0 "); // aVal > 0xffffffffu sl@0: asm("beq 2f "); sl@0: asm("1: "); sl@0: asm("mov r3, #0 "); sl@0: asm("mov r2, #10 "); // r3:r2 = radix sl@0: #ifndef __EABI__ sl@0: asm("stmfd sp!, {r4-r6} "); // push the registers that are scratched by UDiv01 sl@0: asm(".extern UDiv01 "); sl@0: asm("bl UDiv01 "); // do division, quotient->r5:r4, rem->r6:r3 sl@0: asm("mov r0, r4"); // move to make regs same as EABI function sl@0: asm("mov r1, r5"); sl@0: asm("mov r2, r3"); sl@0: asm("mov r3, r6"); sl@0: asm("ldmfd sp!, {r4-r6} "); // pop the registers that are scratched by UDiv01 sl@0: #else //__EABI__ sl@0: asm(".extern 
__aeabi_uldivmod "); sl@0: asm("bl __aeabi_uldivmod "); // do division, quotient->r1:r0, rem->r3:r2 sl@0: #endif //__EABI__ sl@0: asm("add r2, r2, #0x30 "); // add '0' to remainder sl@0: asm("tst r6, #0x100 "); sl@0: asm("strneh r2, [r4, #-2]! "); // *--r4 = (TUint16)r2 sl@0: asm("streqb r2, [r4, #-1]! "); // *--r4 = (TUint8)r2 sl@0: asm("teq r1, #0 "); // new aVal > 0xffffffffu sl@0: asm("bne 1b "); sl@0: asm("2: "); sl@0: asm("mov r2, r6 "); // r2 = aA sl@0: asm("ldr r1, [r5] "); // r1 = original aDest sl@0: asm("mov r3, r5 "); // r3 = &aDest sl@0: asm("sub r6, r1, r4 "); // r6 = original aDest - r4 sl@0: asm("mov r1, #10 "); // r1 = radix sl@0: asm("str r4, [r3] "); // aDest = r4 sl@0: asm("mov ip, r4 "); // ip = aDest sl@0: asm("bl do_convert_dec "); sl@0: asm("add r0, r0, r6 "); // r0 += r6 sl@0: __POPRET("r4-r6,"); sl@0: sl@0: #else //__FIXED_POINT_64BIT_DIV10__ sl@0: sl@0: asm("do_convert_dec64: "); // Be extra cunning by doing fixed-point arithmetic using only shifts, adds and subtracts sl@0: asm("stmfd sp!, {r4-r7,lr} "); // - avoids costs of potentially inefficient calls to __aeabi_uldivmod sl@0: asm("mov r4, r3 "); // r4 = aA sl@0: asm("ldr r5, [ip] "); // r5 = aDest sl@0: asm("teq r1, #0 "); // aVal > 0xffffffffu sl@0: asm("beq 2f "); sl@0: asm("1: "); sl@0: asm("subs r2, r0, #10 "); // divide aVal by 10, r1:r0=quotient, r3:r2=remainder sl@0: asm("sbc r3, r1, #0 "); sl@0: asm("mov r6, r0, lsr #2 "); // NB: 0.8 rounded to 64 bits is: sl@0: asm("orr r6, r6, r1, lsl #30 "); // 0.11001100110011001100110011001100110011001100110011001100110011 sl@0: asm("subs r0, r0, r6 "); sl@0: asm("sbc r1, r1, r1, lsr #2 "); // quotient -= quotient >> 2 (0.11) sl@0: asm("mov r6, r0, lsr #4 "); sl@0: asm("orr r6, r6, r1, lsl #28 "); sl@0: asm("adds r0, r0, r6 "); sl@0: asm("adc r1, r1, r1, lsr #4 "); // quotient += quotient >> 4 (0.110011) sl@0: asm("mov r6, r0, lsr #8 "); sl@0: asm("orr r6, r6, r1, lsl #24 "); sl@0: asm("adds r0, r0, r6 "); sl@0: asm("adc r1, r1, r1, lsr 
#8 "); // quotient += quotient >> 8 (0.11001100110011) sl@0: asm("mov r6, r0, lsr #16 "); sl@0: asm("orr r6, r6, r1, lsl #16 "); sl@0: asm("adds r0, r0, r6 "); sl@0: asm("adc r1, r1, r1, lsr #16 "); // quotient += quotient >> 16 (0.110011001100110011001100110011) sl@0: asm("adds r0, r0, r1 "); sl@0: asm("adc r1, r1, #0 "); // quotient += quotient >> 32 (0.11001100110011001100110011001100110011001100110011001100110011) sl@0: asm("mov r0, r0, lsr #3 "); sl@0: asm("orr r0, r0, r1, lsl #29 "); sl@0: asm("mov r1, r1, lsr #3 "); // quotient >>= 3 0.1 == (0.00011001100110011001100110011001100110011001100110011001100110011) sl@0: asm("mov r7, r1, lsl #2 "); // use r7:r6 as a temp sl@0: asm("orr r7, r7, r0, lsr #30 "); sl@0: asm("mov r6, r0, lsl #2 "); // r7:r6 = quotient << 2 == 4 * |aVal/10| sl@0: asm("adds r6, r6, r0 "); sl@0: asm("adc r7, r7, r1 "); // r7:r6 += quotient == 5 * |aVal/10| sl@0: asm("mov r7, r7, lsl #1 "); sl@0: asm("orr r7, r7, r6, lsr #31 "); sl@0: asm("mov r6, r6, lsl #1 "); // r7:r6 <<= 1 == 10 * |aVal/10| sl@0: asm("subs r2, r2, r6 "); sl@0: asm("sbcs r3, r3, r7 "); // r3:r2 = aVal - (10 * |aVal/10|) == remainder sl@0: asm("bpl fix_quotient "); sl@0: asm("adds r2, r2, #10 "); sl@0: asm("adc r3, r3, #0 "); // fix remainder sl@0: asm("b fix_end "); sl@0: asm("fix_quotient: "); sl@0: asm("adds r0, r0, #1 "); sl@0: asm("adc r1, r1, #0 "); // increment quotient sl@0: asm("fix_end: "); sl@0: asm("add r2, r2, #0x30 "); // add '0' to remainder sl@0: asm("tst r4, #0x100 "); // test aA bit 8 sl@0: asm("strneh r2, [r5, #-2]! "); // if aA bit 8 set, store 16 bits sl@0: asm("streqb r2, [r5, #-1]! 
"); // else store 8 bits sl@0: asm("teq r1, #0 "); sl@0: asm("bne 1b "); // loop back if new aVal > 0xffffffffu sl@0: asm("2: "); sl@0: asm("mov r2, r4 "); // r2 = aA sl@0: asm("ldr r1, [ip] "); // r1 = original aDest sl@0: asm("mov r3, ip "); // r3 = &aDest sl@0: asm("sub r6, r1, r5 "); // r6 = original aDest - r5 sl@0: asm("str r5, [r3] "); // aDest = r5 sl@0: asm("mov r1, #10 "); // r1 = radix sl@0: asm("mov ip, r5 "); // ip = aDest sl@0: asm("bl do_convert_dec "); sl@0: asm("add r0, r0, r6 "); // r0 += r6 sl@0: __POPRET("r4-r7,"); sl@0: sl@0: #endif //__FIXED_POINT_64BIT_DIV10__ sl@0: sl@0: } sl@0: sl@0: #endif //__DES_MACHINE_CODED__