/*
 * Copyright (c) 2007
 *  Josep Torra <josep@fluendo.com>. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#if __VFP_FP__
/*
** compile with -mcpu=arm1136j-s -mfpu=vfp -mfloat-abi=softfp
**
** void vfp_add_f32 (float *d, const float *s1, const float *s2, int n);
** void vfp_add_f64 (double *d, const double *s1, const double *s2, int n);
** void vfp_divide_f32 (float *d, const float *s1, const float *s2, int n);
** void vfp_divide_f64 (double *d, const double *s1, const double *s2, int n);
** void vfp_multiply_f32 (float *d, const float *s1, const float *s2, int n);
** void vfp_multiply_f64 (double *d, const double *s1, const double *s2, int n);
** void vfp_subtract_f32 (float *d, const float *s1, const float *s2, int n);
** void vfp_subtract_f64 (double *d, const double *s1, const double *s2, int n);
**
** d: $r0 | s1: $r1 | s2: $r2 | n: $r3 |
**
*/
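/*
** A minimal C usage sketch for the element-wise routines above, assuming this
** file is assembled with the flags noted above and linked into the caller.
** The wrapper name and buffer sizes below are illustrative only, not part of
** this file.
**
**   extern void vfp_add_f32 (float *d, const float *s1, const float *s2, int n);
**
**   void example_add (void)
**   {
**     float a[12], b[12], d[12];
**     int i;
**
**     for (i = 0; i < 12; i++) {
**       a[i] = (float) i;
**       b[i] = (float) (2 * i);
**     }
**     // n = 12 is processed as a remainder of 4 plus one unrolled block of 8
**     vfp_add_f32 (d, a, b, 12);   // d[i] = a[i] + b[i]
**   }
*/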
#define UNROLL_F32_TEMPLATE(fname,finst) \
  .global vfp_ ## fname ## ; \
  vfp_ ## fname ## : \
    stmdb   sp!, {fp, lr};              /* save registers to stack */ \
    ands    ip, r3, #7;                 /* ip = n % 8 */ \
    beq     vfp_ ## fname ## _unroll;   /* if ip == 0 goto unroll */ \
  vfp_ ## fname ## _loop1: \
    fldmias r1!, {s0}; \
    fldmias r2!, {s1}; \
    ## finst ##s s2, s0, s1; \
    fstmias r0!, {s2}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop1; \
  vfp_ ## fname ## _unroll:             /* unroll by 8 */ \
    movs    ip, r3, lsr #3;             /* ip = n / 8 */ \
    beq     vfp_ ## fname ## _end;      /* if ip == 0 goto finish */ \
    fmrx    lr, fpscr;                  /* read fpscr register into arm */ \
    mov     fp, #7; \
    orr     fp, lr, fp, lsl #16;        /* set vector length to 8 */ \
    fmxr    fpscr, fp; \
  vfp_ ## fname ## _loop2: \
    fldmias r1!, {s8, s9, s10, s11, s12, s13, s14, s15}; \
    fldmias r2!, {s16, s17, s18, s19, s20, s21, s22, s23}; \
    ## finst ##s s24, s8, s16; \
    fstmias r0!, {s24, s25, s26, s27, s28, s29, s30, s31}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop2; \
    fmxr    fpscr, lr;                  /* restore original fpscr */ \
  vfp_ ## fname ## _end: \
    ldmia   sp!, {fp, pc};              /* restore from stack and return */

#define UNROLL_F64_TEMPLATE(fname,finst) \
  .global vfp_ ## fname ## ; \
  vfp_ ## fname ## : \
    stmdb   sp!, {fp, lr};              /* save registers to stack */ \
    ands    ip, r3, #3;                 /* ip = n % 4 */ \
    beq     vfp_ ## fname ## _unroll;   /* if ip == 0 goto unroll */ \
  vfp_ ## fname ## _loop1: \
    fldmiad r1!, {d0}; \
    fldmiad r2!, {d1}; \
    ## finst ##d d2, d0, d1; \
    fstmiad r0!, {d2}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop1; \
  vfp_ ## fname ## _unroll:             /* unroll by 4 */ \
    movs    ip, r3, lsr #2;             /* ip = n / 4 */ \
    beq     vfp_ ## fname ## _end;      /* if ip == 0 goto finish */ \
    fmrx    lr, fpscr;                  /* read fpscr register into arm */ \
    mov     fp, #3; \
    orr     fp, lr, fp, lsl #16;        /* set vector length to 4 */ \
    fmxr    fpscr, fp; \
  vfp_ ## fname ## _loop2: \
    fldmiad r1!, {d4, d5, d6, d7}; \
    fldmiad r2!, {d8, d9, d10, d11}; \
    ## finst ##d d12, d4, d8; \
    fstmiad r0!, {d12, d13, d14, d15}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop2; \
    fmxr    fpscr, lr;                  /* restore original fpscr */ \
  vfp_ ## fname ## _end: \
    ldmia   sp!, {fp, pc};              /* restore from stack and return */

.align 2
UNROLL_F32_TEMPLATE(add_f32,fadd);
UNROLL_F64_TEMPLATE(add_f64,fadd);

UNROLL_F32_TEMPLATE(divide_f32,fdiv);
UNROLL_F64_TEMPLATE(divide_f64,fdiv);

UNROLL_F32_TEMPLATE(multiply_f32,fmul);
UNROLL_F64_TEMPLATE(multiply_f64,fmul);

UNROLL_F32_TEMPLATE(subtract_f32,fsub);
UNROLL_F64_TEMPLATE(subtract_f64,fsub);

#undef UNROLL_F32_TEMPLATE
#undef UNROLL_F64_TEMPLATE

/*
**
** void vfp_scalaradd_f32_ns (float *d, const float *s1, const float *s2_1, int n);
** void vfp_scalaradd_f64_ns (double *d, const double *s1, const double *s2_1, int n);
** void vfp_scalarmultiply_f32_ns (float *d, const float *s1, const float *s2_1, int n);
** void vfp_scalarmultiply_f64_ns (double *d, const double *s1, const double *s2_1, int n);
**
** d: $r0 | s1: $r1 | s2_1: $r2 | n: $r3 |
**
*/
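/*
** A hedged usage sketch for the scalar variants above: the second operand is
** passed by pointer (s2_1) and combined with every element of s1. The wrapper
** name and the gain variable below are illustrative only.
**
**   extern void vfp_scalarmultiply_f32_ns (float *d, const float *s1,
**                                          const float *s2_1, int n);
**
**   void example_scale (float *dst, const float *src, int n)
**   {
**     float gain = 0.5f;
**
**     // dst[i] = src[i] * gain for i in [0, n)
**     vfp_scalarmultiply_f32_ns (dst, src, &gain, n);
**   }
*/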
#define UNROLL_F32_TEMPLATE(fname,finst) \
  .global vfp_ ## fname ## ; \
  vfp_ ## fname ## : \
    stmdb   sp!, {fp, lr};              /* save registers to stack */ \
    fldmias r2, {s1};                   /* load scalar value */ \
    ands    ip, r3, #7;                 /* ip = n % 8 */ \
    beq     vfp_ ## fname ## _unroll;   /* if ip == 0 goto unroll */ \
  vfp_ ## fname ## _loop1: \
    fldmias r1!, {s0}; \
    ## finst ##s s2, s0, s1; \
    fstmias r0!, {s2}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop1; \
  vfp_ ## fname ## _unroll:             /* unroll by 8 */ \
    movs    ip, r3, lsr #3;             /* ip = n / 8 */ \
    beq     vfp_ ## fname ## _end;      /* if ip == 0 goto finish */ \
    fmrx    lr, fpscr;                  /* read fpscr register into arm */ \
    mov     fp, #7; \
    orr     fp, lr, fp, lsl #16;        /* set vector length to 8 */ \
    fmxr    fpscr, fp; \
  vfp_ ## fname ## _loop2: \
    fldmias r1!, {s8, s9, s10, s11, s12, s13, s14, s15}; \
    ## finst ##s s24, s8, s1; \
    fstmias r0!, {s24, s25, s26, s27, s28, s29, s30, s31}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop2; \
    fmxr    fpscr, lr;                  /* restore original fpscr */ \
  vfp_ ## fname ## _end: \
    ldmia   sp!, {fp, pc};              /* restore from stack and return */

#define UNROLL_F64_TEMPLATE(fname,finst) \
  .global vfp_ ## fname ## ; \
  vfp_ ## fname ## : \
    stmdb   sp!, {fp, lr};              /* save registers to stack */ \
    fldmiad r2, {d1};                   /* load scalar value */ \
    ands    ip, r3, #3;                 /* ip = n % 4 */ \
    beq     vfp_ ## fname ## _unroll;   /* if ip == 0 goto unroll */ \
  vfp_ ## fname ## _loop1: \
    fldmiad r1!, {d0}; \
    ## finst ##d d2, d0, d1; \
    fstmiad r0!, {d2}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop1; \
  vfp_ ## fname ## _unroll:             /* unroll by 4 */ \
    movs    ip, r3, lsr #2;             /* ip = n / 4 */ \
    beq     vfp_ ## fname ## _end;      /* if ip == 0 goto finish */ \
    fmrx    lr, fpscr;                  /* read fpscr register into arm */ \
    mov     fp, #3; \
    orr     fp, lr, fp, lsl #16;        /* set vector length to 4 */ \
    fmxr    fpscr, fp; \
  vfp_ ## fname ## _loop2: \
    fldmiad r1!, {d4, d5, d6, d7}; \
    ## finst ##d d12, d4, d1; \
    fstmiad r0!, {d12, d13, d14, d15}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop2; \
    fmxr    fpscr, lr;                  /* restore original fpscr */ \
  vfp_ ## fname ## _end: \
    ldmia   sp!, {fp, pc};              /* restore from stack and return */

UNROLL_F32_TEMPLATE(scalaradd_f32_ns,fadd);
UNROLL_F64_TEMPLATE(scalaradd_f64_ns,fadd);

UNROLL_F32_TEMPLATE(scalarmultiply_f32_ns,fmul);
UNROLL_F64_TEMPLATE(scalarmultiply_f64_ns,fmul);

#undef UNROLL_F32_TEMPLATE
#undef UNROLL_F64_TEMPLATE

/*
**
** void vfp_abs_f32_f32_ns(float *d, const float *s, int n);
** void vfp_abs_f64_f64_ns(double *d, const double *s, int n);
** void vfp_negative_f32(float *d, const float *s, int n);
** void vfp_negative_f64(double *d, const double *s, int n);
**
** d: $r0 | s: $r1 | n: $r2 |
**
*/
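/*
** A hedged usage sketch for the unary variants above; the wrapper name is
** illustrative only, and dst/src follow the d/s argument order of the
** prototypes.
**
**   extern void vfp_abs_f32_f32_ns (float *d, const float *s, int n);
**   extern void vfp_negative_f32 (float *d, const float *s, int n);
**
**   void example_unary (float *dst, float *neg, const float *src, int n)
**   {
**     vfp_abs_f32_f32_ns (dst, src, n);   // dst[i] = |src[i]|
**     vfp_negative_f32 (neg, src, n);     // neg[i] = -src[i]
**   }
*/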
#define UNROLL_F32_TEMPLATE(fname,finst) \
  .global vfp_ ## fname ## ; \
  vfp_ ## fname ## : \
    stmdb   sp!, {fp, lr};              /* save registers to stack */ \
    ands    ip, r2, #7;                 /* ip = n % 8 */ \
    beq     vfp_ ## fname ## _unroll;   /* if ip == 0 goto unroll */ \
  vfp_ ## fname ## _loop1: \
    fldmias r1!, {s0}; \
    ## finst ##s s2, s0; \
    fstmias r0!, {s2}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop1; \
  vfp_ ## fname ## _unroll:             /* unroll by 8 */ \
    movs    ip, r2, lsr #3;             /* ip = n / 8 */ \
    beq     vfp_ ## fname ## _end;      /* if ip == 0 goto finish */ \
    fmrx    lr, fpscr;                  /* read fpscr register into arm */ \
    mov     fp, #7; \
    orr     fp, lr, fp, lsl #16;        /* set vector length to 8 */ \
    fmxr    fpscr, fp; \
  vfp_ ## fname ## _loop2: \
    fldmias r1!, {s8, s9, s10, s11, s12, s13, s14, s15}; \
    ## finst ##s s24, s8; \
    fstmias r0!, {s24, s25, s26, s27, s28, s29, s30, s31}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop2; \
    fmxr    fpscr, lr;                  /* restore original fpscr */ \
  vfp_ ## fname ## _end: \
    ldmia   sp!, {fp, pc};              /* restore from stack and return */

#define UNROLL_F64_TEMPLATE(fname,finst) \
  .global vfp_ ## fname ## ; \
  vfp_ ## fname ## : \
    stmdb   sp!, {fp, lr};              /* save registers to stack */ \
    ands    ip, r2, #3;                 /* ip = n % 4 */ \
    beq     vfp_ ## fname ## _unroll;   /* if ip == 0 goto unroll */ \
  vfp_ ## fname ## _loop1: \
    fldmiad r1!, {d0}; \
    ## finst ##d d2, d0; \
    fstmiad r0!, {d2}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop1; \
  vfp_ ## fname ## _unroll:             /* unroll by 4 */ \
    movs    ip, r2, lsr #2;             /* ip = n / 4 */ \
    beq     vfp_ ## fname ## _end;      /* if ip == 0 goto finish */ \
    fmrx    lr, fpscr;                  /* read fpscr register into arm */ \
    mov     fp, #3; \
    orr     fp, lr, fp, lsl #16;        /* set vector length to 4 */ \
    fmxr    fpscr, fp; \
  vfp_ ## fname ## _loop2: \
    fldmiad r1!, {d4, d5, d6, d7}; \
    ## finst ##d d12, d4; \
    fstmiad r0!, {d12, d13, d14, d15}; \
    subs    ip, ip, #1; \
    bne     vfp_ ## fname ## _loop2; \
    fmxr    fpscr, lr;                  /* restore original fpscr */ \
  vfp_ ## fname ## _end: \
    ldmia   sp!, {fp, pc};              /* restore from stack and return */

UNROLL_F32_TEMPLATE(abs_f32_f32_ns,fabs);
UNROLL_F64_TEMPLATE(abs_f64_f64_ns,fabs);

UNROLL_F32_TEMPLATE(negative_f32,fneg);
UNROLL_F64_TEMPLATE(negative_f64,fneg);

#undef UNROLL_F32_TEMPLATE
#undef UNROLL_F64_TEMPLATE
#endif