//Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
/*
Copyright 2002,2003,2004,2005 David A. Schleef <ds@schleef.org>
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions
are met:
1. Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
*/

#include <liboil/liboilfunction.h>
#include <liboil/liboilclasses.h>


void
split_53_nomix (int16_t *d_2xn, int16_t *s_2xn, int n)
{
  int i;

  if (n == 0) return;
  /* predict */
  for(i=1;i<n*2-2;i+=2){
    d_2xn[i] = s_2xn[i] - ((s_2xn[i-1] + s_2xn[i+1]) >> 1);
  }
  d_2xn[n*2-1] = s_2xn[n*2-1] - s_2xn[n*2-2];

  /* update */
  d_2xn[0] = s_2xn[0] + (d_2xn[1] >> 1);
  for(i=2;i<n*2;i+=2){
    d_2xn[i] = s_2xn[i] + ((d_2xn[i-1] + d_2xn[i+1]) >> 2);
  }
}
OIL_DEFINE_IMPL (split_53_nomix, split_53);

#if 0
void
synth_53_nomix (int16_t *d_2xn, int16_t *s_2xn, int n)
{
  int i;

  /* predict */
  i_n[0] -= i_n[1] >> 1;
  for(i=2;i<n*2;i+=2){
    i_n[i] -= (i_n[i-1] + i_n[i+1]) >> 2;
  }

  /* update */
  for(i=1;i<n*2-2;i+=2){
    i_n[i] += (i_n[i+1] + i_n[i-1]) >> 1;
  }
  i_n[n*2-1] += i_n[n*2-2];
}
#endif


void
split_53_c (int16_t *d_2xn, int16_t *s_2xn, int n)
{
  int i;

  if (n == 0) return;
  if (n == 1) {
    d_2xn[1] = s_2xn[1] - s_2xn[0];
    d_2xn[0] = s_2xn[0] + (d_2xn[1] >> 1);
  } else {
    d_2xn[1] = s_2xn[1] - ((s_2xn[0] + s_2xn[2]) >> 1);
    d_2xn[0] = s_2xn[0] + (d_2xn[1] >> 1);
    d_2xn+=2;
    s_2xn+=2;
    for(i=0;i<(n*2-4)/2;i++){
      d_2xn[1] = s_2xn[1] - ((s_2xn[0] + s_2xn[2]) >> 1);
      d_2xn[0] = s_2xn[0] + ((d_2xn[-1] + d_2xn[1]) >> 2);
      d_2xn+=2;
      s_2xn+=2;
    }
    d_2xn[1] = s_2xn[1] - s_2xn[0];
    d_2xn[0] = s_2xn[0] + ((d_2xn[-1] + d_2xn[1]) >> 2);
  }
}
OIL_DEFINE_IMPL (split_53_c, split_53);

void
synth_53_c (int16_t *d_2xn, int16_t *s_2xn, int n)
{
  int i;

  if (n == 0) return;
  if (n == 1) {
    d_2xn[0] = s_2xn[0] - (s_2xn[1] >> 1);
    d_2xn[1] = s_2xn[1] + d_2xn[0];
  } else {
    d_2xn[0] = s_2xn[0] - (s_2xn[1] >> 1);
    for(i=2;i<n*2-2;i+=2){
      d_2xn[i] = s_2xn[i] - ((s_2xn[i-1] + s_2xn[i+1]) >> 2);
      d_2xn[i-1] = s_2xn[i-1] + ((d_2xn[i] + d_2xn[i-2]) >> 1);
    }
    d_2xn[n*2-2] = s_2xn[n*2-2] - ((s_2xn[n*2-3] + s_2xn[n*2-1]) >> 2);
    d_2xn[n*2-3] = s_2xn[n*2-3] + ((d_2xn[n*2-2] + d_2xn[n*2-4]) >> 1);
    d_2xn[n*2-1] = s_2xn[n*2-1] + d_2xn[n*2-2];
  }
}
OIL_DEFINE_IMPL (synth_53_c, synth_53);
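/* Illustrative sketch (not part of the original file): split and synth are
 * exact inverses, including the truncating shifts, because each synthesis
 * step subtracts precisely what the corresponding analysis step added.
 * Hypothetical test code, assuming only split_53_c() and synth_53_c() as
 * defined above:
 */
#if 0
static void
example_53_roundtrip (void)
{
  int16_t src[8] = { 10, 12, 9, 7, 5, 6, 8, 11 };
  int16_t tmp[8], out[8];

  split_53_c (tmp, src, 4);   /* forward 5/3 lifting, n = 4 sample pairs */
  synth_53_c (out, tmp, 4);   /* inverse lifting: undo update, then predict */
  /* out[i] == src[i] for every i */
}
#endif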
void
deinterleave2_c_1 (int16_t *d1, int16_t *d2, int16_t *s_2xn, int n)
{
  int i;

  for(i=0;i<n;i++){
    d1[i] = s_2xn[2*i];
    d2[i] = s_2xn[2*i+1];
  }
}
OIL_DEFINE_IMPL (deinterleave2_c_1, deinterleave2_s16);

void
lift_add_mult_shift12_i386_mmx (int16_t *d, int16_t *s1, int16_t *s2,
    int16_t *s3, int16_t *s4, int n)
{
  uint32_t val;

  while (n&3) {
    d[0] = s1[0] + ((s4[0]*(s2[0] + s3[0]))>>12);
    d++;
    s1++;
    s2++;
    s3++;
    n--;
  }
  if (n==0) return;

  val = ((*(uint16_t *)s4)<<16) | (*(uint16_t *)s4);
  n>>=2;
  asm volatile ("\n"
      " mov %4, %%ecx\n"
      " movd %%ecx, %%mm7\n"
      " punpcklwd %%mm7, %%mm7\n"
      " mov %5, %%ecx\n"
      "1:\n"
      " movq 0(%2), %%mm0\n"
      " paddsw 0(%3), %%mm0\n"
      " movq %%mm0, %%mm1\n"
      " pmullw %%mm7, %%mm0\n"
      " pmulhw %%mm7, %%mm1\n"
      " psrlw $12, %%mm0\n"
      " psllw $4, %%mm1\n"
      " por %%mm1, %%mm0\n"
      " paddsw 0(%1), %%mm0\n"
      " movq %%mm0, 0(%0)\n"
      " add $8, %0\n"
      " add $8, %1\n"
      " add $8, %2\n"
      " add $8, %3\n"
      " decl %%ecx\n"
      " jne 1b\n"
      " emms\n"
      : "+r" (d), "+r" (s1), "+r" (s2), "+r" (s3)
      : "m" (val), "m" (n)
      : "ecx");
}
OIL_DEFINE_IMPL_FULL (lift_add_mult_shift12_i386_mmx, lift_add_mult_shift12, OIL_IMPL_FLAG_MMX);

void
interleave2_mmx (int16_t *d_2xn, int16_t *s1, int16_t *s2, int n)
{
  while (n&3) {
    d_2xn[0] = s1[0];
    d_2xn[1] = s2[0];
    s1++;
    s2++;
    d_2xn+=2;
    n--;
  }
  if (n==0) return;

  asm volatile ("\n"
      " xor %%ecx, %%ecx\n"
      "1:\n"
      " movq (%1,%%ecx,2), %%mm0\n"
      " movq (%2,%%ecx,2), %%mm1\n"
      " movq %%mm0, %%mm2\n"
      " punpckhwd %%mm1, %%mm0\n"
      " punpcklwd %%mm1, %%mm2\n"
      " movq %%mm2, (%0,%%ecx,4)\n"
      " movq %%mm0, 8(%0,%%ecx,4)\n"
      " add $4, %%ecx\n"
      " cmp %3, %%ecx\n"
      " jl 1b\n"
      " emms\n"
      : "+r" (d_2xn), "+r" (s1), "+r" (s2)
      : "m" (n)
      : "eax", "ecx");
}
OIL_DEFINE_IMPL_FULL (interleave2_mmx, interleave2_s16, OIL_IMPL_FLAG_MMX);

void
lift_add_shift1_mmx (int16_t *d, int16_t *s1, int16_t *s2, int16_t *s3, int n)
{
  while (n&3) {
    d[0] = s1[0] + ((s2[0] + s3[0])>>1);
    d++;
    s1++;
    s2++;
    s3++;
    n--;
  }
  if (n==0) return;

  asm volatile ("\n"
      " xor %%ecx, %%ecx\n"
      "1:\n"
      " movq (%2,%%ecx,2), %%mm1\n"
      " movq (%3,%%ecx,2), %%mm2\n"
      " paddw %%mm2, %%mm1\n"
      " psraw $1, %%mm1\n"
      " paddw (%1,%%ecx,2), %%mm1\n"
      " movq %%mm1, (%0,%%ecx,2)\n"
      " add $4, %%ecx\n"
      " cmp %4, %%ecx\n"
      " jl 1b\n"
      " emms\n"
      : "+r" (d), "+r" (s1), "+r" (s2), "+r" (s3)
      : "m" (n)
      : "ecx");
}
OIL_DEFINE_IMPL_FULL (lift_add_shift1_mmx, lift_add_shift1, OIL_IMPL_FLAG_MMX);

void
lift_sub_shift1_mmx (int16_t *d, int16_t *s1, int16_t *s2, int16_t *s3, int n)
{
  while (n&3) {
    d[0] = s1[0] - ((s2[0] + s3[0])>>1);
    d++;
    s1++;
    s2++;
    s3++;
    n--;
  }
  if (n==0) return;

  asm volatile ("\n"
      " xor %%ecx, %%ecx\n"
      "1:\n"
      " movq (%2,%%ecx,2), %%mm1\n"
      " movq (%3,%%ecx,2), %%mm2\n"
      " movq (%1,%%ecx,2), %%mm0\n"
      " paddw %%mm2, %%mm1\n"
      " psraw $1, %%mm1\n"
      " psubw %%mm1, %%mm0\n"
      " movq %%mm0, (%0,%%ecx,2)\n"
      " add $4, %%ecx\n"
      " cmp %4, %%ecx\n"
      " jl 1b\n"
      " emms\n"
      : "+r" (d), "+r" (s1), "+r" (s2), "+r" (s3)
      : "m" (n)
      : "ecx");
}
OIL_DEFINE_IMPL_FULL (lift_sub_shift1_mmx, lift_sub_shift1, OIL_IMPL_FLAG_MMX);
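/* Note on the loop structure shared by the MMX kernels in this file: a
 * scalar "while (n&3)" prologue peels elements until the remaining count is
 * a multiple of four, after which the asm body handles four int16 lanes
 * (one movq) per iteration. A hypothetical sketch of that shape, with
 * process_one()/process_four() standing in for the real kernels:
 */
#if 0
extern int16_t process_one (int16_t s);
extern void process_four (int16_t *d, const int16_t *s);

static void
example_tail_then_vector (int16_t *d, const int16_t *s, int n)
{
  while (n & 3) {          /* scalar tail first, so n becomes a multiple of 4 */
    *d++ = process_one (*s++);
    n--;
  }
  if (n == 0) return;      /* the asm loops count n>>2 iterations down to zero */
  for (n >>= 2; n; n--) {  /* one 64-bit movq's worth per iteration */
    process_four (d, s);
    d += 4;
    s += 4;
  }
}
#endif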
void
lift_add_shift2_mmx (int16_t *d, int16_t *s1, int16_t *s2, int16_t *s3, int n)
{
  while (n&3) {
    d[0] = s1[0] + ((s2[0] + s3[0])>>2);
    d++;
    s1++;
    s2++;
    s3++;
    n--;
  }
  if (n==0) return;

  asm volatile ("\n"
      " xor %%ecx, %%ecx\n"
      "1:\n"
      " movq (%2,%%ecx,2), %%mm1\n"
      " movq (%3,%%ecx,2), %%mm2\n"
      " paddw %%mm2, %%mm1\n"
      " psraw $2, %%mm1\n"
      " paddw (%1,%%ecx,2), %%mm1\n"
      " movq %%mm1, (%0,%%ecx,2)\n"
      " add $4, %%ecx\n"
      " cmp %4, %%ecx\n"
      " jl 1b\n"
      " emms\n"
      : "+r" (d), "+r" (s1), "+r" (s2), "+r" (s3)
      : "m" (n)
      : "ecx");
}
OIL_DEFINE_IMPL_FULL (lift_add_shift2_mmx, lift_add_shift2, OIL_IMPL_FLAG_MMX);

void
lift_sub_shift2_mmx (int16_t *d, int16_t *s1, int16_t *s2, int16_t *s3, int n)
{
  while (n&3) {
    d[0] = s1[0] - ((s2[0] + s3[0])>>2);
    d++;
    s1++;
    s2++;
    s3++;
    n--;
  }
  if (n==0) return;

  asm volatile ("\n"
      " xor %%ecx, %%ecx\n"
      "1:\n"
      " movq (%2,%%ecx,2), %%mm1\n"
      " movq (%3,%%ecx,2), %%mm2\n"
      " movq (%1,%%ecx,2), %%mm0\n"
      " paddw %%mm2, %%mm1\n"
      " psraw $2, %%mm1\n"
      " psubw %%mm1, %%mm0\n"
      " movq %%mm0, (%0,%%ecx,2)\n"
      " add $4, %%ecx\n"
      " cmp %4, %%ecx\n"
      " jl 1b\n"
      " emms\n"
      : "+r" (d), "+r" (s1), "+r" (s2), "+r" (s3)
      : "m" (n)
      : "ecx");
}
OIL_DEFINE_IMPL_FULL (lift_sub_shift2_mmx, lift_sub_shift2, OIL_IMPL_FLAG_MMX);
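/* The lift_{add,sub}_shift{1,2} kernels above are the elementwise building
 * blocks of the 5/3 lifting steps: lift_sub_shift1 computes the predict step
 * d = s1 - ((s2 + s3) >> 1) and lift_add_shift2 the update step
 * d = s1 + ((s2 + s3) >> 2), the same formulas used in split_53_c. A hedged
 * sketch of a deinterleaved 5/3 split built from them, assuming even/odd
 * hold the deinterleaved input and ignoring the boundary samples:
 */
#if 0
static void
example_split_53_via_lifts (int16_t *lo, int16_t *hi,
    int16_t *even, int16_t *odd, int n)
{
  /* predict: hi[i] = odd[i] - ((even[i] + even[i+1]) >> 1) */
  lift_sub_shift1_mmx (hi, odd, even, even + 1, n - 1);
  /* update:  lo[i] = even[i] + ((hi[i-1] + hi[i]) >> 2) */
  lift_add_shift2_mmx (lo + 1, even + 1, hi, hi + 1, n - 2);
}
#endif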
sl@0: " movq %%mm1, %%mm7\n" // mm7 = s9 s7 s5 s3 sl@0: " psrlq $48, %%mm7\n" // mm7 = 00 00 00 s9 sl@0: " psubw %%mm2, %%mm0\n" // mm0 = d8 d6 d4 d2 sl@0: sl@0: " movq %%mm0, %%mm1\n" // mm1 = d8 d6 d4 d2 sl@0: " movq %%mm0, %%mm3\n" // mm1 = d8 d6 d4 d2 sl@0: " psllq $16, %%mm0\n" // mm0 = d6 d4 d2 00 sl@0: " por %%mm6, %%mm0\n" // mm0 = d6 d4 d2 d0 sl@0: " psrlq $48, %%mm1\n" // mm1 = 00 00 00 d8 sl@0: " movq %%mm1, %%mm6\n" // mm6 = 00 00 00 d8 sl@0: sl@0: " movq %%mm0, %%mm1\n" sl@0: " paddw %%mm3, %%mm1\n" // mm0 = d8+d6 ... sl@0: " psraw $1, %%mm1\n" // mm1 = (d8+d6)>>1 ... sl@0: " paddw %%mm4, %%mm1\n" // mm1 = d7 d5 d3 d1 sl@0: sl@0: " movq %%mm1, %%mm2\n" sl@0: sl@0: " movq %%mm0, %%mm1\n" sl@0: " punpcklwd %%mm2, %%mm0\n" sl@0: " punpckhwd %%mm2, %%mm1\n" sl@0: sl@0: " movq %%mm0, (%0, %%ecx, 4)\n" sl@0: " movq %%mm1, 8(%0, %%ecx, 4)\n" sl@0: sl@0: " add $4, %%ecx\n" sl@0: " cmp %3, %%ecx\n" sl@0: " jl 1b\n" sl@0: " emms\n" sl@0: : "+r" (d_2xn), "+r" (s_2xn), "+ecx" (i) sl@0: : "m" (n)); sl@0: sl@0: i*=2; sl@0: n+=5; sl@0: d_2xn[i] = s_2xn[i] - ((s_2xn[i-1] + s_2xn[i+1]) >> 2); sl@0: i+=2; sl@0: } else { sl@0: i = 2; sl@0: } sl@0: for(;i> 2); sl@0: d_2xn[i-1] = s_2xn[i-1] + ((d_2xn[i] + d_2xn[i-2]) >> 1); sl@0: } sl@0: d_2xn[n*2-2] = s_2xn[n*2-2] - ((s_2xn[n*2-3] + s_2xn[n*2-1]) >> 2); sl@0: d_2xn[n*2-3] = s_2xn[n*2-3] + ((d_2xn[n*2-2] + d_2xn[n*2-4]) >> 1); sl@0: d_2xn[n*2-1] = s_2xn[n*2-1] + d_2xn[n*2-2]; sl@0: } sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (synth_53_mmx, synth_53, OIL_IMPL_FLAG_MMX); sl@0: #endif sl@0: sl@0: sl@0: void sl@0: mas2_add_s16_mmx (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: int shift = s4_2[1]; sl@0: sl@0: while (n&3) { sl@0: int x; sl@0: sl@0: x = s4_2[0] + s2[0]*s3_2[0] + s2[1]*s3_2[1]; sl@0: x >>= s4_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: n--; sl@0: } sl@0: if (n==0) return; sl@0: sl@0: n>>=2; sl@0: asm volatile ("\n" sl@0: " movzwl 0(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm7\n" sl@0: " pshufw $0x00, %%mm7, %%mm7\n" sl@0: " movzwl 2(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm6\n" sl@0: " pshufw $0x00, %%mm6, %%mm6\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm5\n" sl@0: " pshufw $0x44, %%mm5, %%mm5\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" // mm0 = s0, s1, s2, s3 sl@0: " movq 0(%2), %%mm1\n" // mm1 = s0, s1, s2, s3 sl@0: " pmullw %%mm7, %%mm0\n" // mm0 = lo(s0*a0), lo(s1*a0), ... sl@0: " pmulhw %%mm7, %%mm1\n" // mm1 = hi(s0*a0), hi(s1*a0), ... sl@0: " movq %%mm0, %%mm2\n" // mm2 = lo(s0*a0), lo(s1*a0), ... sl@0: " punpcklwd %%mm1, %%mm0\n" // mm0 = s0*a0, s1*a0 sl@0: " punpckhwd %%mm1, %%mm2\n" // mm2 = s2*a0, s3*a0 sl@0: " movq %%mm2, %%mm1\n" // mm1 = s2*a0, s3*a0 sl@0: sl@0: " movq 2(%2), %%mm2\n" sl@0: " movq 2(%2), %%mm3\n" sl@0: " pmullw %%mm6, %%mm2\n" sl@0: " pmulhw %%mm6, %%mm3\n" sl@0: " movq %%mm2, %%mm4\n" sl@0: " punpcklwd %%mm3, %%mm2\n" // mm2 = s1*a1, s2*a1 sl@0: " punpckhwd %%mm3, %%mm4\n" // mm4 = s3*a1, s4*a1 sl@0: " movq %%mm4, %%mm3\n" // mm3 = s3*a1, s4*a1 sl@0: sl@0: " paddd %%mm3, %%mm1\n" // mm1 = s2*a0 + s3*a1, ... sl@0: " paddd %%mm2, %%mm0\n" // mm0 = s0*a0 + s1*a1, ... sl@0: sl@0: " paddd %%mm5, %%mm1\n" // mm1 = s2*a0 + s3*a1 + offset, ... sl@0: " paddd %%mm5, %%mm0\n" // mm0 = s0*a0 + s1*a1 + offset, ... sl@0: sl@0: " movd %4, %%mm4\n" sl@0: " psrad %%mm4, %%mm1\n" // mm1 = (s2*a0 + s3*a1 + offset)>>shift, ... 
sl@0: " psrad %%mm4, %%mm0\n" // mm0 = (s0*a0 + s1*a1 + offset)>>shift, ... sl@0: sl@0: " packssdw %%mm1, %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : "r" (shift) sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas2_add_s16_mmx, mas2_add_s16, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: sl@0: #if 0 sl@0: void sl@0: mas2_add_s16_lim_mmx (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: int shift = s4_2[1]; sl@0: sl@0: while (n&3) { sl@0: int x; sl@0: sl@0: x = s4_2[0] + s2[0]*s3_2[0] + s2[1]*s3_2[1]; sl@0: x >>= s4_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: n--; sl@0: } sl@0: if (n==0) return; sl@0: sl@0: n>>=2; sl@0: asm volatile ("\n" sl@0: " movzwl 0(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm7\n" sl@0: " pshufw $0x00, %%mm7, %%mm7\n" sl@0: " movzwl 2(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm6\n" sl@0: " pshufw $0x00, %%mm6, %%mm6\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm5\n" sl@0: " pshufw $0x44, %%mm5, %%mm5\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " paddq 2(%2), %%mm0\n" sl@0: sl@0: " movd %4, %%mm4\n" sl@0: " psraw %%mm4, %%mm0\n" sl@0: sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : "r" (shift) sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas2_add_s16_lim_mmx, mas2_add_s16, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: #endif sl@0: sl@0: void sl@0: mas4_add_s16_mmx (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_4, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: int shift = s4_2[1]; sl@0: //int m; sl@0: sl@0: //m = n&3; sl@0: #if 1 sl@0: while (n&3) { sl@0: int x; sl@0: int i; sl@0: sl@0: x = s4_2[0]; sl@0: for(i=0;i<4;i++){ sl@0: x += s2[i]*s3_4[i]; sl@0: } sl@0: x >>= s4_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: n--; sl@0: } sl@0: #endif sl@0: if (n==0) return; sl@0: sl@0: n>>=2; sl@0: asm volatile ("\n" sl@0: " movq 0(%0), %%mm7\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm5\n" sl@0: " pshufw $0x44, %%mm5, %%mm5\n" sl@0: :: "r" (s3_4), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" // mm0 = s0, s1, s2, s3 sl@0: " movq 0(%2), %%mm1\n" // mm1 = s0, s1, s2, s3 sl@0: " pshufw $0x00, %%mm7, %%mm6\n" sl@0: " pmullw %%mm6, %%mm0\n" // mm0 = lo(s0*a0), lo(s1*a0), ... sl@0: " pmulhw %%mm6, %%mm1\n" // mm1 = hi(s0*a0), hi(s1*a0), ... sl@0: " movq %%mm0, %%mm2\n" // mm2 = lo(s0*a0), lo(s1*a0), ... sl@0: " punpcklwd %%mm1, %%mm0\n" // mm0 = s0*a0, s1*a0 sl@0: " punpckhwd %%mm1, %%mm2\n" // mm2 = s2*a0, s3*a0 sl@0: " movq %%mm2, %%mm1\n" // mm1 = s2*a0, s3*a0 sl@0: sl@0: " movq 2(%2), %%mm2\n" sl@0: " movq 2(%2), %%mm3\n" sl@0: " pshufw $0x55, %%mm7, %%mm6\n" sl@0: " pmullw %%mm6, %%mm2\n" sl@0: " pmulhw %%mm6, %%mm3\n" sl@0: " movq %%mm2, %%mm4\n" sl@0: " punpcklwd %%mm3, %%mm2\n" // mm2 = s1*a1, s2*a1 sl@0: " punpckhwd %%mm3, %%mm4\n" // mm4 = s3*a1, s4*a1 sl@0: " movq %%mm4, %%mm3\n" // mm3 = s3*a1, s4*a1 sl@0: " paddd %%mm3, %%mm1\n" // mm1 = s2*a0 + s3*a1, ... sl@0: " paddd %%mm2, %%mm0\n" // mm0 = s0*a0 + s1*a1, ... 
sl@0: sl@0: " movq 4(%2), %%mm2\n" sl@0: " movq 4(%2), %%mm3\n" sl@0: " pshufw $0xaa, %%mm7, %%mm6\n" sl@0: " pmullw %%mm6, %%mm2\n" sl@0: " pmulhw %%mm6, %%mm3\n" sl@0: " movq %%mm2, %%mm4\n" sl@0: " punpcklwd %%mm3, %%mm2\n" sl@0: " punpckhwd %%mm3, %%mm4\n" sl@0: " movq %%mm4, %%mm3\n" sl@0: " paddd %%mm3, %%mm1\n" sl@0: " paddd %%mm2, %%mm0\n" sl@0: sl@0: " movq 6(%2), %%mm2\n" sl@0: " movq 6(%2), %%mm3\n" sl@0: " pshufw $0xff, %%mm7, %%mm6\n" sl@0: " pmullw %%mm6, %%mm2\n" sl@0: " pmulhw %%mm6, %%mm3\n" sl@0: " movq %%mm2, %%mm4\n" sl@0: " punpcklwd %%mm3, %%mm2\n" sl@0: " punpckhwd %%mm3, %%mm4\n" sl@0: " movq %%mm4, %%mm3\n" sl@0: " paddd %%mm3, %%mm1\n" sl@0: " paddd %%mm2, %%mm0\n" sl@0: sl@0: " paddd %%mm5, %%mm1\n" sl@0: " paddd %%mm5, %%mm0\n" sl@0: sl@0: " movd %4, %%mm4\n" sl@0: " psrad %%mm4, %%mm1\n" sl@0: " psrad %%mm4, %%mm0\n" sl@0: sl@0: " packssdw %%mm1, %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : "r" (shift) sl@0: ); sl@0: #if 0 sl@0: while (m) { sl@0: int x; sl@0: int i; sl@0: sl@0: x = s4_2[0]; sl@0: for(i=0;i<4;i++){ sl@0: x += s2[i]*s3_4[i]; sl@0: } sl@0: x >>= s4_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: m--; sl@0: } sl@0: #endif sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas4_add_s16_mmx, mas4_add_s16, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: sl@0: #if 0 sl@0: /* This only does 16-bit intermediates, whereas the ref specifies 32-bit */ sl@0: void sl@0: mas2_add_s16_mmx (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: while (n&3) { sl@0: int x; sl@0: sl@0: x = s4_2[0] + s2[0]*s3_2[0] + s2[1]*s3_2[1]; sl@0: x >>= s4_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: n--; sl@0: } sl@0: if (n==0) return; sl@0: sl@0: n>>=2; sl@0: asm volatile ("\n" sl@0: " movzwl 0(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm7\n" sl@0: " pshufw $0x00, %%mm7, %%mm7\n" sl@0: " movzwl 2(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm6\n" sl@0: " pshufw $0x00, %%mm6, %%mm6\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm5\n" sl@0: " pshufw $0x00, %%mm5, %%mm5\n" sl@0: " movzwl 2(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm4\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmullw %%mm7, %%mm0\n" sl@0: " movq 2(%2), %%mm1\n" sl@0: " pmullw %%mm6, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " paddw %%mm5, %%mm0\n" sl@0: " psraw %%mm4, %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas2_add_s16_mmx, mas2_add_s16, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: #endif sl@0: sl@0: sl@0: #if 0 sl@0: /* This only does 16-bit intermediates, whereas the ref specifies 32-bit */ sl@0: void sl@0: mas4_add_s16_mmx (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: while (n&3) { sl@0: int x; sl@0: sl@0: x = s4_2[0] + s2[0]*s3_2[0] + s2[1]*s3_2[1] + sl@0: s2[2]*s3_2[2] + s2[2]*s3_2[2]; sl@0: x >>= s4_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: n--; sl@0: } sl@0: if (n==0) return; sl@0: sl@0: n>>=2; sl@0: asm 
volatile ("\n" sl@0: " movzwl 0(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm7\n" sl@0: " pshufw $0x00, %%mm7, %%mm7\n" sl@0: " movzwl 2(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm6\n" sl@0: " pshufw $0x00, %%mm6, %%mm6\n" sl@0: " movzwl 2(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm5\n" sl@0: " pshufw $0x00, %%mm5, %%mm5\n" sl@0: " movzwl 2(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm4\n" sl@0: " pshufw $0x00, %%mm4, %%mm4\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm3\n" sl@0: " pshufw $0x00, %%mm3, %%mm3\n" sl@0: " movzwl 2(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm2\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmullw %%mm7, %%mm0\n" sl@0: " movq 2(%2), %%mm1\n" sl@0: " pmullw %%mm6, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " movq 4(%2), %%mm1\n" sl@0: " pmullw %%mm5, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " movq 6(%2), %%mm1\n" sl@0: " pmullw %%mm4, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " paddw %%mm3, %%mm0\n" sl@0: " psraw %%mm2, %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas4_add_s16_mmx, mas4_add_s16, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: #endif sl@0: sl@0: sl@0: #if 0 sl@0: /* This only does 16-bit intermediates, whereas the ref specifies 32-bit */ sl@0: void sl@0: mas8_add_s16_mmx (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: while (n&3) { sl@0: int x; sl@0: int i; sl@0: sl@0: x = s4_2[0]; sl@0: for(i=0;i<8;i++){ sl@0: x += s2[i]*s3_2[i]; sl@0: } sl@0: x >>= s4_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: n--; sl@0: } sl@0: if (n==0) return; sl@0: sl@0: n>>=2; sl@0: asm volatile ("\n" sl@0: " movq 0(%0), %%mm6\n" sl@0: " movq 8(%0), %%mm7\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm3\n" sl@0: " pshufw $0x00, %%mm3, %%mm3\n" sl@0: " pxor %%mm4, %%mm4\n" sl@0: " movzwl 2(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm4\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " pshufw $0x00, %%mm6, %%mm1\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmullw %%mm1, %%mm0\n" sl@0: " pshufw $0x55, %%mm6, %%mm2\n" sl@0: " movq 2(%2), %%mm1\n" sl@0: " pmullw %%mm2, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " pshufw $0xaa, %%mm6, %%mm2\n" sl@0: " movq 4(%2), %%mm1\n" sl@0: " pmullw %%mm2, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " pshufw $0xff, %%mm6, %%mm2\n" sl@0: " movq 6(%2), %%mm1\n" sl@0: " pmullw %%mm2, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: sl@0: " pshufw $0x00, %%mm7, %%mm2\n" sl@0: " movq 8(%2), %%mm1\n" sl@0: " pmullw %%mm2, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " pshufw $0x55, %%mm7, %%mm2\n" sl@0: " movq 10(%2), %%mm1\n" sl@0: " pmullw %%mm2, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " pshufw $0xaa, %%mm7, %%mm2\n" sl@0: " movq 12(%2), %%mm1\n" sl@0: " pmullw %%mm2, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " pshufw $0xff, %%mm7, %%mm2\n" sl@0: " movq 14(%2), %%mm1\n" sl@0: " pmullw %%mm2, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: sl@0: " paddw %%mm3, %%mm0\n" sl@0: " psraw %%mm4, %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" 
(s1), "+r" (s2), "+r" (n) sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas8_add_s16_mmx, mas8_add_s16, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: #endif sl@0: sl@0: sl@0: void sl@0: mas4_add_s16_pmaddwd (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: if (n==0) return; sl@0: asm volatile ("\n" sl@0: " movq 0(%0), %%mm6\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm3\n" sl@0: " movzwl 2(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm4\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmaddwd %%mm6, %%mm0\n" sl@0: " pshufw $0xee, %%mm0, %%mm1\n" // 11 10 11 10 sl@0: " paddd %%mm1, %%mm0\n" sl@0: " paddd %%mm3, %%mm0\n" sl@0: " psrad %%mm4, %%mm0\n" sl@0: " movd %%mm0, %%eax\n" sl@0: " addw 0(%1), %%ax\n" sl@0: " movw %%ax, 0(%0)\n" sl@0: " add $2, %0\n" sl@0: " add $2, %1\n" sl@0: " add $2, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : sl@0: : "eax" sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas4_add_s16_pmaddwd, mas4_add_s16, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: sl@0: void sl@0: mas4_add_s16_pmaddwd_2 (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: if (n==0) return; sl@0: asm volatile ("\n" sl@0: " movq 0(%0), %%mm6\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm3\n" sl@0: " pshufw $0x44, %%mm3, %%mm3\n" // 01 00 01 00 sl@0: " movzwl 2(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm4\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: if (n&1) { sl@0: asm volatile ("\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmaddwd %%mm6, %%mm0\n" sl@0: " pshufw $0xee, %%mm0, %%mm1\n" // 11 10 11 10 sl@0: " paddd %%mm1, %%mm0\n" sl@0: " paddd %%mm3, %%mm0\n" sl@0: " psrad %%mm4, %%mm0\n" sl@0: " movd %%mm0, %%eax\n" sl@0: " addw 0(%1), %%ax\n" sl@0: " movw %%ax, 0(%0)\n" sl@0: " add $2, %0\n" sl@0: " add $2, %1\n" sl@0: " add $2, %2\n" sl@0: " decl %3\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : sl@0: : "eax" sl@0: ); sl@0: } sl@0: n>>=1; sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmaddwd %%mm6, %%mm0\n" sl@0: " movq 2(%2), %%mm2\n" sl@0: " pmaddwd %%mm6, %%mm2\n" sl@0: sl@0: " movq %%mm0, %%mm1\n" sl@0: " punpckhdq %%mm2, %%mm0\n" sl@0: " punpckldq %%mm2, %%mm1\n" sl@0: sl@0: " paddd %%mm1, %%mm0\n" sl@0: " paddd %%mm3, %%mm0\n" sl@0: " psrad %%mm4, %%mm0\n" sl@0: " pshufw $0xd8, %%mm0, %%mm0\n" // 11 01 10 00 sl@0: sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movd %%mm0, 0(%0)\n" sl@0: " add $4, %0\n" sl@0: " add $4, %1\n" sl@0: " add $4, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : sl@0: : "eax" sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas4_add_s16_pmaddwd_2, mas4_add_s16, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: sl@0: void sl@0: mas8_add_s16_pmaddwd (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: if (n==0) return; sl@0: asm volatile ("\n" sl@0: " movq 0(%0), %%mm6\n" sl@0: " movq 8(%0), %%mm7\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm3\n" sl@0: " movzwl 2(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm4\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmaddwd %%mm6, %%mm0\n" sl@0: " movq 8(%2), %%mm1\n" sl@0: " pmaddwd %%mm7, %%mm1\n" sl@0: " paddd %%mm1, 
%%mm0\n" sl@0: " pshufw $0xee, %%mm0, %%mm1\n" sl@0: " paddd %%mm1, %%mm0\n" sl@0: " paddd %%mm3, %%mm0\n" sl@0: " psrad %%mm4, %%mm0\n" sl@0: " movd %%mm0, %%eax\n" sl@0: " addw 0(%1), %%ax\n" sl@0: " movw %%ax, 0(%0)\n" sl@0: " add $2, %0\n" sl@0: " add $2, %1\n" sl@0: " add $2, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : sl@0: : "eax" sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas8_add_s16_pmaddwd, mas8_add_s16, OIL_IMPL_FLAG_MMX|OIL_IMPL_FLAG_MMXEXT); sl@0: sl@0: sl@0: sl@0: #if 0 sl@0: void sl@0: mas8_add_s16_pmaddwd2 (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: while (n&3) { sl@0: int x; sl@0: int i; sl@0: sl@0: x = s4_2[0]; sl@0: for(i=0;i<8;i++){ sl@0: x += s2[i]*s3_2[i]; sl@0: } sl@0: x >>= s4_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: n--; sl@0: } sl@0: if (n==0) return; sl@0: sl@0: n>>=2; sl@0: asm volatile ("\n" sl@0: " movq 0(%0), %%mm6\n" sl@0: " movq 8(%0), %%mm7\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm5\n" sl@0: " pshufw $0x00, %%mm5, %%mm5\n" sl@0: " pxor %%mm4, %%mm4\n" sl@0: " movzwl 2(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm4\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmaddwd %%mm6, %%mm0\n" sl@0: " movq 8(%2), %%mm1\n" sl@0: " pmaddwd %%mm7, %%mm1\n" sl@0: " paddd %%mm1, %%mm0\n" sl@0: " pshufw $0xee, %%mm0, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: sl@0: " movq 2(%2), %%mm2\n" sl@0: " pmaddwd %%mm6, %%mm2\n" sl@0: " movq 10(%2), %%mm3\n" sl@0: " pmaddwd %%mm7, %%mm3\n" sl@0: " paddd %%mm3, %%mm2\n" sl@0: " pshufw $0xee, %%mm2, %%mm3\n" sl@0: " paddw %%mm3, %%mm2\n" sl@0: " pextrw $0, %%mm2, %%eax\n" sl@0: " pinsrw $1, %%eax, %%mm0\n" sl@0: sl@0: " movq 4(%2), %%mm2\n" sl@0: " pmaddwd %%mm6, %%mm2\n" sl@0: " movq 12(%2), %%mm3\n" sl@0: " pmaddwd %%mm7, %%mm3\n" sl@0: " paddd %%mm3, %%mm2\n" sl@0: " pshufw $0xee, %%mm2, %%mm3\n" sl@0: " paddw %%mm3, %%mm2\n" sl@0: " pextrw $0, %%mm2, %%eax\n" sl@0: " pinsrw $2, %%eax, %%mm0\n" sl@0: sl@0: " movq 6(%2), %%mm2\n" sl@0: " pmaddwd %%mm6, %%mm2\n" sl@0: " movq 14(%2), %%mm3\n" sl@0: " pmaddwd %%mm7, %%mm3\n" sl@0: " paddd %%mm3, %%mm2\n" sl@0: " pshufw $0xee, %%mm2, %%mm3\n" sl@0: " paddw %%mm3, %%mm2\n" sl@0: " pextrw $0, %%mm2, %%eax\n" sl@0: " pinsrw $3, %%eax, %%mm0\n" sl@0: sl@0: " paddw %%mm5, %%mm0\n" sl@0: " psraw %%mm4, %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : sl@0: : "eax" sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas8_add_s16_pmaddwd2, mas8_add_s16, OIL_IMPL_FLAG_SSE); sl@0: #endif sl@0: sl@0: #if 0 sl@0: /* This only does 16-bit intermediates, whereas the ref specifies 32-bit */ sl@0: void sl@0: mas8_add_s16_sse2 (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3_2, sl@0: int16_t *s4_2, int n) sl@0: { sl@0: asm volatile ("\n" sl@0: " movq 0(%0), %%mm6\n" sl@0: " movq 8(%0), %%mm7\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm3\n" sl@0: " pshufw $0x00, %%mm3, %%mm3\n" sl@0: " pxor %%mm4, %%mm4\n" sl@0: " movzwl 2(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm4\n" sl@0: :: "r" (s3_2), "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmullw %%mm6, %%mm0\n" sl@0: " movq 8(%2), 
%%mm1\n" sl@0: " pmullw %%mm7, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " pshufw $0xee, %%mm0, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " pshufw $0x01, %%mm0, %%mm1\n" sl@0: " paddw %%mm1, %%mm0\n" sl@0: " paddw %%mm3, %%mm0\n" sl@0: " psraw %%mm4, %%mm0\n" sl@0: " movd %%mm0, %%eax\n" sl@0: " addw 0(%1), %%ax\n" sl@0: " movw %%ax, 0(%0)\n" sl@0: " add $2, %0\n" sl@0: " add $2, %1\n" sl@0: " add $2, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : sl@0: : "eax" sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas8_add_s16_sse2, mas8_add_s16, OIL_IMPL_FLAG_SSE); sl@0: #endif sl@0: sl@0: void sl@0: mas2_across_add_s16_mmx (int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3, sl@0: int16_t *s4_2, int16_t *s5_2, int n) sl@0: { sl@0: int shift = s5_2[1]; sl@0: sl@0: while (n&3) { sl@0: int x; sl@0: sl@0: x = s5_2[0] + s2[0]*s4_2[0] + s3[0]*s4_2[1]; sl@0: x >>= s5_2[1]; sl@0: d1[0] = s1[0] + x; sl@0: sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: s3++; sl@0: n--; sl@0: } sl@0: if (n==0) return; sl@0: sl@0: n>>=2; sl@0: if (n==0) return; sl@0: asm volatile ("\n" sl@0: " movzwl 0(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm7\n" sl@0: " pshufw $0x00, %%mm7, %%mm7\n" sl@0: " movzwl 2(%0), %%ecx\n" sl@0: " movd %%ecx, %%mm6\n" sl@0: " pshufw $0x00, %%mm6, %%mm6\n" sl@0: " movzwl 0(%1), %%ecx\n" sl@0: " movd %%ecx, %%mm5\n" sl@0: " pshufw $0x44, %%mm5, %%mm5\n" sl@0: :: "r" (s4_2), "r" (s5_2) sl@0: : "ecx" sl@0: ); sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" // mm0 = s0, s1, s2, s3 sl@0: " movq 0(%2), %%mm1\n" // mm1 = s0, s1, s2, s3 sl@0: " pmullw %%mm7, %%mm0\n" // mm0 = lo(s0*a0), lo(s1*a0), ... sl@0: " pmulhw %%mm7, %%mm1\n" // mm1 = hi(s0*a0), hi(s1*a0), ... sl@0: " movq %%mm0, %%mm2\n" // mm2 = lo(s0*a0), lo(s1*a0), ... sl@0: " punpcklwd %%mm1, %%mm0\n" // mm0 = s0*a0, s1*a0 sl@0: " punpckhwd %%mm1, %%mm2\n" // mm2 = s2*a0, s3*a0 sl@0: " movq %%mm2, %%mm1\n" // mm1 = s2*a0, s3*a0 sl@0: sl@0: " movq 0(%3), %%mm2\n" sl@0: " movq 0(%3), %%mm3\n" sl@0: " pmullw %%mm6, %%mm2\n" sl@0: " pmulhw %%mm6, %%mm3\n" sl@0: " movq %%mm2, %%mm4\n" sl@0: " punpcklwd %%mm3, %%mm2\n" // mm2 = s1*a1, s2*a1 sl@0: " punpckhwd %%mm3, %%mm4\n" // mm4 = s3*a1, s4*a1 sl@0: " movq %%mm4, %%mm3\n" // mm3 = s3*a1, s4*a1 sl@0: sl@0: " paddd %%mm3, %%mm1\n" // mm1 = s2*a0 + s3*a1, ... sl@0: " paddd %%mm2, %%mm0\n" // mm0 = s0*a0 + s1*a1, ... sl@0: sl@0: " paddd %%mm5, %%mm1\n" // mm1 = s2*a0 + s3*a1 + offset, ... sl@0: " paddd %%mm5, %%mm0\n" // mm0 = s0*a0 + s1*a1 + offset, ... sl@0: sl@0: " movd %5, %%mm4\n" sl@0: " psrad %%mm4, %%mm1\n" // mm1 = (s2*a0 + s3*a1 + offset)>>shift, ... sl@0: " psrad %%mm4, %%mm0\n" // mm0 = (s0*a0 + s1*a1 + offset)>>shift, ... 
sl@0: sl@0: " packssdw %%mm1, %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " add $8, %3\n" sl@0: " decl %4\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (s3), "+m" (n) sl@0: : "r" (shift) sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas2_across_add_s16_mmx, mas2_across_add_s16, sl@0: OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: sl@0: void sl@0: add_const_rshift_s16_mmx(int16_t *d1, int16_t *s1, int16_t *s2_2, int n) sl@0: { sl@0: while(n&3) { sl@0: d1[0] = (s1[0] + s2_2[0])>>s2_2[1]; sl@0: d1++; sl@0: s1++; sl@0: n--; sl@0: } sl@0: n>>=2; sl@0: if (n==0) return; sl@0: asm volatile ("\n" sl@0: " movzwl 0(%2), %%ecx\n" sl@0: " movd %%ecx, %%mm7\n" sl@0: " pshufw $0x00, %%mm7, %%mm7\n" sl@0: " movzwl 2(%2), %%ecx\n" sl@0: " movd %%ecx, %%mm6\n" sl@0: "1:\n" sl@0: " movq 0(%1), %%mm0\n" sl@0: " paddsw %%mm7, %%mm0\n" sl@0: " psraw %%mm6, %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2_2), "+r" (n) sl@0: : sl@0: : "ecx" sl@0: ); sl@0: sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (add_const_rshift_s16_mmx, add_const_rshift_s16, sl@0: OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT); sl@0: sl@0: void sl@0: multiply_and_add_s16_mmx(int16_t *d1, int16_t *s1, int16_t *s2, int16_t *s3, int n) sl@0: { sl@0: while(n&3) { sl@0: d1[0] = s1[0] + s2[0]*s3[0]; sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: s3++; sl@0: n--; sl@0: } sl@0: n>>=2; sl@0: if (n==0) return; sl@0: asm volatile ("\n" sl@0: "1:\n" sl@0: " movq 0(%2), %%mm0\n" sl@0: " pmullw 0(%3), %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " add $8, %3\n" sl@0: " decl %4\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (s3), "+r" (n) sl@0: ); sl@0: sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (multiply_and_add_s16_mmx, multiply_and_add_s16, sl@0: OIL_IMPL_FLAG_MMX); sl@0: sl@0: void sl@0: multiply_and_add_s16_u8_mmx(int16_t *d1, int16_t *s1, int16_t *s2, sl@0: uint8_t *s3, int n) sl@0: { sl@0: while(n&3) { sl@0: d1[0] = s1[0] + s2[0]*s3[0]; sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: s3++; sl@0: n--; sl@0: } sl@0: n>>=2; sl@0: if (n==0) return; sl@0: asm volatile ("\n" sl@0: " pxor %%mm7, %%mm7\n" sl@0: "1:\n" sl@0: " movd 0(%3), %%mm0\n" sl@0: " punpcklbw %%mm7, %%mm0\n" sl@0: " pmullw 0(%2), %%mm0\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " add $8, %0\n" sl@0: " add $8, %1\n" sl@0: " add $8, %2\n" sl@0: " add $4, %3\n" sl@0: " decl %4\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (s3), "+r" (n) sl@0: ); sl@0: sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (multiply_and_add_s16_u8_mmx, multiply_and_add_s16_u8, sl@0: OIL_IMPL_FLAG_MMX); sl@0: sl@0: void sl@0: multiply_and_add_s16_u8_mmx_2(int16_t *d1, int16_t *s1, int16_t *s2, sl@0: uint8_t *s3, int n) sl@0: { sl@0: while(n&7) { sl@0: d1[0] = s1[0] + s2[0]*s3[0]; sl@0: d1++; sl@0: s1++; sl@0: s2++; sl@0: s3++; sl@0: n--; sl@0: } sl@0: n>>=3; sl@0: if (n==0) return; sl@0: asm volatile ("\n" sl@0: " pxor %%mm7, %%mm7\n" sl@0: "1:\n" sl@0: " movd 0(%3), %%mm0\n" sl@0: " punpcklbw %%mm7, %%mm0\n" sl@0: " movd 4(%3), %%mm1\n" sl@0: " pmullw 0(%2), %%mm0\n" sl@0: " punpcklbw %%mm7, %%mm1\n" sl@0: " paddw 0(%1), %%mm0\n" sl@0: " pmullw 8(%2), %%mm1\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " paddw 8(%1), 
%%mm1\n" sl@0: " movq %%mm1, 8(%0)\n" sl@0: sl@0: " add $16, %0\n" sl@0: " add $16, %1\n" sl@0: " add $16, %2\n" sl@0: " add $8, %3\n" sl@0: " decl %4\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (d1), "+r" (s1), "+r" (s2), "+r" (s3), "+r" (n) sl@0: ); sl@0: sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (multiply_and_add_s16_u8_mmx_2, multiply_and_add_s16_u8, sl@0: OIL_IMPL_FLAG_MMX); sl@0: sl@0: void sl@0: multiply_and_acc_12xn_s16_u8_mmx (int16_t *i1, int is1, int16_t *s1, sl@0: int ss1, uint8_t *s2, int ss2, int n) sl@0: { sl@0: if (n==0) return; sl@0: __asm__ __volatile__ ("\n" sl@0: " pxor %%mm7, %%mm7\n" sl@0: "1:\n" sl@0: " movd 0(%2), %%mm0\n" sl@0: " punpcklbw %%mm7, %%mm0\n" sl@0: " pmullw 0(%1), %%mm0\n" sl@0: " paddw 0(%0), %%mm0\n" sl@0: " movq %%mm0, 0(%0)\n" sl@0: " movd 4(%2), %%mm1\n" sl@0: " punpcklbw %%mm7, %%mm1\n" sl@0: " pmullw 8(%1), %%mm1\n" sl@0: " paddw 8(%0), %%mm1\n" sl@0: " movq %%mm1, 8(%0)\n" sl@0: " movd 8(%2), %%mm2\n" sl@0: " punpcklbw %%mm7, %%mm2\n" sl@0: " pmullw 16(%1), %%mm2\n" sl@0: " paddw 16(%0), %%mm2\n" sl@0: " movq %%mm2, 16(%0)\n" sl@0: sl@0: " addl %4, %0\n" sl@0: " addl %5, %1\n" sl@0: " addl %6, %2\n" sl@0: " decl %3\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : "+r" (i1), "+r" (s1), "+r" (s2), "+r" (n) sl@0: : "m" (is1), "m" (ss1), "m" (ss2) sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (multiply_and_acc_12xn_s16_u8_mmx, sl@0: multiply_and_acc_12xn_s16_u8, OIL_IMPL_FLAG_MMX); sl@0: sl@0: #ifdef ENABLE_BROKEN_IMPLS sl@0: void sl@0: mas4_across_add_s16_mmx (int16_t *d, int16_t *s1, int16_t *s2_nx4, int sstr2, sl@0: int16_t *s3_4, int16_t *s4_2, int n) sl@0: { sl@0: int16_t *s2_nx4_off; sl@0: sl@0: while (n&3) { sl@0: int x; sl@0: int j; sl@0: x = s4_2[0]; sl@0: for(j=0;j<4;j++){ sl@0: x += OIL_GET(s2_nx4, j*sstr2, int16_t)*s3_4[j]; sl@0: } sl@0: x >>= s4_2[1]; sl@0: d[0] = s1[0] + x; sl@0: sl@0: n--; sl@0: d++; sl@0: s1++; sl@0: s2_nx4++; sl@0: } sl@0: if (n==0) return; sl@0: sl@0: s2_nx4_off = OIL_OFFSET(s2_nx4, 3*sstr2); sl@0: sl@0: n >>= 2; sl@0: __asm__ __volatile__ ("\n" sl@0: " movq 0(%[s3_4]), %%mm0\n" sl@0: " pshufw $0x55, %%mm0, %%mm1\n" sl@0: " pshufw $0xaa, %%mm0, %%mm2\n" sl@0: " pshufw $0xff, %%mm0, %%mm3\n" sl@0: " pshufw $0x00, %%mm0, %%mm0\n" sl@0: " movzwl 0(%[s4_2]), %%ecx\n" sl@0: " movd %%ecx, %%mm7\n" sl@0: " pshufw $0x00, %%mm7, %%mm7\n" sl@0: " movzwl 2(%[s4_2]), %%ecx\n" sl@0: " movd %%ecx, %%mm6\n" sl@0: : sl@0: : [s3_4] "r" (s3_4), sl@0: [s4_2] "r" (s4_2) sl@0: : "ecx" sl@0: ); sl@0: sl@0: __asm__ __volatile__ ("\n" sl@0: "1:\n" sl@0: " movq 0(%[s2_nx4]), %%mm4\n" sl@0: " pmullw %%mm0, %%mm4\n" sl@0: " movq (%[s2_nx4],%[sstr]), %%mm5\n" sl@0: " pmullw %%mm1, %%mm5\n" sl@0: " paddsw %%mm5,%%mm4\n" sl@0: " movq (%[s2_nx4],%[sstr],2), %%mm5\n" sl@0: " pmullw %%mm2, %%mm5\n" sl@0: " paddsw %%mm5,%%mm4\n" sl@0: " movq (%[s2_nx4_off]), %%mm5\n" sl@0: " pmullw %%mm3, %%mm5\n" sl@0: " paddsw %%mm5,%%mm4\n" sl@0: " paddsw %%mm7, %%mm4\n" sl@0: " psraw %%mm6, %%mm4\n" sl@0: " paddsw (%[s1]),%%mm4\n" sl@0: " movq %%mm4, 0(%[d])\n" sl@0: sl@0: " addl $8, %[s2_nx4]\n" sl@0: " addl $8, %[s2_nx4_off]\n" sl@0: " addl $8, %[s1]\n" sl@0: " addl $8, %[d]\n" sl@0: " decl %[n]\n" sl@0: " jnz 1b\n" sl@0: " emms\n" sl@0: : [s2_nx4] "+r" (s2_nx4), sl@0: [d] "+r" (d), sl@0: [s2_nx4_off] "+r" (s2_nx4_off), sl@0: [n] "+m" (n), sl@0: [s1] "+r" (s1) sl@0: : [sstr] "r" (sstr2) sl@0: ); sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (mas4_across_add_s16_mmx, mas4_across_add_s16, sl@0: OIL_IMPL_FLAG_MMX|OIL_IMPL_FLAG_MMXEXT); sl@0: #endif sl@0: sl@0: void sl@0: 
void
mas4_across_add_s16_mmx (int16_t *d, int16_t *s1, int16_t *s2_nx4, int sstr2,
    int16_t *s3_4, int16_t *s4_2, int n)
{
  int16_t *s2_nx4_off;

  while (n&3) {
    int x;
    int j;
    x = s4_2[0];
    for(j=0;j<4;j++){
      x += OIL_GET(s2_nx4, j*sstr2, int16_t)*s3_4[j];
    }
    x >>= s4_2[1];
    d[0] = s1[0] + x;

    n--;
    d++;
    s1++;
    s2_nx4++;
  }
  if (n==0) return;

  s2_nx4_off = OIL_OFFSET(s2_nx4, 3*sstr2);

  n >>= 2;
  __asm__ __volatile__ ("\n"
      " movq 0(%[s3_4]), %%mm0\n"
      " pxor %%mm5, %%mm5\n"
      " movd 0(%[s4_2]), %%mm5\n"
      :
      : [s3_4] "r" (s3_4),
        [s4_2] "r" (s4_2)
      );

  __asm__ __volatile__ ("\n"
      "1:\n"
      " pshufw $0x00, %%mm0, %%mm6\n"
      " pmullw 0(%[s2_nx4]), %%mm6\n"
      " pshufw $0x00, %%mm0, %%mm3\n"
      " pmulhw 0(%[s2_nx4]), %%mm3\n"
      " movq %%mm6, %%mm7\n"
      " punpcklwd %%mm3, %%mm6\n"
      " punpckhwd %%mm3, %%mm7\n"

      " pshufw $0x55, %%mm0, %%mm2\n"
      " pmullw 0(%[s2_nx4],%[sstr]), %%mm2\n"
      " pshufw $0x55, %%mm0, %%mm3\n"
      " pmulhw 0(%[s2_nx4],%[sstr]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " pshufw $0xaa, %%mm0, %%mm2\n"
      " pmullw 0(%[s2_nx4],%[sstr],2), %%mm2\n"
      " pshufw $0xaa, %%mm0, %%mm3\n"
      " pmulhw 0(%[s2_nx4],%[sstr],2), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " pshufw $0xff, %%mm0, %%mm2\n"
      " pmullw 0(%[s2_nx4_off]), %%mm2\n"
      " pshufw $0xff, %%mm0, %%mm3\n"
      " pmulhw 0(%[s2_nx4_off]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " pshufw $0xcc, %%mm5, %%mm1\n"
      " paddd %%mm1, %%mm6\n"
      " paddd %%mm1, %%mm7\n"

      " pshufw $0xfd, %%mm5, %%mm1\n"
      " psrad %%mm1, %%mm6\n"
      " psrad %%mm1, %%mm7\n"
      " packssdw %%mm7, %%mm6\n"

      " paddsw (%[s1]),%%mm6\n"
      " movq %%mm6, 0(%[d])\n"

      " addl $8, %[s2_nx4]\n"
      " addl $8, %[s2_nx4_off]\n"
      " addl $8, %[s1]\n"
      " addl $8, %[d]\n"
      " decl %[n]\n"
      " jnz 1b\n"
      " emms\n"
      : [s2_nx4] "+r" (s2_nx4),
        [d] "+r" (d),
        [s2_nx4_off] "+r" (s2_nx4_off),
        [n] "+m" (n),
        [s1] "+r" (s1)
      : [sstr] "r" (sstr2)
      );
}
OIL_DEFINE_IMPL_FULL (mas4_across_add_s16_mmx, mas4_across_add_s16,
    OIL_IMPL_FLAG_MMX|OIL_IMPL_FLAG_MMXEXT);
void
mas8_across_add_s16_mmx (int16_t *d, int16_t *s1, int16_t *s2_nx8, int sstr2,
    int16_t *s3_8, int16_t *s4_2, int n)
{
  int16_t *s2_nx8_off;
  void *tmp = NULL;

  while (n&3) {
    int x;
    int j;
    x = s4_2[0];
    for(j=0;j<8;j++){
      x += OIL_GET(s2_nx8, j*sstr2, int16_t)*s3_8[j];
    }
    x >>= s4_2[1];
    d[0] = s1[0] + x;

    n--;
    d++;
    s1++;
    s2_nx8++;
  }
  if (n==0) return;

  s2_nx8_off = OIL_OFFSET(s2_nx8, 7*sstr2);

  n >>= 2;
  __asm__ __volatile__ ("\n"
      " movq 0(%[s3_8]), %%mm0\n"
      " pxor %%mm5, %%mm5\n"
      " movd 0(%[s4_2]), %%mm5\n"
      :
      : [s3_8] "r" (s3_8),
        [s4_2] "r" (s4_2)
      );

  __asm__ __volatile__ ("\n"
      "1:\n"
      " movl %[s2_nx8], %[tmp]\n"
      " movq 0(%[s3_8]), %%mm0\n"

      " pshufw $0x00, %%mm0, %%mm6\n"
      " pmullw 0(%[tmp]), %%mm6\n"
      " pshufw $0x00, %%mm0, %%mm3\n"
      " pmulhw 0(%[tmp]), %%mm3\n"
      " movq %%mm6, %%mm7\n"
      " punpcklwd %%mm3, %%mm6\n"
      " punpckhwd %%mm3, %%mm7\n"

      " addl %[sstr], %[tmp]\n"
      " pshufw $0x55, %%mm0, %%mm2\n"
      " pmullw 0(%[tmp]), %%mm2\n"
      " pshufw $0x55, %%mm0, %%mm3\n"
      " pmulhw 0(%[tmp]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " addl %[sstr], %[tmp]\n"
      " pshufw $0xaa, %%mm0, %%mm2\n"
      " pmullw 0(%[tmp]), %%mm2\n"
      " pshufw $0xaa, %%mm0, %%mm3\n"
      " pmulhw 0(%[tmp]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " addl %[sstr], %[tmp]\n"
      " pshufw $0xff, %%mm0, %%mm2\n"
      " pmullw 0(%[tmp]), %%mm2\n"
      " pshufw $0xff, %%mm0, %%mm3\n"
      " pmulhw 0(%[tmp]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " movq 8(%[s3_8]), %%mm0\n"

      " addl %[sstr], %[tmp]\n"
      " pshufw $0x00, %%mm0, %%mm2\n"
      " pmullw 0(%[tmp]), %%mm2\n"
      " pshufw $0x00, %%mm0, %%mm3\n"
      " pmulhw 0(%[tmp]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " addl %[sstr], %[tmp]\n"
      " pshufw $0x55, %%mm0, %%mm2\n"
      " pmullw 0(%[tmp]), %%mm2\n"
      " pshufw $0x55, %%mm0, %%mm3\n"
      " pmulhw 0(%[tmp]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " addl %[sstr], %[tmp]\n"
      " pshufw $0xaa, %%mm0, %%mm2\n"
      " pmullw 0(%[tmp]), %%mm2\n"
      " pshufw $0xaa, %%mm0, %%mm3\n"
      " pmulhw 0(%[tmp]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " addl %[sstr], %[tmp]\n"
      " pshufw $0xff, %%mm0, %%mm2\n"
      " pmullw 0(%[tmp]), %%mm2\n"
      " pshufw $0xff, %%mm0, %%mm3\n"
      " pmulhw 0(%[tmp]), %%mm3\n"
      " movq %%mm2, %%mm4\n"
      " punpcklwd %%mm3, %%mm2\n"
      " punpckhwd %%mm3, %%mm4\n"
      " paddd %%mm2, %%mm6\n"
      " paddd %%mm4, %%mm7\n"

      " pshufw $0xcc, %%mm5, %%mm1\n"
      " paddd %%mm1, %%mm6\n"
      " paddd %%mm1, %%mm7\n"

      " pshufw $0xfd, %%mm5, %%mm1\n"
      " psrad %%mm1, %%mm6\n"
      " psrad %%mm1, %%mm7\n"
      " packssdw %%mm7, %%mm6\n"

      " paddsw (%[s1]),%%mm6\n"
      " movq %%mm6, 0(%[d])\n"

      " addl $8, %[s2_nx8]\n"
      " addl $8, %[s1]\n"
      " addl $8, %[d]\n"
      " decl %[n]\n"
      " jnz 1b\n"
      " emms\n"
      : [s2_nx8] "+r" (s2_nx8),
        [tmp] "+r" (tmp),
        [s3_8] "+r" (s3_8),
        [d] "+r" (d),
        [n] "+m" (n),
        [s1] "+r" (s1)
      : [sstr] "m" (sstr2)
      );
}
OIL_DEFINE_IMPL_FULL (mas8_across_add_s16_mmx, mas8_across_add_s16,
    OIL_IMPL_FLAG_MMX|OIL_IMPL_FLAG_MMXEXT);
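/* The two lshift_s16 variants below apply a single left-shift count
 * (s3_1[0]) to every sample; psllw takes its count from %%mm1, which is
 * loaded once outside the loop. A shift count of k multiplies each sample
 * by 2^k, truncating to 16 bits, so e.g. s3_1[0] == 2 scales by 4. */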
void
lshift_s16_mmx(int16_t *d1, int16_t *s1, int16_t *s3_1, int n)
{
  while (n&3) {
    d1[0] = s1[0]<<s3_1[0];
    d1++;
    s1++;
    n--;
  }
  n >>= 2;
  if (n == 0) return;
  __asm__ __volatile__ ("\n"
      " movzwl 0(%[s3_1]), %%ecx\n"
      " movd %%ecx, %%mm1\n"
      "1:\n"
      " movq 0(%[s1]), %%mm0\n"
      " psllw %%mm1, %%mm0\n"
      " movq %%mm0, 0(%[d1])\n"
      " add $8, %[d1]\n"
      " add $8, %[s1]\n"
      " decl %[n]\n"
      " jnz 1b\n"
      " emms"
      : [d1] "+r" (d1),
        [s1] "+r" (s1),
        [n] "+r" (n)
      : [s3_1] "r" (s3_1)
      : "ecx");
}
OIL_DEFINE_IMPL_FULL (lshift_s16_mmx, lshift_s16, OIL_IMPL_FLAG_MMX);

void
lshift_s16_mmx_2(int16_t *d1, int16_t *s1, int16_t *s3_1, int n)
{
  while (n&7) {
    d1[0] = s1[0]<<s3_1[0];
    d1++;
    s1++;
    n--;
  }
  n >>= 3;
  if (n == 0) return;
  __asm__ __volatile__ ("\n"
      " movzwl 0(%[s3_1]), %%ecx\n"
      " movd %%ecx, %%mm1\n"
      "1:\n"
      " movq 0(%[s1]), %%mm0\n"
      " psllw %%mm1, %%mm0\n"
      " movq %%mm0, 0(%[d1])\n"
      " movq 8(%[s1]), %%mm0\n"
      " psllw %%mm1, %%mm0\n"
      " movq %%mm0, 8(%[d1])\n"
      " add $16, %[d1]\n"
      " add $16, %[s1]\n"
      " decl %[n]\n"
      " jnz 1b\n"
      " emms"
      : [d1] "+r" (d1),
        [s1] "+r" (s1),
        [n] "+r" (n)
      : [s3_1] "r" (s3_1)
      : "ecx");
}
OIL_DEFINE_IMPL_FULL (lshift_s16_mmx_2, lshift_s16, OIL_IMPL_FLAG_MMX);




#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_deinterleave2_mmx, deinterleave2_s16() {
  return &_oil_function_impl_deinterleave2_mmx, deinterleave2_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_deinterleave2_mmx_2, deinterleave2_s16() {
  return &_oil_function_impl_deinterleave2_mmx_2, deinterleave2_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_deinterleave2_mmx_3, deinterleave2_s16() {
  return &_oil_function_impl_deinterleave2_mmx_3, deinterleave2_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_deinterleave2_mmx_4, deinterleave2_s16() {
  return &_oil_function_impl_deinterleave2_mmx_4, deinterleave2_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_lift_add_mult_shift12_i386_mmx, lift_add_mult_shift12() {
  return &_oil_function_impl_lift_add_mult_shift12_i386_mmx, lift_add_mult_shift12;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_interleave2_mmx, interleave2_s16() {
  return &_oil_function_impl_interleave2_mmx, interleave2_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_lift_add_shift1_mmx, lift_add_shift1() {
  return &_oil_function_impl_lift_add_shift1_mmx, lift_add_shift1;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_lift_sub_shift1_mmx, lift_sub_shift1() {
  return &_oil_function_impl_lift_sub_shift1_mmx, lift_sub_shift1;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_lift_add_shift2_mmx, lift_add_shift2() {
  return &_oil_function_impl_lift_add_shift2_mmx, lift_add_shift2;
}
#endif
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_lift_sub_shift2_mmx, lift_sub_shift2() {
  return &_oil_function_impl_lift_sub_shift2_mmx, lift_sub_shift2;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_synth_53_mmx, synth_53() {
  return &_oil_function_impl_synth_53_mmx, synth_53;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas2_add_s16_mmx, mas2_add_s16() {
  return &_oil_function_impl_mas2_add_s16_mmx, mas2_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas2_add_s16_lim_mmx, mas2_add_s16() {
  return &_oil_function_impl_mas2_add_s16_lim_mmx, mas2_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas4_add_s16_mmx, mas4_add_s16() {
  return &_oil_function_impl_mas4_add_s16_mmx, mas4_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas2_add_s16_mmx, mas2_add_s16() {
  return &_oil_function_impl_mas2_add_s16_mmx, mas2_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas4_add_s16_mmx, mas4_add_s16() {
  return &_oil_function_impl_mas4_add_s16_mmx, mas4_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas8_add_s16_mmx, mas8_add_s16() {
  return &_oil_function_impl_mas8_add_s16_mmx, mas8_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas4_add_s16_pmaddwd, mas4_add_s16() {
  return &_oil_function_impl_mas4_add_s16_pmaddwd, mas4_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas4_add_s16_pmaddwd_2, mas4_add_s16() {
  return &_oil_function_impl_mas4_add_s16_pmaddwd_2, mas4_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas8_add_s16_pmaddwd, mas8_add_s16() {
  return &_oil_function_impl_mas8_add_s16_pmaddwd, mas8_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas8_add_s16_pmaddwd2, mas8_add_s16() {
  return &_oil_function_impl_mas8_add_s16_pmaddwd2, mas8_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas8_add_s16_sse2, mas8_add_s16() {
  return &_oil_function_impl_mas8_add_s16_sse2, mas8_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas2_across_add_s16_mmx, mas2_across_add_s16() {
  return &_oil_function_impl_mas2_across_add_s16_mmx, mas2_across_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_add_const_rshift_s16_mmx, add_const_rshift_s16() {
  return &_oil_function_impl_add_const_rshift_s16_mmx, add_const_rshift_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_multiply_and_add_s16_mmx, multiply_and_add_s16() {
  return &_oil_function_impl_multiply_and_add_s16_mmx, multiply_and_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_multiply_and_add_s16_u8_mmx, multiply_and_add_s16_u8() {
  return &_oil_function_impl_multiply_and_add_s16_u8_mmx, multiply_and_add_s16_u8;
}
#endif
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_multiply_and_add_s16_u8_mmx_2, multiply_and_add_s16_u8() {
  return &_oil_function_impl_multiply_and_add_s16_u8_mmx_2, multiply_and_add_s16_u8;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_multiply_and_acc_12xn_s16_u8_mmx() {
  return &_oil_function_impl_multiply_and_acc_12xn_s16_u8_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas4_across_add_s16_mmx, mas4_across_add_s16() {
  return &_oil_function_impl_mas4_across_add_s16_mmx, mas4_across_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas4_across_add_s16_mmx, mas4_across_add_s16() {
  return &_oil_function_impl_mas4_across_add_s16_mmx, mas4_across_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_mas8_across_add_s16_mmx, mas8_across_add_s16() {
  return &_oil_function_impl_mas8_across_add_s16_mmx, mas8_across_add_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_lshift_s16_mmx, lshift_s16() {
  return &_oil_function_impl_lshift_s16_mmx, lshift_s16;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_lshift_s16_mmx_2, lshift_s16() {
  return &_oil_function_impl_lshift_s16_mmx_2, lshift_s16;
}
#endif



#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_split_53_nomix() {
  return &_oil_function_impl_split_53_nomix;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_split_53_c() {
  return &_oil_function_impl_split_53_c;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_synth_53_c() {
  return &_oil_function_impl_synth_53_c;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_deinterleave2_c_1() {
  return &_oil_function_impl_deinterleave2_c_1;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_deinterleave2_asm() {
  return &_oil_function_impl_deinterleave2_asm;
}
#endif