/*
 * LIBOIL - Library of Optimized Inner Loops
 * Copyright (c) 2003,2004 David A. Schleef
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
// Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif

#include <liboil/liboilfunction.h>
#include <liboil/liboilclasses.h>

OIL_DECLARE_CLASS (trans8x8_u16);

/* This could use additional work. */
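/* For reference, trans8x8_u16 transposes an 8x8 block of 16-bit values;
 * dstr and sstr are the destination and source strides in bytes.  A minimal
 * plain-C sketch of the operation (illustrative only; the function name is
 * not part of liboil, and the class with its reference implementation is
 * defined elsewhere in the library):
 *
 *   static void
 *   trans8x8_u16_c_sketch (uint16_t *dest, int dstr, uint16_t *src, int sstr)
 *   {
 *     int i, j;
 *     for (i = 0; i < 8; i++) {
 *       for (j = 0; j < 8; j++) {
 *         *(uint16_t *)((uint8_t *)dest + i*dstr + 2*j) =
 *             *(uint16_t *)((uint8_t *)src + j*sstr + 2*i);
 *       }
 *     }
 *   }
 */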
static void
trans8x8_u16_mmx (uint16_t *dest, int dstr, uint16_t *src, int sstr)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  asm volatile (
      "  leal (%3,%3,2),%%eax \n"          // UBER 0:
      "  movq (%1), %%mm0 \n"              // UBER 1:
      "  movq (%1,%3,2), %%mm2 \n"         // UBER 2:
      "  movq %%mm0, %%mm4 \n"             // UBER 3: 1
      "  movq %%mm2, %%mm5 \n"             // UBER 4: 2
      "  punpcklwd (%1,%3), %%mm0 \n"      // UBER 5: 1
      "  punpcklwd (%1,%%eax), %%mm2 \n"   // UBER 6: 0 2
      "  punpckhwd (%1,%3), %%mm4 \n"      // UBER 7: 3
      "  punpckhwd (%1,%%eax), %%mm5 \n"   // UBER 8: 4
      "  movq %%mm0, %%mm1 \n"             // UBER 9: 5
      "  movq %%mm4, %%mm3 \n"             // UBER 10: 7
      "  punpckldq %%mm2, %%mm0 \n"        // UBER 11: 5 6
      "  punpckldq %%mm5, %%mm4 \n"        // UBER 12: 7 8
      "  punpckhdq %%mm2, %%mm1 \n"        // UBER 13: 6 9
      "  punpckhdq %%mm5, %%mm3 \n"        // UBER 14: 8 10
      "  leal (%2,%2,2),%%eax \n"          // UBER 15:
      "  movq %%mm0, 0(%0) \n"             // UBER 16: 11
      "  movq %%mm1, (%0,%2) \n"           // UBER 17: 13
      "  movq %%mm4, (%0,%2,2) \n"         // UBER 18: 12
      "  movq %%mm3, (%0,%%eax) \n"        // UBER 19: 14 15

      "  leal (%3,%3,2),%%eax \n"
      "  movq 8(%1), %%mm0 \n"
      "  movq 8(%1,%3,2), %%mm2 \n"
      "  movq %%mm0, %%mm4 \n"
      "  movq %%mm2, %%mm5 \n"
      "  punpcklwd 8(%1,%3), %%mm0 \n"
      "  punpcklwd 8(%1,%%eax), %%mm2 \n"
      "  punpckhwd 8(%1,%3), %%mm4 \n"
      "  punpckhwd 8(%1,%%eax), %%mm5 \n"
      "  movq %%mm0, %%mm1 \n"
      "  movq %%mm4, %%mm3 \n"
      "  punpckldq %%mm2, %%mm0 \n"
      "  punpckldq %%mm5, %%mm4 \n"
      "  punpckhdq %%mm2, %%mm1 \n"
      "  punpckhdq %%mm5, %%mm3 \n"
      "  leal (%2,%2,2),%%eax \n"
      "  leal (%0,%2,4),%0 \n"
      "  movq %%mm0, 0(%0) \n"
      "  movq %%mm1, (%0,%2) \n"
      "  movq %%mm4, (%0,%2,2) \n"
      "  movq %%mm3, (%0,%%eax) \n"

      "  leal (%1,%3,4),%1 \n"
      "  leal (%3,%3,2),%%eax \n"
      "  movq 0(%1), %%mm0 \n"
      "  movq 0(%1,%3,2), %%mm2 \n"
      "  movq %%mm0, %%mm4 \n"
      "  movq %%mm2, %%mm5 \n"
      "  punpcklwd 0(%1,%3), %%mm0 \n"
      "  punpcklwd 0(%1,%%eax), %%mm2 \n"
      "  punpckhwd 0(%1,%3), %%mm4 \n"
      "  punpckhwd 0(%1,%%eax), %%mm5 \n"
      "  movq %%mm0, %%mm1 \n"
      "  movq %%mm4, %%mm3 \n"
      "  punpckldq %%mm2, %%mm0 \n"
      "  punpckldq %%mm5, %%mm4 \n"
      "  punpckhdq %%mm2, %%mm1 \n"
      "  punpckhdq %%mm5, %%mm3 \n"
      "  leal (%2,%2,2),%%eax \n"
      "  neg %2 \n"
      "  leal (%0,%2,4),%0 \n"
      "  neg %2 \n"
      "  movq %%mm0, 8(%0) \n"
      "  movq %%mm1, 8(%0,%2) \n"
      "  movq %%mm4, 8(%0,%2,2) \n"
      "  movq %%mm3, 8(%0,%%eax) \n"

      "  leal (%3,%3,2),%%eax \n"
      "  movq 8(%1), %%mm0 \n"
      "  movq 8(%1,%3,2), %%mm2 \n"
      "  movq %%mm0, %%mm4 \n"
      "  movq %%mm2, %%mm5 \n"
      "  punpcklwd 8(%1,%3), %%mm0 \n"
      "  punpcklwd 8(%1,%%eax), %%mm2 \n"
      "  punpckhwd 8(%1,%3), %%mm4 \n"
      "  punpckhwd 8(%1,%%eax), %%mm5 \n"
      "  movq %%mm0, %%mm1 \n"
      "  movq %%mm4, %%mm3 \n"
      "  punpckldq %%mm2, %%mm0 \n"
      "  punpckldq %%mm5, %%mm4 \n"
      "  punpckhdq %%mm2, %%mm1 \n"
      "  punpckhdq %%mm5, %%mm3 \n"
      "  leal (%2,%2,2),%%eax \n"
      "  leal (%0,%2,4),%0 \n"
      "  movq %%mm0, 8(%0) \n"
      "  movq %%mm1, 8(%0,%2) \n"
      "  movq %%mm4, 8(%0,%2,2) \n"
      "  movq %%mm3, 8(%0,%%eax) \n"
      "  emms \n"
      : "+r" (dest), "+r" (src), "+r" (dstr), "+r" (sstr)
      :
      : "eax");
#endif
}
OIL_DEFINE_IMPL_FULL (trans8x8_u16_mmx, trans8x8_u16, OIL_IMPL_FLAG_MMX);
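/* Note on the MMX implementation above: it transposes the 8x8 block as four
 * 4x4 sub-blocks.  Each 4x4 transpose is the classic two-stage unpack
 * butterfly: interleave 16-bit words (punpcklwd/punpckhwd), then interleave
 * 32-bit pairs (punpckldq/punpckhdq).  A sketch of one 4x4 step using MMX
 * intrinsics (illustrative only; the asm above folds the row loads directly
 * into the unpack instructions and does not use such a helper):
 *
 *   #include <mmintrin.h>
 *
 *   static void
 *   trans4x4_u16_sketch (uint16_t *d, int dstr, const uint16_t *s, int sstr)
 *   {
 *     __m64 r0 = *(const __m64 *)((const uint8_t *)s + 0*sstr);
 *     __m64 r1 = *(const __m64 *)((const uint8_t *)s + 1*sstr);
 *     __m64 r2 = *(const __m64 *)((const uint8_t *)s + 2*sstr);
 *     __m64 r3 = *(const __m64 *)((const uint8_t *)s + 3*sstr);
 *     __m64 t0 = _mm_unpacklo_pi16 (r0, r1);   // a0 b0 a1 b1
 *     __m64 t1 = _mm_unpacklo_pi16 (r2, r3);   // c0 d0 c1 d1
 *     __m64 t2 = _mm_unpackhi_pi16 (r0, r1);   // a2 b2 a3 b3
 *     __m64 t3 = _mm_unpackhi_pi16 (r2, r3);   // c2 d2 c3 d3
 *     *(__m64 *)((uint8_t *)d + 0*dstr) = _mm_unpacklo_pi32 (t0, t1);
 *     *(__m64 *)((uint8_t *)d + 1*dstr) = _mm_unpackhi_pi32 (t0, t1);
 *     *(__m64 *)((uint8_t *)d + 2*dstr) = _mm_unpacklo_pi32 (t2, t3);
 *     *(__m64 *)((uint8_t *)d + 3*dstr) = _mm_unpackhi_pi32 (t2, t3);
 *     _mm_empty ();
 *   }
 */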
static void
trans8x8_u16_asm1 (uint16_t *dest, int dstr, uint16_t *src, int sstr)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  int saved_ebx = 0;
  asm (
      "  movl %%ebx, %4 \n"
      "  movl %0, %%ecx \n"
      "  movl %2, %%ebx \n"
      "  movl %1, %%edx \n"
      "  lea (%%ecx,%%edx,8), %%esi \n"
      "  sub %%edx, %%esi \n"
      "  movl $7, %%edi \n"
      "1: \n"

      "  mov (%%ebx), %%ax \n"
      "  mov %%ax,(%%ecx) \n"
      "  mov 2(%%ebx), %%ax \n"
      "  mov %%ax,(%%ecx,%%edx,1) \n"
      "  mov 4(%%ebx), %%ax \n"
      "  mov %%ax,(%%ecx,%%edx,2) \n"
      "  mov 8(%%ebx), %%ax \n"
      "  mov %%ax,(%%ecx,%%edx,4) \n"

      "  neg %%edx \n"

      "  mov 6(%%ebx), %%ax \n"
      "  mov %%ax,(%%esi,%%edx,4) \n"
      "  mov 10(%%ebx), %%ax \n"
      "  mov %%ax,(%%esi,%%edx,2) \n"
      "  mov 12(%%ebx), %%ax \n"
      "  mov %%ax,(%%esi,%%edx,1) \n"
      "  mov 14(%%ebx), %%ax \n"
      "  mov %%ax,(%%esi) \n"

      "  neg %%edx \n"
      "  add %3, %%ebx \n"
      "  add $2, %%ecx \n"
      "  add $2, %%esi \n"

      "  dec %%edi \n"
      "  jge 1b \n"
      "  movl %4, %%ebx \n"
      :
      : "m" (dest), "m" (dstr), "m" (src), "m" (sstr), "m" (saved_ebx)
      : "eax", "ecx", "edx", "esi", "edi");
#endif
}
OIL_DEFINE_IMPL (trans8x8_u16_asm1, trans8x8_u16);

static void
trans8x8_u16_asm2 (uint16_t *dest, int dstr, uint16_t *src, int sstr)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  int i;
  int saved_ebx = 0;
  asm (
      "  movl %%ebx, %5 \n"
      "  movl %0, %%ecx \n"
      "  movl %2, %%ebx \n"
      "  movl %1, %%edx \n"
      "  lea (%%ecx,%%edx,8), %%esi \n"
      "  sub %%edx, %%esi \n"
      "  movl $7, %4 \n"
      "  movl %%edx, %%edi \n"
      "  negl %%edi \n"
      "1: \n"

      "  movl (%%ebx), %%eax \n"
      "  mov %%ax,(%%ecx) \n"
      "  shr $16, %%eax \n"
      "  mov %%ax,(%%ecx,%%edx,1) \n"

      "  movl 4(%%ebx), %%eax \n"
      "  mov %%ax,(%%ecx,%%edx,2) \n"
      "  shr $16, %%eax \n"
      "  mov %%ax,(%%esi,%%edi,4) \n"

      "  movl 8(%%ebx), %%eax \n"
      "  mov %%ax,(%%ecx,%%edx,4) \n"
      "  shr $16, %%eax \n"
      "  mov %%ax,(%%esi,%%edi,2) \n"

      "  movl 12(%%ebx), %%eax \n"
      "  mov %%ax,(%%esi,%%edi,1) \n"
      "  shr $16, %%eax \n"
      "  mov %%ax,(%%esi) \n"

      "  add %3, %%ebx \n"
      "  add $2, %%ecx \n"
      "  add $2, %%esi \n"

      "  decl %4 \n"
      "  jge 1b \n"
      "  movl %5, %%ebx \n"
      :
      : "m" (dest), "m" (dstr), "m" (src), "m" (sstr), "m" (i), "m" (saved_ebx)
      : "eax", "ecx", "edx", "esi", "edi");
#endif
}
OIL_DEFINE_IMPL (trans8x8_u16_asm2, trans8x8_u16);


#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_trans8x8_u16_mmx() {
  return &_oil_function_impl_trans8x8_u16_mmx;
}
#endif


#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_trans8x8_u16_asm1() {
  return &_oil_function_impl_trans8x8_u16_asm1;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_trans8x8_u16_asm2() {
  return &_oil_function_impl_trans8x8_u16_asm2;
}
#endif
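/* Note on the scalar variants above: trans8x8_u16_asm1 walks one source row
 * per iteration and scatters its eight elements into one destination column,
 * one 16-bit load/store pair at a time.  trans8x8_u16_asm2 halves the loads
 * by reading 32 bits and splitting the word with a shift.  Roughly, per pair
 * of adjacent source elements (plain-C sketch with illustrative names, not
 * liboil API):
 *
 *   uint32_t w = *(const uint32_t *)src_pos;   // two adjacent u16 values
 *   *dest_row_n   = (uint16_t) w;              // low half  -> row n
 *   *dest_row_n1  = (uint16_t)(w >> 16);       // high half -> row n+1
 *
 * (little-endian x86 ordering, matching the shr-based split in the asm).
 */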