sl@0: /* sl@0: * LIBOIL - Library of Optimized Inner Loops sl@0: * Copyright (c) 2005 David A. Schleef sl@0: * All rights reserved. sl@0: * sl@0: * Redistribution and use in source and binary forms, with or without sl@0: * modification, are permitted provided that the following conditions sl@0: * are met: sl@0: * 1. Redistributions of source code must retain the above copyright sl@0: * notice, this list of conditions and the following disclaimer. sl@0: * 2. Redistributions in binary form must reproduce the above copyright sl@0: * notice, this list of conditions and the following disclaimer in the sl@0: * documentation and/or other materials provided with the distribution. sl@0: * sl@0: * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR sl@0: * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED sl@0: * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE sl@0: * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, sl@0: * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES sl@0: * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR sl@0: * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) sl@0: * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, sl@0: * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING sl@0: * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE sl@0: * POSSIBILITY OF SUCH DAMAGE. sl@0: */ sl@0: //Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved. sl@0: sl@0: #ifdef HAVE_CONFIG_H sl@0: #include "config.h" sl@0: #endif sl@0: sl@0: #include sl@0: #include sl@0: sl@0: OIL_DECLARE_CLASS (composite_in_argb); sl@0: OIL_DECLARE_CLASS (composite_in_argb_const_src); sl@0: OIL_DECLARE_CLASS (composite_in_argb_const_mask); sl@0: OIL_DECLARE_CLASS (composite_over_argb); sl@0: OIL_DECLARE_CLASS (composite_over_argb_const_src); sl@0: OIL_DECLARE_CLASS (composite_add_argb); sl@0: OIL_DECLARE_CLASS (composite_add_argb_const_src); sl@0: OIL_DECLARE_CLASS (composite_in_over_argb); sl@0: OIL_DECLARE_CLASS (composite_in_over_argb_const_src); sl@0: OIL_DECLARE_CLASS (composite_in_over_argb_const_mask); sl@0: sl@0: #if 0 sl@0: static void sl@0: composite_in_argb_mmx (uint32_t *dest, uint32_t *src, uint8_t *mask, int n) sl@0: { sl@0: int i; sl@0: sl@0: for(i=0;i>=1; sl@0: sl@0: if (n>0){ sl@0: __asm__ __volatile__ ("\n" sl@0: "3:\n" sl@0: #if 0 sl@0: " movl (%1), %%eax\n" sl@0: " orl 4(%1), %%eax\n" sl@0: " testl $0xff000000, %%eax\n" sl@0: " jz 4f\n" sl@0: #endif sl@0: sl@0: " movq (%1), %%xmm1\n" sl@0: " punpcklbw %%xmm7, %%xmm1\n" sl@0: " pshuflw $0xff, %%xmm1, %%xmm0\n" sl@0: " pshufhw $0xff, %%xmm0, %%xmm0\n" sl@0: " pxor %%xmm5, %%xmm0\n" sl@0: sl@0: " movq (%0), %%xmm3\n" sl@0: " punpcklbw %%xmm7, %%xmm3\n" sl@0: " pmullw %%xmm0, %%xmm3\n" sl@0: " paddw %%xmm6, %%xmm3\n" sl@0: " pmulhuw %%xmm4, %%xmm3\n" sl@0: " paddw %%xmm1, %%xmm3\n" sl@0: " packuswb %%xmm3, %%xmm3\n" sl@0: " movq %%xmm3, (%0)\n" sl@0: "4:\n" sl@0: " addl $8, %0\n" sl@0: " addl $8, %1\n" sl@0: " subl $1, %2\n" sl@0: " jnz 3b\n" sl@0: :"+r" (dest), "+r" (src), "+r" (n) sl@0: : sl@0: :"eax"); sl@0: } sl@0: #endif sl@0: } sl@0: OIL_DEFINE_IMPL_FULL (composite_over_argb_sse2_2, composite_over_argb, OIL_IMPL_FLAG_SSE2); sl@0: sl@0: /* written for shaun */ sl@0: static void sl@0: composite_over_argb_sse2_3 (uint32_t *dest, uint32_t *src, int n) sl@0: { sl@0: int begin; sl@0: int middle; sl@0: int end; sl@0: #if 

/* written for shaun */
static void
composite_over_argb_sse2_3 (uint32_t *dest, uint32_t *src, int n)
{
  int begin;
  int middle;
  int end;
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (" pxor %%xmm7, %%xmm7\n" // mm7 = { 0, 0, 0, 0 }
      " movl $0x80808080, %%eax\n"
      " movd %%eax, %%xmm6\n" // mm6 = { 128, 128, 128, 128 }
      " punpcklbw %%xmm7, %%xmm6\n"
      " punpcklwd %%xmm6, %%xmm6\n"
      " movl $0xffffffff, %%eax\n" // mm5 = { 255, 255, 255, 255 }
      " movd %%eax, %%xmm5\n"
      " punpcklbw %%xmm7, %%xmm5\n"
      " punpcklwd %%xmm5, %%xmm5\n"
      " movl $0x02020202, %%eax\n"
      " movd %%eax, %%xmm4\n"
      " punpcklbw %%xmm7, %%xmm4\n"
      " paddw %%xmm5, %%xmm4\n" // mm4 = { 257, 257, 257, 257 }
      " punpcklwd %%xmm4, %%xmm4\n"
      :
      :
      :"eax");

  begin = 0x3 & (4 - (((unsigned long)dest & 0xf) >> 2));
  if (begin>n) {
    begin = n;
    middle = 0;
    end = 0;
  } else {
    middle = (n-begin)>>2;
    end = n - begin - middle*4;
  }

  if (begin>0) {
    __asm__ __volatile__ ("\n"
        "1:\n"
        " movl (%1), %%eax\n"
        " testl $0xff000000, %%eax\n"
        " jz 2f\n"

        " movd (%1), %%xmm1\n"
        " punpcklbw %%xmm7, %%xmm1\n"
        " pshuflw $0xff, %%xmm1, %%xmm0\n"
        " pxor %%xmm5, %%xmm0\n"

        " movd (%0), %%xmm3\n"
        " punpcklbw %%xmm7, %%xmm3\n"
        " pmullw %%xmm0, %%xmm3\n"
        " paddw %%xmm6, %%xmm3\n"
        " pmulhuw %%xmm4, %%xmm3\n"

        " paddw %%xmm1, %%xmm3\n"
        " packuswb %%xmm3, %%xmm3\n"
        " movd %%xmm3, (%0)\n"

        "2:\n"
        " addl $4, %0\n"
        " addl $4, %1\n"
        " subl $1, %2\n"
        " jnz 1b\n"
        :"+r" (dest), "+r" (src), "+r" (begin)
        :
        :"eax");
  }

  if (middle>0){
    __asm__ __volatile__ ("\n"
        "1:\n"
        " movq (%1), %%xmm1\n"
        " movq 8(%1), %%xmm0\n"
        " movl (%1), %%eax\n"
        " orl 4(%1), %%eax\n"
        " orl 8(%1), %%eax\n"
        " orl 12(%1), %%eax\n"
        " test $0xff000000, %%eax\n"
        " jz 2f\n"
        " punpcklbw %%xmm7, %%xmm1\n"
        " punpcklbw %%xmm7, %%xmm0\n"
        " pshuflw $0xff, %%xmm1, %%xmm1\n"
        " pshuflw $0xff, %%xmm0, %%xmm0\n"
        " pshufhw $0xff, %%xmm1, %%xmm1\n"
        " pshufhw $0xff, %%xmm0, %%xmm0\n"

        " pxor %%xmm5, %%xmm1\n"
        " pxor %%xmm5, %%xmm0\n"

        " movq (%0), %%xmm3\n"
        " movq 8(%0), %%xmm2\n"
        " punpcklbw %%xmm7, %%xmm3\n"
        " punpcklbw %%xmm7, %%xmm2\n"

        " pmullw %%xmm1, %%xmm3\n"
        " paddw %%xmm6, %%xmm3\n"
        " pmulhuw %%xmm4, %%xmm3\n"
        " pmullw %%xmm0, %%xmm2\n"
        " paddw %%xmm6, %%xmm2\n"
        " pmulhuw %%xmm4, %%xmm2\n"
        " packuswb %%xmm2, %%xmm3\n"

        " movdqu (%1), %%xmm1\n"
        " paddb %%xmm1, %%xmm3\n"
        " movdqa %%xmm3, (%0)\n"
        "2:\n"
        " addl $16, %0\n"
        " addl $16, %1\n"
        " subl $1, %2\n"
        " jnz 1b\n"
        :"+r" (dest), "+r" (src), "+r" (middle)
        :
        :"eax");
  }
  if (end>0) {
    __asm__ __volatile__ ("\n"
        "1:\n"
        " movl (%1), %%eax\n"
        " testl $0xff000000, %%eax\n"
        " jz 2f\n"

        " movd (%1), %%xmm1\n"
        " punpcklbw %%xmm7, %%xmm1\n"
        " pshuflw $0xff, %%xmm1, %%xmm0\n"
        " pxor %%xmm5, %%xmm0\n"

        " movd (%0), %%xmm3\n"
        " punpcklbw %%xmm7, %%xmm3\n"
        " pmullw %%xmm0, %%xmm3\n"
        " paddw %%xmm6, %%xmm3\n"
        " pmulhuw %%xmm4, %%xmm3\n"

        " paddw %%xmm1, %%xmm3\n"
        " packuswb %%xmm3, %%xmm3\n"
        " movd %%xmm3, (%0)\n"

        "2:\n"
        " addl $4, %0\n"
        " addl $4, %1\n"
        " subl $1, %2\n"
        " jnz 1b\n"
        :"+r" (dest), "+r" (src), "+r" (end)
        :
        :"eax");
  }
#endif
}
OIL_DEFINE_IMPL_FULL (composite_over_argb_sse2_3, composite_over_argb, OIL_IMPL_FLAG_SSE2);
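
/*
 * Illustrative restatement (kept out of the build) of the pointer-alignment
 * bookkeeping in composite_over_argb_sse2_3 above: "begin" single pixels bring
 * dest up to a 16-byte boundary, "middle" counts the aligned 4-pixel blocks
 * stored with movdqa, and "end" is the remaining tail.  The helper name is
 * made up for this sketch.
 */
#if 0
static void
split_for_alignment_ref (uint32_t *dest, int n, int *begin, int *middle, int *end)
{
  /* number of leading pixels until dest is 16-byte aligned (0..3) */
  int b = 0x3 & (4 - (((unsigned long)dest & 0xf) >> 2));

  if (b > n) {
    *begin = n;                        /* fewer pixels than needed to align */
    *middle = 0;
    *end = 0;
  } else {
    *begin = b;
    *middle = (n - b) >> 2;            /* aligned 16-byte (4-pixel) blocks */
    *end = n - b - *middle * 4;        /* leftover pixels after the blocks */
  }
}
#endif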


static void
composite_over_argb_const_src_mmx (uint32_t *dest, uint32_t *src, int n)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (
      MMX_LOAD_CONSTANTS
      " movl (%1), %%eax\n"
      " movd %%eax, %%mm0\n"
      " punpcklbw %%mm7, %%mm0\n"
      " pshufw $0xff, %%mm0, %%mm3\n"
      " pxor %%mm5, %%mm3\n"
      "1:\n"
      " movq %%mm3, %%mm1\n"
      " movd (%0), %%mm2\n"
      " punpcklbw %%mm7, %%mm2\n"

      MMX_MULDIV255(mm2, mm1)

      " paddw %%mm0, %%mm2\n"
      " packuswb %%mm2, %%mm2\n"

      " movd %%mm2, (%0)\n"
      " addl $4, %0\n"
      " decl %2\n"
      " jnz 1b\n"
      " emms\n"
      :"+r" (dest), "+r" (src), "+r" (n)
      :
      :"eax");
#endif
}
OIL_DEFINE_IMPL_FULL (composite_over_argb_const_src_mmx, composite_over_argb_const_src, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT);

static void
composite_add_argb_mmx (uint32_t *dest, uint32_t *src, int n)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (
      "1:\n"
      " movd (%1), %%mm0\n"
      " movd (%0), %%mm2\n"
      " paddusb %%mm0, %%mm2\n"
      " movd %%mm2, (%0)\n"
      " addl $4, %0\n"
      " addl $4, %1\n"
      " decl %2\n"
      " jnz 1b\n"
      " emms\n"
      :"+r" (dest), "+r" (src), "+r" (n)
      :
      :"eax");
#endif
}
OIL_DEFINE_IMPL_FULL (composite_add_argb_mmx, composite_add_argb, OIL_IMPL_FLAG_MMX);

static void
composite_add_argb_const_src_mmx (uint32_t *dest, uint32_t *src, int n)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (
      " movd (%1), %%mm0\n"
      "1:\n"
      " movd (%0), %%mm2\n"
      " paddusb %%mm0, %%mm2\n"
      " movd %%mm2, (%0)\n"
      " addl $4, %0\n"
      " decl %2\n"
      " jnz 1b\n"
      " emms\n"
      :"+r" (dest), "+r" (src), "+r" (n)
      :
      :"eax");
#endif
}
OIL_DEFINE_IMPL_FULL (composite_add_argb_const_src_mmx, composite_add_argb_const_src, OIL_IMPL_FLAG_MMX);
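
/*
 * Reference sketch (illustrative only, not built) of what the paddusb-based
 * loops above compute: composite_add_argb is a per-byte saturating add of src
 * onto dest.  The function name is made up for this sketch.
 */
#if 0
static void
composite_add_argb_ref (uint32_t *dest, uint32_t *src, int n)
{
  int i, c;

  for (i = 0; i < n; i++) {
    uint32_t out = 0;
    for (c = 0; c < 4; c++) {
      uint32_t sum = ((dest[i] >> (8 * c)) & 0xff) + ((src[i] >> (8 * c)) & 0xff);
      out |= (sum > 255 ? 255 : sum) << (8 * c);   /* clamp like paddusb */
    }
    dest[i] = out;
  }
}
#endif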

static void
composite_in_over_argb_mmx (uint32_t *dest, uint32_t *src, uint8_t *mask, int n)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (
      MMX_LOAD_CONSTANTS
      "1:\n"
      " movd (%2), %%mm0\n"
      " punpcklbw %%mm7, %%mm0\n"
      " pshufw $0x00, %%mm0, %%mm1\n"

      " movd (%1), %%mm2\n"
      " punpcklbw %%mm7, %%mm2\n"

      MMX_MULDIV255(mm2, mm1)

      " movd (%0), %%mm0\n"
      " punpcklbw %%mm7, %%mm0\n"

      " pshufw $0xff, %%mm2, %%mm1\n"
      " pxor %%mm5, %%mm1\n"

      MMX_MULDIV255(mm0, mm1)

      " paddw %%mm0, %%mm2\n"
      " packuswb %%mm2, %%mm2\n"

      " movd %%mm2, (%0)\n"
      " addl $4, %0\n"
      " addl $4, %1\n"
      " addl $1, %2\n"
      " decl %3\n"
      " jnz 1b\n"
      " emms\n"
      :"+r" (dest), "+r" (src), "+r" (mask), "+r" (n)
      :
      :"eax");
#endif
}
OIL_DEFINE_IMPL_FULL (composite_in_over_argb_mmx, composite_in_over_argb, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT);

static void
composite_in_over_argb_const_src_mmx (uint32_t *dest, uint32_t *src, uint8_t *mask, int n)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (
      MMX_LOAD_CONSTANTS

      " movd (%1), %%mm3\n"
      " punpcklbw %%mm7, %%mm3\n"
      "1:\n"
      " movd (%2), %%mm0\n"
      " punpcklbw %%mm7, %%mm0\n"
      " pshufw $0x00, %%mm0, %%mm1\n"

      " movq %%mm3, %%mm2\n"

      MMX_MULDIV255(mm2, mm1)

      " movd (%0), %%mm0\n"
      " punpcklbw %%mm7, %%mm0\n"

      " pshufw $0xff, %%mm2, %%mm1\n"
      " pxor %%mm5, %%mm1\n"

      MMX_MULDIV255(mm0, mm1)

      " paddw %%mm0, %%mm2\n"
      " packuswb %%mm2, %%mm2\n"

      " movd %%mm2, (%0)\n"
      " addl $4, %0\n"
      " addl $1, %2\n"
      " decl %3\n"
      " jnz 1b\n"
      " emms\n"
      :"+r" (dest), "+r" (src), "+r" (mask), "+r" (n)
      :
      :"eax");
#endif
}
OIL_DEFINE_IMPL_FULL (composite_in_over_argb_const_src_mmx, composite_in_over_argb_const_src, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT);

static void
composite_in_over_argb_const_mask_mmx (uint32_t *dest, uint32_t *src, uint8_t *mask, int n)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (
      MMX_LOAD_CONSTANTS
      " movd (%2), %%mm0\n"
      " punpcklbw %%mm7, %%mm0\n"
      " pshufw $0x00, %%mm0, %%mm3\n"

      "1:\n"
      " movd (%1), %%mm2\n"
      " punpcklbw %%mm7, %%mm2\n"
      " movq %%mm3, %%mm1\n"

      MMX_MULDIV255(mm2, mm1)

      " movd (%0), %%mm0\n"
      " punpcklbw %%mm7, %%mm0\n"

      " pshufw $0xff, %%mm2, %%mm1\n"
      " pxor %%mm5, %%mm1\n"

      MMX_MULDIV255(mm0, mm1)

      " paddw %%mm0, %%mm2\n"
      " packuswb %%mm2, %%mm2\n"

      " movd %%mm2, (%0)\n"
      " addl $4, %0\n"
      " addl $4, %1\n"
      " decl %3\n"
      " jnz 1b\n"
      " emms\n"
      :"+r" (dest), "+r" (src), "+r" (mask), "+r" (n)
      :
      :"eax");
#endif
}
OIL_DEFINE_IMPL_FULL (composite_in_over_argb_const_mask_mmx, composite_in_over_argb_const_mask, OIL_IMPL_FLAG_MMX | OIL_IMPL_FLAG_MMXEXT);
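
/*
 * Illustrative sketch (kept out of the build) of the "in over" step the MMX
 * loops above implement: the source pixel is first scaled by the 8-bit mask
 * ("in"), then composited over dest using the scaled pixel's alpha.  It reuses
 * the rounded divide-by-255 trick from the earlier sketch; the function name
 * is made up.
 */
#if 0
static void
composite_in_over_argb_ref (uint32_t *dest, uint32_t *src, uint8_t *mask, int n)
{
  int i, c;

  for (i = 0; i < n; i++) {
    uint32_t tmp = 0;

    /* "in": scale every channel of src by the mask value */
    for (c = 0; c < 4; c++) {
      uint32_t sc = (src[i] >> (8 * c)) & 0xff;
      tmp |= (((sc * mask[i] + 128) * 257) >> 16) << (8 * c);
    }

    /* "over": dest = tmp + dest * (255 - alpha(tmp)) / 255 */
    {
      uint32_t a = tmp >> 24;
      uint32_t out = 0;
      for (c = 0; c < 4; c++) {
        uint32_t tc = (tmp >> (8 * c)) & 0xff;
        uint32_t dc = (dest[i] >> (8 * c)) & 0xff;
        out |= (tc + (((dc * (255 - a) + 128) * 257) >> 16)) << (8 * c);
      }
      dest[i] = out;
    }
  }
}
#endif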



#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_mmx() {
    return &_oil_function_impl_composite_in_argb_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_mmx2() {
    return &_oil_function_impl_composite_in_argb_mmx2;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_const_src_mmx() {
    return &_oil_function_impl_composite_in_argb_const_src_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_const_mask_mmx() {
    return &_oil_function_impl_composite_in_argb_const_mask_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_mmx() {
    return &_oil_function_impl_composite_over_argb_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_mmx_2() {
    return &_oil_function_impl_composite_over_argb_mmx_2;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_mmx_3() {
    return &_oil_function_impl_composite_over_argb_mmx_3;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_mmx_4() {
    return &_oil_function_impl_composite_over_argb_mmx_4;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_mmx_5() {
    return &_oil_function_impl_composite_over_argb_mmx_5;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_sse2() {
    return &_oil_function_impl_composite_over_argb_sse2;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_sse2_2() {
    return &_oil_function_impl_composite_over_argb_sse2_2;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_sse2_3() {
    return &_oil_function_impl_composite_over_argb_sse2_3;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_const_src_mmx() {
    return &_oil_function_impl_composite_over_argb_const_src_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_argb_mmx() {
    return &_oil_function_impl_composite_add_argb_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_argb_const_src_mmx() {
    return &_oil_function_impl_composite_add_argb_const_src_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_mmx() {
    return &_oil_function_impl_composite_in_over_argb_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_const_src_mmx() {
    return &_oil_function_impl_composite_in_over_argb_const_src_mmx;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_const_mask_mmx() {
    return &_oil_function_impl_composite_in_over_argb_const_mask_mmx;
}
#endif