/*
 * Copyright (c) 2005
 * Eric Anholt. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
//Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <liboil/liboilfunction.h>      /* OIL_DEFINE_IMPL, OilFunctionImpl */
#include <liboil/liboilclasses.h>       /* composite_* function class declarations */
#include "liboil/liboilcolorspace.h"

#define COMPOSITE_ADD(d,s) oil_clamp_255((d) + (s))
#define COMPOSITE_OVER(d,s,m) ((d) + (s) - oil_muldiv_255((d),(m)))
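/* Implementation notes:
 *
 * The ARGB functions below process two 8-bit channels per 32-bit multiply.
 * A pixel 0xAARRGGBB is split into 0x00RR00BB (low pair) and 0x00AA00GG
 * (high pair), so a single multiply by an 8-bit factor m scales two
 * channels at once; each 16-bit product stays inside its own halfword.
 *
 * The recurring sequence
 *
 *   t *= m;
 *   t += 0x00800080;
 *   t += (t >> 8) & 0x00ff00ff;
 *   t >>= 8;
 *   t &= 0x00ff00ff;
 *
 * is a rounded division by 255 of both halfwords: for 0 <= x <= 255*255,
 * ((x + 128) + ((x + 128) >> 8)) >> 8 equals x/255 rounded to nearest.
 *
 * The recurring sequence
 *
 *   d += s;
 *   d |= 0x01000100 - ((d >> 8) & 0x00ff00ff);
 *   d &= 0x00ff00ff;
 *
 * is a saturating add of the channel pairs: a halfword that overflows past
 * 0xff sets its carry bit, so 0x0100 minus that carry ORs 0xff into the
 * overflowed channel, while a non-overflowing channel only gets bit 8 set,
 * which the final mask clears again.
 *
 * The IN functions directly below scale every channel of the source pixel
 * by the 8-bit mask value using this arithmetic.
 */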
static void
composite_in_argb_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  for (; n > 0; n--) {
    uint32_t src1, src2;
    uint8_t m = *mask++;

    /* Split the pixel into two sets of two channels, and multiply by the
     * mask.
     */
    src1 = *src & 0x00ff00ff;
    src1 *= m;
    src1 += 0x00800080;
    src1 += (src1 >> 8) & 0x00ff00ff;
    src1 >>= 8;
    src1 &= 0x00ff00ff;

    src2 = (*src >> 8) & 0x00ff00ff;
    src2 *= m;
    src2 += 0x00800080;
    src2 += (src2 >> 8) & 0x00ff00ff;
    src2 &= 0xff00ff00;

    *dest++ = src1 | src2;
    src++;
  }
}
OIL_DEFINE_IMPL (composite_in_argb_fast, composite_in_argb);

static void
composite_in_argb_const_src_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  uint32_t src1, src2;

  src1 = *src & 0x00ff00ff;
  src2 = (*src >> 8) & 0x00ff00ff;

  for (; n > 0; n--) {
    uint32_t temp1, temp2;
    uint8_t m = *mask++;

    /* Split the pixel into two sets of two channels, and multiply by the
     * mask.
     */
    temp1 = src1 * m;
    temp1 += 0x00800080;
    temp1 += (temp1 >> 8) & 0x00ff00ff;
    temp1 >>= 8;
    temp1 &= 0x00ff00ff;

    temp2 = src2 * m;
    temp2 += 0x00800080;
    temp2 += (temp2 >> 8) & 0x00ff00ff;
    temp2 &= 0xff00ff00;

    *dest++ = temp1 | temp2;
  }
}
OIL_DEFINE_IMPL (composite_in_argb_const_src_fast, composite_in_argb_const_src);

static void
composite_in_argb_const_mask_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  uint8_t m = *mask;

  for (; n > 0; n--) {
    uint32_t src1, src2;

    /* Split the pixel into two sets of two channels, and multiply by the
     * mask.
     */
    src1 = *src & 0x00ff00ff;
    src1 *= m;
    src1 += 0x00800080;
    src1 += (src1 >> 8) & 0x00ff00ff;
    src1 >>= 8;
    src1 &= 0x00ff00ff;

    src2 = (*src >> 8) & 0x00ff00ff;
    src2 *= m;
    src2 += 0x00800080;
    src2 += (src2 >> 8) & 0x00ff00ff;
    src2 &= 0xff00ff00;

    *dest++ = src1 | src2;
    src++;
  }
}
OIL_DEFINE_IMPL (composite_in_argb_const_mask_fast,
    composite_in_argb_const_mask);
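/* The OVER functions implement Porter-Duff "over" with premultiplied alpha:
 * dest = src + dest * (255 - src_alpha) / 255.  "sa = ~s >> 24" is the
 * complement of the source alpha (255 - alpha); the destination channel
 * pairs are scaled by it with the rounded divide-by-255 sequence, and the
 * source is then added with the saturating add.
 */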
static void
composite_over_argb_fast (uint32_t *dest, const uint32_t *src, int n)
{
  for (; n > 0; n--) {
    uint32_t d = *dest, s = *src, sa;
    uint32_t s1, s2, d1, d2;

    sa = ~s >> 24;

    s1 = s & 0x00ff00ff;
    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    s2 = (s >> 8) & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
    src++;
  }
}
OIL_DEFINE_IMPL (composite_over_argb_fast, composite_over_argb);

static void
composite_over_argb_const_src_fast (uint32_t *dest, const uint32_t *src, int n)
{
  uint32_t s = *src;
  uint32_t sa, s1, s2;
  sa = ~s >> 24;

  s1 = s & 0x00ff00ff;
  s2 = (s >> 8) & 0x00ff00ff;

  for (; n > 0; n--) {
    uint32_t d = *dest;
    uint32_t d1, d2;

    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
    src++;
  }
}
OIL_DEFINE_IMPL (composite_over_argb_const_src_fast,
    composite_over_argb_const_src);

static void
composite_add_argb_fast (uint32_t *dest, const uint32_t *src, int n)
{
  for (; n > 0; n--) {
    uint32_t s = *src++, d = *dest;
    uint32_t s1, s2, d1, d2;

    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;
    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;

    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_add_argb_fast, composite_add_argb);

static void
composite_add_argb_const_src_fast (uint32_t *dest, const uint32_t *src, int n)
{
  uint32_t s1, s2;

  s1 = *src & 0x00ff00ff;
  s2 = (*src >> 8) & 0x00ff00ff;
  for (; n > 0; n--) {
    uint32_t d = *dest;
    uint32_t d1, d2;

    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;

    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_add_argb_const_src_fast,
    composite_add_argb_const_src);
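/* The IN-OVER functions combine the two steps above: the source pixel is
 * first multiplied by the 8-bit mask (IN), and the result is then
 * composited over the destination (OVER).  After the IN step the masked
 * alpha sits in bits 16-23 of the high channel pair, which is where
 * "sa = (~s2 >> 16) & 0xff" takes the complemented alpha from.
 */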
static void
composite_in_over_argb_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  for (; n > 0; n--) {
    uint32_t d = *dest, s = *src++;
    uint32_t s1, s2, d1, d2, sa;
    uint8_t m = *mask++;

    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;

    /* in */
    s1 *= m;
    s1 += 0x00800080;
    s1 += (s1 >> 8) & 0x00ff00ff;
    s1 >>= 8;
    s1 &= 0x00ff00ff;

    s2 *= m;
    s2 += 0x00800080;
    s2 += (s2 >> 8) & 0x00ff00ff;
    s2 >>= 8;
    s2 &= 0x00ff00ff;

    /* over */
    sa = (~s2 >> 16) & 0xff;

    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_in_over_argb_fast, composite_in_over_argb);

static void
composite_in_over_argb_const_src_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  uint32_t s = *src;
  uint32_t s1, s2;

  s1 = s & 0x00ff00ff;
  s2 = (s >> 8) & 0x00ff00ff;

  for (; n > 0; n--) {
    uint32_t d = *dest;
    uint32_t temp1, temp2, d1, d2, sa;
    uint8_t m = *mask++;

    /* in */
    temp1 = s1 * m;
    temp1 += 0x00800080;
    temp1 += (temp1 >> 8) & 0x00ff00ff;
    temp1 >>= 8;
    temp1 &= 0x00ff00ff;

    temp2 = s2 * m;
    temp2 += 0x00800080;
    temp2 += (temp2 >> 8) & 0x00ff00ff;
    temp2 >>= 8;
    temp2 &= 0x00ff00ff;

    /* over */
    sa = (~temp2 >> 16) & 0xff;

    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += temp1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += temp2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_in_over_argb_const_src_fast,
    composite_in_over_argb_const_src);

static void
composite_in_over_argb_const_mask_fast (uint32_t *dest, const uint32_t *src,
    const uint8_t *mask, int n)
{
  uint8_t m = *mask;
  for (; n > 0; n--) {
    uint32_t d = *dest, s = *src++;
    uint32_t s1, s2, d1, d2, sa;

    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;

    /* in */
    s1 *= m;
    s1 += 0x00800080;
    s1 += (s1 >> 8) & 0x00ff00ff;
    s1 >>= 8;
    s1 &= 0x00ff00ff;

    s2 *= m;
    s2 += 0x00800080;
    s2 += (s2 >> 8) & 0x00ff00ff;
    s2 >>= 8;
    s2 &= 0x00ff00ff;

    /* over */
    sa = (~s2 >> 16) & 0xff;

    d1 = d & 0x00ff00ff;
    d1 *= sa;
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = (d >> 8) & 0x00ff00ff;
    d2 *= sa;
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *dest++ = d1 | (d2 << 8);
  }
}
OIL_DEFINE_IMPL (composite_in_over_argb_const_mask_fast,
    composite_in_over_argb_const_mask);
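/* The u8 variants below composite plain 8-bit (single-channel) data.  They
 * load four bytes at a time through a uint32_t cast, which is only safe on
 * targets where 32-bit accesses need not be aligned, hence the
 * HAVE_UNALIGNED_ACCESS guard; the remaining 0-3 bytes are handled by a
 * scalar tail loop using the COMPOSITE_ADD/COMPOSITE_OVER macros from the
 * top of the file.
 */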
#ifdef HAVE_UNALIGNED_ACCESS
static void
composite_add_u8_fast (uint8_t *dest, const uint8_t *src, int n)
{
  for (; n > 3; n -= 4) {
    uint32_t s = *(uint32_t *)src, d = *(uint32_t *)dest;
    uint32_t s1, s2, d1, d2;

    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;
    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;

    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *(uint32_t *)dest = d1 | (d2 << 8);
    src += 4;
    dest += 4;
  }
  for (; n > 0; n--) {
    *dest = COMPOSITE_ADD(*dest, *src);
    src++;
    dest++;
  }
}
OIL_DEFINE_IMPL (composite_add_u8_fast, composite_add_u8);
#endif

#ifdef HAVE_UNALIGNED_ACCESS
static void
composite_add_u8_const_src_fast (uint8_t *dest, const uint8_t *src, int n)
{
  uint32_t s;

  s = *src | (*src << 16);
  for (; n > 3; n -= 4) {
    uint32_t d = *(uint32_t *)dest;
    uint32_t d1, d2;

    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;

    d1 += s;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 += s;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *(uint32_t *)dest = d1 | (d2 << 8);
    dest += 4;
  }
  for (; n > 0; n--) {
    *dest = COMPOSITE_ADD(*dest, *src);
    dest++;
  }
}
OIL_DEFINE_IMPL (composite_add_u8_const_src_fast, composite_add_u8_const_src);
#endif

#ifdef HAVE_UNALIGNED_ACCESS
static void
composite_over_u8_fast (uint8_t *dest, const uint8_t *src, int n)
{
  for (; n > 3; n -= 4) {
    uint32_t d = *(uint32_t *)dest, s = *(uint32_t *)src;
    uint32_t d1, d2, s1, s2;

    d1 = d & 0x00ff00ff;
    d2 = (d >> 8) & 0x00ff00ff;
    s1 = s & 0x00ff00ff;
    s2 = (s >> 8) & 0x00ff00ff;

    d1 = ((d1 & 0xff) * (~s1 & 0xff)) |
        ((d1 & 0x00ff0000) * (~s1 >> 16 & 0xff));
    d1 += 0x00800080;
    d1 += (d1 >> 8) & 0x00ff00ff;
    d1 >>= 8;
    d1 &= 0x00ff00ff;
    d1 += s1;
    d1 |= 0x01000100 - ((d1 >> 8) & 0x00ff00ff);
    d1 &= 0x00ff00ff;

    d2 = ((d2 & 0xff) * (~s2 & 0xff)) |
        ((d2 & 0x00ff0000) * (~s2 >> 16 & 0xff));
    d2 += 0x00800080;
    d2 += (d2 >> 8) & 0x00ff00ff;
    d2 >>= 8;
    d2 &= 0x00ff00ff;
    d2 += s2;
    d2 |= 0x01000100 - ((d2 >> 8) & 0x00ff00ff);
    d2 &= 0x00ff00ff;

    *(uint32_t *)dest = d1 | (d2 << 8);
    dest += 4;
    src += 4;
  }
  for (; n > 0; n--) {
    *dest = COMPOSITE_OVER(*dest, *src, *src);
    dest++;
    src++;
  }
}
OIL_DEFINE_IMPL (composite_over_u8_fast, composite_over_u8);
#endif


#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_fast() {
  return &_oil_function_impl_composite_in_argb_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_const_src_fast() {
  return &_oil_function_impl_composite_in_argb_const_src_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_argb_const_mask_fast() {
  return &_oil_function_impl_composite_in_argb_const_mask_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_fast() {
  return &_oil_function_impl_composite_over_argb_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_argb_const_src_fast() {
  return &_oil_function_impl_composite_over_argb_const_src_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_argb_fast() {
  return &_oil_function_impl_composite_add_argb_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_argb_const_src_fast() {
  return &_oil_function_impl_composite_add_argb_const_src_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_fast() {
  return &_oil_function_impl_composite_in_over_argb_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_const_src_fast() {
  return &_oil_function_impl_composite_in_over_argb_const_src_fast;
}
#endif

#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_in_over_argb_const_mask_fast() {
  return &_oil_function_impl_composite_in_over_argb_const_mask_fast;
}
#endif

#ifdef HAVE_UNALIGNED_ACCESS
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_u8_fast() {
  return &_oil_function_impl_composite_add_u8_fast;
}
#endif
#endif

#ifdef HAVE_UNALIGNED_ACCESS
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_add_u8_const_src_fast() {
  return &_oil_function_impl_composite_add_u8_const_src_fast;
}
#endif
#endif

#ifdef HAVE_UNALIGNED_ACCESS
#ifdef __SYMBIAN32__

OilFunctionImpl* __oil_function_impl_composite_over_u8_fast() {
  return &_oil_function_impl_composite_over_u8_fast;
}
#endif
#endif
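/* Each OIL_DEFINE_IMPL above registers its *_fast routine with the liboil
 * runtime as an alternative implementation of the corresponding
 * composite_* function class; oil_init() profiles the registered
 * implementations and routes the public entry points to the fastest one.
 * The __SYMBIAN32__ accessors return pointers to the OilFunctionImpl
 * records defined by OIL_DEFINE_IMPL, apparently so that Symbian builds
 * can reach them through exported functions rather than data symbols.
 *
 * Minimal caller-side sketch (illustrative only):
 *
 *   #include <liboil/liboil.h>
 *
 *   oil_init ();
 *   oil_composite_over_argb (dest, src, n);  // dest, src: uint32_t arrays of n ARGB pixels
 */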