--- /dev/null	Thu Jan 01 00:00:00 1970 +0000
+++ b/os/ossrv/genericopenlibs/liboil/src/i386/diff8x8_i386.c	Fri Jun 15 03:10:57 2012 +0200
@@ -0,0 +1,219 @@
+/*
+ * LIBOIL - Library of Optimized Inner Loops
+ * Copyright (c) 2003,2004 David A. Schleef <ds@schleef.org>
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ * 1. Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * 2. Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in the
+ * documentation and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
+ * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
+ * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
+ * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
+ * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
+ * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
+ * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
+ * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
+ * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+//Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
+
+#ifdef HAVE_CONFIG_H
+#include "config.h"
+#endif
+
+#include <liboil/liboilfunction.h>
+
+OIL_DECLARE_CLASS (diff8x8_s16_u8);
+OIL_DECLARE_CLASS (diff8x8_const128_s16_u8);
+OIL_DECLARE_CLASS (diff8x8_average_s16_u8);
+
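/* The three classes above are normally reached through liboil's generated
 * public wrappers rather than by calling the _mmx functions below directly.
 * A minimal usage sketch follows; it assumes the usual oil_<class_name>()
 * wrapper naming and the prototypes implied by the implementations in this
 * file, so treat it as illustrative rather than authoritative. */
#include <liboil/liboil.h>

static void
example_diff8x8 (int16_t dest[64],
                 uint8_t *cur, int cur_stride,
                 uint8_t *ref, int ref_stride)
{
  oil_init ();                                 /* select the best impl once */
  oil_diff8x8_s16_u8 (dest, cur, cur_stride,   /* dest = cur - ref, 8x8     */
                      ref, ref_stride);
}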
+
+static void
+diff8x8_s16_u8_mmx (int16_t *dest, uint8_t *src1, int ss1, uint8_t *src2, int ss2)
+{
+#if !defined(__WINSCW__) && !defined(__WINS__)
+ __asm__ __volatile__ (
+ " pxor %%mm7, %%mm7 \n\t"
+
+#define LOOP \
+ " movq (%0), %%mm0 \n\t" /* mm0 = FiltPtr */ \
+ " movq (%1), %%mm1 \n\t" /* mm1 = ReconPtr */ \
+ " movq %%mm0, %%mm2 \n\t" /* dup to prepare for up conversion */ \
+ " movq %%mm1, %%mm3 \n\t" /* dup to prepare for up conversion */ \
+ /* convert from UINT8 to INT16 */ \
+ " punpcklbw %%mm7, %%mm0 \n\t" /* mm0 = INT16(FiltPtr) */ \
+ " punpcklbw %%mm7, %%mm1 \n\t" /* mm1 = INT16(ReconPtr) */ \
+ " punpckhbw %%mm7, %%mm2 \n\t" /* mm2 = INT16(FiltPtr) */ \
+ " punpckhbw %%mm7, %%mm3 \n\t" /* mm3 = INT16(ReconPtr) */ \
+ /* start calculation */ \
+ " psubw %%mm1, %%mm0 \n\t" /* mm0 = FiltPtr - ReconPtr */ \
+ " psubw %%mm3, %%mm2 \n\t" /* mm2 = FiltPtr - ReconPtr */ \
+ " movq %%mm0, (%2) \n\t" /* write answer out */ \
+ " movq %%mm2, 8(%2) \n\t" /* write answer out */ \
+ /* Increment pointers */ \
+ " add $16, %2 \n\t" \
+ " add %3, %0 \n\t" \
+ " add %4, %1 \n\t"
+
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+#undef LOOP
+
+ " emms \n\t"
+
+ : "+r" (src1),
+ "+r" (src2),
+ "+r" (dest)
+ : "m" (ss1),
+ "m" (ss2)
+ : "memory"
+ );
+#endif
+}
+OIL_DEFINE_IMPL_FULL (diff8x8_s16_u8_mmx, diff8x8_s16_u8, OIL_IMPL_FLAG_MMX);
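/* For reference, the arithmetic the MMX block above performs, written as a
 * plain C sketch.  Each LOOP iteration handles one row of 8 pixels: it widens
 * the bytes to int16, subtracts, stores 8 int16 results, then advances dest
 * by 16 bytes and the sources by their strides.  The helper name is made up
 * for illustration. */
static void
diff8x8_s16_u8_ref_sketch (int16_t *dest, uint8_t *src1, int ss1,
                           uint8_t *src2, int ss2)
{
  int i, j;

  for (j = 0; j < 8; j++) {
    for (i = 0; i < 8; i++) {
      /* FiltPtr - ReconPtr, widened to int16 */
      dest[i] = (int16_t)src1[i] - (int16_t)src2[i];
    }
    dest += 8;      /* "add $16, %2": 8 int16 results per row */
    src1 += ss1;    /* "add %3, %0": source row stride        */
    src2 += ss2;    /* "add %4, %1": reference row stride     */
  }
}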
+
+static void
+diff8x8_const128_s16_u8_mmx (int16_t *dest, uint8_t *src1, int ss1)
+{
+#if !defined(__WINSCW__) && !defined(__WINS__)
+ const int16_t tmp[4] = { 0x0080, 0x0080, 0x0080, 0x0080 };
+
+ __asm__ __volatile__ (
+ " pxor %%mm7, %%mm7 \n\t"
+ " movq (%3), %%mm1 \n\t"
+
+#define LOOP \
+ " movq (%0), %%mm0 \n\t" /* mm0 = FiltPtr */ \
+ " movq %%mm0, %%mm2 \n\t" /* dup to prepare for up conversion */ \
+ /* convert from UINT8 to INT16 */ \
+ " punpcklbw %%mm7, %%mm0 \n\t" /* mm0 = INT16(FiltPtr) */ \
+ " punpckhbw %%mm7, %%mm2 \n\t" /* mm2 = INT16(FiltPtr) */ \
+ /* start calculation */ \
+ " psubw %%mm1, %%mm0 \n\t" /* mm0 = FiltPtr - 128 */ \
+ " psubw %%mm1, %%mm2 \n\t" /* mm2 = FiltPtr - 128 */ \
+ " movq %%mm0, (%1) \n\t" /* write answer out */ \
+ " movq %%mm2, 8(%1) \n\t" /* write answer out */ \
+ /* Increment pointers */ \
+ " add $16, %1 \n\t" \
+ " add %2, %0 \n\t"
+
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+#undef LOOP
+
+ " emms \n\t"
+
+ : "+r" (src1),
+ "+r" (dest)
+ : "r" (ss1),
+ "r" (tmp)
+ : "memory"
+ );
+#endif
+}
+OIL_DEFINE_IMPL_FULL (diff8x8_const128_s16_u8_mmx, diff8x8_const128_s16_u8, OIL_IMPL_FLAG_MMX);
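/* A plain C sketch of what the MMX code above computes: the constant 128
 * (the 0x0080 words loaded into mm1) is subtracted from every source byte and
 * the result is stored as int16.  The helper name is made up for
 * illustration. */
static void
diff8x8_const128_s16_u8_ref_sketch (int16_t *dest, uint8_t *src1, int ss1)
{
  int i, j;

  for (j = 0; j < 8; j++) {
    for (i = 0; i < 8; i++) {
      dest[i] = (int16_t)src1[i] - 128;   /* FiltPtr - 128 */
    }
    dest += 8;      /* 8 int16 results per row */
    src1 += ss1;    /* source row stride       */
  }
}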
+
+static void
+diff8x8_average_s16_u8_mmx (int16_t *dest, uint8_t *src1, int ss1, uint8_t *src2, int ss2, uint8_t *src3)
+{
+#if !defined(__WINSCW__) && !defined(__WINS__)
+ __asm__ __volatile__ (
+ " pxor %%mm7, %%mm7 \n\t"
+
+#define LOOP \
+ " movq (%0), %%mm0 \n\t" /* mm0 = FiltPtr */ \
+ " movq (%1), %%mm1 \n\t" /* mm1 = ReconPtr1 */ \
+ " movq (%2), %%mm4 \n\t" /* mm4 = ReconPtr2 */ \
+ " movq %%mm0, %%mm2 \n\t" /* dup to prepare for up conversion */ \
+ " movq %%mm1, %%mm3 \n\t" /* dup to prepare for up conversion */ \
+ " movq %%mm4, %%mm5 \n\t" /* dup to prepare for up conversion */ \
+ /* convert from UINT8 to INT16 */ \
+ " punpcklbw %%mm7, %%mm0 \n\t" /* mm0 = INT16(FiltPtr) */ \
+ " punpcklbw %%mm7, %%mm1 \n\t" /* mm1 = INT16(ReconPtr1) */ \
+ " punpcklbw %%mm7, %%mm4 \n\t" /* mm4 = INT16(ReconPtr2) */ \
+ " punpckhbw %%mm7, %%mm2 \n\t" /* mm2 = INT16(FiltPtr) */ \
+ " punpckhbw %%mm7, %%mm3 \n\t" /* mm3 = INT16(ReconPtr1) */ \
+ " punpckhbw %%mm7, %%mm5 \n\t" /* mm5 = INT16(ReconPtr2) */ \
+ /* average ReconPtr1 and ReconPtr2 */ \
+ " paddw %%mm4, %%mm1 \n\t" /* mm1 = ReconPtr1 + ReconPtr2 */ \
+ " paddw %%mm5, %%mm3 \n\t" /* mm3 = ReconPtr1 + ReconPtr2 */ \
+ " psrlw $1, %%mm1 \n\t" /* mm1 = (ReconPtr1 + ReconPtr2) / 2 */ \
+ " psrlw $1, %%mm3 \n\t" /* mm3 = (ReconPtr1 + ReconPtr2) / 2 */ \
+ " psubw %%mm1, %%mm0 \n\t" /* mm0 = FiltPtr - ((ReconPtr1 + ReconPtr2) / 2) */ \
+ " psubw %%mm3, %%mm2 \n\t" /* mm2 = FiltPtr - ((ReconPtr1 + ReconPtr2) / 2) */ \
+ " movq %%mm0, (%3) \n\t" /* write answer out */ \
+ " movq %%mm2, 8(%3) \n\t" /* write answer out */ \
+ /* Increment pointers */ \
+ " add $16, %3 \n\t" \
+ " add %4, %0 \n\t" \
+ " add %5, %1 \n\t" \
+ " add %5, %2 \n\t"
+
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+ LOOP
+#undef LOOP
+
+ " emms \n\t"
+
+ : "+r" (src1),
+ "+r" (src2),
+ "+r" (src3),
+ "+r" (dest)
+ : "m" (ss1),
+ "m" (ss2)
+ : "memory"
+ );
+#endif
+}
+OIL_DEFINE_IMPL_FULL (diff8x8_average_s16_u8_mmx, diff8x8_average_s16_u8, OIL_IMPL_FLAG_MMX);
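/* A plain C sketch of the averaging variant above: the two reference rows are
 * averaged with a truncating shift (psrlw $1) before being subtracted from
 * the source row.  Note that both reference pointers advance by ss2, exactly
 * as in the "add %5, %1" / "add %5, %2" steps.  The helper name is made up
 * for illustration. */
static void
diff8x8_average_s16_u8_ref_sketch (int16_t *dest, uint8_t *src1, int ss1,
                                   uint8_t *src2, int ss2, uint8_t *src3)
{
  int i, j;

  for (j = 0; j < 8; j++) {
    for (i = 0; i < 8; i++) {
      int avg = ((int)src2[i] + (int)src3[i]) >> 1;   /* truncating average */
      dest[i] = (int16_t)src1[i] - (int16_t)avg;
    }
    dest += 8;
    src1 += ss1;
    src2 += ss2;
    src3 += ss2;    /* src3 shares the ss2 stride */
  }
}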
+
+
+
+#ifdef __SYMBIAN32__
+
+OilFunctionImpl* __oil_function_impl_diff8x8_s16_u8_mmx() {
+ return &_oil_function_impl_diff8x8_s16_u8_mmx;
+}
+#endif
+
+#ifdef __SYMBIAN32__
+
+OilFunctionImpl* __oil_function_impl_diff8x8_const128_s16_u8_mmx() {
+ return &_oil_function_impl_diff8x8_const128_s16_u8_mmx;
+}
+#endif
+
+#ifdef __SYMBIAN32__
+
+OilFunctionImpl* __oil_function_impl_diff8x8_average_s16_u8_mmx() {
+ return &_oil_function_impl_diff8x8_average_s16_u8_mmx;
+}
+#endif
+