1.1 --- /dev/null Thu Jan 01 00:00:00 1970 +0000
1.2 +++ b/os/ossrv/genericopenlibs/liboil/src/i386/mult8x8_i386.c Fri Jun 15 03:10:57 2012 +0200
1.3 @@ -0,0 +1,145 @@
1.4 +/*
1.5 + * LIBOIL - Library of Optimized Inner Loops
1.6 + * Copyright (c) 2003,2004 David A. Schleef <ds@schleef.org>
1.7 + * All rights reserved.
1.8 + *
1.9 + * Redistribution and use in source and binary forms, with or without
1.10 + * modification, are permitted provided that the following conditions
1.11 + * are met:
1.12 + * 1. Redistributions of source code must retain the above copyright
1.13 + * notice, this list of conditions and the following disclaimer.
1.14 + * 2. Redistributions in binary form must reproduce the above copyright
1.15 + * notice, this list of conditions and the following disclaimer in the
1.16 + * documentation and/or other materials provided with the distribution.
1.17 + *
1.18 + * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
1.19 + * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
1.20 + * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
1.21 + * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
1.22 + * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
1.23 + * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
1.24 + * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
1.25 + * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
1.26 + * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
1.27 + * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
1.28 + * POSSIBILITY OF SUCH DAMAGE.
1.29 + */
1.30 +//Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.
1.31 +
1.32 +#ifdef HAVE_CONFIG_H
1.33 +#include "config.h"
1.34 +#endif
1.35 +
1.36 +#include <liboil/liboilfunction.h>
1.37 +#include "liboil/simdpack/simdpack.h"
1.38 +
1.39 +#include <math.h>
1.40 +
1.41 +
1.42 +static void
1.43 +mult8x8_s16_mmx(int16_t *dest, int16_t *src1, int16_t *src2, int dstr, int sstr1,
1.44 + int sstr2)
1.45 +{
1.46 +#if !defined(__WINSCW__) && !defined(__WINS__)
1.47 + asm volatile(
1.48 + " movq 0(%1), %%mm0 \n"
1.49 + " pmullw 0(%2), %%mm0 \n"
1.50 + " movq %%mm0, 0(%0) \n"
1.51 + " movq 8(%1), %%mm1 \n"
1.52 + " pmullw 8(%2), %%mm1 \n"
1.53 + " movq %%mm1, 8(%0) \n"
1.54 +
1.55 + " add %3, %0 \n"
1.56 + " add %4, %1 \n"
1.57 + " add %5, %2 \n"
1.58 +
1.59 + " movq 0(%1), %%mm0 \n"
1.60 + " pmullw 0(%2), %%mm0 \n"
1.61 + " movq %%mm0, 0(%0) \n"
1.62 + " movq 8(%1), %%mm1 \n"
1.63 + " pmullw 8(%2), %%mm1 \n"
1.64 + " movq %%mm1, 8(%0) \n"
1.65 +
1.66 + " add %3, %0 \n"
1.67 + " add %4, %1 \n"
1.68 + " add %5, %2 \n"
1.69 +
1.70 + " movq 0(%1), %%mm0 \n"
1.71 + " pmullw 0(%2), %%mm0 \n"
1.72 + " movq %%mm0, 0(%0) \n"
1.73 + " movq 8(%1), %%mm1 \n"
1.74 + " pmullw 8(%2), %%mm1 \n"
1.75 + " movq %%mm1, 8(%0) \n"
1.76 +
1.77 + " add %3, %0 \n"
1.78 + " add %4, %1 \n"
1.79 + " add %5, %2 \n"
1.80 +
1.81 + " movq 0(%1), %%mm0 \n"
1.82 + " pmullw 0(%2), %%mm0 \n"
1.83 + " movq %%mm0, 0(%0) \n"
1.84 + " movq 8(%1), %%mm1 \n"
1.85 + " pmullw 8(%2), %%mm1 \n"
1.86 + " movq %%mm1, 8(%0) \n"
1.87 +
1.88 + " add %3, %0 \n"
1.89 + " add %4, %1 \n"
1.90 + " add %5, %2 \n"
1.91 +
1.92 + " movq 0(%1), %%mm0 \n"
1.93 + " pmullw 0(%2), %%mm0 \n"
1.94 + " movq %%mm0, 0(%0) \n"
1.95 + " movq 8(%1), %%mm1 \n"
1.96 + " pmullw 8(%2), %%mm1 \n"
1.97 + " movq %%mm1, 8(%0) \n"
1.98 +
1.99 + " add %3, %0 \n"
1.100 + " add %4, %1 \n"
1.101 + " add %5, %2 \n"
1.102 +
1.103 + " movq 0(%1), %%mm0 \n"
1.104 + " pmullw 0(%2), %%mm0 \n"
1.105 + " movq %%mm0, 0(%0) \n"
1.106 + " movq 8(%1), %%mm1 \n"
1.107 + " pmullw 8(%2), %%mm1 \n"
1.108 + " movq %%mm1, 8(%0) \n"
1.109 +
1.110 + " add %3, %0 \n"
1.111 + " add %4, %1 \n"
1.112 + " add %5, %2 \n"
1.113 +
1.114 + " movq 0(%1), %%mm0 \n"
1.115 + " pmullw 0(%2), %%mm0 \n"
1.116 + " movq %%mm0, 0(%0) \n"
1.117 + " movq 8(%1), %%mm1 \n"
1.118 + " pmullw 8(%2), %%mm1 \n"
1.119 + " movq %%mm1, 8(%0) \n"
1.120 +
1.121 + " add %3, %0 \n"
1.122 + " add %4, %1 \n"
1.123 + " add %5, %2 \n"
1.124 +
1.125 + " movq 0(%1), %%mm0 \n"
1.126 + " pmullw 0(%2), %%mm0 \n"
1.127 + " movq %%mm0, 0(%0) \n"
1.128 + " movq 8(%1), %%mm1 \n"
1.129 + " pmullw 8(%2), %%mm1 \n"
1.130 + " movq %%mm1, 8(%0) \n"
1.131 + " emms\n "
1.132 +
1.133 + : "+r" (dest), "+r" (src1), "+r" (src2)
1.134 + : "m" (dstr), "m" (sstr1), "m" (sstr2));
1.135 +#endif
1.136 +}
1.137 +
1.138 +OIL_DEFINE_IMPL_FULL (mult8x8_s16_mmx, mult8x8_s16, OIL_IMPL_FLAG_MMX);
1.139 +
1.140 +
1.141 +
#ifdef __SYMBIAN32__

/* Symbian export accessor: returns the OilFunctionImpl registered above.
 *
 * Fix: the generated code read
 *   OilFunctionImpl* __oil_function_impl_mult8x8_s16_mmx, mult8x8_s16() { ... }
 * which is invalid C (a function definition cannot share a declaration
 * with another declarator), and its comma-expression return discarded
 * the intended value.  The generator had pasted both macro arguments
 * into the name; the accessor is named after the impl only, matching
 * the well-formed sibling files.
 */
OilFunctionImpl* __oil_function_impl_mult8x8_s16_mmx() {
    return &_oil_function_impl_mult8x8_s16_mmx;
}
#endif
1.148 +