/*
 * Copyright (c) 2005
 *	Eric Anholt. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
//Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved.

#ifdef HAVE_CONFIG_H
#include "config.h"
#endif
#include <liboil/liboilclasses.h>
#include <liboil/liboilfunction.h>
#include <xmmintrin.h>
#include <emmintrin.h>

/* Force 16-byte stack alignment on entry, so SSE spills are safe even when
 * called from code that does not keep the stack 16-byte aligned. */
#define SSE_FUNCTION __attribute__((force_align_arg_pointer))

SSE_FUNCTION static void
add_f32_sse (float *dest, float *src1, float *src2, int n)
{
  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1++ + *src2++;
  }
  for (; n >= 4; n -= 4) {
    __m128 xmm0, xmm1;
    xmm0 = _mm_loadu_ps(src1);
    xmm1 = _mm_loadu_ps(src2);
    xmm0 = _mm_add_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
    src2 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = *src1++ + *src2++;
  }
}
OIL_DEFINE_IMPL_FULL (add_f32_sse, add_f32, OIL_IMPL_FLAG_SSE);
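/*
 * Illustrative self-check (not part of the original file, hence "#if 0"):
 * compares the SSE implementation above against the scalar reference
 * expression.  The helper name check_add_f32_sse is made up here, and the
 * check only works inside this translation unit because add_f32_sse is
 * static.
 */
#if 0
static int
check_add_f32_sse (void)
{
  float s1[19], s2[19], d[19];
  int i;

  for (i = 0; i < 19; i++) {
    s1[i] = (float) i;
    s2[i] = 100.0f - (float) i;
  }
  /* An odd length exercises the alignment prologue, the 4-wide SSE body
   * and the scalar tail. */
  add_f32_sse (d, s1, s2, 19);
  for (i = 0; i < 19; i++) {
    if (d[i] != s1[i] + s2[i])
      return 0;
  }
  return 1;
}
#endif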

SSE_FUNCTION static void
add_f64_sse2 (double *dest, double *src1, double *src2, int n)
{
  __m128d xmm0, xmm1;
  while (((long)dest & 15) && (0 < n)) {
    *dest++ = *src1++ + *src2++;
    n--;
  }
  while (1 < n) {
    xmm0 = _mm_loadu_pd(src1);
    xmm1 = _mm_loadu_pd(src2);
    xmm0 = _mm_add_pd(xmm0, xmm1);
    _mm_store_pd(dest, xmm0);
    dest += 2;
    src1 += 2;
    src2 += 2;
    n -= 2;
  }
  while (0 < n) {
    *dest++ = *src1++ + *src2++;
    n--;
  }
}
OIL_DEFINE_IMPL_FULL (add_f64_sse2, add_f64, OIL_IMPL_FLAG_SSE2);

SSE_FUNCTION static void
add_f64_sse2_unroll (double *dest, double *src1, double *src2, int n)
{
  __m128d xmm0, xmm1;
  while (((long)dest & 15) && (0 < n)) {
    *dest++ = *src1++ + *src2++;
    n--;
  }
  while (3 < n) {
    xmm0 = _mm_loadu_pd(src1);
    xmm1 = _mm_loadu_pd(src2);
    xmm0 = _mm_add_pd(xmm0, xmm1);
    _mm_store_pd(dest, xmm0);

    xmm0 = _mm_loadu_pd(src1+2);
    xmm1 = _mm_loadu_pd(src2+2);
    xmm0 = _mm_add_pd(xmm0, xmm1);
    _mm_store_pd(dest+2, xmm0);
    dest += 4;
    src1 += 4;
    src2 += 4;
    n -= 4;
  }
  while (1 < n) {
    xmm0 = _mm_loadu_pd(src1);
    xmm1 = _mm_loadu_pd(src2);
    xmm0 = _mm_add_pd(xmm0, xmm1);
    _mm_store_pd(dest, xmm0);
    dest += 2;
    src1 += 2;
    src2 += 2;
    n -= 2;
  }
  while (0 < n) {
    *dest++ = *src1++ + *src2++;
    n--;
  }
}
OIL_DEFINE_IMPL_FULL (add_f64_sse2_unroll, add_f64, OIL_IMPL_FLAG_SSE2);
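/*
 * Both add_f64 implementations above register for the same function class.
 * liboil normally profiles the registered candidates when the library is
 * initialised and dispatches each class to whichever candidate benchmarked
 * fastest on the running CPU, so the unrolled variant is only used where it
 * actually wins.
 */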

SSE_FUNCTION static void
subtract_f32_sse (float *dest, float *src1, float *src2, int n)
{
  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1++ - *src2++;
  }
  for (; n >= 4; n -= 4) {
    __m128 xmm0, xmm1;
    xmm0 = _mm_loadu_ps(src1);
    xmm1 = _mm_loadu_ps(src2);
    xmm0 = _mm_sub_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
    src2 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = *src1++ - *src2++;
  }
}
OIL_DEFINE_IMPL_FULL (subtract_f32_sse, subtract_f32, OIL_IMPL_FLAG_SSE);

SSE_FUNCTION static void
multiply_f32_sse (float *dest, float *src1, float *src2, int n)
{
  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1++ * *src2++;
  }
  for (; n >= 4; n -= 4) {
    __m128 xmm0, xmm1;
    xmm0 = _mm_loadu_ps(src1);
    xmm1 = _mm_loadu_ps(src2);
    xmm0 = _mm_mul_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
    src2 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = *src1++ * *src2++;
  }
}
OIL_DEFINE_IMPL_FULL (multiply_f32_sse, multiply_f32, OIL_IMPL_FLAG_SSE);

SSE_FUNCTION static void
divide_f32_sse (float *dest, float *src1, float *src2, int n)
{
  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1++ / *src2++;
  }
  for (; n >= 4; n -= 4) {
    __m128 xmm0, xmm1;
    xmm0 = _mm_loadu_ps(src1);
    xmm1 = _mm_loadu_ps(src2);
    xmm0 = _mm_div_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
    src2 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = *src1++ / *src2++;
  }
}
OIL_DEFINE_IMPL_FULL (divide_f32_sse, divide_f32, OIL_IMPL_FLAG_SSE);

SSE_FUNCTION static void
minimum_f32_sse (float *dest, float *src1, float *src2, int n)
{
  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1 < *src2 ? *src1 : *src2;
    src1++;
    src2++;
  }
  for (; n >= 4; n -= 4) {
    __m128 xmm0, xmm1;
    xmm0 = _mm_loadu_ps(src1);
    xmm1 = _mm_loadu_ps(src2);
    xmm0 = _mm_min_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
    src2 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = *src1 < *src2 ? *src1 : *src2;
    src1++;
    src2++;
  }
}
OIL_DEFINE_IMPL_FULL (minimum_f32_sse, minimum_f32, OIL_IMPL_FLAG_SSE);

SSE_FUNCTION static void
maximum_f32_sse (float *dest, float *src1, float *src2, int n)
{
  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1 > *src2 ? *src1 : *src2;
    src1++;
    src2++;
  }
  for (; n >= 4; n -= 4) {
    __m128 xmm0, xmm1;
    xmm0 = _mm_loadu_ps(src1);
    xmm1 = _mm_loadu_ps(src2);
    xmm0 = _mm_max_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
    src2 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = *src1 > *src2 ? *src1 : *src2;
    src1++;
    src2++;
  }
}
OIL_DEFINE_IMPL_FULL (maximum_f32_sse, maximum_f32, OIL_IMPL_FLAG_SSE);

SSE_FUNCTION static void
inverse_f32_sse (float *dest, float *src1, int n)
{
  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = 1.0 / *src1++;
  }
  for (; n >= 4; n -= 4) {
    __m128 xmm0, xmm1;
    /* While _mm_rcp_ps sounds promising, the results it gives are rather
     * different from the 1.0 / src1 reference implementation, so do that.
     */
    xmm0 = _mm_set_ps1(1.0);
    xmm1 = _mm_loadu_ps(src1);
    xmm0 = _mm_div_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = 1.0 / *src1++;
  }
}
OIL_DEFINE_IMPL_FULL (inverse_f32_sse, inverse_f32, OIL_IMPL_FLAG_SSE);
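/*
 * Sketch only (not part of the original file, hence "#if 0"): this is the
 * alternative the comment in inverse_f32_sse rejects.  _mm_rcp_ps gives a
 * ~12-bit reciprocal approximation; one Newton-Raphson refinement step,
 * r1 = r0 * (2 - x * r0), roughly doubles the accurate bits but still does
 * not match _mm_div_ps exactly.  The helper name approx_inverse4 is made up.
 */
#if 0
SSE_FUNCTION static __m128
approx_inverse4 (__m128 x)
{
  __m128 r = _mm_rcp_ps (x);   /* coarse reciprocal approximation */
  /* One Newton-Raphson step: r = r * (2 - x * r) */
  r = _mm_mul_ps (r, _mm_sub_ps (_mm_set_ps1 (2.0f), _mm_mul_ps (x, r)));
  return r;
}
#endif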

SSE_FUNCTION static void
negative_f32_sse (float *dest, float *src1, int n)
{
  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = -(*src1++);
  }
  for (; n >= 4; n -= 4) {
    __m128 xmm0, xmm1;
    xmm0 = _mm_setzero_ps();
    xmm1 = _mm_loadu_ps(src1);
    xmm0 = _mm_sub_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = -(*src1++);
  }
}
OIL_DEFINE_IMPL_FULL (negative_f32_sse, negative_f32, OIL_IMPL_FLAG_SSE);

SSE_FUNCTION static void
scalaradd_f32_ns_sse (float *dest, float *src1, float *val, int n)
{
  __m128 xmm1;

  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1++ + *val;
  }
  xmm1 = _mm_load_ps1(val);
  for (; n >= 4; n -= 4) {
    __m128 xmm0;
    xmm0 = _mm_loadu_ps(src1);
    xmm0 = _mm_add_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = *src1++ + *val;
  }
}
OIL_DEFINE_IMPL_FULL (scalaradd_f32_ns_sse, scalaradd_f32_ns, OIL_IMPL_FLAG_SSE);

SSE_FUNCTION static void
scalarmultiply_f32_ns_sse (float *dest, float *src1, float *val, int n)
{
  __m128 xmm1;

  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1++ * *val;
  }
  xmm1 = _mm_load_ps1(val);
  for (; n >= 4; n -= 4) {
    __m128 xmm0;
    xmm0 = _mm_loadu_ps(src1);
    xmm0 = _mm_mul_ps(xmm0, xmm1);
    _mm_store_ps(dest, xmm0);
    dest += 4;
    src1 += 4;
  }
  for (; n > 0; n--) {
    *dest++ = *src1++ * *val;
  }
}
OIL_DEFINE_IMPL_FULL (scalarmultiply_f32_ns_sse, scalarmultiply_f32_ns, OIL_IMPL_FLAG_SSE);

SSE_FUNCTION static void
scalarmultiply_f64_ns_sse2 (double *dest, double *src1, double *val, int n)
{
  __m128d xmm1;

  /* Initial operations to align the destination pointer */
  for (; ((long)dest & 15) && (n > 0); n--) {
    *dest++ = *src1++ * *val;
  }
  xmm1 = _mm_load_pd1(val);
  for (; n >= 2; n -= 2) {
    __m128d xmm0;
    xmm0 = _mm_loadu_pd(src1);
    xmm0 = _mm_mul_pd(xmm0, xmm1);
    _mm_store_pd(dest, xmm0);
    dest += 2;
    src1 += 2;
  }
  for (; n > 0; n--) {
    *dest++ = *src1++ * *val;
  }
}
OIL_DEFINE_IMPL_FULL (scalarmultiply_f64_ns_sse2, scalarmultiply_f64_ns, OIL_IMPL_FLAG_SSE2);


/* Symbian export stubs: each returns the address of the implementation record
 * defined by the corresponding OIL_DEFINE_IMPL_FULL above (this assumes
 * liboil's standard _oil_function_impl_<impl> struct naming). */

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_add_f32_sse (void) {
  return &_oil_function_impl_add_f32_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_add_f64_sse2 (void) {
  return &_oil_function_impl_add_f64_sse2;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_add_f64_sse2_unroll (void) {
  return &_oil_function_impl_add_f64_sse2_unroll;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_subtract_f32_sse (void) {
  return &_oil_function_impl_subtract_f32_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_multiply_f32_sse (void) {
  return &_oil_function_impl_multiply_f32_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_divide_f32_sse (void) {
  return &_oil_function_impl_divide_f32_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_minimum_f32_sse (void) {
  return &_oil_function_impl_minimum_f32_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_maximum_f32_sse (void) {
  return &_oil_function_impl_maximum_f32_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_inverse_f32_sse (void) {
  return &_oil_function_impl_inverse_f32_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_negative_f32_sse (void) {
  return &_oil_function_impl_negative_f32_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_scalaradd_f32_ns_sse (void) {
  return &_oil_function_impl_scalaradd_f32_ns_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_scalarmultiply_f32_ns_sse (void) {
  return &_oil_function_impl_scalarmultiply_f32_ns_sse;
}
#endif

#ifdef __SYMBIAN32__
OilFunctionImpl* __oil_function_impl_scalarmultiply_f64_ns_sse2 (void) {
  return &_oil_function_impl_scalarmultiply_f64_ns_sse2;
}
#endif
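/*
 * Usage sketch (illustrative, not part of the original file): applications do
 * not call the *_sse implementations above directly.  They call the generic
 * liboil entry points such as oil_add_f32, and liboil dispatches each class
 * to the implementation selected for the running CPU; see liboil.h and the
 * generated liboilfuncs.h for the exact public prototypes.
 */
#if 0
#include <liboil/liboil.h>

static void
example (void)
{
  float a[8], b[8], sum[8];
  int i;

  for (i = 0; i < 8; i++) {
    a[i] = (float) i;
    b[i] = 2.0f * (float) i;
  }

  oil_init ();                  /* selects implementations, possibly the SSE ones above */
  oil_add_f32 (sum, a, b, 8);   /* sum[i] = a[i] + b[i] */
}
#endif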