/*
 * LIBOIL - Library of Optimized Inner Loops
 * Copyright (c) 2003,2004 David A. Schleef <ds@schleef.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT,
 * INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
 * SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
/* Portions Copyright (c) 2008-2009 Nokia Corporation and/or its subsidiary(-ies). All rights reserved. */
sl@0: 
sl@0: #ifdef HAVE_CONFIG_H
sl@0: #include "config.h"
sl@0: #endif
sl@0: 
sl@0: #include <liboil/liboilfunction.h>
sl@0: 
/* Function classes implemented by this file.  The class objects themselves
 * are defined in the generic liboil sources; these declarations make them
 * visible so the OIL_DEFINE_IMPL_FULL macros below can register against
 * them. */
OIL_DECLARE_CLASS (diff8x8_s16_u8);
OIL_DECLARE_CLASS (diff8x8_const128_s16_u8);
OIL_DECLARE_CLASS (diff8x8_average_s16_u8);
sl@0: 
sl@0: 
/*
 * diff8x8_s16_u8_mmx:
 * MMX implementation of the diff8x8_s16_u8 class.
 *
 * For an 8x8 block, computes dest[i] = (int16_t)src1[i] - (int16_t)src2[i].
 * src1 and src2 advance by their respective row strides ss1/ss2 (in bytes);
 * dest is written contiguously (64 int16_t values, 128 bytes total).
 *
 * The asm body is compiled out on Symbian emulator builds (__WINS__ /
 * __WINSCW__), whose toolchains cannot consume GCC inline asm — the
 * function is then a no-op.  NOTE(review): presumably a portable C
 * reference impl is registered elsewhere for those targets — confirm.
 */
static void
diff8x8_s16_u8_mmx (int16_t *dest, uint8_t *src1, int ss1, uint8_t *src2, int ss2)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (
    "  pxor        %%mm7, %%mm7     \n\t" /* mm7 = 0, used to zero-extend bytes to words */

/* One row of the 8x8 block: load 8 bytes from each source, widen to
 * 16-bit, subtract, store 16 bytes to dest, then advance all pointers.
 * Unrolled 8x below. */
#define LOOP \
    "  movq        (%0), %%mm0      \n\t" /* mm0 = FiltPtr */ \
    "  movq        (%1), %%mm1      \n\t" /* mm1 = ReconPtr */ \
    "  movq        %%mm0, %%mm2     \n\t" /* dup to prepare for up conversion */ \
    "  movq        %%mm1, %%mm3     \n\t" /* dup to prepare for up conversion */ \
    /* convert from UINT8 to INT16 */ \
    "  punpcklbw   %%mm7, %%mm0     \n\t" /* mm0 = INT16(FiltPtr), low 4 bytes */ \
    "  punpcklbw   %%mm7, %%mm1     \n\t" /* mm1 = INT16(ReconPtr), low 4 bytes */ \
    "  punpckhbw   %%mm7, %%mm2     \n\t" /* mm2 = INT16(FiltPtr), high 4 bytes */ \
    "  punpckhbw   %%mm7, %%mm3     \n\t" /* mm3 = INT16(ReconPtr), high 4 bytes */ \
    /* start calculation */ \
    "  psubw       %%mm1, %%mm0     \n\t" /* mm0 = FiltPtr - ReconPtr */ \
    "  psubw       %%mm3, %%mm2     \n\t" /* mm2 = FiltPtr - ReconPtr */ \
    "  movq        %%mm0,  (%2)     \n\t" /* write answer out */ \
    "  movq        %%mm2, 8(%2)     \n\t" /* write answer out */ \
    /* Increment pointers */ \
    "  add         $16, %2          \n\t" /* dest is packed: 8 words per row */ \
    "  add         %3, %0           \n\t" /* src1 += ss1 */ \
    "  add         %4, %1           \n\t" /* src2 += ss2 */

    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
#undef LOOP

    "  emms                         \n\t" /* clear MMX state so the x87 FPU is usable again */

     : "+r" (src1),   /* %0: read/write, advanced by ss1 per row */
       "+r" (src2),   /* %1: read/write, advanced by ss2 per row */
       "+r" (dest)    /* %2: read/write, advanced 16 bytes per row */
     : "m" (ss1),     /* %3: row stride of src1, used as a memory operand */
       "m" (ss2)      /* %4: row stride of src2 */
     : "memory"       /* stores through %2 */
  );
#endif
}
OIL_DEFINE_IMPL_FULL (diff8x8_s16_u8_mmx, diff8x8_s16_u8, OIL_IMPL_FLAG_MMX);
sl@0: 
/*
 * diff8x8_const128_s16_u8_mmx:
 * MMX implementation of the diff8x8_const128_s16_u8 class.
 *
 * For an 8x8 block, computes dest[i] = (int16_t)src1[i] - 128.
 * src1 advances by stride ss1 (bytes) per row; dest is written
 * contiguously (64 int16_t values).
 *
 * Compiled out on Symbian emulator builds (__WINS__/__WINSCW__); see
 * diff8x8_s16_u8_mmx.
 */
static void
diff8x8_const128_s16_u8_mmx (int16_t *dest, uint8_t *src1, int ss1)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  /* Four 16-bit words of 128, broadcast into mm1 once before the loop.
   * movq tolerates the (possibly unaligned) stack address. */
  const int16_t tmp[4] = { 0x0080, 0x0080, 0x0080, 0x0080 };

  __asm__ __volatile__ (
    "  pxor        %%mm7, %%mm7     \n\t" /* mm7 = 0, for byte->word unpack */
    "  movq        (%3), %%mm1  \n\t"     /* mm1 = {128,128,128,128} */

/* One row: load 8 bytes, widen to 16-bit, subtract 128, store 16 bytes.
 * Unrolled 8x below. */
#define LOOP \
    "  movq        (%0), %%mm0      \n\t" /* mm0 = FiltPtr */ \
    "  movq        %%mm0, %%mm2     \n\t" /* dup to prepare for up conversion */ \
    /* convert from UINT8 to INT16 */ \
    "  punpcklbw   %%mm7, %%mm0     \n\t" /* mm0 = INT16(FiltPtr), low 4 bytes */ \
    "  punpckhbw   %%mm7, %%mm2     \n\t" /* mm2 = INT16(FiltPtr), high 4 bytes */ \
    /* start calculation */ \
    "  psubw       %%mm1, %%mm0     \n\t" /* mm0 = FiltPtr - 128 */ \
    "  psubw       %%mm1, %%mm2     \n\t" /* mm2 = FiltPtr - 128 */ \
    "  movq        %%mm0,  (%1)     \n\t" /* write answer out */ \
    "  movq        %%mm2, 8(%1)     \n\t" /* write answer out */ \
    /* Increment pointers */ \
    "  add         $16, %1           \n\t" /* dest is packed: 8 words per row */ \
    "  add         %2, %0           \n\t"  /* src1 += ss1 */

    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
#undef LOOP

    "  emms                         \n\t" /* clear MMX state for subsequent x87 code */

     : "+r" (src1),   /* %0: read/write, advanced by ss1 per row */
       "+r" (dest)    /* %1: read/write, advanced 16 bytes per row */
     : "r" (ss1),     /* %2: row stride of src1 */
       "r" (tmp)      /* %3: address of the 128-constant vector */
     : "memory"       /* stores through %1 */
  );
#endif
}
OIL_DEFINE_IMPL_FULL (diff8x8_const128_s16_u8_mmx, diff8x8_const128_s16_u8, OIL_IMPL_FLAG_MMX);
sl@0: 
/*
 * diff8x8_average_s16_u8_mmx:
 * MMX implementation of the diff8x8_average_s16_u8 class.
 *
 * For an 8x8 block, computes
 *   dest[i] = (int16_t)src1[i] - (((int16_t)src2[i] + (int16_t)src3[i]) >> 1)
 * i.e. subtracts the truncating average of the two reference blocks.
 * src1 advances by stride ss1; src2 AND src3 both advance by stride ss2
 * (src3 has no stride parameter of its own).  dest is written
 * contiguously (64 int16_t values).
 *
 * Compiled out on Symbian emulator builds (__WINS__/__WINSCW__); see
 * diff8x8_s16_u8_mmx.
 */
static void
diff8x8_average_s16_u8_mmx (int16_t *dest, uint8_t *src1, int ss1, uint8_t *src2, int ss2, uint8_t *src3)
{
#if !defined(__WINSCW__) && !defined(__WINS__)
  __asm__ __volatile__ (
    "  pxor        %%mm7, %%mm7     \n\t" /* mm7 = 0, for byte->word unpack */

/* One row: load 8 bytes from each of the three sources, widen to 16-bit,
 * average the two reference rows (truncating), subtract from the filtered
 * row, store 16 bytes.  Unrolled 8x below. */
#define LOOP \
    "  movq        (%0), %%mm0      \n\t" /* mm0 = FiltPtr */ \
    "  movq        (%1), %%mm1      \n\t" /* mm1 = ReconPtr1 */ \
    "  movq        (%2), %%mm4      \n\t" /* mm4 = ReconPtr2 */ \
    "  movq        %%mm0, %%mm2     \n\t" /* dup to prepare for up conversion */ \
    "  movq        %%mm1, %%mm3     \n\t" /* dup to prepare for up conversion */ \
    "  movq        %%mm4, %%mm5     \n\t" /* dup to prepare for up conversion */ \
    /* convert from UINT8 to INT16 */ \
    "  punpcklbw   %%mm7, %%mm0     \n\t" /* mm0 = INT16(FiltPtr), low 4 bytes */ \
    "  punpcklbw   %%mm7, %%mm1     \n\t" /* mm1 = INT16(ReconPtr1), low 4 bytes */ \
    "  punpcklbw   %%mm7, %%mm4     \n\t" /* mm4 = INT16(ReconPtr2), low 4 bytes */ \
    "  punpckhbw   %%mm7, %%mm2     \n\t" /* mm2 = INT16(FiltPtr), high 4 bytes */ \
    "  punpckhbw   %%mm7, %%mm3     \n\t" /* mm3 = INT16(ReconPtr1), high 4 bytes */ \
    "  punpckhbw   %%mm7, %%mm5     \n\t" /* mm5 = INT16(ReconPtr2), high 4 bytes */ \
    /* average ReconPtr1 and ReconPtr2 (no overflow: sums fit in 16 bits) */ \
    "  paddw       %%mm4, %%mm1     \n\t" /* mm1 = ReconPtr1 + ReconPtr2 */ \
    "  paddw       %%mm5, %%mm3     \n\t" /* mm3 = ReconPtr1 + ReconPtr2 */ \
    "  psrlw       $1, %%mm1        \n\t" /* mm1 = (ReconPtr1 + ReconPtr2) / 2, truncating */ \
    "  psrlw       $1, %%mm3        \n\t" /* mm3 = (ReconPtr1 + ReconPtr2) / 2, truncating */ \
    "  psubw       %%mm1, %%mm0     \n\t" /* mm0 = FiltPtr - ((ReconPtr1 + ReconPtr2) / 2) */ \
    "  psubw       %%mm3, %%mm2     \n\t" /* mm2 = FiltPtr - ((ReconPtr1 + ReconPtr2) / 2) */ \
    "  movq        %%mm0,  (%3)     \n\t" /* write answer out */ \
    "  movq        %%mm2, 8(%3)     \n\t" /* write answer out */ \
    /* Increment pointers */ \
    "  add         $16, %3           \n\t" /* dest is packed: 8 words per row */ \
    "  add         %4, %0           \n\t"  /* src1 += ss1 */ \
    "  add         %5, %1           \n\t"  /* src2 += ss2 */ \
    "  add         %5, %2           \n\t"  /* src3 += ss2 (shares src2's stride) */

    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
    LOOP
#undef LOOP

    "  emms                         \n\t" /* clear MMX state for subsequent x87 code */

     : "+r" (src1),   /* %0: read/write, advanced by ss1 per row */
       "+r" (src2),   /* %1: read/write, advanced by ss2 per row */
       "+r" (src3),   /* %2: read/write, advanced by ss2 per row */
       "+r" (dest)    /* %3: read/write, advanced 16 bytes per row */
     : "m" (ss1),     /* %4: row stride of src1 */
       "m" (ss2)      /* %5: row stride of src2 and src3 */
     : "memory"       /* stores through %3 */
  );
#endif
}
OIL_DEFINE_IMPL_FULL (diff8x8_average_s16_u8_mmx, diff8x8_average_s16_u8, OIL_IMPL_FLAG_MMX);
sl@0: 
sl@0: 
sl@0: 
#ifdef	__SYMBIAN32__
/*
 * Symbian DLL export shim: returns the address of the impl record created
 * by OIL_DEFINE_IMPL_FULL above, so the loader can resolve it at runtime.
 *
 * Fix: the generated code used a comma in the declarator
 * ("... __oil_function_impl_diff8x8_s16_u8_mmx, diff8x8_s16_u8() {"),
 * which declared a stray pointer variable plus a wrongly-named function,
 * and returned through a comma expression ending in the undeclared
 * identifier "diff8x8_s16_u8".  Collapsed to the intended single getter.
 */
OilFunctionImpl* __oil_function_impl_diff8x8_s16_u8_mmx() {
		return &_oil_function_impl_diff8x8_s16_u8_mmx;
}
#endif
sl@0: 
#ifdef	__SYMBIAN32__
/*
 * Symbian DLL export shim: returns the address of the impl record created
 * by OIL_DEFINE_IMPL_FULL above, so the loader can resolve it at runtime.
 *
 * Fix: removed the spurious ", diff8x8_const128_s16_u8" comma-declarator
 * and comma-expression return that the generator emitted (declared a stray
 * variable and referenced an undeclared identifier).
 */
OilFunctionImpl* __oil_function_impl_diff8x8_const128_s16_u8_mmx() {
		return &_oil_function_impl_diff8x8_const128_s16_u8_mmx;
}
#endif
sl@0: 
#ifdef	__SYMBIAN32__
/*
 * Symbian DLL export shim: returns the address of the impl record created
 * by OIL_DEFINE_IMPL_FULL above, so the loader can resolve it at runtime.
 *
 * Fix: removed the spurious ", diff8x8_average_s16_u8" comma-declarator
 * and comma-expression return that the generator emitted (declared a stray
 * variable and referenced an undeclared identifier).
 */
OilFunctionImpl* __oil_function_impl_diff8x8_average_s16_u8_mmx() {
		return &_oil_function_impl_diff8x8_average_s16_u8_mmx;
}
#endif
sl@0: