/* Copyright (c) 2013 Jean-Marc Valin and John Ridges
   Copyright (c) 2014, Cisco Systems, INC MingXiang WeiZhou MinPeng YanWang*/
/**
   @file pitch_sse.h
   @brief Pitch analysis
 */

/*
   Redistribution and use in source and binary forms, with or without
   modification, are permitted provided that the following conditions
   are met:

   - Redistributions of source code must retain the above copyright
   notice, this list of conditions and the following disclaimer.

   - Redistributions in binary form must reproduce the above copyright
   notice, this list of conditions and the following disclaimer in the
   documentation and/or other materials provided with the distribution.

   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
   ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER
   OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
   EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
   PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
   PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
   LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
   NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
   SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#ifndef PITCH_SSE_H
#define PITCH_SSE_H

#if defined(HAVE_CONFIG_H)
#include "config.h"
#endif

#if defined(OPUS_X86_MAY_HAVE_SSE4_1) || defined(OPUS_X86_MAY_HAVE_SSE2)
#if defined(OPUS_X86_MAY_HAVE_SSE4_1)
void xcorr_kernel_sse4_1(
                    const opus_int16 *x,
                    const opus_int16 *y,
                    opus_val32       sum[4],
                    int              len );

extern void (*const XCORR_KERNEL_IMPL[OPUS_ARCHMASK + 1])(
                    const opus_int16 *x,
                    const opus_int16 *y,
                    opus_val32       sum[4],
                    int              len );

#define xcorr_kernel(x, y, sum, len, arch) \
    ((*XCORR_KERNEL_IMPL[(arch) & OPUS_ARCHMASK])(x, y, sum, len))
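
/* Usage sketch (never compiled): the xcorr_kernel() macro above dispatches
   through XCORR_KERNEL_IMPL at runtime.  The arch index is assumed to come
   from opus_select_arch() (celt/cpu_support.h); everything in this example
   is illustrative, not part of the API defined by this header. */
#if 0
static void example_xcorr4(const opus_int16 *x, const opus_int16 *y,
                           opus_val32 sum[4], int len)
{
   int arch = opus_select_arch();
   /* Accumulates sum[k] += x[j]*y[j+k] for k = 0..3 over j = 0..len-1,
      using the SSE4.1 kernel when the running CPU supports it. */
   xcorr_kernel(x, y, sum, len, arch);
}
#endif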

opus_val32 celt_inner_prod_sse4_1(
    const opus_int16 *x,
    const opus_int16 *y,
    int               N);
#endif

#if defined(OPUS_X86_MAY_HAVE_SSE2)
opus_val32 celt_inner_prod_sse2(
    const opus_int16 *x,
    const opus_int16 *y,
    int               N);
#endif

extern opus_val32 (*const CELT_INNER_PROD_IMPL[OPUS_ARCHMASK + 1])(
                    const opus_int16 *x,
                    const opus_int16 *y,
                    int               N);

#define OVERRIDE_CELT_INNER_PROD
#define celt_inner_prod(x, y, N, arch) \
    ((*CELT_INNER_PROD_IMPL[(arch) & OPUS_ARCHMASK])(x, y, N))
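
/* Usage sketch (never compiled): celt_inner_prod() picks the SSE2 or SSE4.1
   implementation at runtime through CELT_INNER_PROD_IMPL.  opus_select_arch()
   is assumed from celt/cpu_support.h; this example is illustrative only. */
#if 0
static opus_val32 example_inner_prod(const opus_int16 *x, const opus_int16 *y, int N)
{
   int arch = opus_select_arch();
   /* Returns the dot product of x[0..N-1] and y[0..N-1]. */
   return celt_inner_prod(x, y, N, arch);
}
#endif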
#else

#include <xmmintrin.h>
#include "arch.h"

#define OVERRIDE_XCORR_KERNEL
static OPUS_INLINE void xcorr_kernel_sse(const opus_val16 *x, const opus_val16 *y, opus_val32 sum[4], int len)
{
   int j;
   __m128 xsum1, xsum2;
   xsum1 = _mm_loadu_ps(sum);
   xsum2 = _mm_setzero_ps();

   /* Main loop: each iteration consumes 4 samples of x.  The x shuffles
      (0x00, 0x55, 0xaa, 0xff) broadcast x[j]..x[j+3]; the y shuffles build
      the shifted windows y[j+1..j+4] and y[j+2..j+5] from just two loads,
      so sum[k] accumulates x[n]*y[n+k] for k = 0..3. */
   for (j = 0; j < len-3; j += 4)
   {
      __m128 x0 = _mm_loadu_ps(x+j);
      __m128 yj = _mm_loadu_ps(y+j);
      __m128 y3 = _mm_loadu_ps(y+j+3);

      xsum1 = _mm_add_ps(xsum1,_mm_mul_ps(_mm_shuffle_ps(x0,x0,0x00),yj));
      xsum2 = _mm_add_ps(xsum2,_mm_mul_ps(_mm_shuffle_ps(x0,x0,0x55),
                                          _mm_shuffle_ps(yj,y3,0x49)));
      xsum1 = _mm_add_ps(xsum1,_mm_mul_ps(_mm_shuffle_ps(x0,x0,0xaa),
                                          _mm_shuffle_ps(yj,y3,0x9e)));
      xsum2 = _mm_add_ps(xsum2,_mm_mul_ps(_mm_shuffle_ps(x0,x0,0xff),y3));
   }
   /* Handle the up-to-3 remaining samples, one broadcast x value at a time. */
   if (j < len)
   {
      xsum1 = _mm_add_ps(xsum1,_mm_mul_ps(_mm_load1_ps(x+j),_mm_loadu_ps(y+j)));
      if (++j < len)
      {
         xsum2 = _mm_add_ps(xsum2,_mm_mul_ps(_mm_load1_ps(x+j),_mm_loadu_ps(y+j)));
         if (++j < len)
         {
            xsum1 = _mm_add_ps(xsum1,_mm_mul_ps(_mm_load1_ps(x+j),_mm_loadu_ps(y+j)));
         }
      }
   }
   _mm_storeu_ps(sum,_mm_add_ps(xsum1,xsum2));
}
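
/* Reference-only sketch (never compiled): a plain C version that is
   functionally equivalent to xcorr_kernel_sse() above, kept here purely as
   documentation of what the intrinsics compute.  Note that y[] must have at
   least len+3 readable samples. */
#if 0
static void xcorr_kernel_scalar(const opus_val16 *x, const opus_val16 *y,
                                opus_val32 sum[4], int len)
{
   int j;
   for (j=0;j<len;j++)
   {
      sum[0] += x[j]*y[j];
      sum[1] += x[j]*y[j+1];
      sum[2] += x[j]*y[j+2];
      sum[3] += x[j]*y[j+3];
   }
}
#endif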

#define xcorr_kernel(_x, _y, _z, len, arch) \
    ((void)(arch),xcorr_kernel_sse(_x, _y, _z, len))

#define OVERRIDE_DUAL_INNER_PROD
static OPUS_INLINE void dual_inner_prod(const opus_val16 *x, const opus_val16 *y01, const opus_val16 *y02,
      int N, opus_val32 *xy1, opus_val32 *xy2)
{
   int i;
   __m128 xsum1, xsum2;
   xsum1 = _mm_setzero_ps();
   xsum2 = _mm_setzero_ps();
   for (i=0;i<N-3;i+=4)
   {
      __m128 xi = _mm_loadu_ps(x+i);
      __m128 y1i = _mm_loadu_ps(y01+i);
      __m128 y2i = _mm_loadu_ps(y02+i);
      xsum1 = _mm_add_ps(xsum1,_mm_mul_ps(xi, y1i));
      xsum2 = _mm_add_ps(xsum2,_mm_mul_ps(xi, y2i));
   }
   /* Horizontal sum: fold the high pair onto the low pair, then lane 1 onto
      lane 0, leaving the full sum in the lowest lane. */
   xsum1 = _mm_add_ps(xsum1, _mm_movehl_ps(xsum1, xsum1));
   xsum1 = _mm_add_ss(xsum1, _mm_shuffle_ps(xsum1, xsum1, 0x55));
   _mm_store_ss(xy1, xsum1);
   xsum2 = _mm_add_ps(xsum2, _mm_movehl_ps(xsum2, xsum2));
   xsum2 = _mm_add_ss(xsum2, _mm_shuffle_ps(xsum2, xsum2, 0x55));
   _mm_store_ss(xy2, xsum2);
   /* Scalar tail for the last N%4 samples. */
   for (;i<N;i++)
   {
      *xy1 = MAC16_16(*xy1, x[i], y01[i]);
      *xy2 = MAC16_16(*xy2, x[i], y02[i]);
   }
}
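
/* Reference-only sketch (never compiled): dual_inner_prod() computes the two
   dot products xy1 = sum(x[i]*y01[i]) and xy2 = sum(x[i]*y02[i]) in a single
   pass so x[] is only read once.  Plain C equivalent for documentation: */
#if 0
static void dual_inner_prod_scalar(const opus_val16 *x, const opus_val16 *y01,
      const opus_val16 *y02, int N, opus_val32 *xy1, opus_val32 *xy2)
{
   int i;
   opus_val32 s1 = 0, s2 = 0;
   for (i=0;i<N;i++)
   {
      s1 += x[i]*y01[i];
      s2 += x[i]*y02[i];
   }
   *xy1 = s1;
   *xy2 = s2;
}
#endif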

#define OVERRIDE_CELT_INNER_PROD
static OPUS_INLINE opus_val32 celt_inner_prod_sse(const opus_val16 *x, const opus_val16 *y,
      int N)
{
   int i;
   float xy;
   __m128 sum;
   sum = _mm_setzero_ps();
   /* FIXME: We should probably go 8-way and use 2 sums. */
   for (i=0;i<N-3;i+=4)
   {
      __m128 xi = _mm_loadu_ps(x+i);
      __m128 yi = _mm_loadu_ps(y+i);
      sum = _mm_add_ps(sum,_mm_mul_ps(xi, yi));
   }
   /* Horizontal sum */
   sum = _mm_add_ps(sum, _mm_movehl_ps(sum, sum));
   sum = _mm_add_ss(sum, _mm_shuffle_ps(sum, sum, 0x55));
   _mm_store_ss(&xy, sum);
   for (;i<N;i++)
   {
      xy = MAC16_16(xy, x[i], y[i]);
   }
   return xy;
}
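
/* Illustrative sketch (never compiled) of the 8-way unrolling the FIXME above
   suggests: two independent accumulators let the adds overlap.  Untested and
   kept only as an example of the idea, not as the project's implementation. */
#if 0
static OPUS_INLINE opus_val32 celt_inner_prod_sse_8way(const opus_val16 *x, const opus_val16 *y,
      int N)
{
   int i;
   float xy;
   __m128 sum1 = _mm_setzero_ps();
   __m128 sum2 = _mm_setzero_ps();
   for (i=0;i<N-7;i+=8)
   {
      sum1 = _mm_add_ps(sum1, _mm_mul_ps(_mm_loadu_ps(x+i),   _mm_loadu_ps(y+i)));
      sum2 = _mm_add_ps(sum2, _mm_mul_ps(_mm_loadu_ps(x+i+4), _mm_loadu_ps(y+i+4)));
   }
   sum1 = _mm_add_ps(sum1, sum2);
   /* Same horizontal-sum idiom as above. */
   sum1 = _mm_add_ps(sum1, _mm_movehl_ps(sum1, sum1));
   sum1 = _mm_add_ss(sum1, _mm_shuffle_ps(sum1, sum1, 0x55));
   _mm_store_ss(&xy, sum1);
   for (;i<N;i++)
      xy = MAC16_16(xy, x[i], y[i]);
   return xy;
}
#endif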

#define celt_inner_prod(_x, _y, len, arch) \
    ((void)(arch),celt_inner_prod_sse(_x, _y, len))

#define OVERRIDE_COMB_FILTER_CONST
static OPUS_INLINE void comb_filter_const(opus_val32 *y, opus_val32 *x, int T, int N,
      opus_val16 g10, opus_val16 g11, opus_val16 g12)
{
   int i;
   __m128 x0v;
   __m128 g10v, g11v, g12v;
   g10v = _mm_load1_ps(&g10);
   g11v = _mm_load1_ps(&g11);
   g12v = _mm_load1_ps(&g12);
   /* x0v is primed with x[-T-2..-T+1]; inside the loop it always holds
      x[i-T-2..i-T+1], and each iteration loads only x4v = x[i-T+2..i-T+5],
      rebuilding the intermediate shifted windows x1v..x3v with shuffles
      instead of three extra unaligned loads (see the #if 0 alternative). */
   x0v = _mm_loadu_ps(&x[-T-2]);
   for (i=0;i<N-3;i+=4)
   {
      __m128 yi, yi2, x1v, x2v, x3v, x4v;
      const opus_val32 *xp = &x[i-T-2];
      yi = _mm_loadu_ps(x+i);
      x4v = _mm_loadu_ps(xp+4);
#if 0
      /* Slower version with all loads */
      x1v = _mm_loadu_ps(xp+1);
      x2v = _mm_loadu_ps(xp+2);
      x3v = _mm_loadu_ps(xp+3);
#else
      x2v = _mm_shuffle_ps(x0v, x4v, 0x4e);
      x1v = _mm_shuffle_ps(x0v, x2v, 0x99);
      x3v = _mm_shuffle_ps(x2v, x4v, 0x99);
#endif

      yi = _mm_add_ps(yi, _mm_mul_ps(g10v,x2v));
#if 0 /* Set to 1 to make it bit-exact with the non-SSE version */
      yi = _mm_add_ps(yi, _mm_mul_ps(g11v,_mm_add_ps(x3v,x1v)));
      yi = _mm_add_ps(yi, _mm_mul_ps(g12v,_mm_add_ps(x4v,x0v)));
#else
      /* Use partial sums */
      yi2 = _mm_add_ps(_mm_mul_ps(g11v,_mm_add_ps(x3v,x1v)),
                       _mm_mul_ps(g12v,_mm_add_ps(x4v,x0v)));
      yi = _mm_add_ps(yi, yi2);
#endif
      x0v=x4v;
      _mm_storeu_ps(y+i, yi);
   }
#ifdef CUSTOM_MODES
   /* With custom modes, N may not be a multiple of 4; finish the remaining
      samples with the scalar formula. */
   for (;i<N;i++)
   {
      y[i] = x[i]
               + MULT16_32_Q15(g10,x[i-T])
               + MULT16_32_Q15(g11,ADD32(x[i-T+1],x[i-T-1]))
               + MULT16_32_Q15(g12,ADD32(x[i-T+2],x[i-T-2]));
   }
#endif
}
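
/* Usage sketch (never compiled): comb_filter_const() is the constant-gain
   inner loop used by CELT's comb_filter() pre/postfilter.  It reads back as
   far as x[i-T-2], so x must carry at least T+2 samples of history before
   x[0]; since the scalar cleanup above is only compiled for CUSTOM_MODES,
   N is otherwise expected to be a multiple of 4.  The tap values below are
   arbitrary placeholders for illustration only. */
#if 0
static void example_comb_filter(opus_val32 *y, opus_val32 *x, int T, int N)
{
   comb_filter_const(y, x, T, N,
                     QCONST16(0.25f, 15),     /* g10: placeholder tap */
                     QCONST16(0.125f, 15),    /* g11: placeholder tap */
                     QCONST16(0.0625f, 15));  /* g12: placeholder tap */
}
#endif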

#endif /* OPUS_X86_MAY_HAVE_SSE4_1 || OPUS_X86_MAY_HAVE_SSE2 */
#endif /* PITCH_SSE_H */