1 /* Copyright (c) 2007-2008 CSIRO
2 Copyright (c) 2007-2009 Xiph.Org Foundation
3 Written by Jean-Marc Valin */
5 Redistribution and use in source and binary forms, with or without
6 modification, are permitted provided that the following conditions
9 - Redistributions of source code must retain the above copyright
10 notice, this list of conditions and the following disclaimer.
12 - Redistributions in binary form must reproduce the above copyright
13 notice, this list of conditions and the following disclaimer in the
14 documentation and/or other materials provided with the distribution.
16 - Neither the name of the Xiph.org Foundation nor the names of its
17 contributors may be used to endorse or promote products derived from
18 this software without specific prior written permission.
20 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
21 ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
22 LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
23 A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE FOUNDATION OR
24 CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
25 EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
26 PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
27 PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
28 LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
29 NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
30 SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
41 #include "os_support.h"
/* M_PI is a POSIX/XSI extension, not guaranteed by ISO C's <math.h>,
   so supply a fallback.  Guard it to avoid a redefinition clash with
   toolchains that do define it, and use full double precision (the
   previous 3.141592653 was only accurate to 9 decimal places). */
#ifndef M_PI
#define M_PI 3.14159265358979323846
#endif
/* Applies (dir=+1) or undoes (dir=-1) a sequence of 2-D rotations over X
   to spread quantisation noise across coefficients.  The rotation angle
   is derived from gain = len/(3+len+6*K) in Q15, so a larger pulse count
   K yields a smaller rotation.
   NOTE(review): several lines of this body are elided from this view
   (declarations of c, s, i, Xptr, the loop braces and x1/x2 loads);
   comments below are limited to what the visible code shows. */
48 static void exp_rotation(celt_norm *X, int len, int dir, int stride, int K)
52 celt_word16 gain, theta;
/* Q15 ratio len/(3+len+6*K): shrinks as K grows. */
54 gain = celt_div((celt_word32)MULT16_16(Q15_ONE,len),(celt_word32)(3+len+6*K));
55 /* FIXME: Make that HALF16 instead of HALF32 */
/* theta = 1 - gain^2/2 (Q15), a small-angle cosine-style approximation. */
56 theta = SUB16(Q15ONE, HALF32(MULT16_16_Q15(gain,gain)));
/* c ~ cos(theta); s ~ sin(theta), with dir flipping the rotation sense. */
63 c = celt_cos_norm(EXTEND32(theta));
64 s = dir*celt_cos_norm(EXTEND32(SUB16(Q15ONE,theta))); /* sin(theta) */
/* Widen the stride so that roughly len/8 elements separate rotated pairs. */
66 stride *= len/(8*stride);
70 /* We could use MULT16_16_P15 instead of MULT16_16_Q15 for more accuracy,
71 but at this point, I really don't think it's necessary */
/* Forward pass: rotate each pair (Xptr[0], Xptr[stride]) walking upward. */
73 for (i=0;i<len-stride;i++)
78 Xptr[stride] = MULT16_16_Q15(c,x2) + MULT16_16_Q15(s,x1);
79 *Xptr++ = MULT16_16_Q15(c,x1) - MULT16_16_Q15(s,x2);
/* Backward pass: same rotation applied walking down from the end. */
81 Xptr = &X[len-2*stride-1];
82 for (i=len-2*stride-1;i>=0;i--)
87 Xptr[stride] = MULT16_16_Q15(c,x2) + MULT16_16_Q15(s,x1);
88 *Xptr-- = MULT16_16_Q15(c,x1) - MULT16_16_Q15(s,x2);
101 /** Takes the pitch vector and the decoded residual vector, computes the gain
102 that will give ||p+g*y||=1 and mixes the residual with the pitch. */
/* NOTE(review): parts of this body are elided from this view (declarations
   of k, t, g and the for-loop header over i); comments are limited to the
   visible lines.  Ryy is expected to be the energy sum(iy[i]^2). */
103 static void normalise_residual(int * restrict iy, celt_norm * restrict X, int N, int K, celt_word32 Ryy)
/* k = floor(log2(Ryy))/2: power-of-two exponent used to bring Ryy into range. */
113 k = celt_ilog2(Ryy)>>1;
/* Shift Ryy by 2*(k-7) so t lands in the input range of celt_rsqrt_norm().
   NOTE(review): the exact expected range is defined by celt_rsqrt_norm,
   which is not visible here. */
115 t = VSHR32(Ryy, (k-7)<<1);
/* g is a normalised reciprocal square root; combined with the k+1 shift
   below it presumably scales by ~1/sqrt(Ryy) — confirm against celt_rsqrt_norm. */
116 g = celt_rsqrt_norm(t);
/* Scale each integer pulse by g, reapplying the power-of-two exponent. */
120 X[i] = EXTRACT16(PSHR32(MULT16_16(g, iy[i]), k+1));
/* Quantise the vector X with K pulses and encode the result with enc.
   Visible structure: spreading rotation -> projection-based pre-search ->
   greedy per-pulse refinement maximising Rxy^2/Ryy -> sign restoration ->
   encode_pulses() -> single-pass renormalisation -> inverse rotation, so
   X is left holding the reconstructed (decoder-side) vector.
   NOTE(review): this chunk elides many lines of the body (loop headers,
   declarations of iy/xy/yy/sum/rcp/j/is/s, and the float/fixed-point
   #ifdef arms); comments below cover only the visible code. */
124 void alg_quant(celt_norm *X, int N, int K, int spread, ec_enc *enc)
126 VARDECL(celt_norm, y);
128 VARDECL(celt_word16, signx);
134 int N_1; /* Inverse of N, in Q14 format (even for float) */
/* y[] stores pulses pre-shifted left by yshift = 13 - ilog2(K)
   (see the SHL16 calls below). */
142 yshift = 13-celt_ilog2(K);
145 ALLOC(y, N, celt_norm);
147 ALLOC(signx, N, celt_word16);
/* Forward spreading rotation; undone at the end with dir = -1. */
151 exp_rotation(X, N, 1, spread, K);
169 /* Do a pre-search by projecting on the pyramid */
/* Degenerate fallback: put all the weight on the first coefficient.
   NOTE(review): the guarding condition is elided from this view. */
183 X[0] = QCONST16(1.f,14);
187 sum = QCONST16(1.f,14);
189 /* Do we have sufficient accuracy here? */
/* rcp ~ (K-1)/sum: projection factor from X onto the K-pulse pyramid. */
190 rcp = EXTRACT16(MULT16_32_Q16(K-1, celt_rcp(sum)));
193 /* It's really important to round *towards zero* here */
194 iy[j] = MULT16_16_Q15(X[j],rcp);
196 iy[j] = floor(rcp*X[j]);
/* Accumulate energy yy = <y,y> and correlation xy = <X,y> incrementally. */
198 y[j] = SHL16(iy[j],yshift);
199 yy = MAC16_16(yy, y[j],y[j]);
200 xy = MAC16_16(xy, X[j],y[j]);
205 celt_assert2(pulsesLeft>=1, "Allocated too many pulses in the quick pass");
/* Greedy refinement: place the remaining pulses batch by batch, choosing
   for each batch the position that maximises the score Rxy^2/Ryy. */
207 while (pulsesLeft > 0)
211 celt_word16 magnitude;
/* Sentinels so the first candidate always wins the comparison below. */
212 celt_word32 best_num = -VERY_LARGE16;
213 celt_word16 best_den = 0;
217 /* Decide on how many pulses to find at once */
218 pulsesAtOnce = (pulsesLeft*N_1)>>9; /* pulsesLeft/N */
/* rshift keeps the Rxy/Ryy partial sums within 16-bit range. */
222 rshift = yshift+1+celt_ilog2(K-pulsesLeft+pulsesAtOnce);
224 magnitude = SHL16(pulsesAtOnce, yshift);
227 /* The squared magnitude term gets added anyway, so we might as well
228 add it outside the loop */
229 yy = MAC16_16(yy, magnitude,magnitude);
230 /* Choose between fast and accurate strategy depending on where we are in the search */
231 /* This should ensure that anything we can process will have a better score */
234 celt_word16 Rxy, Ryy;
235 /* Select sign based on X[j] alone */
237 /* Temporary sums of the new pulse(s) */
238 Rxy = EXTRACT16(SHR32(MAC16_16(xy, s,X[j]),rshift));
239 /* We're multiplying y[j] by two so we don't have to do it here */
240 Ryy = EXTRACT16(SHR32(MAC16_16(yy, s,y[j]),rshift));
242 /* Approximate score: we maximise Rxy/sqrt(Ryy) (we're guaranteed that
243 Rxy is positive because the sign is pre-computed) */
244 Rxy = MULT16_16_Q15(Rxy,Rxy);
245 /* The idea is to check for num/den >= best_num/best_den, but that way
246 we can do it without any division */
247 /* OPT: Make sure to use conditional moves here */
/* Cross-multiplied comparison: Rxy/Ryy > best_num/best_den without dividing. */
248 if (MULT16_16(best_den, Rxy) > MULT16_16(Ryy, best_num))
/* Commit the winning position: fold its contribution into xy and yy. */
258 s = SHL16(is, yshift);
260 /* Updating the sums of the new pulse(s) */
261 xy = xy + MULT16_16(s,X[j]);
262 /* We're multiplying y[j] by two so we don't have to do it here */
263 yy = yy + MULT16_16(s,y[j]);
265 /* Only now that we've made the final choice, update y/iy */
266 /* Multiplying y[j] by 2 so we don't have to do it everywhere else */
269 pulsesLeft -= pulsesAtOnce;
/* Restore the signs that were factored out before the search. */
273 X[j] = MULT16_16(signx[j],X[j]);
277 encode_pulses(iy, N, K, enc);
279 /* Recompute the gain in one pass to reduce the encoder-decoder mismatch
280 due to the recursive computation used in quantisation. */
/* yy carries a 2*yshift scaling from the shifted y[] values; remove it
   before handing the energy to normalise_residual(). */
281 normalise_residual(iy, X, N, K, EXTRACT16(SHR32(yy,2*yshift)));
/* Undo the spreading rotation applied at the top. */
283 exp_rotation(X, N, -1, spread, K);
288 /** Decode pulse vector and combine the result with the pitch vector to produce
289 the final normalised signal in the current band. */
/* Decoder-side mirror of the tail of alg_quant(): decode the pulses,
   compute their energy Ryy = sum(iy[i]^2), renormalise into X, and undo
   the spreading rotation.
   NOTE(review): declarations of iy/Ryy/i and the summation loop header
   are elided from this view. */
290 void alg_unquant(celt_norm *X, int N, int K, int spread, ec_dec *dec)
294 decode pulses from the range decoder into iy — wait, keep code: */
298 decode_pulses(iy, N, K, dec);
/* Energy of the decoded pulse vector. */
302 Ryy = MAC16_16(Ryy, iy[i], iy[i]);
304 normalise_residual(iy, X, N, K, Ryy);
/* Inverse of the dir=+1 rotation applied by the encoder. */
306 exp_rotation(X, N, -1, spread, K);
/* Rescale the vector X (N elements, visited with the given stride) so its
   norm becomes `value`.  Declared to return a celt_word16; the return
   expression is elided from this view — presumably the measured norm,
   TODO confirm against the full source.
   NOTE(review): declarations of xptr/g/rE and the loop headers are elided. */
310 celt_word16 renormalise_vector(celt_norm *X, celt_word16 value, int N, int stride)
/* Start the energy accumulator at EPSILON to avoid division by zero below. */
313 celt_word32 E = EPSILON;
/* E += x^2 over the strided elements (loop header elided). */
319 E = MAC16_16(E, *xptr, *xptr);
/* g = value / rE; rE presumably holds sqrt(E) (computation elided) —
   confirm against the full source. */
329 g = MULT16_16_Q15(value,celt_rcp(SHL32(rE,9)));
/* Apply the gain in place with rounding (PSHR32 compensates the Q shifts). */
333 *xptr = PSHR32(MULT16_16(g, *xptr),8);
/* Copy ("fold") previously coded content from Y into P to seed the current
   band of N samples, starting at a computed offset into the first N0
   samples.  NOTE(review): the declarations of C and id, the copy loop, and
   the zero-fill fallback are elided from this view; comments cover only
   the visible offset computation. */
339 static void fold(const CELTMode *m, int N, const celt_norm * restrict Y, celt_norm * restrict P, int N0, int B)
343 /* Here, we assume that id will never be greater than N0, i.e. that
344 no band is wider than N0. In the unlikely case it happens, we set
345 everything to zero */
/* Centre the source window: offset is half the slack left in the first
   C*N0 samples after accounting for id and the C*N band width. */
347 int offset = (N0*C - (id+C*N))/2;
/* Clamp large offsets (clamping branch elided from this view)... */
348 if (offset > C*N0/16)
/* ...and align the offset to a multiple of C*B. */
350 offset -= offset % (C*B);
/* Build an intra-frame prediction for the current band: fold earlier
   content Y into P, then rescale P to unit norm (Q15ONE) with stride 1.
   NOTE(review): the function's braces are elided from this view. */
364 void intra_fold(const CELTMode *m, int N, const celt_norm * restrict Y, celt_norm * restrict P, int N0, int B)
366 fold(m, N, Y, P, N0, B);
367 renormalise_vector(P, Q15ONE, N, 1);