celt_free(l->trig);
}
+/* Only halve in the float build; in fixed-point, the halving is already included in the Q16 shift */
+#ifdef FIXED_POINT
+#define FL_HALF(x) (x)
+#else
+#define FL_HALF(x) (.5f*(x))
+#endif
+
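For reference, a minimal sketch of the identity the new macro relies on. The stand-in macros below only assume the usual CELT semantics (Q15/Q16 products in the fixed-point build, plain multiplies in float); the real MULT16_32_Q15/MULT16_32_Q16/HALF32 definitions live in the arch headers and may round differently, so this is illustrative rather than a drop-in.

#include <stdint.h>

#ifdef FIXED_POINT
#define REF_MULT_Q15(a,b) ((int32_t)(((int64_t)(a)*(b))>>15))
#define REF_MULT_Q16(a,b) ((int32_t)(((int64_t)(a)*(b))>>16))
#define REF_HALF32(x)     ((x)>>1)   /* plays the role of HALF32 */
#define REF_FL_HALF(x)    (x)        /* plays the role of FL_HALF */
#else
#define REF_MULT_Q15(a,b) ((a)*(b))
#define REF_MULT_Q16(a,b) ((a)*(b))
#define REF_HALF32(x)     (.5f*(x))
#define REF_FL_HALF(x)    (.5f*(x))
#endif
/* In both builds REF_HALF32(REF_MULT_Q15(w,x)) == REF_FL_HALF(REF_MULT_Q16(w,x)):
   fixed point folds the halving into the extra shift, float keeps the .5f.
   This is why the windowed loops below switch to FL_HALF + MULT16_32_Q16 while
   the unwindowed middle loop keeps a plain HALF32 (no Q16 shift there to
   absorb the halving). */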
void mdct_forward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * restrict out, const celt_word16_t *window, int overlap)
{
int i;
ALLOC(f, N2, kiss_fft_scalar);
/* Consider the input to be composed of four blocks: [a, b, c, d] */
- /* Shuffle, fold, pre-rotate (part 1) */
+ /* Window, shuffle, fold */
{
/* Temp pointers to make it really clear to the compiler what we're doing */
- const kiss_fft_scalar * restrict xp1 = in+N4;
- const kiss_fft_scalar * restrict xp2 = in+N2+N4-1;
+ const kiss_fft_scalar * restrict xp1 = in+(overlap>>1);
+ const kiss_fft_scalar * restrict xp2 = in+N2-1+(overlap>>1);
kiss_fft_scalar * restrict yp = out;
- kiss_fft_scalar *t = &l->trig[0];
- const celt_word16_t * restrict wp1 = window+overlap/2;
- const celt_word16_t * restrict wp2 = window+overlap/2-1;
- for(i=0;i<overlap/4;i++)
+ const celt_word16_t * restrict wp1 = window+(overlap>>1);
+ const celt_word16_t * restrict wp2 = window+(overlap>>1)-1;
+ for(i=0;i<(overlap>>2);i++)
{
- kiss_fft_scalar re, im;
/* Real part arranged as -d-cR, Imag part arranged as -b+aR */
- re = -HALF32(MULT16_32_Q15(*wp2, xp1[N2]) + MULT16_32_Q15(*wp1,*xp2));
- im = -HALF32(MULT16_32_Q15(*wp1, *xp1) - MULT16_32_Q15(*wp2, xp2[-N2]));
+ *yp++ = -FL_HALF(MULT16_32_Q16(*wp2, xp1[N2]) + MULT16_32_Q16(*wp1,*xp2));
+ *yp++ = -FL_HALF(MULT16_32_Q16(*wp1, *xp1) - MULT16_32_Q16(*wp2, xp2[-N2]));
xp1+=2;
xp2-=2;
wp1+=2;
wp2-=2;
- /* We could remove the HALF32 above and just use MULT16_32_Q16 below
- (MIXED_PRECISION only) */
- *yp++ = S_MUL(re,t[0]) - S_MUL(im,t[N4]);
- *yp++ = S_MUL(im,t[0]) + S_MUL(re,t[N4]);
- t++;
- }
- for(;i<N/8;i++)
- {
- kiss_fft_scalar re, im;
- /* Real part arranged as -d-cR, Imag part arranged as -b+aR*/
- re = -HALF32(*xp2);
- im = -HALF32(*xp1);
- xp1+=2;
- xp2-=2;
- /* We could remove the HALF32 above and just use MULT16_32_Q16 below
- (MIXED_PRECISION only) */
- *yp++ = S_MUL(re,t[0]) - S_MUL(im,t[N4]);
- *yp++ = S_MUL(im,t[0]) + S_MUL(re,t[N4]);
- t++;
}
wp1 = window;
wp2 = window+overlap-1;
- for(;i<N4-overlap/4;i++)
+ for(;i<N4-(overlap>>2);i++)
{
- kiss_fft_scalar re, im;
/* Real part arranged as a-bR, Imag part arranged as -c-dR */
- re = HALF32(-*xp2);
- im = -HALF32(*xp1);
+ *yp++ = -HALF32(*xp2);
+ *yp++ = -HALF32(*xp1);
xp1+=2;
xp2-=2;
- /* We could remove the HALF32 above and just use MULT16_32_Q16 below
- (MIXED_PRECISION only) */
- *yp++ = S_MUL(re,t[0]) - S_MUL(im,t[N4]);
- *yp++ = S_MUL(im,t[0]) + S_MUL(re,t[N4]);
- t++;
}
for(;i<N4;i++)
{
- kiss_fft_scalar re, im;
/* Real part arranged as a-bR, Imag part arranged as -c-dR */
- re = HALF32(MULT16_32_Q15(*wp1, xp1[-N2]) - MULT16_32_Q15(*wp2, *xp2));
- im = -HALF32(MULT16_32_Q15(*wp2, *xp1) + MULT16_32_Q15(*wp1, xp2[N2]));
+ *yp++ = FL_HALF(MULT16_32_Q16(*wp1, xp1[-N2]) - MULT16_32_Q16(*wp2, *xp2));
+ *yp++ = -FL_HALF(MULT16_32_Q16(*wp2, *xp1) + MULT16_32_Q16(*wp1, xp2[N2]));
xp1+=2;
xp2-=2;
wp1+=2;
wp2-=2;
- /* We could remove the HALF32 above and just use MULT16_32_Q16 below
- (MIXED_PRECISION only) */
+ }
+ }
+ /* Pre-rotation */
+ {
+ kiss_fft_scalar * restrict yp = out;
+ kiss_fft_scalar *t = &l->trig[0];
+ for(i=0;i<N4;i++)
+ {
+ kiss_fft_scalar re, im;
+ re = yp[0];
+ im = yp[1];
*yp++ = S_MUL(re,t[0]) - S_MUL(im,t[N4]);
*yp++ = S_MUL(im,t[0]) + S_MUL(re,t[N4]);
t++;
fp += 2;
yp1 += 2;
yp2 -= 2;
- t++;
+ t++;
}
}
RESTORE_STACK;
}
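A note on the two new stages above: the window/shuffle/fold block packs the N/2+overlap input samples (judging from the pointer arithmetic) into N/4 interleaved (re, im) pairs whose contents match the "-d-cR" / "a-bR" comments, and the pre-rotation then multiplies each pair by a twiddle read from the split trig table, real part at t[i] and imaginary part at t[i+N4] (which half holds cosines versus sines is an assumption; the indexing alone does not say). The hypothetical float helper below is just that complex multiply, spelled out:

#include <complex.h>

/* Hypothetical helper (not part of the patch): rotate one folded sample by the
   twiddle (t_re, t_im), i.e. exactly what the two S_MUL lines compute:
      out_re = re*t_re - im*t_im
      out_im = im*t_re + re*t_im                                             */
static inline float complex pre_rotate_sample(float re, float im,
                                              float t_re, float t_im)
{
   return (re + I*im) * (t_re + I*t_im);
}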
-void mdct_backward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * restrict out)
+void mdct_backward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * restrict out, const celt_word16_t * restrict window, int overlap)
{
int i;
int N, N2, N4;
VARDECL(kiss_fft_scalar, f);
+ VARDECL(kiss_fft_scalar, f2);
SAVE_STACK;
N = l->n;
N2 = N>>1;
N4 = N>>2;
ALLOC(f, N2, kiss_fft_scalar);
+ ALLOC(f2, N2, kiss_fft_scalar);
/* Pre-rotate */
{
/* Temp pointers to make it really clear to the compiler what we're doing */
const kiss_fft_scalar * restrict xp1 = in;
const kiss_fft_scalar * restrict xp2 = in+N2-1;
- kiss_fft_scalar * restrict yp = out;
+ kiss_fft_scalar * restrict yp = f2;
kiss_fft_scalar *t = &l->trig[0];
for(i=0;i<N4;i++)
{
*yp++ = S_MUL(*xp2, t[N4]) - S_MUL(*xp1,t[0]);
xp1+=2;
xp2-=2;
- t++;
+ t++;
}
}
/* Inverse N/4 complex FFT. This one should *not* downscale even in fixed-point */
- cpx32_ifft(l->kfft, out, f, N4);
+ cpx32_ifft(l->kfft, f2, f, N4);
/* Post-rotate */
{
{
const kiss_fft_scalar * restrict fp1 = f;
const kiss_fft_scalar * restrict fp2 = f+N2-1;
- kiss_fft_scalar * restrict yp = out+N4;
+ kiss_fft_scalar * restrict yp = f2;
for(i = 0; i < N4; i++)
{
- *yp++ =-*fp1;
- *yp++ = *fp2;
+ *yp++ =-*fp1*2;
+ *yp++ = *fp2*2;
fp1 += 2;
fp2 -= 2;
}
/* Mirror on both sides for TDAC */
{
- const kiss_fft_scalar * restrict xp1 = out+N2-1;
- const kiss_fft_scalar * restrict xp2 = out+N2;
- kiss_fft_scalar * restrict yp1 = out;
- kiss_fft_scalar * restrict yp2 = out+N-1;
- for(i = 0; i < N4; i++)
+ kiss_fft_scalar * restrict fp1 = f2+N4-1;
+ kiss_fft_scalar * restrict xp1 = out+N2-1;
+ kiss_fft_scalar * restrict yp1 = out+N4-overlap/2;
+ const celt_word16_t * restrict wp1 = window;
+ const celt_word16_t * restrict wp2 = window+overlap-1;
+ for(i = 0; i < N4-overlap/2; i++)
+ {
+ *xp1 = *fp1;
+ xp1--;
+ fp1--;
+ }
+ for(; i < N4; i++)
+ {
+ kiss_fft_scalar x1;
+ x1 = *fp1--;
+ *yp1++ +=-MULT16_32_Q15(*wp1, x1);
+ *xp1-- += MULT16_32_Q15(*wp2, x1);
+ wp1++;
+ wp2--;
+ }
+ }
+ {
+ kiss_fft_scalar * restrict fp2 = f2+N4;
+ kiss_fft_scalar * restrict xp2 = out+N2;
+ kiss_fft_scalar * restrict yp2 = out+N-1-(N4-overlap/2);
+ const celt_word16_t * restrict wp1 = window;
+ const celt_word16_t * restrict wp2 = window+overlap-1;
+ for(i = 0; i < N4-overlap/2; i++)
+ {
+ *xp2 = *fp2;
+ xp2++;
+ fp2++;
+ }
+ for(; i < N4; i++)
{
- *yp1++ =-*xp1--;
- *yp2-- = *xp2++;
+ kiss_fft_scalar x2;
+ x2 = *fp2++;
+ *yp2-- = MULT16_32_Q15(*wp1, x2);
+ *xp2++ = MULT16_32_Q15(*wp2, x2);
+ wp1++;
+ wp2--;
}
}
RESTORE_STACK;
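On the "Mirror on both sides for TDAC" step above: each post-rotated sample x1/x2 is written to two output positions, once weighted by *wp1 (the window table read forwards) and once by *wp2 (the same table read backwards). The aliasing introduced this way only cancels across consecutive frames if the window is power complementary over the overlap region; the hypothetical float check below states that condition, assuming `window` holds the rising half of length `overlap` as the indexing suggests.

#include <math.h>

/* Hypothetical sanity check (not part of the patch): verify the usual TDAC
   condition w[n]^2 + w[overlap-1-n]^2 == 1 over the overlap region. */
static int window_is_power_complementary(const float *window, int overlap,
                                         float tol)
{
   int n;
   for (n = 0; n < overlap; n++)
   {
      float s = window[n]*window[n]
              + window[overlap-1-n]*window[overlap-1-n];
      if (fabsf(s - 1.f) > tol)
         return 0;
   }
   return 1;
}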