mdct_backward now does the WOLA (weighted overlap-add), so there isn't much left in compute_inv_mdcts
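
For context, a rough caller-side sketch of the new entry points (not part of the patch; the helper and buffer names below are illustrative and a float build is assumed). The window taps and the overlap length are now passed straight into the transforms, and mdct_backward applies the synthesis window and the overlap-add itself:

    #include "mdct.h"

    /* Hypothetical driver; 'lookup', 'time', 'freq' and 'win' are
       illustrative names, not taken from the patch. */
    static void run_frame(const mdct_lookup *lookup,
                          kiss_fft_scalar *time, kiss_fft_scalar *freq,
                          const celt_word16_t *win, int overlap)
    {
       /* Analysis: the input is windowed and folded inside the call. */
       mdct_forward(lookup, time, freq, win, overlap);

       /* Synthesis: the window is applied and the leading 'overlap'
          samples of 'time' are accumulated onto whatever the previous
          frame left there, so the caller no longer does the WOLA. */
       mdct_backward(lookup, freq, time, win, overlap);
    }

With that, compute_inv_mdcts is reduced to little more than this call, which is what the log message refers to.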
libcelt/mdct.c
index d47046e..075ab5e 100644
 #endif
 
 #include "mdct.h"
-#include "kiss_fft.h"
+#include "kfft_double.h"
 #include <math.h>
 #include "os_support.h"
-#include "_kiss_fft_guts.h"
 #include "mathops.h"
+#include "stack_alloc.h"
 
 #ifndef M_PI
 #define M_PI 3.141592653
 void mdct_init(mdct_lookup *l,int N)
 {
    int i;
-   int N2, N4;
+   int N2;
    l->n = N;
-   N2 = N/2;
-   N4 = N/4;
-   l->kfft = kiss_fft_alloc(N4, NULL, NULL);
+   N2 = N>>1;
+   l->kfft = cpx32_fft_alloc(N>>2);
    l->trig = (kiss_twiddle_scalar*)celt_alloc(N2*sizeof(kiss_twiddle_scalar));
    /* We have enough points that sine isn't necessary */
 #if defined(FIXED_POINT)
@@ -73,7 +72,7 @@ void mdct_init(mdct_lookup *l,int N)
       l->trig[i] = SAMP_MAX*cos(2*M_PI*(i+1./8.)/N);
 #else
    for (i=0;i<N2;i++)
-      l->trig[i] = TRIG_UPSCALE*celt_cos_norm(DIV32(ADD32(SHL32(i,17),16386),N));
+      l->trig[i] = TRIG_UPSCALE*celt_cos_norm(DIV32(ADD32(SHL32(EXTEND32(i),17),16386),N));
 #endif
 #else
    for (i=0;i<N2;i++)
@@ -83,99 +82,215 @@ void mdct_init(mdct_lookup *l,int N)
 
 void mdct_clear(mdct_lookup *l)
 {
-   kiss_fft_free(l->kfft);
+   cpx32_fft_free(l->kfft);
    celt_free(l->trig);
 }
 
-void mdct_forward(mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar *out)
+/* Only scale by half in the float build; in fixed-point, the halving is folded into the shift */
+#ifdef FIXED_POINT
+#define FL_HALF(x) (x)
+#else
+#define FL_HALF(x) (.5f*(x))
+#endif
+
+void mdct_forward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * restrict out, const celt_word16_t *window, int overlap)
 {
    int i;
    int N, N2, N4;
-   VARDECL(kiss_fft_scalar *f);
+   VARDECL(kiss_fft_scalar, f);
    SAVE_STACK;
    N = l->n;
-   N2 = N/2;
-   N4 = N/4;
+   N2 = N>>1;
+   N4 = N>>2;
    ALLOC(f, N2, kiss_fft_scalar);
    
    /* Consider the input to be composed of four blocks: [a, b, c, d] */
-   /* Shuffle, fold, pre-rotate (part 1) */
-   for(i=0;i<N/8;i++)
+   /* Window, shuffle, fold */
    {
-      kiss_fft_scalar re, im;
-      /* Real part arranged as -d-cR, Imag part arranged as -b+aR*/
-      re = -.5*(in[N2+N4+2*i] + in[N2+N4-2*i-1]);
-      im = -.5*(in[N4+2*i]    - in[N4-2*i-1]);
-      out[2*i]   = S_MUL(re,l->trig[i])  -  S_MUL(im,l->trig[i+N4]);
-      out[2*i+1] = S_MUL(im,l->trig[i])  +  S_MUL(re,l->trig[i+N4]);
+      /* Temp pointers to make it really clear to the compiler what we're doing */
+      const kiss_fft_scalar * restrict xp1 = in+(overlap>>1);
+      const kiss_fft_scalar * restrict xp2 = in+N2-1+(overlap>>1);
+      kiss_fft_scalar * restrict yp = out;
+      const celt_word16_t * restrict wp1 = window+(overlap>>1);
+      const celt_word16_t * restrict wp2 = window+(overlap>>1)-1;
+      for(i=0;i<(overlap>>2);i++)
+      {
+         /* Real part arranged as -d-cR, Imag part arranged as -b+aR*/
+         *yp++ = -FL_HALF(MULT16_32_Q16(*wp2, xp1[N2]) + MULT16_32_Q16(*wp1,*xp2));
+         *yp++ = -FL_HALF(MULT16_32_Q16(*wp1, *xp1)    - MULT16_32_Q16(*wp2, xp2[-N2]));
+         xp1+=2;
+         xp2-=2;
+         wp1+=2;
+         wp2-=2;
+      }
+      wp1 = window;
+      wp2 = window+overlap-1;
+      for(;i<N4-(overlap>>2);i++)
+      {
+         /* Real part arranged as a-bR, Imag part arranged as -c-dR */
+         *yp++ = -HALF32(*xp2);
+         *yp++ = -HALF32(*xp1);
+         xp1+=2;
+         xp2-=2;
+      }
+      for(;i<N4;i++)
+      {
+         /* Real part arranged as a-bR, Imag part arranged as -c-dR */
+         *yp++ =  FL_HALF(MULT16_32_Q16(*wp1, xp1[-N2]) - MULT16_32_Q16(*wp2, *xp2));
+         *yp++ = -FL_HALF(MULT16_32_Q16(*wp2, *xp1)     + MULT16_32_Q16(*wp1, xp2[N2]));
+         xp1+=2;
+         xp2-=2;
+         wp1+=2;
+         wp2-=2;
+      }
    }
-   for(;i<N4;i++)
+   /* Pre-rotation */
    {
-      kiss_fft_scalar re, im;
-      /* Real part arranged as a-bR, Imag part arranged as -c-dR */
-      re =  .5*(in[2*i-N4] - in[N2+N4-2*i-1]);
-      im = -.5*(in[N4+2*i] + in[N+N4-2*i-1]);
-      out[2*i]   = S_MUL(re,l->trig[i])  -  S_MUL(im,l->trig[i+N4]);
-      out[2*i+1] = S_MUL(im,l->trig[i])  +  S_MUL(re,l->trig[i+N4]);
+      kiss_fft_scalar * restrict yp = out;
+      const kiss_twiddle_scalar *t = &l->trig[0];
+      for(i=0;i<N4;i++)
+      {
+         kiss_fft_scalar re, im;
+         re = yp[0];
+         im = yp[1];
+         *yp++ = S_MUL(re,t[0])  -  S_MUL(im,t[N4]);
+         *yp++ = S_MUL(im,t[0])  +  S_MUL(re,t[N4]);
+         t++;
+      }
    }
 
    /* N/4 complex FFT, which should normally down-scale by 4/N (but doesn't now) */
-   kiss_fft(l->kfft, (const kiss_fft_cpx *)out, (kiss_fft_cpx *)f);
+   cpx32_fft(l->kfft, out, f, N4);
 
    /* Post-rotate and apply the scaling if the FFT doesn't do it itself */
-   for(i=0;i<N4;i++)
    {
-      out[2*i]      = -S_MUL(f[2*i+1],l->trig[i+N4]) + S_MUL(f[2*i]  ,l->trig[i]);
-      out[N2-1-2*i] = -S_MUL(f[2*i]  ,l->trig[i+N4]) - S_MUL(f[2*i+1],l->trig[i]);
+      /* Temp pointers to make it really clear to the compiler what we're doing */
+      const kiss_fft_scalar * restrict fp = f;
+      kiss_fft_scalar * restrict yp1 = out;
+      kiss_fft_scalar * restrict yp2 = out+N2-1;
+      const kiss_twiddle_scalar *t = &l->trig[0];
+      for(i=0;i<N4;i++)
+      {
+         *yp1 = -S_MUL(fp[1],t[N4]) + S_MUL(fp[0],t[0]);
+         *yp2 = -S_MUL(fp[0],t[N4]) - S_MUL(fp[1],t[0]);
+         fp += 2;
+         yp1 += 2;
+         yp2 -= 2;
+         t++;
+      }
    }
    RESTORE_STACK;
 }
 
 
-void mdct_backward(mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar *out)
+void mdct_backward(const mdct_lookup *l, kiss_fft_scalar *in, kiss_fft_scalar * restrict out, const celt_word16_t * restrict window, int overlap)
 {
    int i;
-   int N, N2, N4, N8;
-   VARDECL(kiss_fft_scalar *f);
+   int N, N2, N4;
+   VARDECL(kiss_fft_scalar, f);
+   VARDECL(kiss_fft_scalar, f2);
    SAVE_STACK;
    N = l->n;
-   N2 = N/2;
-   N4 = N/4;
-   N8 = N/8;
+   N2 = N>>1;
+   N4 = N>>2;
    ALLOC(f, N2, kiss_fft_scalar);
+   ALLOC(f2, N2, kiss_fft_scalar);
    
    /* Pre-rotate */
-   for(i=0;i<N4;i++) 
    {
-      out[2*i]   = -S_MUL(in[N2-2*i-1], l->trig[i])    - S_MUL(in[2*i],l->trig[i+N4]);
-      out[2*i+1] =  S_MUL(in[N2-2*i-1], l->trig[i+N4]) - S_MUL(in[2*i],l->trig[i]);
+      /* Temp pointers to make it really clear to the compiler what we're doing */
+      const kiss_fft_scalar * restrict xp1 = in;
+      const kiss_fft_scalar * restrict xp2 = in+N2-1;
+      kiss_fft_scalar * restrict yp = f2;
+      const kiss_twiddle_scalar *t = &l->trig[0];
+      for(i=0;i<N4;i++) 
+      {
+         *yp++ = -S_MUL(*xp2, t[0])  - S_MUL(*xp1,t[N4]);
+         *yp++ =  S_MUL(*xp2, t[N4]) - S_MUL(*xp1,t[0]);
+         xp1+=2;
+         xp2-=2;
+         t++;
+      }
    }
 
    /* Inverse N/4 complex FFT. This one should *not* downscale even in fixed-point */
-   kiss_ifft(l->kfft, (const kiss_fft_cpx *)out, (kiss_fft_cpx *)f);
+   cpx32_ifft(l->kfft, f2, f, N4);
    
    /* Post-rotate */
-   for(i=0;i<N4;i++)
    {
-      kiss_fft_scalar re, im;
-      re = f[2*i];
-      im = f[2*i+1];
-      /* We'd scale up by 2 here, but instead it's done when mixing the windows */
-      f[2*i]   = S_MUL(re,l->trig[i]) + S_MUL(im,l->trig[i+N4]);
-      f[2*i+1] = S_MUL(im,l->trig[i]) - S_MUL(re,l->trig[i+N4]);
+      kiss_fft_scalar * restrict fp = f;
+      const kiss_twiddle_scalar *t = &l->trig[0];
+
+      for(i=0;i<N4;i++)
+      {
+         kiss_fft_scalar re, im;
+         re = fp[0];
+         im = fp[1];
+         /* We'd scale up by 2 here, but instead it's done when mixing the windows */
+         *fp++ = S_MUL(re,*t) + S_MUL(im,t[N4]);
+         *fp++ = S_MUL(im,*t) - S_MUL(re,t[N4]);
+         t++;
+      }
    }
    /* De-shuffle the components for the middle of the window only */
-   for(i = 0; i < N4; i++)
    {
-      out[N4+2*i]   =-f[2*i];
-      out[N4+2*i+1] = f[N2-2*i-1];
+      const kiss_fft_scalar * restrict fp1 = f;
+      const kiss_fft_scalar * restrict fp2 = f+N2-1;
+      kiss_fft_scalar * restrict yp = f2;
+      for(i = 0; i < N4; i++)
+      {
+         *yp++ =-*fp1*2;
+         *yp++ = *fp2*2;
+         fp1 += 2;
+         fp2 -= 2;
+      }
    }
 
    /* Mirror on both sides for TDAC */
-   for(i = 0; i < N4; i++)
    {
-      out[i]     =-out[N2-i-1];
-      out[N-i-1] = out[N2+i];
+      kiss_fft_scalar * restrict fp1 = f2+N4-1;
+      kiss_fft_scalar * restrict xp1 = out+N2-1;
+      kiss_fft_scalar * restrict yp1 = out+N4-overlap/2;
+      const celt_word16_t * restrict wp1 = window;
+      const celt_word16_t * restrict wp2 = window+overlap-1;
+      for(i = 0; i< N4-overlap/2; i++)
+      {
+         *xp1 = *fp1;
+         xp1--;
+         fp1--;
+      }
+      for(; i < N4; i++)
+      {
+         kiss_fft_scalar x1;
+         x1 = *fp1--;
+         *yp1++ +=-MULT16_32_Q15(*wp1, x1);
+         *xp1-- += MULT16_32_Q15(*wp2, x1);
+         wp1++;
+         wp2--;
+      }
+   }
+   {
+      kiss_fft_scalar * restrict fp2 = f2+N4;
+      kiss_fft_scalar * restrict xp2 = out+N2;
+      kiss_fft_scalar * restrict yp2 = out+N-1-(N4-overlap/2);
+      const celt_word16_t * restrict wp1 = window;
+      const celt_word16_t * restrict wp2 = window+overlap-1;
+      for(i = 0; i< N4-overlap/2; i++)
+      {
+         *xp2 = *fp2;
+         xp2++;
+         fp2++;
+      }
+      for(; i < N4; i++)
+      {
+         kiss_fft_scalar x2;
+         x2 = *fp2++;
+         *yp2--  = MULT16_32_Q15(*wp1, x2);
+         *xp2++  = MULT16_32_Q15(*wp2, x2);
+         wp1++;
+         wp2--;
+      }
    }
    RESTORE_STACK;
 }
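
For readers following the mirroring at the end of mdct_backward, here is the same arithmetic restated with array indices instead of the restrict pointer walk (float build only, hypothetical helper name; the fixed-point macros reduce to plain multiplies here). The '+=' lines are the overlap-add against whatever the previous frame left in out:

    /* Index-based restatement of the TDAC mirror + WOLA tail above.
       f2 holds the N/2 de-shuffled samples (already carrying the factor
       of 2 applied during the de-shuffle), out is the N-sample frame,
       window has 'overlap' taps. */
    static void wola_tail(const float *f2, float *out,
                          const float *window, int N, int overlap)
    {
       int i, j;
       const int N2 = N>>1, N4 = N>>2;

       /* Left half: plain mirror outside the window region... */
       for (i = 0; i < N4 - overlap/2; i++)
          out[N2-1-i] = f2[N4-1-i];
       /* ...then windowed overlap-add over the leading 'overlap' samples,
          which are centred on N/4. */
       for (j = 0; j < overlap/2; j++, i++) {
          float x = f2[N4-1-i];
          out[N4 - overlap/2 + j] += -window[j]           * x;
          out[N2 - 1 - i]         +=  window[overlap-1-j] * x;
       }

       /* Right half: plain mirror... */
       for (i = 0; i < N4 - overlap/2; i++)
          out[N2+i] = f2[N4+i];
       /* ...then window the trailing samples (centred on 3N/4) and write
          them out for the next frame's leading overlap to add onto. */
       for (j = 0; j < overlap/2; j++, i++) {
          float x = f2[N4+i];
          out[N2 + N4 + overlap/2 - 1 - j] = window[j]           * x;
          out[N2 + i]                      = window[overlap-1-j] * x;
       }
    }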