src/libFLAC/stream_decoder.c : Fix NULL de-reference.

diff --git a/src/libFLAC/lpc_intrin_sse2.c b/src/libFLAC/lpc_intrin_sse2.c
index 3ccebd0..e1908ed 100644
--- a/src/libFLAC/lpc_intrin_sse2.c
+++ b/src/libFLAC/lpc_intrin_sse2.c
@@ -1,6 +1,6 @@
 /* libFLAC - Free Lossless Audio Codec library
  * Copyright (C) 2000-2009  Josh Coalson
- * Copyright (C) 2011-2013  Xiph.Org Foundation
+ * Copyright (C) 2011-2014  Xiph.Org Foundation
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions
@@ -30,7 +30,7 @@
  * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
  */
 
-#if HAVE_CONFIG_H
+#ifdef HAVE_CONFIG_H
 #  include <config.h>
 #endif
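(Side note on this hunk: `#ifdef HAVE_CONFIG_H` only asks whether the macro is defined, while the old `#if HAVE_CONFIG_H` needs it to expand to an integer expression, so it warns under -Wundef when the macro is absent and is a hard preprocessor error if the build ever defines it to an empty value. Presumably this is just a robustness/consistency cleanup; behaviour is unchanged for the usual `-DHAVE_CONFIG_H` builds, where the macro is defined as 1.)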
 
 #define RESIDUAL16_RESULT(xmmN) curr = *data++; *residual++ = curr - (_mm_cvtsi128_si32(xmmN) >> lp_quantization);
 #define     DATA16_RESULT(xmmN) curr = *residual++ + (_mm_cvtsi128_si32(xmmN) >> lp_quantization); *data++ = curr;
 
-#define RESIDUAL_RESULT(xmmN) residual[i] = data[i] - (_mm_cvtsi128_si32(xmmN) >> lp_quantization);
-#define     DATA_RESULT(xmmN) data[i] = residual[i] + (_mm_cvtsi128_si32(xmmN) >> lp_quantization);
+#define RESIDUAL32_RESULT(xmmN) residual[i] = data[i] - (_mm_cvtsi128_si32(xmmN) >> lp_quantization);
+#define     DATA32_RESULT(xmmN) data[i] = residual[i] + (_mm_cvtsi128_si32(xmmN) >> lp_quantization);
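The renamed macros are the array-indexed counterparts of the pointer-walking 16-bit ones above, used by the 32-bit kernels further down the file (outside this excerpt). Either way, the value they store is the standard LPC residual; a scalar sketch of it, so the SIMD code below has a reference to compare against:

    /* Scalar reference for what the RESIDUAL16/32_RESULT macros ultimately
     * store: the prediction is the dot product of the quantized coefficients
     * with the preceding samples, scaled down by lp_quantization, and the
     * residual is its difference from the actual sample.  The DATA16/32_RESULT
     * macros invert this to reconstruct data[] from residual[].
     */
    for(i = 0; i < (int)data_len; i++) {
        FLAC__int32 sum = 0;
        unsigned j;
        for(j = 0; j < order; j++)
            sum += qlp_coeff[j] * data[i-j-1];
        residual[i] = data[i] - (sum >> lp_quantization);
    }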
 
 FLAC__SSE_TARGET("sse2")
 void FLAC__lpc_compute_residual_from_qlp_coefficients_16_intrin_sse2(const FLAC__int32 *data, unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 residual[])
 {
        int i;
        FLAC__int32 sum;
+       __m128i cnt = _mm_cvtsi32_si128(lp_quantization);
 
        FLAC__ASSERT(order > 0);
        FLAC__ASSERT(order <= 32);
-       FLAC__ASSERT(data_len > 0);
 
        if(order <= 12) {
-               FLAC__int32 curr;
-               if(order > 8) { /* order == 9, 10, 11, 12 */
-#ifdef FLAC__CPU_IA32 /* 8 XMM registers available */
-                       int r;
-                       __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
-                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                       xmm6 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-                       xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+8)); /* read 0 to 3 uninitialized coeffs... */
-                       switch(order)                                          /* ...and zero them out */
-                       {
-                       case 9:
-                               xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12); break;
-                       case 10:
-                               xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8); break;
-                       case 11:
-                               xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4); break;
-                       }
-                       xmm2 = _mm_setzero_si128();
-                       xmm0 = _mm_packs_epi32(xmm0, xmm6);
-                       xmm1 = _mm_packs_epi32(xmm1, xmm2);
-
-                       xmm4 = _mm_loadu_si128((const __m128i*)(data-12));
-                       xmm5 = _mm_loadu_si128((const __m128i*)(data-8));
-                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                       xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(0,1,2,3));
-                       xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(0,1,2,3));
-                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                       xmm4 = _mm_packs_epi32(xmm4, xmm2);
-                       xmm3 = _mm_packs_epi32(xmm3, xmm5);
-
-                       xmm7 = _mm_slli_si128(xmm1, 2);
-                       xmm7 = _mm_or_si128(xmm7, _mm_srli_si128(xmm0, 14));
-                       xmm2 = _mm_slli_si128(xmm0, 2);
-
-                       /* xmm0, xmm1: qlp_coeff
-                          xmm2, xmm7: qlp_coeff << 16 bit
-                          xmm3, xmm4: data */
-
-                       xmm6 = xmm4;
-                       xmm6 = _mm_madd_epi16(xmm6, xmm1);
-                       xmm5 = xmm3;
-                       xmm5 = _mm_madd_epi16(xmm5, xmm0);
-                       xmm6 = _mm_add_epi32(xmm6, xmm5);
-                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                       RESIDUAL16_RESULT(xmm6);
-
-                       data_len--;
-                       r = data_len % 2;
-
-                       if(r) {
-                               xmm4 = _mm_slli_si128(xmm4, 2);
-                               xmm6 = xmm3;
-                               xmm3 = _mm_slli_si128(xmm3, 2);
-                               xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 14));
-                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                               xmm6 = xmm4;
-                               xmm6 = _mm_madd_epi16(xmm6, xmm1);
-                               xmm5 = xmm3;
-                               xmm5 = _mm_madd_epi16(xmm5, xmm0);
-                               xmm6 = _mm_add_epi32(xmm6, xmm5);
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                               RESIDUAL16_RESULT(xmm6);
-
-                               data_len--;
-                       }
-
-                       while(data_len) { /* data_len is a multiple of 2 */
-                               /* 1 _mm_slli_si128 per data element less but we need shifted qlp_coeff in xmm2:xmm7 */
-                               xmm4 = _mm_slli_si128(xmm4, 4);
-                               xmm6 = xmm3;
-                               xmm3 = _mm_slli_si128(xmm3, 4);
-                               xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 12));
-                               xmm3 = _mm_insert_epi16(xmm3, curr, 1);
-
-                               xmm6 = xmm4;
-                               xmm6 = _mm_madd_epi16(xmm6, xmm7);
-                               xmm5 = xmm3;
-                               xmm5 = _mm_madd_epi16(xmm5, xmm2);
-                               xmm6 = _mm_add_epi32(xmm6, xmm5);
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                               RESIDUAL16_RESULT(xmm6);
-
-                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                               xmm6 = xmm4;
-                               xmm6 = _mm_madd_epi16(xmm6, xmm1);
-                               xmm5 = xmm3;
-                               xmm5 = _mm_madd_epi16(xmm5, xmm0);
-                               xmm6 = _mm_add_epi32(xmm6, xmm5);
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                               RESIDUAL16_RESULT(xmm6);
-
-                               data_len-=2;
-                       }
-#else /* 16 XMM registers available */
-                       int r;
-                       __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7, xmm8, xmm9, xmmA, xmmB;
-                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                       xmm6 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-                       xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+8)); /* read 0 to 3 uninitialized coeffs... */
-                       switch(order)                                          /* ...and zero them out */
-                       {
-                       case 9:
-                               xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12); break;
-                       case 10:
-                               xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8); break;
-                       case 11:
-                               xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4); break;
-                       }
-                       xmm2 = _mm_setzero_si128();
-                       xmm0 = _mm_packs_epi32(xmm0, xmm6);
-                       xmm1 = _mm_packs_epi32(xmm1, xmm2);
-
-                       xmm4 = _mm_loadu_si128((const __m128i*)(data-12));
-                       xmm5 = _mm_loadu_si128((const __m128i*)(data-8));
-                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                       xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(0,1,2,3));
-                       xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(0,1,2,3));
-                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                       xmm4 = _mm_packs_epi32(xmm4, xmm2);
-                       xmm3 = _mm_packs_epi32(xmm3, xmm5);
-
-                       xmm7 = _mm_slli_si128(xmm1, 2);
-                       xmm7 = _mm_or_si128(xmm7, _mm_srli_si128(xmm0, 14));
-                       xmm2 = _mm_slli_si128(xmm0, 2);
-
-                       xmm9 = _mm_slli_si128(xmm1, 4);
-                       xmm9 = _mm_or_si128(xmm9, _mm_srli_si128(xmm0, 12));
-                       xmm8 = _mm_slli_si128(xmm0, 4);
-
-                       xmmB = _mm_slli_si128(xmm1, 6);
-                       xmmB = _mm_or_si128(xmmB, _mm_srli_si128(xmm0, 10));
-                       xmmA = _mm_slli_si128(xmm0, 6);
-
-                       /* xmm0, xmm1: qlp_coeff
-                          xmm2, xmm7: qlp_coeff << 16 bit
-                          xmm8, xmm9: qlp_coeff << 2*16 bit
-                          xmmA, xmmB: qlp_coeff << 3*16 bit
-                          xmm3, xmm4: data */
-
-                       xmm6 = xmm4;
-                       xmm6 = _mm_madd_epi16(xmm6, xmm1);
-                       xmm5 = xmm3;
-                       xmm5 = _mm_madd_epi16(xmm5, xmm0);
-                       xmm6 = _mm_add_epi32(xmm6, xmm5);
-                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                       RESIDUAL16_RESULT(xmm6);
-
-                       data_len--;
-                       r = data_len % 4;
-
-                       while(r) {
-                               xmm4 = _mm_slli_si128(xmm4, 2);
-                               xmm6 = xmm3;
-                               xmm3 = _mm_slli_si128(xmm3, 2);
-                               xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 14));
-                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                               xmm6 = xmm4;
-                               xmm6 = _mm_madd_epi16(xmm6, xmm1);
-                               xmm5 = xmm3;
-                               xmm5 = _mm_madd_epi16(xmm5, xmm0);
-                               xmm6 = _mm_add_epi32(xmm6, xmm5);
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                               RESIDUAL16_RESULT(xmm6);
-
-                               data_len--; r--;
+               if(order > 8) {
+                       if(order > 10) {
+                               if(order == 12) {
+                                       __m128i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+                                       q4 = _mm_cvtsi32_si128(0xffff & qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
+                                       q5 = _mm_cvtsi32_si128(0xffff & qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
+                                       q6 = _mm_cvtsi32_si128(0xffff & qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
+                                       q7 = _mm_cvtsi32_si128(0xffff & qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
+                                       q8 = _mm_cvtsi32_si128(0xffff & qlp_coeff[8]); q8 = _mm_shuffle_epi32(q8, _MM_SHUFFLE(0,0,0,0));
+                                       q9 = _mm_cvtsi32_si128(0xffff & qlp_coeff[9]); q9 = _mm_shuffle_epi32(q9, _MM_SHUFFLE(0,0,0,0));
+                                       q10 = _mm_cvtsi32_si128(0xffff & qlp_coeff[10]); q10 = _mm_shuffle_epi32(q10, _MM_SHUFFLE(0,0,0,0));
+                                       q11 = _mm_cvtsi32_si128(0xffff & qlp_coeff[11]); q11 = _mm_shuffle_epi32(q11, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q11, _mm_loadu_si128((const __m128i*)(data+i-12)));
+                                               mull = _mm_madd_epi16(q10, _mm_loadu_si128((const __m128i*)(data+i-11))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q9, _mm_loadu_si128((const __m128i*)(data+i-10))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q8, _mm_loadu_si128((const __m128i*)(data+i-9))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q7, _mm_loadu_si128((const __m128i*)(data+i-8))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
+                                       }
+                               }
+                               else { /* order == 11 */
+                                       __m128i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+                                       q4 = _mm_cvtsi32_si128(0xffff & qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
+                                       q5 = _mm_cvtsi32_si128(0xffff & qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
+                                       q6 = _mm_cvtsi32_si128(0xffff & qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
+                                       q7 = _mm_cvtsi32_si128(0xffff & qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
+                                       q8 = _mm_cvtsi32_si128(0xffff & qlp_coeff[8]); q8 = _mm_shuffle_epi32(q8, _MM_SHUFFLE(0,0,0,0));
+                                       q9 = _mm_cvtsi32_si128(0xffff & qlp_coeff[9]); q9 = _mm_shuffle_epi32(q9, _MM_SHUFFLE(0,0,0,0));
+                                       q10 = _mm_cvtsi32_si128(0xffff & qlp_coeff[10]); q10 = _mm_shuffle_epi32(q10, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q10, _mm_loadu_si128((const __m128i*)(data+i-11)));
+                                               mull = _mm_madd_epi16(q9, _mm_loadu_si128((const __m128i*)(data+i-10))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q8, _mm_loadu_si128((const __m128i*)(data+i-9))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q7, _mm_loadu_si128((const __m128i*)(data+i-8))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
+                                       }
+                               }
                        }
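All of the order-specific branches introduced here follow the pattern of the order == 12 and order == 11 cases above: each quantized coefficient (which fits in 16 bits on this code path) is masked to its low halfword and broadcast to all four 32-bit lanes, so `_mm_madd_epi16` of that register with four consecutive sign-extended samples yields `qlp_coeff[j] * data[k]` in every lane, the upper halfword of the coefficient lane being zero. The per-tap products are accumulated with `_mm_add_epi32`, shifted down with `_mm_sra_epi32(summ, cnt)` (the shift count has to live in an XMM register because `lp_quantization` is a runtime value), and subtracted from the current four samples, giving four residuals per iteration. A minimal, self-contained sketch of the per-lane trick, with hypothetical `coeff`/`sample` arguments:

    #include <emmintrin.h> /* SSE2 */

    /* Demo: with the coefficient in the low halfword of each 32-bit lane and
     * zero in the high halfword, _mm_madd_epi16 against a sign-extended
     * 16-bit sample computes 0*hi16(sample) + coeff*lo16(sample), i.e. the
     * full product coeff*sample.  Assumes both values fit in 16 bits, which
     * is what this 16-bit code path is selected for.
     */
    static int one_tap(int coeff, int sample)
    {
        __m128i q, d, p;
        q = _mm_cvtsi32_si128(0xffff & coeff);
        q = _mm_shuffle_epi32(q, _MM_SHUFFLE(0,0,0,0)); /* broadcast to all lanes */
        d = _mm_set1_epi32(sample);                     /* as stored in data[]   */
        p = _mm_madd_epi16(q, d);
        return _mm_cvtsi128_si32(p);                    /* == coeff * sample     */
    }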
-
-                       while(data_len) { /* data_len is a multiple of 4 */
-                               xmm4 = _mm_slli_si128(xmm4, 8);
-                               xmm6 = xmm3;
-                               xmm3 = _mm_slli_si128(xmm3, 8);
-                               xmm4 = _mm_or_si128(xmm4, _mm_srli_si128(xmm6, 8));
-
-                               xmm3 = _mm_insert_epi16(xmm3, curr, 3);
-
-                               xmm6 = xmm4;
-                               xmm6 = _mm_madd_epi16(xmm6, xmmB);
-                               xmm5 = xmm3;
-                               xmm5 = _mm_madd_epi16(xmm5, xmmA);
-                               xmm6 = _mm_add_epi32(xmm6, xmm5);
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                               RESIDUAL16_RESULT(xmm6);
-
-                               xmm3 = _mm_insert_epi16(xmm3, curr, 2);
-
-                               xmm6 = xmm4;
-                               xmm6 = _mm_madd_epi16(xmm6, xmm9);
-                               xmm5 = xmm3;
-                               xmm5 = _mm_madd_epi16(xmm5, xmm8);
-                               xmm6 = _mm_add_epi32(xmm6, xmm5);
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                               RESIDUAL16_RESULT(xmm6);
-
-                               xmm3 = _mm_insert_epi16(xmm3, curr, 1);
-
-                               xmm6 = xmm4;
-                               xmm6 = _mm_madd_epi16(xmm6, xmm7);
-                               xmm5 = xmm3;
-                               xmm5 = _mm_madd_epi16(xmm5, xmm2);
-                               xmm6 = _mm_add_epi32(xmm6, xmm5);
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                               RESIDUAL16_RESULT(xmm6);
-
-                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                               xmm6 = xmm4;
-                               xmm6 = _mm_madd_epi16(xmm6, xmm1);
-                               xmm5 = xmm3;
-                               xmm5 = _mm_madd_epi16(xmm5, xmm0);
-                               xmm6 = _mm_add_epi32(xmm6, xmm5);
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                               RESIDUAL16_RESULT(xmm6);
-
-                               data_len-=4;
+                       else {
+                               if(order == 10) {
+                                       __m128i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+                                       q4 = _mm_cvtsi32_si128(0xffff & qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
+                                       q5 = _mm_cvtsi32_si128(0xffff & qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
+                                       q6 = _mm_cvtsi32_si128(0xffff & qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
+                                       q7 = _mm_cvtsi32_si128(0xffff & qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
+                                       q8 = _mm_cvtsi32_si128(0xffff & qlp_coeff[8]); q8 = _mm_shuffle_epi32(q8, _MM_SHUFFLE(0,0,0,0));
+                                       q9 = _mm_cvtsi32_si128(0xffff & qlp_coeff[9]); q9 = _mm_shuffle_epi32(q9, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q9, _mm_loadu_si128((const __m128i*)(data+i-10)));
+                                               mull = _mm_madd_epi16(q8, _mm_loadu_si128((const __m128i*)(data+i-9))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q7, _mm_loadu_si128((const __m128i*)(data+i-8))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
+                                       }
+                               }
+                               else { /* order == 9 */
+                                       __m128i q0, q1, q2, q3, q4, q5, q6, q7, q8;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+                                       q4 = _mm_cvtsi32_si128(0xffff & qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
+                                       q5 = _mm_cvtsi32_si128(0xffff & qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
+                                       q6 = _mm_cvtsi32_si128(0xffff & qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
+                                       q7 = _mm_cvtsi32_si128(0xffff & qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
+                                       q8 = _mm_cvtsi32_si128(0xffff & qlp_coeff[8]); q8 = _mm_shuffle_epi32(q8, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q8, _mm_loadu_si128((const __m128i*)(data+i-9)));
+                                               mull = _mm_madd_epi16(q7, _mm_loadu_si128((const __m128i*)(data+i-8))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
+                                       }
+                               }
                        }
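Note the loop shape shared by every branch: `i` runs while `i < (int)data_len - 3`, so each iteration stores four residuals with one unaligned 128-bit store, and the cast to `int` keeps the bound sane when `data_len < 4`, which is presumably why the `FLAC__ASSERT(data_len > 0)` above could be dropped. The last `data_len % 4` samples are presumably finished by a scalar tail after the SIMD branches, outside this excerpt; a sketch of the overall shape, under that assumption:

    /* Shape of each order-specific branch (sketch; the scalar tail is assumed
     * to follow the excerpt, as in the generic C implementation). */
    for(i = 0; i < (int)data_len-3; i+=4) {
        /* four predictions via _mm_madd_epi16 / _mm_add_epi32, then
         * _mm_sra_epi32 and one _mm_storeu_si128 into residual+i */
    }
    for(; i < (int)data_len; i++) {
        FLAC__int32 sum = 0;
        unsigned j;
        for(j = 0; j < order; j++)
            sum += qlp_coeff[j] * data[i-j-1];
        residual[i] = data[i] - (sum >> lp_quantization);
    }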
-#endif
-               } /* endif(order > 8) */
-               else if(order > 4) { /* order == 5, 6, 7, 8 */
-                       if(order > 6) { /* order == 7, 8 */
+               }
+               else if(order > 4) {
+                       if(order > 6) {
                                if(order == 8) {
-                                       __m128i xmm0, xmm1, xmm3, xmm6;
-                                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                                       xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-                                       xmm0 = _mm_packs_epi32(xmm0, xmm1);
-
-                                       xmm1 = _mm_loadu_si128((const __m128i*)(data-8));
-                                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                                       xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_packs_epi32(xmm3, xmm1);
-
-                                       /* xmm0: qlp_coeff
-                                          xmm3: data */
-
-                                       xmm6 = xmm3;
-                                       xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                       RESIDUAL16_RESULT(xmm6);
-
-                                       data_len--;
-
-                                       while(data_len) {
-                                               xmm3 = _mm_slli_si128(xmm3, 2);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len--;
+                                       __m128i q0, q1, q2, q3, q4, q5, q6, q7;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+                                       q4 = _mm_cvtsi32_si128(0xffff & qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
+                                       q5 = _mm_cvtsi32_si128(0xffff & qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
+                                       q6 = _mm_cvtsi32_si128(0xffff & qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
+                                       q7 = _mm_cvtsi32_si128(0xffff & qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q7, _mm_loadu_si128((const __m128i*)(data+i-8)));
+                                               mull = _mm_madd_epi16(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
                                        }
                                }
                                else { /* order == 7 */
-                                       int r;
-                                       __m128i xmm0, xmm1, xmm2, xmm3, xmm6;
-                                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                                       xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-                                       xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4);
-                                       xmm0 = _mm_packs_epi32(xmm0, xmm1);
-
-                                       xmm1 = _mm_loadu_si128((const __m128i*)(data-8));
-                                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                                       xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_packs_epi32(xmm3, xmm1);
-                                       xmm2 = _mm_slli_si128(xmm0, 2);
-
-                                       /* xmm0: qlp_coeff
-                                          xmm2: qlp_coeff << 16 bit
-                                          xmm3: data */
-
-                                       xmm6 = xmm3;
-                                       xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                       RESIDUAL16_RESULT(xmm6);
-
-                                       data_len--;
-                                       r = data_len % 2;
-
-                                       if(r) {
-                                               xmm3 = _mm_slli_si128(xmm3, 2);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len--;
-                                       }
-
-                                       while(data_len) { /* data_len is a multiple of 2 */
-                                               xmm3 = _mm_slli_si128(xmm3, 4);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 1);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm2);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len-=2;
+                                       __m128i q0, q1, q2, q3, q4, q5, q6;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+                                       q4 = _mm_cvtsi32_si128(0xffff & qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
+                                       q5 = _mm_cvtsi32_si128(0xffff & qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
+                                       q6 = _mm_cvtsi32_si128(0xffff & qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q6, _mm_loadu_si128((const __m128i*)(data+i-7)));
+                                               mull = _mm_madd_epi16(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
                                        }
                                }
                        }
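Worth noting as a design change rather than a fix: the old kernels kept the previous samples packed as 16-bit values in a sliding XMM window, shifting it and inserting the just-handled sample with `_mm_insert_epi16` for every output, which produces exactly one residual per pass and chains each iteration on the previous one. The new kernels instead keep one broadcast register per coefficient and re-read overlapping four-sample windows with `_mm_loadu_si128`, so iterations are independent and four residuals come out of every pass, at the cost of more (unaligned) loads.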
-                       else { /* order == 5, 6 */
+                       else {
                                if(order == 6) {
-                                       int r;
-                                       __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6;
-                                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                                       xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-                                       xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8);
-                                       xmm0 = _mm_packs_epi32(xmm0, xmm1);
-
-                                       xmm1 = _mm_loadu_si128((const __m128i*)(data-8));
-                                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                                       xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_packs_epi32(xmm3, xmm1);
-                                       xmm2 = _mm_slli_si128(xmm0, 2);
-                                       xmm4 = _mm_slli_si128(xmm0, 4);
-
-                                       /* xmm0: qlp_coeff
-                                          xmm2: qlp_coeff << 16 bit
-                                          xmm4: qlp_coeff << 2*16 bit
-                                          xmm3: data */
-
-                                       xmm6 = xmm3;
-                                       xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                       RESIDUAL16_RESULT(xmm6);
-
-                                       data_len--;
-                                       r = data_len % 3;
-
-                                       while(r) {
-                                               xmm3 = _mm_slli_si128(xmm3, 2);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len--; r--;
-                                       }
-
-                                       while(data_len) { /* data_len is a multiple of 3 */
-                                               xmm3 = _mm_slli_si128(xmm3, 6);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 2);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm4);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 1);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm2);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len-=3;
+                                       __m128i q0, q1, q2, q3, q4, q5;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+                                       q4 = _mm_cvtsi32_si128(0xffff & qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
+                                       q5 = _mm_cvtsi32_si128(0xffff & qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q5, _mm_loadu_si128((const __m128i*)(data+i-6)));
+                                               mull = _mm_madd_epi16(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
                                        }
                                }
                                else { /* order == 5 */
-                                       int r;
-                                       __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6;
-                                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                                       xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
-                                       xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12);
-                                       xmm0 = _mm_packs_epi32(xmm0, xmm1);
-
-                                       xmm1 = _mm_loadu_si128((const __m128i*)(data-8));
-                                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                                       xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_packs_epi32(xmm3, xmm1);
-                                       xmm2 = _mm_slli_si128(xmm0, 2);
-                                       xmm4 = _mm_slli_si128(xmm0, 4);
-                                       xmm5 = _mm_slli_si128(xmm0, 6);
-
-                                       /* xmm0: qlp_coeff
-                                          xmm2: qlp_coeff << 16 bit
-                                          xmm4: qlp_coeff << 2*16 bit
-                                          xmm4: qlp_coeff << 3*16 bit
-                                          xmm3: data */
-
-                                       xmm6 = xmm3;
-                                       xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                       RESIDUAL16_RESULT(xmm6);
-
-                                       data_len--;
-                                       r = data_len % 4;
-
-                                       while(r) {
-                                               xmm3 = _mm_slli_si128(xmm3, 2);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len--; r--;
-                                       }
-
-                                       while(data_len) { /* data_len is a multiple of 4 */
-                                               xmm3 = _mm_slli_si128(xmm3, 8);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 3);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm5);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 2);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm4);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 1);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm2);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len-=4;
+                                       __m128i q0, q1, q2, q3, q4;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+                                       q4 = _mm_cvtsi32_si128(0xffff & qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q4, _mm_loadu_si128((const __m128i*)(data+i-5)));
+                                               mull = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
                                        }
                                }
                        }
                }
-               else { /* order == 1, 2, 3, 4 */
+               else {
                        if(order > 2) {
                                if(order == 4) {
-                                       __m128i xmm0, xmm3, xmm6;
-                                       xmm6 = _mm_setzero_si128();
-                                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                                       xmm0 = _mm_packs_epi32(xmm0, xmm6);
-
-                                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_packs_epi32(xmm3, xmm6);
-
-                                       /* xmm0: qlp_coeff
-                                          xmm3: data */
-
-                                       xmm6 = xmm3;
-                                       xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                       RESIDUAL16_RESULT(xmm6);
-
-                                       data_len--;
-
-                                       while(data_len) {
-                                               xmm3 = _mm_slli_si128(xmm3, 2);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len--;
+                                       __m128i q0, q1, q2, q3;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+                                       q3 = _mm_cvtsi32_si128(0xffff & qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q3, _mm_loadu_si128((const __m128i*)(data+i-4)));
+                                               mull = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
                                        }
                                }
                                else { /* order == 3 */
-                                       int r;
-                                       __m128i xmm0, xmm1, xmm3, xmm6;
-                                       xmm6 = _mm_setzero_si128();
-                                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                                       xmm0 = _mm_slli_si128(xmm0, 4); xmm0 = _mm_srli_si128(xmm0, 4);
-                                       xmm0 = _mm_packs_epi32(xmm0, xmm6);
-
-                                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_packs_epi32(xmm3, xmm6);
-                                       xmm1 = _mm_slli_si128(xmm0, 2);
-
-                                       /* xmm0: qlp_coeff
-                                          xmm1: qlp_coeff << 16 bit
-                                          xmm3: data */
-
-                                       xmm6 = xmm3;
-                                       xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                       RESIDUAL16_RESULT(xmm6);
-
-                                       data_len--;
-                                       r = data_len % 2;
-
-                                       if(r) {
-                                               xmm3 = _mm_slli_si128(xmm3, 2);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len--;
-                                       }
-
-                                       while(data_len) { /* data_len is a multiple of 2 */
-                                               xmm3 = _mm_slli_si128(xmm3, 4);
-
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 1);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm1);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-                                               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len-=2;
+                                       __m128i q0, q1, q2;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+                                       q2 = _mm_cvtsi32_si128(0xffff & qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q2, _mm_loadu_si128((const __m128i*)(data+i-3)));
+                                               mull = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
                                        }
                                }
                        }
                        else {
                                if(order == 2) {
-                                       __m128i xmm0, xmm3, xmm6;
-                                       xmm6 = _mm_setzero_si128();
-                                       xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
-                                       xmm0 = _mm_slli_si128(xmm0, 8); xmm0 = _mm_srli_si128(xmm0, 8);
-                                       xmm0 = _mm_packs_epi32(xmm0, xmm6);
-
-                                       xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
-                                       xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
-                                       xmm3 = _mm_packs_epi32(xmm3, xmm6);
-
-                                       /* xmm0: qlp_coeff
-                                          xmm3: data */
-
-                                       xmm6 = xmm3;
-                                       xmm6 = _mm_madd_epi16(xmm6, xmm0);
-
-                                       RESIDUAL16_RESULT(xmm6);
-
-                                       data_len--;
-
-                                       while(data_len) {
-                                               xmm3 = _mm_slli_si128(xmm3, 2);
-                                               xmm3 = _mm_insert_epi16(xmm3, curr, 0);
-
-                                               xmm6 = xmm3;
-                                               xmm6 = _mm_madd_epi16(xmm6, xmm0);
-
-                                               RESIDUAL16_RESULT(xmm6);
-
-                                               data_len--;
+                                       __m128i q0, q1;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+                                       q1 = _mm_cvtsi32_si128(0xffff & qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ, mull;
+                                               summ = _mm_madd_epi16(q1, _mm_loadu_si128((const __m128i*)(data+i-2)));
+                                               mull = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
                                        }
                                }
                                else { /* order == 1 */
-                                       for(i = 0; i < (int)data_len; i++)
-                                               residual[i] = data[i] - ((qlp_coeff[0] * data[i-1]) >> lp_quantization);
+                                       __m128i q0;
+                                       q0 = _mm_cvtsi32_si128(0xffff & qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
+
+                                       for(i = 0; i < (int)data_len-3; i+=4) {
+                                               __m128i summ;
+                                               summ = _mm_madd_epi16(q0, _mm_loadu_si128((const __m128i*)(data+i-1)));
+                                               summ = _mm_sra_epi32(summ, cnt);
+                                               _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
+                                       }
                                }
                        }
                }
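+               /* compute the remaining data_len % 4 residuals (all of them when data_len < 4) with plain C; the switch below deliberately falls through, accumulating all taps down to order 1 */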
+               for(; i < (int)data_len; i++) {
+                       sum = 0;
+                       switch(order) {
+                               case 12: sum += qlp_coeff[11] * data[i-12];
+                               case 11: sum += qlp_coeff[10] * data[i-11];
+                               case 10: sum += qlp_coeff[ 9] * data[i-10];
+                               case 9:  sum += qlp_coeff[ 8] * data[i- 9];
+                               case 8:  sum += qlp_coeff[ 7] * data[i- 8];
+                               case 7:  sum += qlp_coeff[ 6] * data[i- 7];
+                               case 6:  sum += qlp_coeff[ 5] * data[i- 6];
+                               case 5:  sum += qlp_coeff[ 4] * data[i- 5];
+                               case 4:  sum += qlp_coeff[ 3] * data[i- 4];
+                               case 3:  sum += qlp_coeff[ 2] * data[i- 3];
+                               case 2:  sum += qlp_coeff[ 1] * data[i- 2];
+                               case 1:  sum += qlp_coeff[ 0] * data[i- 1];
+                       }
+                       residual[i] = data[i] - (sum >> lp_quantization);
+               }
        }
        else { /* order > 12 */
                for(i = 0; i < (int)data_len; i++) {
@@ -828,7 +476,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 11 */
@@ -888,7 +536,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                        }
@@ -944,7 +592,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 9 */
@@ -995,7 +643,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                        }
@@ -1044,7 +692,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 7 */
@@ -1086,7 +734,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                        }
@@ -1124,7 +772,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 5 */
@@ -1157,7 +805,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                        }
@@ -1188,7 +836,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 3 */
@@ -1212,7 +860,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_add_epi32(xmm7, xmm6);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                        }
@@ -1231,7 +879,7 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
                                                xmm7 = _mm_mul_epu32(xmm7, xmm0);
 
                                                xmm7 = _mm_add_epi32(xmm7, _mm_srli_si128(xmm7, 8));
-                                               RESIDUAL_RESULT(xmm7);
+                                               RESIDUAL32_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 1 */
@@ -1284,6 +932,156 @@ void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse2(const FLAC__in
        }
 }
 
+#if defined FLAC__CPU_IA32 && !defined FLAC__HAS_NASM /* unused for x64; not better than MMX asm */
+
+FLAC__SSE_TARGET("sse2")
+void FLAC__lpc_restore_signal_16_intrin_sse2(const FLAC__int32 residual[], unsigned data_len, const FLAC__int32 qlp_coeff[], unsigned order, int lp_quantization, FLAC__int32 data[])
+{
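+       /* the SSE2 path below handles orders 8..12 only; fall back to the plain C routine otherwise */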
+       if(order < 8 || order > 12) {
+               FLAC__lpc_restore_signal(residual, data_len, qlp_coeff, order, lp_quantization, data);
+               return;
+       }
+       if(data_len == 0)
+               return;
+
+       FLAC__ASSERT(order >= 8);
+       FLAC__ASSERT(order <= 12);
+
+       if(order > 8) { /* order == 9, 10, 11, 12 */
+               FLAC__int32 curr;
+               __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
+               xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
+               xmm6 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
+               xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+8)); /* read 0 to 3 uninitialized coeffs... */
+               switch(order)                                          /* ...and zero them out */
+               {
+               case 9:
+                       xmm1 = _mm_slli_si128(xmm1, 12); xmm1 = _mm_srli_si128(xmm1, 12); break;
+               case 10:
+                       xmm1 = _mm_slli_si128(xmm1, 8); xmm1 = _mm_srli_si128(xmm1, 8); break;
+               case 11:
+                       xmm1 = _mm_slli_si128(xmm1, 4); xmm1 = _mm_srli_si128(xmm1, 4); break;
+               }
+               xmm2 = _mm_setzero_si128();
+               xmm0 = _mm_packs_epi32(xmm0, xmm6);
+               xmm1 = _mm_packs_epi32(xmm1, xmm2);
+
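+               /* load the 12 preceding history samples, reverse their order and pack them to 16 bits */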
+               xmm4 = _mm_loadu_si128((const __m128i*)(data-12));
+               xmm5 = _mm_loadu_si128((const __m128i*)(data-8));
+               xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
+               xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(0,1,2,3));
+               xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(0,1,2,3));
+               xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
+               xmm4 = _mm_packs_epi32(xmm4, xmm2);
+               xmm3 = _mm_packs_epi32(xmm3, xmm5);
+
+               xmm7 = _mm_slli_si128(xmm1, 2);
+               xmm7 = _mm_or_si128(xmm7, _mm_srli_si128(xmm0, 14));
+               xmm2 = _mm_slli_si128(xmm0, 2);
+
+               /* xmm0, xmm1: qlp_coeff
+                  xmm2, xmm7: qlp_coeff << 16 bit
+                  xmm3, xmm4: data */
+
+               xmm5 = _mm_madd_epi16(xmm4, xmm1);
+               xmm6 = _mm_madd_epi16(xmm3, xmm0);
+               xmm6 = _mm_add_epi32(xmm6, xmm5);
+               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
+               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
+
+               DATA16_RESULT(xmm6);
+
+               data_len--;
+
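+               /* if an odd number of samples remains, process one here so the main loop can handle two per iteration */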
+               if(data_len % 2) {
+                       xmm6 = _mm_srli_si128(xmm3, 14);
+                       xmm4 = _mm_slli_si128(xmm4, 2);
+                       xmm3 = _mm_slli_si128(xmm3, 2);
+                       xmm4 = _mm_or_si128(xmm4, xmm6);
+                       xmm3 = _mm_insert_epi16(xmm3, curr, 0);
+
+                       xmm5 = _mm_madd_epi16(xmm4, xmm1);
+                       xmm6 = _mm_madd_epi16(xmm3, xmm0);
+                       xmm6 = _mm_add_epi32(xmm6, xmm5);
+                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
+                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
+
+                       DATA16_RESULT(xmm6);
+
+                       data_len--;
+               }
+
+               while(data_len) { /* data_len is a multiple of 2 */
+                       /* one _mm_slli_si128 less per data element, but this needs the shifted qlp_coeff copies kept in xmm2:xmm7 */
+                       xmm6 = _mm_srli_si128(xmm3, 12);
+                       xmm4 = _mm_slli_si128(xmm4, 4);
+                       xmm3 = _mm_slli_si128(xmm3, 4);
+                       xmm4 = _mm_or_si128(xmm4, xmm6);
+                       xmm3 = _mm_insert_epi16(xmm3, curr, 1);
+
+                       xmm5 = _mm_madd_epi16(xmm4, xmm7);
+                       xmm6 = _mm_madd_epi16(xmm3, xmm2);
+                       xmm6 = _mm_add_epi32(xmm6, xmm5);
+                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
+                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
+
+                       DATA16_RESULT(xmm6);
+
+                       xmm3 = _mm_insert_epi16(xmm3, curr, 0);
+
+                       xmm5 = _mm_madd_epi16(xmm4, xmm1);
+                       xmm6 = _mm_madd_epi16(xmm3, xmm0);
+                       xmm6 = _mm_add_epi32(xmm6, xmm5);
+                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
+                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
+
+                       DATA16_RESULT(xmm6);
+
+                       data_len-=2;
+               }
+       } /* endif(order > 8) */
+       else /* order == 8 */
+       {
+               FLAC__int32 curr;
+               __m128i xmm0, xmm1, xmm3, xmm6;
+               xmm0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
+               xmm1 = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
+               xmm0 = _mm_packs_epi32(xmm0, xmm1);
+
+               xmm1 = _mm_loadu_si128((const __m128i*)(data-8));
+               xmm3 = _mm_loadu_si128((const __m128i*)(data-4));
+               xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(0,1,2,3));
+               xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(0,1,2,3));
+               xmm3 = _mm_packs_epi32(xmm3, xmm1);
+
+               /* xmm0: qlp_coeff
+                  xmm3: data */
+
+               xmm6 = _mm_madd_epi16(xmm3, xmm0);
+               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
+               xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
+
+               DATA16_RESULT(xmm6);
+
+               data_len--;
+
+               while(data_len) {
+                       xmm3 = _mm_slli_si128(xmm3, 2);
+                       xmm3 = _mm_insert_epi16(xmm3, curr, 0);
+
+                       xmm6 = _mm_madd_epi16(xmm3, xmm0);
+                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 8));
+                       xmm6 = _mm_add_epi32(xmm6, _mm_srli_si128(xmm6, 4));
+
+                       DATA16_RESULT(xmm6);
+
+                       data_len--;
+               }
+       }
+}
+
+#endif /* defined FLAC__CPU_IA32 && !defined FLAC__HAS_NASM */
+
 #endif /* FLAC__SSE2_SUPPORTED */
 #endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */
 #endif /* FLAC__NO_ASM */