[flac] Update FLAC to 1.3.3
FLAC/src/lpc_intrin_sse41.c
/* libFLAC - Free Lossless Audio Codec library
 * Copyright (C) 2000-2009  Josh Coalson
 * Copyright (C) 2011-2016  Xiph.Org Foundation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 * - Redistributions of source code must retain the above copyright
 * notice, this list of conditions and the following disclaimer.
 *
 * - Redistributions in binary form must reproduce the above copyright
 * notice, this list of conditions and the following disclaimer in the
 * documentation and/or other materials provided with the distribution.
 *
 * - Neither the name of the Xiph.org Foundation nor the names of its
 * contributors may be used to endorse or promote products derived from
 * this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * ``AS IS'' AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED.  IN NO EVENT SHALL THE FOUNDATION OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
 * PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
 * LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
 * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifdef HAVE_CONFIG_H
#  include <config.h>
#endif

#include "private/cpu.h"

#ifndef FLAC__INTEGER_ONLY_LIBRARY
#ifndef FLAC__NO_ASM
#if (defined FLAC__CPU_IA32 || defined FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN
#include "private/lpc.h"
#ifdef FLAC__SSE4_1_SUPPORTED

#include "FLAC/assert.h"
#include "FLAC/format.h"

#include <smmintrin.h> /* SSE4.1 */

#if defined FLAC__CPU_IA32 /* unused for x64 */

#define RESIDUAL64_RESULT(xmmN)  residual[i] = data[i] - _mm_cvtsi128_si32(_mm_srl_epi64(xmmN, cnt))
#define RESIDUAL64_RESULT1(xmmN) residual[i] = data[i] - _mm_cvtsi128_si32(_mm_srli_epi64(xmmN, lp_quantization))
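/* Both macros take the 64-bit prediction sum in the low lane of xmmN, shift it
 * right by lp_quantization and subtract the low 32 bits from data[i].  The
 * shift is logical (SSE has no 64-bit arithmetic shift), but for shift counts
 * <= 32 the low 32 bits of a logical and an arithmetic shift agree, and
 * _mm_cvtsi128_si32() keeps only those bits, so the result is correct even
 * for negative sums; hence the lp_quantization <= 32 assertions below. */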

FLAC__SSE_TARGET("sse4.1")
void FLAC__lpc_compute_residual_from_qlp_coefficients_wide_intrin_sse41(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[])
{
        int i;
        const __m128i cnt = _mm_cvtsi32_si128(lp_quantization);

        FLAC__ASSERT(order > 0);
        FLAC__ASSERT(order <= 32);
        FLAC__ASSERT(lp_quantization <= 32); /* there's no _mm_sra_epi64() so we have to use _mm_srl_epi64() */

        if(order <= 12) {
                if(order > 8) { /* order == 9, 10, 11, 12 */
                        if(order > 10) { /* order == 11, 12 */
                                if(order == 12) {
                                        __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));  // 0  0  q[1]  q[0]
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));  // 0  0  q[3]  q[2]
                                        xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4));  // 0  0  q[5]  q[4]
                                        xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6));  // 0  0  q[7]  q[6]
                                        xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8));  // 0  0  q[9]  q[8]
                                        xmm5 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+10)); // 0  0  q[11] q[10]

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0)); // 0  q[1]  0  q[0]
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0)); // 0  q[3]  0  q[2]
                                        xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0)); // 0  q[5]  0  q[4]
                                        xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0)); // 0  q[7]  0  q[6]
                                        xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0)); // 0  q[9]  0  q[8]
                                        xmm5 = _mm_shuffle_epi32(xmm5, _MM_SHUFFLE(3,1,2,0)); // 0  q[11] 0  q[10]
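                                        /* _mm_mul_epi32() multiplies only the signed 32-bit values in
                                         * lanes 0 and 2 of each operand, giving two 64-bit products per
                                         * instruction.  The (3,1,2,0) shuffle above spreads each
                                         * coefficient pair into those even lanes; the (2,0,3,1) shuffle
                                         * in the loop below also swaps the data pair so that d[i-1]
                                         * lines up with q[0], d[i-2] with q[1], and so on. */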

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
                                                //sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
                                                xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-12));  // 0   0        d[i-11]  d[i-12]
                                                xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1)); // 0  d[i-12]   0        d[i-11]
                                                xmm7 = _mm_mul_epi32(xmm7, xmm5);

                                                //sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
                                                //sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-10));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm4);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
                                                //sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm3);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
                                                //sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm2);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm1);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

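                                                // horizontal sum: fold the upper 64-bit partial product
                                                // into the low lane so the full dot product sits in the
                                                // low 64 bits before the shift-and-subtract macro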
                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT1(xmm7);
                                        }
                                }
                                else { /* order == 11 */
                                        __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm5, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));
                                        xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4));
                                        xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6));
                                        xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8));
                                        xmm5 = _mm_cvtsi32_si128(qlp_coeff[10]);
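                                        /* odd order: the lone q[10] stays in lane 0, where it will
                                         * multiply a scalar d[i-11] loaded the same way below */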

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0));
                                        xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0));
                                        xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0));
                                        xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum  = qlp_coeff[10] * (FLAC__int64)data[i-11];
                                                xmm7 = _mm_cvtsi32_si128(data[i-11]);
                                                xmm7 = _mm_mul_epi32(xmm7, xmm5);

                                                //sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
                                                //sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-10));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm4);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
                                                //sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm3);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
                                                //sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm2);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm1);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT1(xmm7);
                                        }
                                }
                        }
                        else { /* order == 9, 10 */
                                if(order == 10) {
                                        __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));
                                        xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4));
                                        xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6));
                                        xmm4 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+8));

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0));
                                        xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0));
                                        xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0));
                                        xmm4 = _mm_shuffle_epi32(xmm4, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum += qlp_coeff[9] * (FLAC__int64)data[i-10];
                                                //sum += qlp_coeff[8] * (FLAC__int64)data[i-9];
                                                xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-10));
                                                xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1));
                                                xmm7 = _mm_mul_epi32(xmm7, xmm4);

                                                //sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
                                                //sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm3);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
                                                //sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm2);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm1);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 9 */
                                        __m128i xmm0, xmm1, xmm2, xmm3, xmm4, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));
                                        xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4));
                                        xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6));
                                        xmm4 = _mm_cvtsi32_si128(qlp_coeff[8]);

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0));
                                        xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0));
                                        xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum  = qlp_coeff[8] * (FLAC__int64)data[i-9];
                                                xmm7 = _mm_cvtsi32_si128(data[i-9]);
                                                xmm7 = _mm_mul_epi32(xmm7, xmm4);

                                                //sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
                                                //sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-8));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm3);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
                                                //sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm2);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm1);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                        }
                }
                else if(order > 4) { /* order == 5, 6, 7, 8 */
                        if(order > 6) { /* order == 7, 8 */
                                if(order == 8) {
                                        __m128i xmm0, xmm1, xmm2, xmm3, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));
                                        xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4));
                                        xmm3 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+6));

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0));
                                        xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0));
                                        xmm3 = _mm_shuffle_epi32(xmm3, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum += qlp_coeff[7] * (FLAC__int64)data[i-8];
                                                //sum += qlp_coeff[6] * (FLAC__int64)data[i-7];
                                                xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-8));
                                                xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1));
                                                xmm7 = _mm_mul_epi32(xmm7, xmm3);

                                                //sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
                                                //sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm2);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm1);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 7 */
                                        __m128i xmm0, xmm1, xmm2, xmm3, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));
                                        xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4));
                                        xmm3 = _mm_cvtsi32_si128(qlp_coeff[6]);

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0));
                                        xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum  = qlp_coeff[6] * (FLAC__int64)data[i-7];
                                                xmm7 = _mm_cvtsi32_si128(data[i-7]);
                                                xmm7 = _mm_mul_epi32(xmm7, xmm3);

                                                //sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
                                                //sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-6));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm2);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm1);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                        }
                        else { /* order == 5, 6 */
                                if(order == 6) {
                                        __m128i xmm0, xmm1, xmm2, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));
                                        xmm2 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+4));

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0));
                                        xmm2 = _mm_shuffle_epi32(xmm2, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum += qlp_coeff[5] * (FLAC__int64)data[i-6];
                                                //sum += qlp_coeff[4] * (FLAC__int64)data[i-5];
                                                xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-6));
                                                xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1));
                                                xmm7 = _mm_mul_epi32(xmm7, xmm2);

                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm1);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 5 */
                                        __m128i xmm0, xmm1, xmm2, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));
                                        xmm2 = _mm_cvtsi32_si128(qlp_coeff[4]);

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum  = qlp_coeff[4] * (FLAC__int64)data[i-5];
                                                xmm7 = _mm_cvtsi32_si128(data[i-5]);
                                                xmm7 = _mm_mul_epi32(xmm7, xmm2);

                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm1);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                        }
                }
                else { /* order == 1, 2, 3, 4 */
                        if(order > 2) { /* order == 3, 4 */
                                if(order == 4) {
                                        __m128i xmm0, xmm1, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+2));

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));
                                        xmm1 = _mm_shuffle_epi32(xmm1, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum += qlp_coeff[3] * (FLAC__int64)data[i-4];
                                                //sum += qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-4));
                                                xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1));
                                                xmm7 = _mm_mul_epi32(xmm7, xmm1);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 3 */
                                        __m128i xmm0, xmm1, xmm6, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm1 = _mm_cvtsi32_si128(qlp_coeff[2]);

                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum  = qlp_coeff[2] * (FLAC__int64)data[i-3];
                                                xmm7 = _mm_cvtsi32_si128(data[i-3]);
                                                xmm7 = _mm_mul_epi32(xmm7, xmm1);

                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm6 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm6 = _mm_shuffle_epi32(xmm6, _MM_SHUFFLE(2,0,3,1));
                                                xmm6 = _mm_mul_epi32(xmm6, xmm0);
                                                xmm7 = _mm_add_epi64(xmm7, xmm6);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                        }
                        else { /* order == 1, 2 */
                                if(order == 2) {
                                        __m128i xmm0, xmm7;
                                        xmm0 = _mm_loadl_epi64((const __m128i*)(qlp_coeff+0));
                                        xmm0 = _mm_shuffle_epi32(xmm0, _MM_SHUFFLE(3,1,2,0));

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = 0;
                                                //sum += qlp_coeff[1] * (FLAC__int64)data[i-2];
                                                //sum += qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm7 = _mm_loadl_epi64((const __m128i*)(data+i-2));
                                                xmm7 = _mm_shuffle_epi32(xmm7, _MM_SHUFFLE(2,0,3,1));
                                                xmm7 = _mm_mul_epi32(xmm7, xmm0);

                                                xmm7 = _mm_add_epi64(xmm7, _mm_srli_si128(xmm7, 8));
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                                else { /* order == 1 */
                                        __m128i xmm0, xmm7;
                                        xmm0 = _mm_cvtsi32_si128(qlp_coeff[0]);

                                        for(i = 0; i < (int)data_len; i++) {
                                                //sum = qlp_coeff[0] * (FLAC__int64)data[i-1];
                                                xmm7 = _mm_cvtsi32_si128(data[i-1]);
                                                xmm7 = _mm_mul_epi32(xmm7, xmm0);
                                                RESIDUAL64_RESULT(xmm7);
                                        }
                                }
                        }
                }
        }
        else { /* order > 12 */
                FLAC__int64 sum;
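                /* generic scalar fallback: the switch falls through from the
                 * highest tap down to case 13, accumulating one 64-bit product
                 * per case; the remaining twelve taps are summed unconditionally */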
                for(i = 0; i < (int)data_len; i++) {
                        sum = 0;
                        switch(order) {
                                case 32: sum += qlp_coeff[31] * (FLAC__int64)data[i-32]; /* Falls through. */
                                case 31: sum += qlp_coeff[30] * (FLAC__int64)data[i-31]; /* Falls through. */
                                case 30: sum += qlp_coeff[29] * (FLAC__int64)data[i-30]; /* Falls through. */
                                case 29: sum += qlp_coeff[28] * (FLAC__int64)data[i-29]; /* Falls through. */
                                case 28: sum += qlp_coeff[27] * (FLAC__int64)data[i-28]; /* Falls through. */
                                case 27: sum += qlp_coeff[26] * (FLAC__int64)data[i-27]; /* Falls through. */
                                case 26: sum += qlp_coeff[25] * (FLAC__int64)data[i-26]; /* Falls through. */
                                case 25: sum += qlp_coeff[24] * (FLAC__int64)data[i-25]; /* Falls through. */
                                case 24: sum += qlp_coeff[23] * (FLAC__int64)data[i-24]; /* Falls through. */
                                case 23: sum += qlp_coeff[22] * (FLAC__int64)data[i-23]; /* Falls through. */
                                case 22: sum += qlp_coeff[21] * (FLAC__int64)data[i-22]; /* Falls through. */
                                case 21: sum += qlp_coeff[20] * (FLAC__int64)data[i-21]; /* Falls through. */
                                case 20: sum += qlp_coeff[19] * (FLAC__int64)data[i-20]; /* Falls through. */
                                case 19: sum += qlp_coeff[18] * (FLAC__int64)data[i-19]; /* Falls through. */
                                case 18: sum += qlp_coeff[17] * (FLAC__int64)data[i-18]; /* Falls through. */
                                case 17: sum += qlp_coeff[16] * (FLAC__int64)data[i-17]; /* Falls through. */
                                case 16: sum += qlp_coeff[15] * (FLAC__int64)data[i-16]; /* Falls through. */
                                case 15: sum += qlp_coeff[14] * (FLAC__int64)data[i-15]; /* Falls through. */
                                case 14: sum += qlp_coeff[13] * (FLAC__int64)data[i-14]; /* Falls through. */
                                case 13: sum += qlp_coeff[12] * (FLAC__int64)data[i-13];
                                         sum += qlp_coeff[11] * (FLAC__int64)data[i-12];
                                         sum += qlp_coeff[10] * (FLAC__int64)data[i-11];
                                         sum += qlp_coeff[ 9] * (FLAC__int64)data[i-10];
                                         sum += qlp_coeff[ 8] * (FLAC__int64)data[i- 9];
                                         sum += qlp_coeff[ 7] * (FLAC__int64)data[i- 8];
                                         sum += qlp_coeff[ 6] * (FLAC__int64)data[i- 7];
                                         sum += qlp_coeff[ 5] * (FLAC__int64)data[i- 6];
                                         sum += qlp_coeff[ 4] * (FLAC__int64)data[i- 5];
                                         sum += qlp_coeff[ 3] * (FLAC__int64)data[i- 4];
                                         sum += qlp_coeff[ 2] * (FLAC__int64)data[i- 3];
                                         sum += qlp_coeff[ 1] * (FLAC__int64)data[i- 2];
                                         sum += qlp_coeff[ 0] * (FLAC__int64)data[i- 1];
                        }
                        residual[i] = data[i] - (FLAC__int32)(sum >> lp_quantization);
                }
        }
}

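/* The restore path is the inverse filter: data[i] = residual[i] + (sum >> lp_quantization).
 * Because each output sample feeds back into the prediction of the next one, the
 * per-order kernels below keep the most recent samples in a small register file
 * (dat[]) that is rotated with _mm_alignr_epi8() instead of being reloaded. */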
FLAC__SSE_TARGET("sse4.1")
void FLAC__lpc_restore_signal_wide_intrin_sse41(const FLAC__int32 residual[], uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 data[])
{
        int i;
        const __m128i cnt = _mm_cvtsi32_si128(lp_quantization);

        if (!data_len)
                return;

        FLAC__ASSERT(order > 0);
        FLAC__ASSERT(order <= 32);
        FLAC__ASSERT(lp_quantization <= 32); /* there's no _mm_sra_epi64() so we have to use _mm_srl_epi64() */

        if(order <= 12) {
                if(order > 8) { /* order == 9, 10, 11, 12 */
                        if(order > 10) { /* order == 11, 12 */
                                __m128i qlp[6], dat[6];
                                __m128i summ, temp;
                                qlp[0] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+0)));            // 0  q[1]  0  q[0]
                                qlp[1] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+2)));            // 0  q[3]  0  q[2]
                                qlp[2] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+4)));            // 0  q[5]  0  q[4]
                                qlp[3] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+6)));            // 0  q[7]  0  q[6]
                                qlp[4] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+8)));            // 0  q[9]  0  q[8]
                                if (order == 12)
                                        qlp[5] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+10)));   // 0  q[11] 0  q[10]
                                else
                                        qlp[5] = _mm_cvtepu32_epi64(_mm_cvtsi32_si128(qlp_coeff[10]));                  // 0    0   0  q[10]
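                                /* zero-extension via _mm_cvtepu32_epi64() is safe for signed
                                 * coefficients here: _mm_mul_epi32() reads only the low 32 bits
                                 * of each 64-bit lane, so the upper halves never matter */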

                                dat[5] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-12)), _MM_SHUFFLE(2,0,3,1));   // 0  d[i-12] 0  d[i-11]
                                dat[4] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-10)), _MM_SHUFFLE(2,0,3,1));   // 0  d[i-10] 0  d[i-9]
                                dat[3] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-8 )), _MM_SHUFFLE(2,0,3,1));   // 0  d[i-8]  0  d[i-7]
                                dat[2] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-6 )), _MM_SHUFFLE(2,0,3,1));   // 0  d[i-6]  0  d[i-5]
                                dat[1] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-4 )), _MM_SHUFFLE(2,0,3,1));   // 0  d[i-4]  0  d[i-3]
                                dat[0] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-2 )), _MM_SHUFFLE(2,0,3,1));   // 0  d[i-2]  0  d[i-1]

                                summ =                     _mm_mul_epi32(dat[5], qlp[5]) ;
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[4], qlp[4]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[3], qlp[3]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[2], qlp[2]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[1], qlp[1]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));

                                summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));    // ?_64  sum_64
                                summ = _mm_srl_epi64(summ, cnt);                        // ?_64  (sum >> lp_quantization)_64  ==  ?_32  ?_32  ?_32  (sum >> lp_quantization)_32
                                temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[0]), summ);     // ?  ?  ?  d[i]
                                data[0] = _mm_cvtsi128_si32(temp);

                                for(i = 1; i < (int)data_len; i++) {
                                        temp = _mm_slli_si128(temp, 8);
                                        dat[5] = _mm_alignr_epi8(dat[5], dat[4], 8);    //  ?  d[i-11] ?  d[i-10]
                                        dat[4] = _mm_alignr_epi8(dat[4], dat[3], 8);    //  ?  d[i-9]  ?  d[i-8]
                                        dat[3] = _mm_alignr_epi8(dat[3], dat[2], 8);    //  ?  d[i-7]  ?  d[i-6]
                                        dat[2] = _mm_alignr_epi8(dat[2], dat[1], 8);    //  ?  d[i-5]  ?  d[i-4]
                                        dat[1] = _mm_alignr_epi8(dat[1], dat[0], 8);    //  ?  d[i-3]  ?  d[i-2]
                                        dat[0] = _mm_alignr_epi8(dat[0],   temp, 8);    //  ?  d[i-1]  ?  d[i  ]
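                                        /* the alignr chain slides the sample window forward by one:
                                         * samples migrate into the next-older register (the high half
                                         * of dat[k-1] becomes the low half of dat[k]), while temp
                                         * injects the sample produced on the previous iteration */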

                                        summ =                     _mm_mul_epi32(dat[5], qlp[5]) ;
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[4], qlp[4]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[3], qlp[3]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[2], qlp[2]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[1], qlp[1]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));

                                        summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));    // ?_64  sum_64
                                        summ = _mm_srl_epi64(summ, cnt);                        // ?_64  (sum >> lp_quantization)_64  ==  ?_32  ?_32  ?_32  (sum >> lp_quantization)_32
                                        temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);     // ?  ?  ?  d[i]
                                        data[i] = _mm_cvtsi128_si32(temp);
                                }
                        }
                        else { /* order == 9, 10 */
                                __m128i qlp[5], dat[5];
                                __m128i summ, temp;
                                qlp[0] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+0)));
                                qlp[1] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+2)));
                                qlp[2] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+4)));
                                qlp[3] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+6)));
                                if (order == 10)
                                        qlp[4] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+8)));
                                else
                                        qlp[4] = _mm_cvtepu32_epi64(_mm_cvtsi32_si128(qlp_coeff[8]));

                                dat[4] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-10)), _MM_SHUFFLE(2,0,3,1));
                                dat[3] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-8 )), _MM_SHUFFLE(2,0,3,1));
                                dat[2] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-6 )), _MM_SHUFFLE(2,0,3,1));
                                dat[1] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-4 )), _MM_SHUFFLE(2,0,3,1));
                                dat[0] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-2 )), _MM_SHUFFLE(2,0,3,1));

                                summ =                     _mm_mul_epi32(dat[4], qlp[4]) ;
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[3], qlp[3]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[2], qlp[2]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[1], qlp[1]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));

                                summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
                                summ = _mm_srl_epi64(summ, cnt);
                                temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[0]), summ);
                                data[0] = _mm_cvtsi128_si32(temp);

                                for(i = 1; i < (int)data_len; i++) {
                                        temp = _mm_slli_si128(temp, 8);
                                        dat[4] = _mm_alignr_epi8(dat[4], dat[3], 8);
                                        dat[3] = _mm_alignr_epi8(dat[3], dat[2], 8);
                                        dat[2] = _mm_alignr_epi8(dat[2], dat[1], 8);
                                        dat[1] = _mm_alignr_epi8(dat[1], dat[0], 8);
                                        dat[0] = _mm_alignr_epi8(dat[0],   temp, 8);

                                        summ =                     _mm_mul_epi32(dat[4], qlp[4]) ;
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[3], qlp[3]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[2], qlp[2]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[1], qlp[1]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));

                                        summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
                                        summ = _mm_srl_epi64(summ, cnt);
                                        temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
                                        data[i] = _mm_cvtsi128_si32(temp);
                                }
                        }
                }
                else if(order > 4) { /* order == 5, 6, 7, 8 */
                        if(order > 6) { /* order == 7, 8 */
                                __m128i qlp[4], dat[4];
                                __m128i summ, temp;
                                qlp[0] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+0)));
                                qlp[1] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+2)));
                                qlp[2] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+4)));
                                if (order == 8)
                                        qlp[3] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+6)));
                                else
                                        qlp[3] = _mm_cvtepu32_epi64(_mm_cvtsi32_si128(qlp_coeff[6]));

                                dat[3] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-8 )), _MM_SHUFFLE(2,0,3,1));
                                dat[2] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-6 )), _MM_SHUFFLE(2,0,3,1));
                                dat[1] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-4 )), _MM_SHUFFLE(2,0,3,1));
                                dat[0] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-2 )), _MM_SHUFFLE(2,0,3,1));

                                summ =                     _mm_mul_epi32(dat[3], qlp[3]) ;
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[2], qlp[2]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[1], qlp[1]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));

                                summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
                                summ = _mm_srl_epi64(summ, cnt);
                                temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[0]), summ);
                                data[0] = _mm_cvtsi128_si32(temp);

                                for(i = 1; i < (int)data_len; i++) {
                                        temp = _mm_slli_si128(temp, 8);
                                        dat[3] = _mm_alignr_epi8(dat[3], dat[2], 8);
                                        dat[2] = _mm_alignr_epi8(dat[2], dat[1], 8);
                                        dat[1] = _mm_alignr_epi8(dat[1], dat[0], 8);
                                        dat[0] = _mm_alignr_epi8(dat[0],   temp, 8);

                                        summ =                     _mm_mul_epi32(dat[3], qlp[3]) ;
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[2], qlp[2]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[1], qlp[1]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));

                                        summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
                                        summ = _mm_srl_epi64(summ, cnt);
                                        temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
                                        data[i] = _mm_cvtsi128_si32(temp);
                                }
                        }
                        else { /* order == 5, 6 */
                                __m128i qlp[3], dat[3];
                                __m128i summ, temp;
                                qlp[0] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+0)));
                                qlp[1] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+2)));
                                if (order == 6)
                                        qlp[2] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+4)));
                                else
                                        qlp[2] = _mm_cvtepu32_epi64(_mm_cvtsi32_si128(qlp_coeff[4]));

                                dat[2] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-6 )), _MM_SHUFFLE(2,0,3,1));
                                dat[1] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-4 )), _MM_SHUFFLE(2,0,3,1));
                                dat[0] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-2 )), _MM_SHUFFLE(2,0,3,1));

                                summ =                     _mm_mul_epi32(dat[2], qlp[2]) ;
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[1], qlp[1]));
                                summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));

                                summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
                                summ = _mm_srl_epi64(summ, cnt);
                                temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[0]), summ);
                                data[0] = _mm_cvtsi128_si32(temp);

                                for(i = 1; i < (int)data_len; i++) {
                                        temp = _mm_slli_si128(temp, 8);
                                        dat[2] = _mm_alignr_epi8(dat[2], dat[1], 8);
                                        dat[1] = _mm_alignr_epi8(dat[1], dat[0], 8);
                                        dat[0] = _mm_alignr_epi8(dat[0],   temp, 8);

                                        summ =                     _mm_mul_epi32(dat[2], qlp[2]) ;
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[1], qlp[1]));
                                        summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));

                                        summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
                                        summ = _mm_srl_epi64(summ, cnt);
790                                         temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
791                                         data[i] = _mm_cvtsi128_si32(temp);
792                                 }
793                         }
794                 }
795                 else { /* order == 1, 2, 3, 4 */
796                         if(order > 2) { /* order == 3, 4 */
797                                 __m128i qlp[2], dat[2];
798                                 __m128i summ, temp;
799                                 qlp[0] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+0)));
800                                 if (order == 4)
801                                         qlp[1] = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff+2)));
802                                 else
803                                         qlp[1] = _mm_cvtepu32_epi64(_mm_cvtsi32_si128(qlp_coeff[2]));
804
805                                 dat[1] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-4 )), _MM_SHUFFLE(2,0,3,1));
806                                 dat[0] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-2 )), _MM_SHUFFLE(2,0,3,1));
807
808                                 summ =                     _mm_mul_epi32(dat[1], qlp[1]) ;
809                                 summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));
810
811                                 summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
812                                 summ = _mm_srl_epi64(summ, cnt);
813                                 temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[0]), summ);
814                                 data[0] = _mm_cvtsi128_si32(temp);
815
816                                 for(i = 1; i < (int)data_len; i++) {
817                                         temp = _mm_slli_si128(temp, 8);
818                                         dat[1] = _mm_alignr_epi8(dat[1], dat[0], 8);
819                                         dat[0] = _mm_alignr_epi8(dat[0],   temp, 8);
820
821                                         summ =                     _mm_mul_epi32(dat[1], qlp[1]) ;
822                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat[0], qlp[0]));
823
824                                         summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
825                                         summ = _mm_srl_epi64(summ, cnt);
826                                         temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
827                                         data[i] = _mm_cvtsi128_si32(temp);
828                                 }
829                         }
830                         else { /* order == 1, 2 */
831                                 if(order == 2) {
832                                         __m128i qlp0, dat0;
833                                         __m128i summ, temp;
834                                         qlp0 = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(qlp_coeff)));
835
836                                         dat0 = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(data-2 )), _MM_SHUFFLE(2,0,3,1));
837
838                                         summ = _mm_mul_epi32(dat0, qlp0);
839
840                                         summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
841                                         summ = _mm_srl_epi64(summ, cnt);
842                                         temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[0]), summ);
843                                         data[0] = _mm_cvtsi128_si32(temp);
844
845                                         for(i = 1; i < (int)data_len; i++) {
846                                                 dat0 = _mm_alignr_epi8(dat0, _mm_slli_si128(temp, 8), 8);
847
848                                                 summ = _mm_mul_epi32(dat0, qlp0);
849
850                                                 summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
851                                                 summ = _mm_srl_epi64(summ, cnt);
852                                                 temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
853                                                 data[i] = _mm_cvtsi128_si32(temp);
854                                         }
855                                 }
856                                 else { /* order == 1 */
857                                         __m128i qlp0;
858                                         __m128i summ, temp;
859                                         qlp0 = _mm_cvtsi32_si128(qlp_coeff[0]);
860                                         temp = _mm_cvtsi32_si128(data[-1]);
861
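                                        /* temp carries d[i-1] between iterations; _mm_mul_epi32 forms the
                                           signed 64-bit product q[0]*d[i-1] */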
862                                         summ = _mm_mul_epi32(temp, qlp0);
863                                         summ = _mm_srl_epi64(summ, cnt);
864                                         temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[0]), summ);
865                                         data[0] = _mm_cvtsi128_si32(temp);
866
867                                         for(i = 1; i < (int)data_len; i++) {
868                                                 summ = _mm_mul_epi32(temp, qlp0);
869                                                 summ = _mm_srl_epi64(summ, cnt);
870                                                 temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
871                                                 data[i] = _mm_cvtsi128_si32(temp);
872                                         }
873                                 }
874                         }
875                 }
876         }
877         else { /* order > 12 */
878                 __m128i qlp[16];
879
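                /* pre-shuffle coefficient pairs into the even 32-bit lanes, the only
                   lanes _mm_mul_epi32 reads */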
880                 for(i = 0; i < (int)order/2; i++)
881                         qlp[i] = _mm_shuffle_epi32(_mm_loadl_epi64((const __m128i*)(qlp_coeff+i*2)), _MM_SHUFFLE(2,0,3,1));     // 0  q[2*i]  0  q[2*i+1]
882                 if(order & 1)
883                         qlp[i] = _mm_shuffle_epi32(_mm_cvtsi32_si128(qlp_coeff[i*2]), _MM_SHUFFLE(2,0,3,1));
884
885                 for(i = 0; i < (int)data_len; i++) {
886                         __m128i summ = _mm_setzero_si128(), dat;
887                         FLAC__int32 * const datai = &data[i];
888
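                        /* enter the ladder at (order+1)/2; each case falls through to the
                           next, accumulating one coefficient pair per case */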
889                         switch((order+1) / 2) {
890                                 case 16: /* order == 31, 32 */
891                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-32)));
892                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[15]));                /* Falls through. */
893                                 case 15:
894                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-30)));
895                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[14]));                /* Falls through. */
896                                 case 14:
897                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-28)));
898                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[13]));                /* Falls through. */
899                                 case 13:
900                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-26)));
901                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[12]));                /* Falls through. */
902                                 case 12:
903                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-24)));
904                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[11]));                /* Falls through. */
905                                 case 11:
906                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-22)));
907                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[10]));                /* Falls through. */
908                                 case 10:
909                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-20)));
910                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[9]));                 /* Falls through. */
911                                 case  9:
912                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-18)));
913                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[8]));                 /* Falls through. */
914                                 case  8:
915                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-16)));
916                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[7]));                 /* Falls through. */
917                                 case  7: /* order == 13, 14 */
918                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-14)));
919                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[6]));
920                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-12)));
921                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[5]));
922                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-10)));
923                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[4]));
924                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-8)));
925                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[3]));
926                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-6)));
927                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[2]));
928                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-4)));
929                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[1]));
930                                         dat = _mm_cvtepu32_epi64(_mm_loadl_epi64((const __m128i*)(datai-2)));
931                                         summ = _mm_add_epi64(summ, _mm_mul_epi32(dat, qlp[0]));
932                         }
933                         summ = _mm_add_epi64(summ, _mm_srli_si128(summ, 8));
934                         summ = _mm_srl_epi64(summ, cnt);
935                         summ = _mm_add_epi32(summ, _mm_cvtsi32_si128(residual[i]));
936                         data[i] = _mm_cvtsi128_si32(summ);
937                 }
938         }
939 }
940
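/* Inverse of the residual transform: rebuilds data[] in place from residual[],
   accumulating predictions in 32-bit arithmetic; orders below 8 and above 12
   delegate to the scalar/asm fallbacks. */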
941 FLAC__SSE_TARGET("sse4.1")
942 void FLAC__lpc_restore_signal_intrin_sse41(const FLAC__int32 residual[], uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 data[])
943 {
944         if(order < 8) {
945                 FLAC__lpc_restore_signal(residual, data_len, qlp_coeff, order, lp_quantization, data);
946                 return;
947         }
948
949         FLAC__ASSERT(order >= 8);
950         FLAC__ASSERT(order <= 32);
951
952         if(order <= 12) {
953                 int i;
954                 const __m128i cnt = _mm_cvtsi32_si128(lp_quantization);
955
956                 if(order > 8) /* order == 9, 10, 11, 12 */
957                 {
958                         __m128i qlp[3], dat[3];
959                         __m128i summ, temp;
960
961                         qlp[0] = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));       // q[3]  q[2]  q[1]  q[0]
962                         qlp[1] = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));       // q[7]  q[6]  q[5]  q[4]
963                         qlp[2] = _mm_loadu_si128((const __m128i*)(qlp_coeff+8));       // q[11] q[10] q[9]  q[8]
964                         switch(order)
965                         {
966                         case 9:
967                                 qlp[2] = _mm_slli_si128(qlp[2], 12); qlp[2] = _mm_srli_si128(qlp[2], 12); break;        //   0     0     0   q[8]
968                         case 10:
969                                 qlp[2] = _mm_slli_si128(qlp[2],  8); qlp[2] = _mm_srli_si128(qlp[2],  8); break;        //   0     0   q[9]  q[8]
970                         case 11:
971                                 qlp[2] = _mm_slli_si128(qlp[2],  4); qlp[2] = _mm_srli_si128(qlp[2],  4); break;        //   0   q[10] q[9]  q[8]
972                         }
973
974                         dat[2] = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-12)), _MM_SHUFFLE(0,1,2,3));        // d[i-12] d[i-11] d[i-10] d[i-9]
975                         dat[1] = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-8)),  _MM_SHUFFLE(0,1,2,3));        // d[i-8]  d[i-7]  d[i-6]  d[i-5]
976                         dat[0] = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-4)),  _MM_SHUFFLE(0,1,2,3));        // d[i-4]  d[i-3]  d[i-2]  d[i-1]
977
978                         for(i = 0;;) {
979                                 summ = _mm_mullo_epi32(dat[2], qlp[2]);
980                                 summ = _mm_add_epi32(summ, _mm_mullo_epi32(dat[1], qlp[1]));
981                                 summ = _mm_add_epi32(summ, _mm_mullo_epi32(dat[0], qlp[0]));
982
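                                /* horizontal sum: fold the high 64 bits onto the low half, then swap the
                                   two low 32-bit words with a 16-bit shuffle; lane 0 holds the total */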
983                                 summ = _mm_add_epi32(summ, _mm_shuffle_epi32(summ, _MM_SHUFFLE(1,0,3,2)));
984                                 summ = _mm_add_epi32(summ, _mm_shufflelo_epi16(summ, _MM_SHUFFLE(1,0,3,2)));
985
986                                 summ = _mm_sra_epi32(summ, cnt);
987                                 temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
988                                 data[i] = _mm_cvtsi128_si32(temp);
989
990                                 if(++i >= (int)data_len) break;
991
992                                 temp = _mm_slli_si128(temp, 12);
993                                 dat[2] = _mm_alignr_epi8(dat[2], dat[1], 12);
994                                 dat[1] = _mm_alignr_epi8(dat[1], dat[0], 12);
995                                 dat[0] = _mm_alignr_epi8(dat[0], temp, 12);
996                         }
997                 }
998                 else /* order == 8 */
999                 {
1000                         __m128i qlp[2], dat[2];
1001                         __m128i summ, temp;
1002
1003                         qlp[0] = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));
1004                         qlp[1] = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));
1005
1006                         dat[1] = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-8)), _MM_SHUFFLE(0,1,2,3));
1007                         dat[0] = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-4)), _MM_SHUFFLE(0,1,2,3));
1008
1009                         for(i = 0;;) {
1010                                 summ = _mm_add_epi32(_mm_mullo_epi32(dat[1], qlp[1]), _mm_mullo_epi32(dat[0], qlp[0]));
1011
1012                                 summ = _mm_add_epi32(summ, _mm_shuffle_epi32(summ, _MM_SHUFFLE(1,0,3,2)));
1013                                 summ = _mm_add_epi32(summ, _mm_shufflelo_epi16(summ, _MM_SHUFFLE(1,0,3,2)));
1014
1015                                 summ = _mm_sra_epi32(summ, cnt);
1016                                 temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
1017                                 data[i] = _mm_cvtsi128_si32(temp);
1018
1019                                 if(++i >= (int)data_len) break;
1020
1021                                 temp = _mm_slli_si128(temp, 12);
1022                                 dat[1] = _mm_alignr_epi8(dat[1], dat[0], 12);
1023                                 dat[0] = _mm_alignr_epi8(dat[0], temp, 12);
1024                         }
1025                 }
1026         }
1027         else { /* order > 12 */
1028 #ifdef FLAC__HAS_NASM
1029                 FLAC__lpc_restore_signal_asm_ia32(residual, data_len, qlp_coeff, order, lp_quantization, data);
1030 #else
1031                 FLAC__lpc_restore_signal(residual, data_len, qlp_coeff, order, lp_quantization, data);
1032 #endif
1033         }
1034 }
1035
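/* 16-bit variant: packs coefficients and history with _mm_packs_epi32 so one
   _mm_madd_epi16 covers eight taps.  Only SSSE3 intrinsics are actually needed
   (_mm_alignr_epi8 is the newest one used), hence the "ssse3" target despite
   the function's name. */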
1036 FLAC__SSE_TARGET("ssse3")
1037 void FLAC__lpc_restore_signal_16_intrin_sse41(const FLAC__int32 residual[], uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 data[])
1038 {
1039         if(order < 8) {
1040                 FLAC__lpc_restore_signal(residual, data_len, qlp_coeff, order, lp_quantization, data);
1041                 return;
1042         }
1043
1044         FLAC__ASSERT(order >= 8);
1045         FLAC__ASSERT(order <= 32);
1046
1047         if(order <= 12) {
1048                 int i;
1049                 const __m128i cnt = _mm_cvtsi32_si128(lp_quantization);
1050
1051                 if(order > 8) /* order == 9, 10, 11, 12 */
1052                 {
1053                         __m128i qlp[2], dat[2];
1054                         __m128i summ, temp;
1055
1056                         qlp[0] = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));        // q[3]  q[2]  q[1]  q[0]
1057                         temp   = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));        // q[7]  q[6]  q[5]  q[4]
1058                         qlp[1] = _mm_loadu_si128((const __m128i*)(qlp_coeff+8));        // q[11] q[10] q[9]  q[8]
1059                         switch(order)
1060                         {
1061                         case 9:
1062                                 qlp[1] = _mm_slli_si128(qlp[1], 12); qlp[1] = _mm_srli_si128(qlp[1], 12); break;        //   0     0     0   q[8]
1063                         case 10:
1064                                 qlp[1] = _mm_slli_si128(qlp[1],  8); qlp[1] = _mm_srli_si128(qlp[1],  8); break;        //   0     0   q[9]  q[8]
1065                         case 11:
1066                                 qlp[1] = _mm_slli_si128(qlp[1],  4); qlp[1] = _mm_srli_si128(qlp[1],  4); break;        //   0   q[10] q[9]  q[8]
1067                         }
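                        /* the saturating packs below are lossless under this kernel's assumption
                           that samples and coefficients fit in 16 bits */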
1068                         qlp[0] = _mm_packs_epi32(qlp[0], temp);                                 // q[7]  q[6]  q[5]  q[4]  q[3]  q[2]  q[1]  q[0]
1069                         qlp[1] = _mm_packs_epi32(qlp[1], _mm_setzero_si128());  //   0     0     0     0   q[11] q[10] q[9]  q[8]
1070
1071                         dat[1] = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-12)), _MM_SHUFFLE(0,1,2,3));   // d[i-12] d[i-11] d[i-10] d[i-9]
1072                         temp   = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-8)),  _MM_SHUFFLE(0,1,2,3));   // d[i-8]  d[i-7]  d[i-6]  d[i-5]
1073                         dat[0] = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-4)),  _MM_SHUFFLE(0,1,2,3));   // d[i-4]  d[i-3]  d[i-2]  d[i-1]
1074
1075                         dat[1] = _mm_packs_epi32(dat[1], _mm_setzero_si128());          //   0       0       0       0     d[i-12] d[i-11] d[i-10] d[i-9]
1076                         dat[0] = _mm_packs_epi32(dat[0], temp);                                         // d[i-8]  d[i-7]  d[i-6]  d[i-5]  d[i-4]  d[i-3]  d[i-2]  d[i-1]
1077
1078                         for(i = 0;;) {
1079                                 summ = _mm_madd_epi16(dat[1], qlp[1]);
1080                                 summ = _mm_add_epi32(summ, _mm_madd_epi16(dat[0], qlp[0]));
1081
1082                                 summ = _mm_add_epi32(summ, _mm_shuffle_epi32(summ, _MM_SHUFFLE(1,0,3,2)));
1083                                 summ = _mm_add_epi32(summ, _mm_shufflelo_epi16(summ, _MM_SHUFFLE(1,0,3,2)));
1084
1085                                 summ = _mm_sra_epi32(summ, cnt);
1086                                 temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
1087                                 data[i] = _mm_cvtsi128_si32(temp);
1088
1089                                 if(++i >= (int)data_len) break;
1090
1091                                 temp = _mm_slli_si128(temp, 14);
1092                                 dat[1] = _mm_alignr_epi8(dat[1], dat[0], 14);   //   0       0       0     d[i-12] d[i-11] d[i-10] d[i-9]  d[i-8]
1093                                 dat[0] = _mm_alignr_epi8(dat[0],   temp, 14);   // d[i-7]  d[i-6]  d[i-5]  d[i-4]  d[i-3]  d[i-2]  d[i-1]  d[i]
1094                         }
1095                 }
1096                 else /* order == 8 */
1097                 {
1098                         __m128i qlp0, dat0;
1099                         __m128i summ, temp;
1100
1101                         qlp0 = _mm_loadu_si128((const __m128i*)(qlp_coeff+0));  // q[3]  q[2]  q[1]  q[0]
1102                         temp = _mm_loadu_si128((const __m128i*)(qlp_coeff+4));  // q[7]  q[6]  q[5]  q[4]
1103                         qlp0 = _mm_packs_epi32(qlp0, temp);                                             // q[7]  q[6]  q[5]  q[4]  q[3]  q[2]  q[1]  q[0]
1104
1105                         temp = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-8)), _MM_SHUFFLE(0,1,2,3));
1106                         dat0 = _mm_shuffle_epi32(_mm_loadu_si128((const __m128i*)(data-4)), _MM_SHUFFLE(0,1,2,3));
1107                         dat0 = _mm_packs_epi32(dat0, temp);                                             // d[i-8]  d[i-7]  d[i-6]  d[i-5]  d[i-4]  d[i-3]  d[i-2]  d[i-1]
1108
1109                         for(i = 0;;) {
1110                                 summ = _mm_madd_epi16(dat0, qlp0);
1111
1112                                 summ = _mm_add_epi32(summ, _mm_shuffle_epi32(summ, _MM_SHUFFLE(1,0,3,2)));
1113                                 summ = _mm_add_epi32(summ, _mm_shufflelo_epi16(summ, _MM_SHUFFLE(1,0,3,2)));
1114
1115                                 summ = _mm_sra_epi32(summ, cnt);
1116                                 temp = _mm_add_epi32(_mm_cvtsi32_si128(residual[i]), summ);
1117                                 data[i] = _mm_cvtsi128_si32(temp);
1118
1119                                 if(++i >= (int)data_len) break;
1120
1121                                 temp = _mm_slli_si128(temp, 14);
1122                                 dat0 = _mm_alignr_epi8(dat0, temp, 14); // d[i-7]  d[i-6]  d[i-5]  d[i-4]  d[i-3]  d[i-2]  d[i-1]  d[i]
1123                         }
1124                 }
1125         }
1126         else { /* order > 12 */
1127 #ifdef FLAC__HAS_NASM
1128                 FLAC__lpc_restore_signal_asm_ia32_mmx(residual, data_len, qlp_coeff, order, lp_quantization, data);
1129 #else
1130                 FLAC__lpc_restore_signal(residual, data_len, qlp_coeff, order, lp_quantization, data);
1131 #endif
1132         }
1133 }
1134
1135 #endif /* defined FLAC__CPU_IA32 */
1136
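/* 32-bit residual kernel: each coefficient is splatted across a vector and four
   residuals are produced per iteration; leftovers fall to the scalar tail below. */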
1137 FLAC__SSE_TARGET("sse4.1")
1138 void FLAC__lpc_compute_residual_from_qlp_coefficients_intrin_sse41(const FLAC__int32 *data, uint32_t data_len, const FLAC__int32 qlp_coeff[], uint32_t order, int lp_quantization, FLAC__int32 residual[])
1139 {
1140         int i;
1141         FLAC__int32 sum;
1142         const __m128i cnt = _mm_cvtsi32_si128(lp_quantization);
1143
1144         FLAC__ASSERT(order > 0);
1145         FLAC__ASSERT(order <= 32);
1146
1147         if(order <= 12) {
1148                 if(order > 8) {
1149                         if(order > 10) {
1150                                 if(order == 12) {
1151                                         __m128i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10, q11;
1152                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1153                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1154                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1155                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1156                                         q4 = _mm_cvtsi32_si128(qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
1157                                         q5 = _mm_cvtsi32_si128(qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
1158                                         q6 = _mm_cvtsi32_si128(qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
1159                                         q7 = _mm_cvtsi32_si128(qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
1160                                         q8 = _mm_cvtsi32_si128(qlp_coeff[8]); q8 = _mm_shuffle_epi32(q8, _MM_SHUFFLE(0,0,0,0));
1161                                         q9 = _mm_cvtsi32_si128(qlp_coeff[9]); q9 = _mm_shuffle_epi32(q9, _MM_SHUFFLE(0,0,0,0));
1162                                         q10 = _mm_cvtsi32_si128(qlp_coeff[10]); q10 = _mm_shuffle_epi32(q10, _MM_SHUFFLE(0,0,0,0));
1163                                         q11 = _mm_cvtsi32_si128(qlp_coeff[11]); q11 = _mm_shuffle_epi32(q11, _MM_SHUFFLE(0,0,0,0));
1164
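                                        /* each iteration emits four residuals: the loads at data+i-k are
                                           overlapping unaligned windows, multiplied lane-wise against the
                                           splatted coefficients and accumulated in 32 bits */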
1165                                         for(i = 0; i < (int)data_len-3; i+=4) {
1166                                                 __m128i summ, mull;
1167                                                 summ = _mm_mullo_epi32(q11, _mm_loadu_si128((const __m128i*)(data+i-12)));
1168                                                 mull = _mm_mullo_epi32(q10, _mm_loadu_si128((const __m128i*)(data+i-11))); summ = _mm_add_epi32(summ, mull);
1169                                                 mull = _mm_mullo_epi32(q9, _mm_loadu_si128((const __m128i*)(data+i-10))); summ = _mm_add_epi32(summ, mull);
1170                                                 mull = _mm_mullo_epi32(q8, _mm_loadu_si128((const __m128i*)(data+i-9))); summ = _mm_add_epi32(summ, mull);
1171                                                 mull = _mm_mullo_epi32(q7, _mm_loadu_si128((const __m128i*)(data+i-8))); summ = _mm_add_epi32(summ, mull);
1172                                                 mull = _mm_mullo_epi32(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
1173                                                 mull = _mm_mullo_epi32(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
1174                                                 mull = _mm_mullo_epi32(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
1175                                                 mull = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
1176                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1177                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1178                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1179                                                 summ = _mm_sra_epi32(summ, cnt);
1180                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1181                                         }
1182                                 }
1183                                 else { /* order == 11 */
1184                                         __m128i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9, q10;
1185                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1186                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1187                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1188                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1189                                         q4 = _mm_cvtsi32_si128(qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
1190                                         q5 = _mm_cvtsi32_si128(qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
1191                                         q6 = _mm_cvtsi32_si128(qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
1192                                         q7 = _mm_cvtsi32_si128(qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
1193                                         q8 = _mm_cvtsi32_si128(qlp_coeff[8]); q8 = _mm_shuffle_epi32(q8, _MM_SHUFFLE(0,0,0,0));
1194                                         q9 = _mm_cvtsi32_si128(qlp_coeff[9]); q9 = _mm_shuffle_epi32(q9, _MM_SHUFFLE(0,0,0,0));
1195                                         q10 = _mm_cvtsi32_si128(qlp_coeff[10]); q10 = _mm_shuffle_epi32(q10, _MM_SHUFFLE(0,0,0,0));
1196
1197                                         for(i = 0; i < (int)data_len-3; i+=4) {
1198                                                 __m128i summ, mull;
1199                                                 summ = _mm_mullo_epi32(q10, _mm_loadu_si128((const __m128i*)(data+i-11)));
1200                                                 mull = _mm_mullo_epi32(q9, _mm_loadu_si128((const __m128i*)(data+i-10))); summ = _mm_add_epi32(summ, mull);
1201                                                 mull = _mm_mullo_epi32(q8, _mm_loadu_si128((const __m128i*)(data+i-9))); summ = _mm_add_epi32(summ, mull);
1202                                                 mull = _mm_mullo_epi32(q7, _mm_loadu_si128((const __m128i*)(data+i-8))); summ = _mm_add_epi32(summ, mull);
1203                                                 mull = _mm_mullo_epi32(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
1204                                                 mull = _mm_mullo_epi32(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
1205                                                 mull = _mm_mullo_epi32(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
1206                                                 mull = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
1207                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1208                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1209                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1210                                                 summ = _mm_sra_epi32(summ, cnt);
1211                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1212                                         }
1213                                 }
1214                         }
1215                         else {
1216                                 if(order == 10) {
1217                                         __m128i q0, q1, q2, q3, q4, q5, q6, q7, q8, q9;
1218                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1219                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1220                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1221                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1222                                         q4 = _mm_cvtsi32_si128(qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
1223                                         q5 = _mm_cvtsi32_si128(qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
1224                                         q6 = _mm_cvtsi32_si128(qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
1225                                         q7 = _mm_cvtsi32_si128(qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
1226                                         q8 = _mm_cvtsi32_si128(qlp_coeff[8]); q8 = _mm_shuffle_epi32(q8, _MM_SHUFFLE(0,0,0,0));
1227                                         q9 = _mm_cvtsi32_si128(qlp_coeff[9]); q9 = _mm_shuffle_epi32(q9, _MM_SHUFFLE(0,0,0,0));
1228
1229                                         for(i = 0; i < (int)data_len-3; i+=4) {
1230                                                 __m128i summ, mull;
1231                                                 summ = _mm_mullo_epi32(q9, _mm_loadu_si128((const __m128i*)(data+i-10)));
1232                                                 mull = _mm_mullo_epi32(q8, _mm_loadu_si128((const __m128i*)(data+i-9))); summ = _mm_add_epi32(summ, mull);
1233                                                 mull = _mm_mullo_epi32(q7, _mm_loadu_si128((const __m128i*)(data+i-8))); summ = _mm_add_epi32(summ, mull);
1234                                                 mull = _mm_mullo_epi32(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
1235                                                 mull = _mm_mullo_epi32(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
1236                                                 mull = _mm_mullo_epi32(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
1237                                                 mull = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
1238                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1239                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1240                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1241                                                 summ = _mm_sra_epi32(summ, cnt);
1242                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1243                                         }
1244                                 }
1245                                 else { /* order == 9 */
1246                                         __m128i q0, q1, q2, q3, q4, q5, q6, q7, q8;
1247                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1248                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1249                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1250                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1251                                         q4 = _mm_cvtsi32_si128(qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
1252                                         q5 = _mm_cvtsi32_si128(qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
1253                                         q6 = _mm_cvtsi32_si128(qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
1254                                         q7 = _mm_cvtsi32_si128(qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
1255                                         q8 = _mm_cvtsi32_si128(qlp_coeff[8]); q8 = _mm_shuffle_epi32(q8, _MM_SHUFFLE(0,0,0,0));
1256
1257                                         for(i = 0; i < (int)data_len-3; i+=4) {
1258                                                 __m128i summ, mull;
1259                                                 summ = _mm_mullo_epi32(q8, _mm_loadu_si128((const __m128i*)(data+i-9)));
1260                                                 mull = _mm_mullo_epi32(q7, _mm_loadu_si128((const __m128i*)(data+i-8))); summ = _mm_add_epi32(summ, mull);
1261                                                 mull = _mm_mullo_epi32(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
1262                                                 mull = _mm_mullo_epi32(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
1263                                                 mull = _mm_mullo_epi32(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
1264                                                 mull = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
1265                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1266                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1267                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1268                                                 summ = _mm_sra_epi32(summ, cnt);
1269                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1270                                         }
1271                                 }
1272                         }
1273                 }
1274                 else if(order > 4) {
1275                         if(order > 6) {
1276                                 if(order == 8) {
1277                                         __m128i q0, q1, q2, q3, q4, q5, q6, q7;
1278                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1279                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1280                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1281                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1282                                         q4 = _mm_cvtsi32_si128(qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
1283                                         q5 = _mm_cvtsi32_si128(qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
1284                                         q6 = _mm_cvtsi32_si128(qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
1285                                         q7 = _mm_cvtsi32_si128(qlp_coeff[7]); q7 = _mm_shuffle_epi32(q7, _MM_SHUFFLE(0,0,0,0));
1286
1287                                         for(i = 0; i < (int)data_len-3; i+=4) {
1288                                                 __m128i summ, mull;
1289                                                 summ = _mm_mullo_epi32(q7, _mm_loadu_si128((const __m128i*)(data+i-8)));
1290                                                 mull = _mm_mullo_epi32(q6, _mm_loadu_si128((const __m128i*)(data+i-7))); summ = _mm_add_epi32(summ, mull);
1291                                                 mull = _mm_mullo_epi32(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
1292                                                 mull = _mm_mullo_epi32(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
1293                                                 mull = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
1294                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1295                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1296                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1297                                                 summ = _mm_sra_epi32(summ, cnt);
1298                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1299                                         }
1300                                 }
1301                                 else { /* order == 7 */
1302                                         __m128i q0, q1, q2, q3, q4, q5, q6;
1303                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1304                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1305                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1306                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1307                                         q4 = _mm_cvtsi32_si128(qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
1308                                         q5 = _mm_cvtsi32_si128(qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
1309                                         q6 = _mm_cvtsi32_si128(qlp_coeff[6]); q6 = _mm_shuffle_epi32(q6, _MM_SHUFFLE(0,0,0,0));
1310
1311                                         for(i = 0; i < (int)data_len-3; i+=4) {
1312                                                 __m128i summ, mull;
1313                                                 summ = _mm_mullo_epi32(q6, _mm_loadu_si128((const __m128i*)(data+i-7)));
1314                                                 mull = _mm_mullo_epi32(q5, _mm_loadu_si128((const __m128i*)(data+i-6))); summ = _mm_add_epi32(summ, mull);
1315                                                 mull = _mm_mullo_epi32(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
1316                                                 mull = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
1317                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1318                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1319                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1320                                                 summ = _mm_sra_epi32(summ, cnt);
1321                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1322                                         }
1323                                 }
1324                         }
1325                         else {
1326                                 if(order == 6) {
1327                                         __m128i q0, q1, q2, q3, q4, q5;
1328                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1329                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1330                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1331                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1332                                         q4 = _mm_cvtsi32_si128(qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
1333                                         q5 = _mm_cvtsi32_si128(qlp_coeff[5]); q5 = _mm_shuffle_epi32(q5, _MM_SHUFFLE(0,0,0,0));
1334
1335                                         for(i = 0; i < (int)data_len-3; i+=4) {
1336                                                 __m128i summ, mull;
1337                                                 summ = _mm_mullo_epi32(q5, _mm_loadu_si128((const __m128i*)(data+i-6)));
1338                                                 mull = _mm_mullo_epi32(q4, _mm_loadu_si128((const __m128i*)(data+i-5))); summ = _mm_add_epi32(summ, mull);
1339                                                 mull = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
1340                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1341                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1342                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1343                                                 summ = _mm_sra_epi32(summ, cnt);
1344                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1345                                         }
1346                                 }
1347                                 else { /* order == 5 */
1348                                         __m128i q0, q1, q2, q3, q4;
1349                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1350                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1351                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1352                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1353                                         q4 = _mm_cvtsi32_si128(qlp_coeff[4]); q4 = _mm_shuffle_epi32(q4, _MM_SHUFFLE(0,0,0,0));
1354
1355                                         for(i = 0; i < (int)data_len-3; i+=4) {
1356                                                 __m128i summ, mull;
1357                                                 summ = _mm_mullo_epi32(q4, _mm_loadu_si128((const __m128i*)(data+i-5)));
1358                                                 mull = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4))); summ = _mm_add_epi32(summ, mull);
1359                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1360                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1361                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1362                                                 summ = _mm_sra_epi32(summ, cnt);
1363                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1364                                         }
1365                                 }
1366                         }
1367                 }
1368                 else {
1369                         if(order > 2) {
1370                                 if(order == 4) {
1371                                         __m128i q0, q1, q2, q3;
1372                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1373                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1374                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1375                                         q3 = _mm_cvtsi32_si128(qlp_coeff[3]); q3 = _mm_shuffle_epi32(q3, _MM_SHUFFLE(0,0,0,0));
1376
1377                                         for(i = 0; i < (int)data_len-3; i+=4) {
1378                                                 __m128i summ, mull;
1379                                                 summ = _mm_mullo_epi32(q3, _mm_loadu_si128((const __m128i*)(data+i-4)));
1380                                                 mull = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3))); summ = _mm_add_epi32(summ, mull);
1381                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1382                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1383                                                 summ = _mm_sra_epi32(summ, cnt);
1384                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1385                                         }
1386                                 }
1387                                 else { /* order == 3 */
1388                                         __m128i q0, q1, q2;
1389                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1390                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1391                                         q2 = _mm_cvtsi32_si128(qlp_coeff[2]); q2 = _mm_shuffle_epi32(q2, _MM_SHUFFLE(0,0,0,0));
1392
1393                                         for(i = 0; i < (int)data_len-3; i+=4) {
1394                                                 __m128i summ, mull;
1395                                                 summ = _mm_mullo_epi32(q2, _mm_loadu_si128((const __m128i*)(data+i-3)));
1396                                                 mull = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2))); summ = _mm_add_epi32(summ, mull);
1397                                                 mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
1398                                                 summ = _mm_sra_epi32(summ, cnt);
1399                                                 _mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
1400                                         }
1401                                 }
1402                         }
1403                         else {
1404                                 if(order == 2) {
1405                                         __m128i q0, q1;
1406                                         q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));
1407                                         q1 = _mm_cvtsi32_si128(qlp_coeff[1]); q1 = _mm_shuffle_epi32(q1, _MM_SHUFFLE(0,0,0,0));
1408
					for(i = 0; i < (int)data_len-3; i+=4) {
						__m128i summ, mull;
						summ = _mm_mullo_epi32(q1, _mm_loadu_si128((const __m128i*)(data+i-2)));
						mull = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1))); summ = _mm_add_epi32(summ, mull);
						summ = _mm_sra_epi32(summ, cnt);
						_mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
					}
				}
				else { /* order == 1 */
					__m128i q0;
					q0 = _mm_cvtsi32_si128(qlp_coeff[0]); q0 = _mm_shuffle_epi32(q0, _MM_SHUFFLE(0,0,0,0));

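					/* Explanatory note (not in the original source): _mm_sra_epi32 is an
					 * arithmetic (sign-extending) shift, mirroring the scalar
					 * `sum >> lp_quantization` used in the fallback loop below. */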
					for(i = 0; i < (int)data_len-3; i+=4) {
						__m128i summ;
						summ = _mm_mullo_epi32(q0, _mm_loadu_si128((const __m128i*)(data+i-1)));
						summ = _mm_sra_epi32(summ, cnt);
						_mm_storeu_si128((__m128i*)(residual+i), _mm_sub_epi32(_mm_loadu_si128((const __m128i*)(data+i)), summ));
					}
				}
			}
		}
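		/* Explanatory note (not in the original source): the loop below handles the
		 * 0..3 samples left over by the four-at-a-time vector loops (or all samples,
		 * if a vector loop never ran) in scalar code; the switch falls through so
		 * that exactly `order` taps are accumulated per sample. */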
		for(; i < (int)data_len; i++) {
			sum = 0;
			switch(order) {
				case 12: sum += qlp_coeff[11] * data[i-12]; /* Falls through. */
				case 11: sum += qlp_coeff[10] * data[i-11]; /* Falls through. */
				case 10: sum += qlp_coeff[ 9] * data[i-10]; /* Falls through. */
				case 9:  sum += qlp_coeff[ 8] * data[i- 9]; /* Falls through. */
				case 8:  sum += qlp_coeff[ 7] * data[i- 8]; /* Falls through. */
				case 7:  sum += qlp_coeff[ 6] * data[i- 7]; /* Falls through. */
				case 6:  sum += qlp_coeff[ 5] * data[i- 6]; /* Falls through. */
				case 5:  sum += qlp_coeff[ 4] * data[i- 5]; /* Falls through. */
				case 4:  sum += qlp_coeff[ 3] * data[i- 4]; /* Falls through. */
				case 3:  sum += qlp_coeff[ 2] * data[i- 3]; /* Falls through. */
				case 2:  sum += qlp_coeff[ 1] * data[i- 2]; /* Falls through. */
				case 1:  sum += qlp_coeff[ 0] * data[i- 1];
			}
			residual[i] = data[i] - (sum >> lp_quantization);
		}
	}
	else { /* order > 12 */
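		/* Explanatory note (not in the original source): orders above 12 are not
		 * vectorized here at all; every sample goes through the scalar fall-through
		 * switch below. */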
		for(i = 0; i < (int)data_len; i++) {
			sum = 0;
			switch(order) {
				case 32: sum += qlp_coeff[31] * data[i-32]; /* Falls through. */
				case 31: sum += qlp_coeff[30] * data[i-31]; /* Falls through. */
				case 30: sum += qlp_coeff[29] * data[i-30]; /* Falls through. */
				case 29: sum += qlp_coeff[28] * data[i-29]; /* Falls through. */
				case 28: sum += qlp_coeff[27] * data[i-28]; /* Falls through. */
				case 27: sum += qlp_coeff[26] * data[i-27]; /* Falls through. */
				case 26: sum += qlp_coeff[25] * data[i-26]; /* Falls through. */
				case 25: sum += qlp_coeff[24] * data[i-25]; /* Falls through. */
				case 24: sum += qlp_coeff[23] * data[i-24]; /* Falls through. */
				case 23: sum += qlp_coeff[22] * data[i-23]; /* Falls through. */
				case 22: sum += qlp_coeff[21] * data[i-22]; /* Falls through. */
				case 21: sum += qlp_coeff[20] * data[i-21]; /* Falls through. */
				case 20: sum += qlp_coeff[19] * data[i-20]; /* Falls through. */
				case 19: sum += qlp_coeff[18] * data[i-19]; /* Falls through. */
				case 18: sum += qlp_coeff[17] * data[i-18]; /* Falls through. */
				case 17: sum += qlp_coeff[16] * data[i-17]; /* Falls through. */
				case 16: sum += qlp_coeff[15] * data[i-16]; /* Falls through. */
				case 15: sum += qlp_coeff[14] * data[i-15]; /* Falls through. */
				case 14: sum += qlp_coeff[13] * data[i-14]; /* Falls through. */
				case 13: sum += qlp_coeff[12] * data[i-13];
				         sum += qlp_coeff[11] * data[i-12];
				         sum += qlp_coeff[10] * data[i-11];
				         sum += qlp_coeff[ 9] * data[i-10];
				         sum += qlp_coeff[ 8] * data[i- 9];
				         sum += qlp_coeff[ 7] * data[i- 8];
				         sum += qlp_coeff[ 6] * data[i- 7];
				         sum += qlp_coeff[ 5] * data[i- 6];
				         sum += qlp_coeff[ 4] * data[i- 5];
				         sum += qlp_coeff[ 3] * data[i- 4];
				         sum += qlp_coeff[ 2] * data[i- 3];
				         sum += qlp_coeff[ 1] * data[i- 2];
				         sum += qlp_coeff[ 0] * data[i- 1];
			}
			residual[i] = data[i] - (sum >> lp_quantization);
		}
	}
}

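/*
 * Usage sketch (illustrative only, not part of this file): call sites elsewhere
 * in libFLAC select an intrinsics variant such as the routine above at run time
 * via CPU feature detection and invoke it like the portable version.  The
 * function-pointer name and buffer size below are hypothetical; `data` must
 * provide at least `order` warm-up samples before index 0:
 *
 *     FLAC__int32 residual[4096];
 *     residual_fn(data, data_len, qlp_coeff, order, lp_quantization, residual);
 *
 * Per sample this computes
 *     residual[i] = data[i] - ((sum over j < order of qlp_coeff[j]*data[i-1-j]) >> lp_quantization)
 */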
#endif /* FLAC__SSE4_1_SUPPORTED */
#endif /* (FLAC__CPU_IA32 || FLAC__CPU_X86_64) && FLAC__HAS_X86INTRIN */
#endif /* FLAC__NO_ASM */
#endif /* FLAC__INTEGER_ONLY_LIBRARY */