; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse2 | FileCheck %s --check-prefixes=CHECK,SSE,SSE2
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+sse4.1 | FileCheck %s --check-prefixes=CHECK,SSE,SSE41
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2 | FileCheck %s --check-prefixes=CHECK,AVX,AVX-SLOW
; RUN: llc < %s -mtriple=x86_64-unknown-unknown -mattr=+avx2,+fast-variable-shuffle | FileCheck %s --check-prefixes=CHECK,AVX,AVX-FAST

; fold (shl 0, x) -> 0
define <4 x i32> @combine_vec_shl_zero(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_zero:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zero:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> zeroinitializer, %x
  ret <4 x i32> %1
}

; fold (shl x, c >= size(x)) -> undef
define <4 x i32> @combine_vec_shl_outofrange0(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_outofrange0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %1
}
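
; For <4 x i32>, any per-lane shift amount of 32 or more makes the shift
; result undef, so no instructions are required: llc may return %xmm0 as-is,
; hence the bare retq expected above.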

define <4 x i32> @combine_vec_shl_outofrange1(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_outofrange1:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 33, i32 34, i32 35, i32 36>
  ret <4 x i32> %1
}

define <4 x i32> @combine_vec_shl_outofrange2(<4 x i32> %a0) {
; CHECK-LABEL: combine_vec_shl_outofrange2:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = and <4 x i32> %a0, <i32 2147483647, i32 2147483647, i32 2147483647, i32 2147483647>
  %2 = shl <4 x i32> %1, <i32 33, i32 33, i32 33, i32 33>
  ret <4 x i32> %2
}

; fold (shl x, 0) -> x
define <4 x i32> @combine_vec_shl_by_zero(<4 x i32> %x) {
; CHECK-LABEL: combine_vec_shl_by_zero:
; CHECK:       # %bb.0:
; CHECK-NEXT:    retq
  %1 = shl <4 x i32> %x, zeroinitializer
  ret <4 x i32> %1
}

; if (shl x, c) is known to be zero, return 0
define <4 x i32> @combine_vec_shl_known_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_known_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_known_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4294901760, i32 4294901760, i32 4294901760, i32 4294901760>
  %2 = shl <4 x i32> %1, <i32 16, i32 16, i32 16, i32 16>
  ret <4 x i32> %2
}
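
; Worked example for the splat case above: the mask 4294901760 is 0xFFFF0000,
; so after the and only bits 16..31 can be set, and shifting left by 16 moves
; them all out of the i32 lane, leaving a known-zero result.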

define <4 x i32> @combine_vec_shl_known_zero1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_known_zero1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [65536,32768,16384,8192]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_known_zero1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_known_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = and <4 x i32> %x, <i32 4294901760, i32 8589803520, i32 17179607040, i32 34359214080>
  %2 = shl <4 x i32> %1, <i32 16, i32 15, i32 14, i32 13>
  ret <4 x i32> %2
}
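
; Note: truncated to i32, the masks above are 0xFFFF0000, 0xFFFE0000,
; 0xFFFC0000 and 0xFFF80000, so each lane is also known zero after its shift;
; the checks show the non-uniform case was not yet folded to zero when these
; assertions were generated.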

; fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
define <4 x i32> @combine_vec_shl_trunc_and(<4 x i32> %x, <4 x i64> %y) {
; SSE2-LABEL: combine_vec_shl_trunc_and:
; SSE2:       # %bb.0:
; SSE2-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE2-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pslld $23, %xmm1
; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE2-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_trunc_and:
; SSE41:       # %bb.0:
; SSE41-NEXT:    shufps {{.*#+}} xmm1 = xmm1[0,2],xmm2[0,2]
; SSE41-NEXT:    andps {{.*}}(%rip), %xmm1
; SSE41-NEXT:    pslld $23, %xmm1
; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm1
; SSE41-NEXT:    cvttps2dq %xmm1, %xmm1
; SSE41-NEXT:    pmulld %xmm1, %xmm0
; SSE41-NEXT:    retq
;
; AVX-SLOW-LABEL: combine_vec_shl_trunc_and:
; AVX-SLOW:       # %bb.0:
; AVX-SLOW-NEXT:    vpshufd {{.*#+}} ymm1 = ymm1[0,2,2,3,4,6,6,7]
; AVX-SLOW-NEXT:    vpermq {{.*#+}} ymm1 = ymm1[0,2,2,3]
; AVX-SLOW-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-SLOW-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
; AVX-SLOW-NEXT:    vzeroupper
; AVX-SLOW-NEXT:    retq
;
; AVX-FAST-LABEL: combine_vec_shl_trunc_and:
; AVX-FAST:       # %bb.0:
; AVX-FAST-NEXT:    vmovdqa {{.*#+}} ymm2 = [0,2,4,6,4,6,6,7]
; AVX-FAST-NEXT:    vpermd %ymm1, %ymm2, %ymm1
; AVX-FAST-NEXT:    vpand {{.*}}(%rip), %xmm1, %xmm1
; AVX-FAST-NEXT:    vpsllvd %xmm1, %xmm0, %xmm0
; AVX-FAST-NEXT:    vzeroupper
; AVX-FAST-NEXT:    retq
  %1 = and <4 x i64> %y, <i64 15, i64 255, i64 4095, i64 65535>
  %2 = trunc <4 x i64> %1 to <4 x i32>
  %3 = shl <4 x i32> %x, %2
  ret <4 x i32> %3
}
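
; The fold is legal because the and-masks fit in 32 bits, so masking before
; or after the truncate produces the same <4 x i32> shift amounts.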

; fold (shl (shl x, c1), c2) -> (shl x, (add c1, c2))
define <4 x i32> @combine_vec_shl_shl0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $6, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $6, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 2, i32 2, i32 2, i32 2>
  %2 = shl <4 x i32> %1, <i32 4, i32 4, i32 4, i32 4>
  ret <4 x i32> %2
}
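
; e.g. (x << 2) << 4 == x << 6, which is why a single pslld $6 suffices above.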

define <4 x i32> @combine_vec_shl_shl1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_shl1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [16,64,256,1024]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_shl1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 0, i32 1, i32 2, i32 3>
  %2 = shl <4 x i32> %1, <i32 4, i32 5, i32 6, i32 7>
  ret <4 x i32> %2
}

; fold (shl (shl x, c1), c2) -> 0 if c1 + c2 >= size(x)
define <4 x i32> @combine_vec_shl_shl_zero0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero0:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl_zero0:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 16, i32 16, i32 16, i32 16>
  %2 = shl <4 x i32> %1, <i32 20, i32 20, i32 20, i32 20>
  ret <4 x i32> %2
}
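
; Here 16 + 20 = 36 >= 32, so every bit is shifted out and the result is a
; known-zero vector.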

define <4 x i32> @combine_vec_shl_shl_zero1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_shl_zero1:
; SSE:       # %bb.0:
; SSE-NEXT:    xorps %xmm0, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_shl_zero1:
; AVX:       # %bb.0:
; AVX-NEXT:    vxorps %xmm0, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = shl <4 x i32> %x, <i32 17, i32 18, i32 19, i32 20>
  %2 = shl <4 x i32> %1, <i32 25, i32 26, i32 27, i32 28>
  ret <4 x i32> %2
}

; fold (shl (ext (shl x, c1)), c2) -> (ext (shl x, (add c1, c2)))
define <8 x i32> @combine_vec_shl_ext_shl0(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_ext_shl0:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    pslld $20, %xmm0
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    pslld $20, %xmm1
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_ext_shl0:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE41-NEXT:    pslld $20, %xmm1
; SSE41-NEXT:    pslld $20, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ext_shl0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    vpslld $20, %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = shl <8 x i16> %x, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16, i32 16>
  ret <8 x i32> %3
}
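
; The two shifts merge to 4 + 16 = 20 in the extended domain; since x << 20
; only keeps bits 0..11 of the extended value, sign- and zero-extension agree
; here, which is why the AVX output can use vpmovzxwd for a sext source.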

define <8 x i32> @combine_vec_shl_ext_shl1(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_ext_shl1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmullw {{.*}}(%rip), %xmm0
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm0[4],xmm1[5],xmm0[5],xmm1[6],xmm0[6],xmm1[7],xmm0[7]
; SSE2-NEXT:    psrad $16, %xmm1
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0,0,1,1,2,2,3,3]
; SSE2-NEXT:    psrad $16, %xmm0
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    pslld $31, %xmm2
; SSE2-NEXT:    pslld $30, %xmm0
; SSE2-NEXT:    movsd {{.*#+}} xmm0 = xmm2[0],xmm0[1]
; SSE2-NEXT:    movdqa %xmm1, %xmm2
; SSE2-NEXT:    pslld $29, %xmm2
; SSE2-NEXT:    pslld $28, %xmm1
; SSE2-NEXT:    movsd {{.*#+}} xmm1 = xmm2[0],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_ext_shl1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmullw {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovsxwd %xmm1, %xmm1
; SSE41-NEXT:    pmovsxwd %xmm0, %xmm0
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    pslld $30, %xmm2
; SSE41-NEXT:    pslld $31, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm1, %xmm2
; SSE41-NEXT:    pslld $28, %xmm2
; SSE41-NEXT:    pslld $29, %xmm1
; SSE41-NEXT:    pblendw {{.*#+}} xmm1 = xmm1[0,1,2,3],xmm2[4,5,6,7]
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ext_shl1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmullw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmovsxwd %xmm0, %ymm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = shl <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = sext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 31, i32 31, i32 30, i32 30, i32 29, i32 29, i32 28, i32 28>
  ret <8 x i32> %3
}

; fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
define <8 x i32> @combine_vec_shl_zext_lshr0(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_zext_lshr0:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    pand {{.*}}(%rip), %xmm1
; SSE2-NEXT:    pxor %xmm2, %xmm2
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm2[0],xmm0[1],xmm2[1],xmm0[2],xmm2[2],xmm0[3],xmm2[3]
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm1 = xmm1[4],xmm2[4],xmm1[5],xmm2[5],xmm1[6],xmm2[6],xmm1[7],xmm2[7]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_zext_lshr0:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm2 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    movdqa %xmm2, %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpand {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %x, <i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4, i16 4>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4, i32 4>
  ret <8 x i32> %3
}
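
; With equal shift amounts C, (x >> 4) << 4 is just x & 0xFFF0, so the whole
; operation becomes a 16-bit mask (pand) followed by the zero-extend.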

define <8 x i32> @combine_vec_shl_zext_lshr1(<8 x i16> %x) {
; SSE2-LABEL: combine_vec_shl_zext_lshr1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    pmulhuw {{.*}}(%rip), %xmm0
; SSE2-NEXT:    pxor %xmm1, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    punpckhwd {{.*#+}} xmm2 = xmm2[4],xmm1[4],xmm2[5],xmm1[5],xmm2[6],xmm1[6],xmm2[7],xmm1[7]
; SSE2-NEXT:    punpcklwd {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1],xmm0[2],xmm1[2],xmm0[3],xmm1[3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT:    pshufd {{.*#+}} xmm3 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    movdqa {{.*#+}} xmm3 = [32,64,128,256]
; SSE2-NEXT:    pshufd {{.*#+}} xmm4 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm3, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm3[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm4, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm2[0],xmm1[1],xmm2[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_zext_lshr1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulhuw {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pshufd {{.*#+}} xmm1 = xmm0[2,3,0,1]
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero
; SSE41-NEXT:    pmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm1
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_zext_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulhuw {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %ymm0, %ymm0
; AVX-NEXT:    retq
  %1 = lshr <8 x i16> %x, <i16 1, i16 2, i16 3, i16 4, i16 5, i16 6, i16 7, i16 8>
  %2 = zext <8 x i16> %1 to <8 x i32>
  %3 = shl <8 x i32> %2, <i32 1, i32 2, i32 3, i32 4, i32 5, i32 6, i32 7, i32 8>
  ret <8 x i32> %3
}

; fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
define <4 x i32> @combine_vec_shl_ge_ashr_exact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ge_ashr_exact0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_exact0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}
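
; The 'exact' flag guarantees no set bits are discarded by the ashr, so
; shifting right by 3 and then left by 5 is the same as one shl by 5 - 3 = 2.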

define <4 x i32> @combine_vec_shl_ge_ashr_exact1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_ge_ashr_exact1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $8, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrad $5, %xmm2
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $4, %xmm1
; SSE2-NEXT:    psrad $3, %xmm0
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32,64,128,256]
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_ge_ashr_exact1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $8, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psrad $4, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $5, %xmm1
; SSE41-NEXT:    psrad $3, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ge_ashr_exact1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 3, i32 4, i32 5, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C1-C2)) if C1 > C2
define <4 x i32> @combine_vec_shl_lt_ashr_exact0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_lt_ashr_exact0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrad $2, %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_exact0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrad $2, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}
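
; Conversely, right 5 then left 3 collapses to a single arithmetic right
; shift by 5 - 3 = 2, matching the psrad $2 above.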

define <4 x i32> @combine_vec_shl_lt_ashr_exact1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_lt_ashr_exact1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $8, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrad $7, %xmm2
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrad $6, %xmm1
; SSE2-NEXT:    psrad $5, %xmm0
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [8,16,32,256]
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_lt_ashr_exact1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $8, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psrad $6, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrad $7, %xmm1
; SSE41-NEXT:    psrad $5, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_lt_ashr_exact1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsravd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr exact <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 3, i32 4, i32 5, i32 8>
  ret <4 x i32> %2
}

; fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) if C2 > C1
define <4 x i32> @combine_vec_shl_gt_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_gt_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 3, i32 3, i32 3>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}
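
; Worked example: (x >> 3) << 5 == (x << 2) & ~31, and ~31 is 0xFFFFFFE0 =
; 4294967264, the broadcast mask in the AVX output.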

define <4 x i32> @combine_vec_shl_gt_lshr1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_gt_lshr1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $8, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrld $5, %xmm2
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $4, %xmm1
; SSE2-NEXT:    psrld $3, %xmm0
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [32,64,128,256]
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_gt_lshr1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $8, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psrld $4, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $5, %xmm1
; SSE41-NEXT:    psrld $3, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_gt_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 3, i32 4, i32 5, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (srl x, c1), c2) -> (and (srl x, (sub c1, c2)), MASK) if C1 >= C2
define <4 x i32> @combine_vec_shl_le_lshr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_le_lshr0:
; SSE:       # %bb.0:
; SSE-NEXT:    psrld $2, %xmm0
; SSE-NEXT:    pand {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_le_lshr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [1073741816,1073741816,1073741816,1073741816]
; AVX-NEXT:    vpsrld $2, %xmm0, %xmm0
; AVX-NEXT:    vpand %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 3, i32 3, i32 3, i32 3>
  ret <4 x i32> %2
}
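
; Worked example: (x >> 5) << 3 == (x >> 2) & ~7; the mask also clears the
; two high bits vacated by the srl, giving 0x3FFFFFF8 = 1073741816.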

define <4 x i32> @combine_vec_shl_le_lshr1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_le_lshr1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $8, %xmm1
; SSE2-NEXT:    movdqa %xmm0, %xmm2
; SSE2-NEXT:    psrld $7, %xmm2
; SSE2-NEXT:    punpckhqdq {{.*#+}} xmm2 = xmm2[1],xmm1[1]
; SSE2-NEXT:    movdqa %xmm0, %xmm1
; SSE2-NEXT:    psrld $6, %xmm1
; SSE2-NEXT:    psrld $5, %xmm0
; SSE2-NEXT:    punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm1[0]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[0,3],xmm2[0,3]
; SSE2-NEXT:    movdqa {{.*#+}} xmm2 = [8,16,32,256]
; SSE2-NEXT:    movaps %xmm0, %xmm1
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    shufps {{.*#+}} xmm0 = xmm0[1,1,3,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm2[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm0, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1]
; SSE2-NEXT:    movdqa %xmm1, %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_le_lshr1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $8, %xmm1
; SSE41-NEXT:    movdqa %xmm0, %xmm2
; SSE41-NEXT:    psrld $6, %xmm2
; SSE41-NEXT:    pblendw {{.*#+}} xmm2 = xmm2[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    movdqa %xmm0, %xmm1
; SSE41-NEXT:    psrld $7, %xmm1
; SSE41-NEXT:    psrld $5, %xmm0
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1,2,3],xmm1[4,5,6,7]
; SSE41-NEXT:    pblendw {{.*#+}} xmm0 = xmm0[0,1],xmm2[2,3],xmm0[4,5],xmm2[6,7]
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_le_lshr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsrlvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = lshr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 3, i32 4, i32 5, i32 8>
  ret <4 x i32> %2
}

; fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
define <4 x i32> @combine_vec_shl_ashr0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr0:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ashr0:
; AVX:       # %bb.0:
; AVX-NEXT:    vbroadcastss {{.*#+}} xmm1 = [4294967264,4294967264,4294967264,4294967264]
; AVX-NEXT:    vandps %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 5, i32 5, i32 5, i32 5>
  ret <4 x i32> %2
}
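
; When the amounts match, (x >>s 5) << 5 simply clears the low five bits:
; a single and with 0xFFFFFFE0 (4294967264).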

define <4 x i32> @combine_vec_shl_ashr1(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_ashr1:
; SSE:       # %bb.0:
; SSE-NEXT:    andps {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_ashr1:
; AVX:       # %bb.0:
; AVX-NEXT:    vandps {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = ashr <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 5, i32 6, i32 7, i32 8>
  ret <4 x i32> %2
}

; fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
define <4 x i32> @combine_vec_shl_add0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_add0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_add0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT:    vpaddd %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}
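
; e.g. (x + 5) << 2 == (x << 2) + 20, so the constant is pre-shifted into the
; broadcast [20,20,20,20] above.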

define <4 x i32> @combine_vec_shl_add1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_add1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_add1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    paddd {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_add1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpaddd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = add <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}

; fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
define <4 x i32> @combine_vec_shl_or0(<4 x i32> %x) {
; SSE-LABEL: combine_vec_shl_or0:
; SSE:       # %bb.0:
; SSE-NEXT:    pslld $2, %xmm0
; SSE-NEXT:    por {{.*}}(%rip), %xmm0
; SSE-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_or0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpslld $2, %xmm0, %xmm0
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT:    vpor %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = or <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}
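
; The same rewrite applies to or, since shl distributes over it:
; (x | 5) << 2 == (x << 2) | 20.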

define <4 x i32> @combine_vec_shl_or1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_or1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [2,4,8,16]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    por {{.*}}(%rip), %xmm0
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_or1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    por {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_or1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpsllvd {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    vpor {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = or <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}

; fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
define <4 x i32> @combine_vec_shl_mul0(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_mul0:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [20,20,20,20]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm2
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm2[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_mul0:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_mul0:
; AVX:       # %bb.0:
; AVX-NEXT:    vpbroadcastd {{.*#+}} xmm1 = [20,20,20,20]
; AVX-NEXT:    vpmulld %xmm1, %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 5, i32 5, i32 5, i32 5>
  %2 = shl <4 x i32> %1, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %2
}
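
; And for mul: (x * 5) << 2 == x * (5 << 2) == x * 20, hence the single
; pmulld / vpmulld by [20,20,20,20].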

define <4 x i32> @combine_vec_shl_mul1(<4 x i32> %x) {
; SSE2-LABEL: combine_vec_shl_mul1:
; SSE2:       # %bb.0:
; SSE2-NEXT:    movdqa {{.*#+}} xmm1 = [10,24,56,128]
; SSE2-NEXT:    pshufd {{.*#+}} xmm2 = xmm0[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm1, %xmm0
; SSE2-NEXT:    pshufd {{.*#+}} xmm0 = xmm0[0,2,2,3]
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[1,1,3,3]
; SSE2-NEXT:    pmuludq %xmm2, %xmm1
; SSE2-NEXT:    pshufd {{.*#+}} xmm1 = xmm1[0,2,2,3]
; SSE2-NEXT:    punpckldq {{.*#+}} xmm0 = xmm0[0],xmm1[0],xmm0[1],xmm1[1]
; SSE2-NEXT:    retq
;
; SSE41-LABEL: combine_vec_shl_mul1:
; SSE41:       # %bb.0:
; SSE41-NEXT:    pmulld {{.*}}(%rip), %xmm0
; SSE41-NEXT:    retq
;
; AVX-LABEL: combine_vec_shl_mul1:
; AVX:       # %bb.0:
; AVX-NEXT:    vpmulld {{.*}}(%rip), %xmm0, %xmm0
; AVX-NEXT:    retq
  %1 = mul <4 x i32> %x, <i32 5, i32 6, i32 7, i32 8>
  %2 = shl <4 x i32> %1, <i32 1, i32 2, i32 3, i32 4>
  ret <4 x i32> %2
}