From 6c409226e672c0b4371e1cd9248589df849fcf0d Mon Sep 17 00:00:00 2001 From: Craig Topper Date: Mon, 26 Feb 2018 04:43:24 +0000 Subject: [PATCH] [X86] Don't use getZExtValue when we have no idea how large the input elements are. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@326066 91177308-0d34-0410-b5e6-96231b3b80d8 --- lib/Target/X86/X86ISelLowering.cpp | 4 +- test/CodeGen/X86/avg.ll | 1049 ++++++++++++++++++++++++++++++++++++ 2 files changed, 1051 insertions(+), 2 deletions(-) diff --git a/lib/Target/X86/X86ISelLowering.cpp b/lib/Target/X86/X86ISelLowering.cpp index 504bee73fe2..47382149f37 100644 --- a/lib/Target/X86/X86ISelLowering.cpp +++ b/lib/Target/X86/X86ISelLowering.cpp @@ -34401,8 +34401,8 @@ static SDValue detectAVGPattern(SDValue In, EVT VT, SelectionDAG &DAG, ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op); if (!C) return false; - uint64_t Val = C->getZExtValue(); - if (Val < Min || Val > Max) + const APInt &Val = C->getAPIntValue(); + if (Val.ult(Min) || Val.ugt(Max)) return false; } return true; diff --git a/test/CodeGen/X86/avg.ll b/test/CodeGen/X86/avg.ll index 08911fb26d9..bcba6c16795 100644 --- a/test/CodeGen/X86/avg.ll +++ b/test/CodeGen/X86/avg.ll @@ -2023,3 +2023,1052 @@ define <512 x i8> @avg_v512i8_3(<512 x i8> %a, <512 x i8> %b) nounwind { %res = trunc <512 x i16> %lshr to <512 x i8> ret <512 x i8> %res } + +; This is not an avg, but it's structurally similar and previously caused a crash +; because the constants can't be read with APInt::getZExtValue. +define void @not_avg_v16i8_wide_constants(<16 x i8>* %a, <16 x i8>* %b) nounwind { +; SSE2-LABEL: not_avg_v16i8_wide_constants: +; SSE2: # %bb.0: +; SSE2-NEXT: pushq %rbp +; SSE2-NEXT: pushq %r15 +; SSE2-NEXT: pushq %r14 +; SSE2-NEXT: pushq %r13 +; SSE2-NEXT: pushq %r12 +; SSE2-NEXT: pushq %rbx +; SSE2-NEXT: subq $56, %rsp +; SSE2-NEXT: movaps (%rdi), %xmm1 +; SSE2-NEXT: movaps (%rsi), %xmm0 +; SSE2-NEXT: movaps %xmm1, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edi +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r15d +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %edx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r13d +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r10d +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r9d +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r12d +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp +; SSE2-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp +; SSE2-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp +; SSE2-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp +; SSE2-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ebp +; SSE2-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movaps %xmm0, -{{[0-9]+}}(%rsp) +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r11d +; SSE2-NEXT: addq %rax, %r11 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq %rdi, %rax +; SSE2-NEXT: movq %rax, %rdi +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r14d +; SSE2-NEXT: addq %r15, %r14 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq %rsi, %rax +; SSE2-NEXT: movq %rax, %r15 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %esi +; SSE2-NEXT: addq %rdx, %rsi +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r8d +; SSE2-NEXT: addq %r13, %r8 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), 
%eax +; SSE2-NEXT: addq %r10, %rax +; SSE2-NEXT: movq %rax, %r10 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq %rcx, %rax +; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %r13d +; SSE2-NEXT: addq %r9, %r13 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %ecx +; SSE2-NEXT: addq %rbx, %rcx +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq %r12, %rax +; SSE2-NEXT: movq %rax, %r9 +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; SSE2-NEXT: movq %rax, %rbp +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movzbl -{{[0-9]+}}(%rsp), %eax +; SSE2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: xorl %eax, %eax +; SSE2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: addq $-1, %r11 +; SSE2-NEXT: movq %r11, {{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %r12d +; SSE2-NEXT: adcq $-1, %r12 +; SSE2-NEXT: addq $-1, %rdi +; SSE2-NEXT: movq %rdi, {{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %edx +; SSE2-NEXT: adcq $-1, %rdx +; SSE2-NEXT: addq $-1, %r14 +; SSE2-NEXT: movq %r14, {{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %edi +; SSE2-NEXT: adcq $-1, %rdi +; SSE2-NEXT: addq $-1, %r15 +; SSE2-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %eax +; SSE2-NEXT: adcq $-1, %rax +; SSE2-NEXT: addq $-1, %rsi +; SSE2-NEXT: movq %rsi, (%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %r15d +; SSE2-NEXT: adcq $-1, %r15 +; SSE2-NEXT: addq $-1, %r8 +; SSE2-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %r14d +; SSE2-NEXT: adcq $-1, %r14 +; SSE2-NEXT: addq $-1, %r10 +; SSE2-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %esi +; SSE2-NEXT: adcq $-1, %rsi +; SSE2-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; SSE2-NEXT: addq $-1, %r10 +; SSE2-NEXT: movl $0, %esi +; SSE2-NEXT: adcq $-1, %rsi +; SSE2-NEXT: movq %rsi, %r8 +; SSE2-NEXT: addq $-1, %r13 +; SSE2-NEXT: movl $0, %esi +; SSE2-NEXT: adcq $-1, %rsi +; SSE2-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: addq $-1, %rcx +; SSE2-NEXT: movl $0, %esi +; SSE2-NEXT: adcq $-1, %rsi +; SSE2-NEXT: addq $-1, %r9 +; SSE2-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %r9d +; SSE2-NEXT: adcq $-1, %r9 +; SSE2-NEXT: addq $-1, %rbp +; SSE2-NEXT: movq %rbp, {{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: movl $0, %r11d +; SSE2-NEXT: adcq $-1, %r11 +; SSE2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; SSE2-NEXT: movl $0, %ebx +; SSE2-NEXT: adcq $-1, %rbx +; SSE2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; SSE2-NEXT: movl $0, %ebp +; SSE2-NEXT: adcq $-1, %rbp +; SSE2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; SSE2-NEXT: movl $0, %ebx +; SSE2-NEXT: adcq $-1, %rbx +; SSE2-NEXT: movq %rbx, 
{{[0-9]+}}(%rsp) # 8-byte Spill +; SSE2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; SSE2-NEXT: adcq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; SSE2-NEXT: shldq $63, %rcx, %rsi +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rbx # 8-byte Reload +; SSE2-NEXT: shldq $63, %r13, %rbx +; SSE2-NEXT: shldq $63, %r10, %r8 +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shldq $63, %rcx, %r10 +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shldq $63, %rcx, %r14 +; SSE2-NEXT: movq (%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shldq $63, %rcx, %r15 +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shldq $63, %rcx, %rdi +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shldq $63, %rcx, %rdx +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shldq $63, %rcx, %r12 +; SSE2-NEXT: movq %r12, %xmm11 +; SSE2-NEXT: movq %rdx, %xmm5 +; SSE2-NEXT: movq %rdi, %xmm13 +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shrdq $1, %rax, %rcx +; SSE2-NEXT: movq %rcx, %xmm15 +; SSE2-NEXT: shrq %rax +; SSE2-NEXT: movq %rax, %xmm8 +; SSE2-NEXT: movq %r15, %xmm9 +; SSE2-NEXT: movq %r14, %xmm6 +; SSE2-NEXT: movq %r10, %xmm7 +; SSE2-NEXT: movq %r8, %xmm0 +; SSE2-NEXT: movdqa %xmm0, -{{[0-9]+}}(%rsp) # 16-byte Spill +; SSE2-NEXT: movq %rbx, %xmm10 +; SSE2-NEXT: movq %rsi, %xmm4 +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: shrdq $1, %r9, %rax +; SSE2-NEXT: movq %rax, %xmm1 +; SSE2-NEXT: shrq %r9 +; SSE2-NEXT: movq %r9, %xmm12 +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: shrdq $1, %r11, %rax +; SSE2-NEXT: movq %rax, %xmm2 +; SSE2-NEXT: shrq %r11 +; SSE2-NEXT: movq %r11, %xmm14 +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shrdq $1, %rcx, %rax +; SSE2-NEXT: movq %rax, %xmm3 +; SSE2-NEXT: movq %rcx, %rax +; SSE2-NEXT: shrq %rax +; SSE2-NEXT: pslldq {{.*#+}} xmm11 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm11[0] +; SSE2-NEXT: pslldq {{.*#+}} xmm5 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm5[0,1] +; SSE2-NEXT: pand {{.*}}(%rip), %xmm5 +; SSE2-NEXT: por %xmm11, %xmm5 +; SSE2-NEXT: movq %rax, %xmm11 +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: movq %rbp, %rcx +; SSE2-NEXT: shrdq $1, %rcx, %rax +; SSE2-NEXT: pslldq {{.*#+}} xmm13 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm13[0,1,2] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm15 = xmm15[0],xmm8[0] +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = [255,255,255,255,255,255,255,255,255,255,255,255,255,0,255,255] +; SSE2-NEXT: pshufd {{.*#+}} xmm15 = xmm15[0,1,2,0] +; SSE2-NEXT: pand %xmm0, %xmm15 +; SSE2-NEXT: pandn %xmm13, %xmm0 +; SSE2-NEXT: movq %rax, %xmm8 +; SSE2-NEXT: shrq %rcx +; SSE2-NEXT: por %xmm15, %xmm0 +; SSE2-NEXT: movdqa {{.*#+}} xmm13 = [65535,65535,65535,65535,65535,65535,65535,0] +; SSE2-NEXT: pand %xmm13, %xmm0 +; SSE2-NEXT: pandn %xmm5, %xmm13 +; SSE2-NEXT: movq %rcx, %xmm15 +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shrdq $1, %rcx, %rax +; SSE2-NEXT: por %xmm0, %xmm13 +; SSE2-NEXT: pslldq {{.*#+}} xmm9 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm9[0,1,2,3,4] +; SSE2-NEXT: movdqa {{.*#+}} xmm0 = 
[255,255,255,255,255,255,255,255,255,255,255,0,255,255,255,255] +; SSE2-NEXT: pslldq {{.*#+}} xmm6 = zero,zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm6[0,1,2,3,4,5] +; SSE2-NEXT: pand %xmm0, %xmm6 +; SSE2-NEXT: pandn %xmm9, %xmm0 +; SSE2-NEXT: movq %rax, %xmm9 +; SSE2-NEXT: shrq %rcx +; SSE2-NEXT: por %xmm6, %xmm0 +; SSE2-NEXT: pslldq {{.*#+}} xmm7 = zero,zero,zero,zero,zero,zero,zero,zero,zero,xmm7[0,1,2,3,4,5,6] +; SSE2-NEXT: movdqa {{.*#+}} xmm6 = [255,255,255,255,255,255,255,255,255,0,255,255,255,255,255,255] +; SSE2-NEXT: pshufd $68, -{{[0-9]+}}(%rsp), %xmm5 # 16-byte Folded Reload +; SSE2-NEXT: # xmm5 = mem[0,1,0,1] +; SSE2-NEXT: pand %xmm6, %xmm5 +; SSE2-NEXT: pandn %xmm7, %xmm6 +; SSE2-NEXT: movq %rcx, %xmm7 +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; SSE2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; SSE2-NEXT: shrdq $1, %rax, %rcx +; SSE2-NEXT: por %xmm5, %xmm6 +; SSE2-NEXT: movdqa {{.*#+}} xmm5 = [65535,65535,65535,65535,65535,0,65535,65535] +; SSE2-NEXT: pand %xmm5, %xmm6 +; SSE2-NEXT: pandn %xmm0, %xmm5 +; SSE2-NEXT: movq %rcx, %xmm0 +; SSE2-NEXT: shrq %rax +; SSE2-NEXT: por %xmm6, %xmm5 +; SSE2-NEXT: movq %rax, %xmm6 +; SSE2-NEXT: pshufd {{.*#+}} xmm5 = xmm5[0,1,2,2] +; SSE2-NEXT: punpckhdq {{.*#+}} xmm5 = xmm5[2],xmm13[2],xmm5[3],xmm13[3] +; SSE2-NEXT: punpcklbw {{.*#+}} xmm4 = xmm4[0],xmm10[0],xmm4[1],xmm10[1],xmm4[2],xmm10[2],xmm4[3],xmm10[3],xmm4[4],xmm10[4],xmm4[5],xmm10[5],xmm4[6],xmm10[6],xmm4[7],xmm10[7] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm1 = xmm1[0],xmm12[0] +; SSE2-NEXT: pslld $24, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm2 = xmm2[0],xmm14[0] +; SSE2-NEXT: movdqa {{.*#+}} xmm10 = [255,255,255,0,255,255,255,255,255,255,255,255,255,255,255,255] +; SSE2-NEXT: pslld $16, %xmm2 +; SSE2-NEXT: pand %xmm10, %xmm2 +; SSE2-NEXT: pandn %xmm1, %xmm10 +; SSE2-NEXT: por %xmm2, %xmm10 +; SSE2-NEXT: movdqa {{.*#+}} xmm1 = [65535,0,65535,65535,65535,65535,65535,65535] +; SSE2-NEXT: pand %xmm1, %xmm4 +; SSE2-NEXT: pandn %xmm10, %xmm1 +; SSE2-NEXT: por %xmm4, %xmm1 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm11[0] +; SSE2-NEXT: psllq $56, %xmm3 +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm8 = xmm8[0],xmm15[0] +; SSE2-NEXT: movdqa {{.*#+}} xmm2 = [255,255,255,255,255,255,255,0,255,255,255,255,255,255,255,255] +; SSE2-NEXT: psllq $48, %xmm8 +; SSE2-NEXT: pand %xmm2, %xmm8 +; SSE2-NEXT: pandn %xmm3, %xmm2 +; SSE2-NEXT: por %xmm8, %xmm2 +; SSE2-NEXT: movdqa {{.*#+}} xmm3 = [65535,65535,65535,0,65535,65535,65535,65535] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm9 = xmm9[0],xmm7[0] +; SSE2-NEXT: psllq $40, %xmm9 +; SSE2-NEXT: movdqa {{.*#+}} xmm4 = [255,255,255,255,255,0,255,255,255,255,255,255,255,255,255,255] +; SSE2-NEXT: punpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm6[0] +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm0[0,0,1,1] +; SSE2-NEXT: pand %xmm4, %xmm0 +; SSE2-NEXT: pandn %xmm9, %xmm4 +; SSE2-NEXT: por %xmm0, %xmm4 +; SSE2-NEXT: pand %xmm3, %xmm4 +; SSE2-NEXT: pandn %xmm2, %xmm3 +; SSE2-NEXT: por %xmm3, %xmm4 +; SSE2-NEXT: pshufd {{.*#+}} xmm0 = xmm4[1,1,2,3] +; SSE2-NEXT: punpckldq {{.*#+}} xmm1 = xmm1[0],xmm0[0],xmm1[1],xmm0[1] +; SSE2-NEXT: movsd {{.*#+}} xmm5 = xmm1[0],xmm5[1] +; SSE2-NEXT: movupd %xmm5, (%rax) +; SSE2-NEXT: addq $56, %rsp +; SSE2-NEXT: popq %rbx +; SSE2-NEXT: popq %r12 +; SSE2-NEXT: popq %r13 +; SSE2-NEXT: popq %r14 +; SSE2-NEXT: popq %r15 +; SSE2-NEXT: popq %rbp +; SSE2-NEXT: retq +; +; AVX1-LABEL: not_avg_v16i8_wide_constants: +; AVX1: # %bb.0: +; AVX1-NEXT: pushq %rbp +; AVX1-NEXT: pushq %r15 +; AVX1-NEXT: pushq %r14 +; AVX1-NEXT: pushq %r13 
+; AVX1-NEXT: pushq %r12 +; AVX1-NEXT: pushq %rbx +; AVX1-NEXT: subq $24, %rsp +; AVX1-NEXT: movq %rsi, %r8 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpextrq $1, %xmm2, %rcx +; AVX1-NEXT: vmovq %xmm2, %rax +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpextrq $1, %xmm1, %rbx +; AVX1-NEXT: vmovq %xmm1, %rbp +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero +; AVX1-NEXT: vpextrq $1, %xmm1, %r10 +; AVX1-NEXT: vmovq %xmm1, %r12 +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero +; AVX1-NEXT: vpextrq $1, %xmm0, %r15 +; AVX1-NEXT: vmovq %xmm0, %r14 +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpextrq $1, %xmm2, %rdx +; AVX1-NEXT: vmovq %xmm2, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: vmovq %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm0[0],zero,xmm0[1],zero +; AVX1-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm2 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm4 = xmm3[0],zero,xmm3[1],zero +; AVX1-NEXT: vpextrq $1, %xmm4, %r9 +; AVX1-NEXT: addq %rcx, %r9 +; AVX1-NEXT: vmovq %xmm4, %r13 +; AVX1-NEXT: addq %rax, %r13 +; AVX1-NEXT: vpshufd {{.*#+}} xmm3 = xmm3[2,3,0,1] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm3[0],zero,xmm3[1],zero +; AVX1-NEXT: vpextrq $1, %xmm3, %rcx +; AVX1-NEXT: addq %rbx, %rcx +; AVX1-NEXT: vmovq %xmm3, %r11 +; AVX1-NEXT: addq %rbp, %r11 +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero +; AVX1-NEXT: vpextrq $1, %xmm3, %rax +; AVX1-NEXT: addq %r10, %rax +; AVX1-NEXT: movq %rax, %rsi +; AVX1-NEXT: vmovq %xmm3, %rax +; AVX1-NEXT: addq %r12, %rax +; AVX1-NEXT: movq %rax, %rbx +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero +; AVX1-NEXT: vpextrq $1, %xmm2, %rax +; AVX1-NEXT: addq %r15, %rax +; AVX1-NEXT: movq %rax, %r15 +; AVX1-NEXT: vmovq %xmm2, %rax +; AVX1-NEXT: addq %r14, %rax +; AVX1-NEXT: movq %rax, %r14 +; AVX1-NEXT: vmovq %xmm1, %rax +; AVX1-NEXT: vpshufd {{.*#+}} xmm0 = xmm0[2,3,0,1] +; AVX1-NEXT: vpmovzxdq 
{{.*#+}} xmm0 = xmm0[0],zero,xmm0[1],zero +; AVX1-NEXT: vpmovzxbw {{.*#+}} xmm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm3 = xmm2[0],zero,xmm2[1],zero +; AVX1-NEXT: vpextrq $1, %xmm3, %rbp +; AVX1-NEXT: addq %rdx, %rbp +; AVX1-NEXT: movq %rbp, %r8 +; AVX1-NEXT: vmovq %xmm3, %rbp +; AVX1-NEXT: addq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; AVX1-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: vpshufd {{.*#+}} xmm2 = xmm2[2,3,0,1] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm2[0],zero,xmm2[1],zero +; AVX1-NEXT: vpextrq $1, %xmm2, %rdx +; AVX1-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; AVX1-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: vmovq %xmm2, %rdx +; AVX1-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; AVX1-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovzxwd {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm2 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpextrq $1, %xmm2, %rdx +; AVX1-NEXT: addq -{{[0-9]+}}(%rsp), %rdx # 8-byte Folded Reload +; AVX1-NEXT: movq %rdx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: vmovq %xmm2, %r12 +; AVX1-NEXT: addq %rax, %r12 +; AVX1-NEXT: vpextrq $1, %xmm0, %rax +; AVX1-NEXT: vpshufd {{.*#+}} xmm1 = xmm1[2,3,0,1] +; AVX1-NEXT: vpmovzxdq {{.*#+}} xmm1 = xmm1[0],zero,xmm1[1],zero +; AVX1-NEXT: vpextrq $1, %xmm1, %r10 +; AVX1-NEXT: addq %rax, %r10 +; AVX1-NEXT: vmovq %xmm0, %rax +; AVX1-NEXT: vmovq %xmm1, %rdi +; AVX1-NEXT: addq %rax, %rdi +; AVX1-NEXT: addq $-1, %r9 +; AVX1-NEXT: movq %r9, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: adcq $-1, %rax +; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq $-1, %r13 +; AVX1-NEXT: movq %r13, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: adcq $-1, %rax +; AVX1-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq $-1, %rcx +; AVX1-NEXT: movq %rcx, (%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: adcq $-1, %rax +; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq $-1, %r11 +; AVX1-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: adcq $-1, %rax +; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq $-1, %rsi +; AVX1-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: adcq $-1, %rax +; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq $-1, %rbx +; AVX1-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: adcq $-1, %rax +; AVX1-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: addq $-1, %r15 +; AVX1-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %ebp +; AVX1-NEXT: adcq $-1, %rbp +; AVX1-NEXT: addq $-1, %r14 +; AVX1-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %r15d +; AVX1-NEXT: adcq $-1, %r15 +; AVX1-NEXT: addq $-1, %r8 +; AVX1-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movl $0, %eax +; AVX1-NEXT: adcq $-1, %rax +; AVX1-NEXT: movq %rax, %rsi +; AVX1-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: movl $0, %r13d +; AVX1-NEXT: adcq $-1, 
%r13 +; AVX1-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX1-NEXT: movl $0, %r14d +; AVX1-NEXT: adcq $-1, %r14 +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX1-NEXT: addq $-1, %rdx +; AVX1-NEXT: movl $0, %r11d +; AVX1-NEXT: adcq $-1, %r11 +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: addq $-1, %rax +; AVX1-NEXT: movl $0, %ebx +; AVX1-NEXT: adcq $-1, %rbx +; AVX1-NEXT: addq $-1, %r12 +; AVX1-NEXT: movl $0, %r9d +; AVX1-NEXT: adcq $-1, %r9 +; AVX1-NEXT: addq $-1, %r10 +; AVX1-NEXT: movl $0, %r8d +; AVX1-NEXT: adcq $-1, %r8 +; AVX1-NEXT: addq $-1, %rdi +; AVX1-NEXT: movl $0, %ecx +; AVX1-NEXT: adcq $-1, %rcx +; AVX1-NEXT: shldq $63, %rdi, %rcx +; AVX1-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: shldq $63, %r10, %r8 +; AVX1-NEXT: shldq $63, %r12, %r9 +; AVX1-NEXT: shldq $63, %rax, %rbx +; AVX1-NEXT: shldq $63, %rdx, %r11 +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX1-NEXT: shldq $63, %rdx, %r14 +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX1-NEXT: shldq $63, %rdx, %r13 +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rax, %rsi +; AVX1-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rax, %r15 +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rax, %rbp +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rax, %rsi +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rax, %rcx +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdi # 8-byte Reload +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rax, %rdi +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %r12 # 8-byte Reload +; AVX1-NEXT: movq (%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rax, %r12 +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; AVX1-NEXT: movq {{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rax, %r10 +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX1-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX1-NEXT: shldq $63, %rdx, %rax +; AVX1-NEXT: vmovq %rax, %xmm8 +; AVX1-NEXT: vmovq %r10, %xmm0 +; AVX1-NEXT: vmovq %r12, %xmm1 +; AVX1-NEXT: vmovq %rdi, %xmm11 +; AVX1-NEXT: vmovq %rcx, %xmm2 +; AVX1-NEXT: vmovq %rsi, %xmm13 +; AVX1-NEXT: vmovq %rbp, %xmm14 +; AVX1-NEXT: vmovq %r15, %xmm15 +; AVX1-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm9 # 8-byte Folded Reload +; AVX1-NEXT: # xmm9 = mem[0],zero +; AVX1-NEXT: vmovq %r13, %xmm10 +; AVX1-NEXT: vmovq %r14, %xmm12 +; AVX1-NEXT: vmovq %r11, %xmm3 +; AVX1-NEXT: vmovq %rbx, %xmm4 +; AVX1-NEXT: vmovq %r9, %xmm5 +; AVX1-NEXT: vmovq %r8, %xmm6 +; AVX1-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm7 # 8-byte Folded Reload +; AVX1-NEXT: # xmm7 = mem[0],zero +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm0[0],xmm8[0] +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm11[0],xmm1[0] +; AVX1-NEXT: vshufps {{.*#+}} xmm8 = xmm8[0,2],xmm0[0,2] +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm13[0],xmm2[0] +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm15[0],xmm14[0] +; AVX1-NEXT: vshufps {{.*#+}} xmm11 = xmm0[0,2],xmm1[0,2] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15] +; AVX1-NEXT: vpshufb %xmm1, %xmm8, %xmm0 +; AVX1-NEXT: vpshufb %xmm1, %xmm11, %xmm2 +; AVX1-NEXT: 
vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm2 = xmm10[0],xmm9[0] +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm3[0],xmm12[0] +; AVX1-NEXT: vshufps {{.*#+}} xmm2 = xmm2[0,2],xmm3[0,2] +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm5[0],xmm4[0] +; AVX1-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; AVX1-NEXT: vpshufb %xmm4, %xmm0, %xmm0 +; AVX1-NEXT: vpshufb %xmm1, %xmm2, %xmm2 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm6[0] +; AVX1-NEXT: vshufps {{.*#+}} xmm3 = xmm3[0,2],xmm5[0,2] +; AVX1-NEXT: vpshufb %xmm1, %xmm3, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm2[0],xmm1[0] +; AVX1-NEXT: vpshufb %xmm4, %xmm1, %xmm1 +; AVX1-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX1-NEXT: vmovdqu %xmm0, (%rax) +; AVX1-NEXT: addq $24, %rsp +; AVX1-NEXT: popq %rbx +; AVX1-NEXT: popq %r12 +; AVX1-NEXT: popq %r13 +; AVX1-NEXT: popq %r14 +; AVX1-NEXT: popq %r15 +; AVX1-NEXT: popq %rbp +; AVX1-NEXT: retq +; +; AVX2-LABEL: not_avg_v16i8_wide_constants: +; AVX2: # %bb.0: +; AVX2-NEXT: pushq %rbp +; AVX2-NEXT: pushq %r15 +; AVX2-NEXT: pushq %r14 +; AVX2-NEXT: pushq %r13 +; AVX2-NEXT: pushq %r12 +; AVX2-NEXT: pushq %rbx +; AVX2-NEXT: subq $16, %rsp +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpextrq $1, %xmm3, %rcx +; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: vpextrq $1, %xmm2, %rbx +; AVX2-NEXT: vmovq %xmm2, %rdx +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rdi +; AVX2-NEXT: vmovq %xmm2, %r11 +; AVX2-NEXT: vpextrq $1, %xmm1, %r13 +; AVX2-NEXT: vmovq %xmm1, %r12 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rbp +; AVX2-NEXT: vmovq %xmm2, %r10 +; AVX2-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: vmovq %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX2-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: vmovq %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero,xmm2[4],zero,xmm2[5],zero,xmm2[6],zero,xmm2[7],zero +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm3 = 
xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX2-NEXT: vpextrq $1, %xmm4, %r15 +; AVX2-NEXT: addq %rcx, %r15 +; AVX2-NEXT: vmovq %xmm4, %r9 +; AVX2-NEXT: addq %rax, %r9 +; AVX2-NEXT: vpextrq $1, %xmm3, %rax +; AVX2-NEXT: addq %rbx, %rax +; AVX2-NEXT: movq %rax, %rbx +; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: addq %rdx, %rax +; AVX2-NEXT: movq %rax, %r8 +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm2 +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpextrq $1, %xmm3, %rax +; AVX2-NEXT: addq %rdi, %rax +; AVX2-NEXT: movq %rax, %rcx +; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: addq %r11, %rax +; AVX2-NEXT: movq %rax, %r11 +; AVX2-NEXT: vpextrq $1, %xmm2, %r14 +; AVX2-NEXT: addq %r13, %r14 +; AVX2-NEXT: vmovq %xmm2, %rax +; AVX2-NEXT: addq %r12, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX2-NEXT: vpextrq $1, %xmm3, %rax +; AVX2-NEXT: addq %rbp, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: vmovq %xmm3, %rax +; AVX2-NEXT: addq %r10, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: vpextrq $1, %xmm2, %rax +; AVX2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: vmovq %xmm2, %rax +; AVX2-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX2-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX2-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX2-NEXT: vpextrq $1, %xmm2, %rbp +; AVX2-NEXT: addq -{{[0-9]+}}(%rsp), %rbp # 8-byte Folded Reload +; AVX2-NEXT: vmovq %xmm2, %r10 +; AVX2-NEXT: addq -{{[0-9]+}}(%rsp), %r10 # 8-byte Folded Reload +; AVX2-NEXT: vpextrq $1, %xmm0, %rax +; AVX2-NEXT: vpextrq $1, %xmm1, %rdi +; AVX2-NEXT: addq %rax, %rdi +; AVX2-NEXT: vmovq %xmm0, %rdx +; AVX2-NEXT: vmovq %xmm1, %rsi +; AVX2-NEXT: addq %rdx, %rsi +; AVX2-NEXT: addq $-1, %r15 +; AVX2-NEXT: movq %r15, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: adcq $-1, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq $-1, %r9 +; AVX2-NEXT: movq %r9, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: adcq $-1, %rax +; AVX2-NEXT: movq %rax, (%rsp) # 8-byte Spill +; AVX2-NEXT: addq $-1, %rbx +; AVX2-NEXT: movq %rbx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: adcq $-1, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq $-1, %r8 +; AVX2-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movl $0, %r13d +; AVX2-NEXT: adcq $-1, %r13 +; AVX2-NEXT: addq $-1, %rcx +; AVX2-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: adcq $-1, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq $-1, %r11 +; AVX2-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: movl $0, %r15d +; AVX2-NEXT: adcq $-1, %r15 +; AVX2-NEXT: addq $-1, %r14 +; AVX2-NEXT: movq %r14, -{{[0-9]+}}(%rsp) # 
8-byte Spill +; AVX2-NEXT: movl $0, %ebx +; AVX2-NEXT: adcq $-1, %rbx +; AVX2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: movl $0, %r8d +; AVX2-NEXT: adcq $-1, %r8 +; AVX2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: adcq $-1, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: adcq $-1, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: movl $0, %r12d +; AVX2-NEXT: adcq $-1, %r12 +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: addq $-1, %rcx +; AVX2-NEXT: movl $0, %r11d +; AVX2-NEXT: adcq $-1, %r11 +; AVX2-NEXT: addq $-1, %rbp +; AVX2-NEXT: movl $0, %r14d +; AVX2-NEXT: adcq $-1, %r14 +; AVX2-NEXT: addq $-1, %r10 +; AVX2-NEXT: movl $0, %r9d +; AVX2-NEXT: adcq $-1, %r9 +; AVX2-NEXT: addq $-1, %rdi +; AVX2-NEXT: movl $0, %edx +; AVX2-NEXT: adcq $-1, %rdx +; AVX2-NEXT: addq $-1, %rsi +; AVX2-NEXT: movl $0, %eax +; AVX2-NEXT: adcq $-1, %rax +; AVX2-NEXT: shldq $63, %rsi, %rax +; AVX2-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX2-NEXT: shldq $63, %rdi, %rdx +; AVX2-NEXT: shldq $63, %r10, %r9 +; AVX2-NEXT: shldq $63, %rbp, %r14 +; AVX2-NEXT: shldq $63, %rcx, %r11 +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %r12 +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %r10 # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %r10 +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %r8 +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: shldq $63, %rax, %rbx +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: shldq $63, %rax, %r15 +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %rax +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %r13 +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %rbp +; AVX2-NEXT: movq (%rsp), %rdi # 8-byte Reload +; AVX2-NEXT: movq {{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %rdi +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX2-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; AVX2-NEXT: shldq $63, %rcx, %rsi +; AVX2-NEXT: vmovq %rsi, %xmm8 +; AVX2-NEXT: vmovq %rdi, %xmm9 +; AVX2-NEXT: vmovq %rbp, %xmm10 +; AVX2-NEXT: vmovq %r13, %xmm11 +; AVX2-NEXT: vmovq %rax, %xmm12 +; AVX2-NEXT: vmovq %r15, %xmm13 +; AVX2-NEXT: vmovq %rbx, %xmm14 +; AVX2-NEXT: vmovq %r8, %xmm15 +; AVX2-NEXT: vmovq %r10, %xmm0 +; AVX2-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm1 # 8-byte Folded Reload +; AVX2-NEXT: # xmm1 = mem[0],zero +; AVX2-NEXT: vmovq %r12, %xmm2 +; AVX2-NEXT: vmovq %r11, %xmm3 +; AVX2-NEXT: vmovq %r14, %xmm4 +; AVX2-NEXT: vmovq %r9, %xmm5 +; AVX2-NEXT: vmovq %rdx, %xmm6 +; AVX2-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm7 # 8-byte Folded Reload +; AVX2-NEXT: # xmm7 = mem[0],zero +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm9[0],xmm8[0] +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm10[0] +; AVX2-NEXT: 
vinserti128 $1, %xmm8, %ymm9, %ymm8 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm13[0],xmm12[0] +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm10 = xmm15[0],xmm14[0] +; AVX2-NEXT: vinserti128 $1, %xmm9, %ymm10, %ymm9 +; AVX2-NEXT: vpshufd {{.*#+}} ymm8 = ymm8[0,2,2,3,4,6,6,7] +; AVX2-NEXT: vpermq {{.*#+}} ymm8 = ymm8[0,2,2,3] +; AVX2-NEXT: vpshufd {{.*#+}} ymm9 = ymm9[0,2,2,3,4,6,6,7] +; AVX2-NEXT: vpermq {{.*#+}} ymm9 = ymm9[0,2,2,3] +; AVX2-NEXT: vinserti128 $1, %xmm9, %ymm8, %ymm8 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0] +; AVX2-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX2-NEXT: vmovdqa {{.*#+}} ymm1 = [0,1,4,5,8,9,12,13,8,9,12,13,12,13,14,15,16,17,20,21,24,25,28,29,24,25,28,29,28,29,30,31] +; AVX2-NEXT: vpshufb %ymm1, %ymm8, %ymm2 +; AVX2-NEXT: vpermq {{.*#+}} ymm2 = ymm2[0,2,2,3] +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm5[0],xmm4[0] +; AVX2-NEXT: vmovdqa {{.*#+}} xmm4 = <0,2,4,6,8,10,12,14,u,u,u,u,u,u,u,u> +; AVX2-NEXT: vpshufb %xmm4, %xmm2, %xmm2 +; AVX2-NEXT: vpshufd {{.*#+}} ymm0 = ymm0[0,2,2,3,4,6,6,7] +; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm5 = xmm7[0],xmm6[0] +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm5, %ymm3 +; AVX2-NEXT: vpshufd {{.*#+}} ymm3 = ymm3[0,2,2,3,4,6,6,7] +; AVX2-NEXT: vpermq {{.*#+}} ymm3 = ymm3[0,2,2,3] +; AVX2-NEXT: vinserti128 $1, %xmm3, %ymm0, %ymm0 +; AVX2-NEXT: vpshufb %ymm1, %ymm0, %ymm0 +; AVX2-NEXT: vpermq {{.*#+}} ymm0 = ymm0[0,2,2,3] +; AVX2-NEXT: vpshufb %xmm4, %xmm0, %xmm0 +; AVX2-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm0[0],xmm2[0] +; AVX2-NEXT: vmovdqu %xmm0, (%rax) +; AVX2-NEXT: addq $16, %rsp +; AVX2-NEXT: popq %rbx +; AVX2-NEXT: popq %r12 +; AVX2-NEXT: popq %r13 +; AVX2-NEXT: popq %r14 +; AVX2-NEXT: popq %r15 +; AVX2-NEXT: popq %rbp +; AVX2-NEXT: vzeroupper +; AVX2-NEXT: retq +; +; AVX512-LABEL: not_avg_v16i8_wide_constants: +; AVX512: # %bb.0: +; AVX512-NEXT: pushq %rbp +; AVX512-NEXT: pushq %r15 +; AVX512-NEXT: pushq %r14 +; AVX512-NEXT: pushq %r13 +; AVX512-NEXT: pushq %r12 +; AVX512-NEXT: pushq %rbx +; AVX512-NEXT: subq $24, %rsp +; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm0 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX512-NEXT: vpextrq $1, %xmm3, %rcx +; AVX512-NEXT: vmovq %xmm3, %rax +; AVX512-NEXT: vpextrq $1, %xmm2, %rbx +; AVX512-NEXT: vmovq %xmm2, %rbp +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpextrq $1, %xmm2, %rdi +; AVX512-NEXT: vmovq %xmm2, %r8 +; AVX512-NEXT: vpextrq $1, %xmm1, %r13 +; AVX512-NEXT: vmovq %xmm1, %r12 +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm0 +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero,xmm0[4],zero,xmm0[5],zero,xmm0[6],zero,xmm0[7],zero +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpextrq $1, %xmm2, 
%r15 +; AVX512-NEXT: vmovq %xmm2, %r14 +; AVX512-NEXT: vpextrq $1, %xmm1, %rdx +; AVX512-NEXT: vmovq %xmm1, %r9 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm0 = xmm0[0],zero,xmm0[1],zero,xmm0[2],zero,xmm0[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm0, %xmm1 +; AVX512-NEXT: vpextrq $1, %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX512-NEXT: vmovq %xmm1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX512-NEXT: vpmovzxbw {{.*#+}} ymm1 = mem[0],zero,mem[1],zero,mem[2],zero,mem[3],zero,mem[4],zero,mem[5],zero,mem[6],zero,mem[7],zero,mem[8],zero,mem[9],zero,mem[10],zero,mem[11],zero,mem[12],zero,mem[13],zero,mem[14],zero,mem[15],zero +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm2 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm3 = xmm3[0],zero,xmm3[1],zero,xmm3[2],zero,xmm3[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm3, %xmm4 +; AVX512-NEXT: vpextrq $1, %xmm4, %rsi +; AVX512-NEXT: addq %rcx, %rsi +; AVX512-NEXT: vmovq %xmm4, %rcx +; AVX512-NEXT: addq %rax, %rcx +; AVX512-NEXT: vpextrq $1, %xmm3, %rax +; AVX512-NEXT: addq %rbx, %rax +; AVX512-NEXT: movq %rax, %rbx +; AVX512-NEXT: vmovq %xmm3, %rax +; AVX512-NEXT: addq %rbp, %rax +; AVX512-NEXT: movq %rax, %r10 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX512-NEXT: vpextrq $1, %xmm3, %rax +; AVX512-NEXT: addq %rdi, %rax +; AVX512-NEXT: movq %rax, %rdi +; AVX512-NEXT: vmovq %xmm3, %rax +; AVX512-NEXT: addq %r8, %rax +; AVX512-NEXT: movq %rax, %r8 +; AVX512-NEXT: vpextrq $1, %xmm2, %rbp +; AVX512-NEXT: addq %r13, %rbp +; AVX512-NEXT: vmovq %xmm2, %r11 +; AVX512-NEXT: addq %r12, %r11 +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm1 +; AVX512-NEXT: vpmovzxwd {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero,xmm1[4],zero,xmm1[5],zero,xmm1[6],zero,xmm1[7],zero +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm2 = xmm2[0],zero,xmm2[1],zero,xmm2[2],zero,xmm2[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm2, %xmm3 +; AVX512-NEXT: vpextrq $1, %xmm3, %rax +; AVX512-NEXT: addq %r15, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: vmovq %xmm3, %rax +; AVX512-NEXT: addq %r14, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: vpextrq $1, %xmm2, %rax +; AVX512-NEXT: addq %rdx, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: vmovq %xmm2, %rax +; AVX512-NEXT: addq %r9, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: vpmovzxdq {{.*#+}} ymm1 = xmm1[0],zero,xmm1[1],zero,xmm1[2],zero,xmm1[3],zero +; AVX512-NEXT: vextracti128 $1, %ymm1, %xmm2 +; AVX512-NEXT: vpextrq $1, %xmm2, %rax +; AVX512-NEXT: addq -{{[0-9]+}}(%rsp), %rax # 8-byte Folded Reload +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: vmovq %xmm2, %r14 +; AVX512-NEXT: addq -{{[0-9]+}}(%rsp), %r14 # 8-byte Folded Reload +; AVX512-NEXT: vpextrq $1, %xmm0, %rax +; AVX512-NEXT: vpextrq $1, %xmm1, %r9 +; AVX512-NEXT: addq %rax, %r9 +; AVX512-NEXT: vmovq %xmm0, %rax +; AVX512-NEXT: vmovq %xmm1, %rdx +; AVX512-NEXT: addq %rax, %rdx +; AVX512-NEXT: addq $-1, %rsi +; AVX512-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: adcq $-1, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rcx +; 
AVX512-NEXT: movq %rcx, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: adcq $-1, %rax +; AVX512-NEXT: movq %rax, {{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rbx +; AVX512-NEXT: movq %rbx, (%rsp) # 8-byte Spill +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: adcq $-1, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: addq $-1, %r10 +; AVX512-NEXT: movq %r10, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: adcq $-1, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rdi +; AVX512-NEXT: movq %rdi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: adcq $-1, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: addq $-1, %r8 +; AVX512-NEXT: movq %r8, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: adcq $-1, %rax +; AVX512-NEXT: movq %rax, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: addq $-1, %rbp +; AVX512-NEXT: movq %rbp, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movl $0, %r13d +; AVX512-NEXT: adcq $-1, %r13 +; AVX512-NEXT: addq $-1, %r11 +; AVX512-NEXT: movq %r11, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movl $0, %r15d +; AVX512-NEXT: adcq $-1, %r15 +; AVX512-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX512-NEXT: movl $0, %eax +; AVX512-NEXT: adcq $-1, %rax +; AVX512-NEXT: movq %rax, %rsi +; AVX512-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX512-NEXT: movl $0, %r12d +; AVX512-NEXT: adcq $-1, %r12 +; AVX512-NEXT: addq $-1, -{{[0-9]+}}(%rsp) # 8-byte Folded Spill +; AVX512-NEXT: movl $0, %ebx +; AVX512-NEXT: adcq $-1, %rbx +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; AVX512-NEXT: addq $-1, %rbp +; AVX512-NEXT: movl $0, %r11d +; AVX512-NEXT: adcq $-1, %r11 +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: addq $-1, %rax +; AVX512-NEXT: movl $0, %r10d +; AVX512-NEXT: adcq $-1, %r10 +; AVX512-NEXT: addq $-1, %r14 +; AVX512-NEXT: movl $0, %r8d +; AVX512-NEXT: adcq $-1, %r8 +; AVX512-NEXT: addq $-1, %r9 +; AVX512-NEXT: movl $0, %edi +; AVX512-NEXT: adcq $-1, %rdi +; AVX512-NEXT: addq $-1, %rdx +; AVX512-NEXT: movl $0, %ecx +; AVX512-NEXT: adcq $-1, %rcx +; AVX512-NEXT: shldq $63, %rdx, %rcx +; AVX512-NEXT: movq %rcx, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: shldq $63, %r9, %rdi +; AVX512-NEXT: shldq $63, %r14, %r8 +; AVX512-NEXT: shldq $63, %rax, %r10 +; AVX512-NEXT: shldq $63, %rbp, %r11 +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: shldq $63, %rdx, %rbx +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: shldq $63, %rdx, %r12 +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: shldq $63, %rdx, %rsi +; AVX512-NEXT: movq %rsi, -{{[0-9]+}}(%rsp) # 8-byte Spill +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: shldq $63, %rax, %r15 +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: shldq $63, %rax, %r13 +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rsi # 8-byte Reload +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: shldq $63, %rax, %rsi +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rcx # 8-byte Reload +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: shldq $63, %rax, %rcx +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rax # 8-byte Reload +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte 
Reload +; AVX512-NEXT: shldq $63, %rdx, %rax +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %r14 # 8-byte Reload +; AVX512-NEXT: movq (%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: shldq $63, %rdx, %r14 +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %r9 # 8-byte Reload +; AVX512-NEXT: movq {{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: shldq $63, %rdx, %r9 +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rdx # 8-byte Reload +; AVX512-NEXT: movq -{{[0-9]+}}(%rsp), %rbp # 8-byte Reload +; AVX512-NEXT: shldq $63, %rdx, %rbp +; AVX512-NEXT: vmovq %rbp, %xmm8 +; AVX512-NEXT: vmovq %r9, %xmm9 +; AVX512-NEXT: vmovq %r14, %xmm10 +; AVX512-NEXT: vmovq %rax, %xmm11 +; AVX512-NEXT: vmovq %rcx, %xmm12 +; AVX512-NEXT: vmovq %rsi, %xmm13 +; AVX512-NEXT: vmovq %r13, %xmm14 +; AVX512-NEXT: vmovq %r15, %xmm15 +; AVX512-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm0 # 8-byte Folded Reload +; AVX512-NEXT: # xmm0 = mem[0],zero +; AVX512-NEXT: vmovq %r12, %xmm1 +; AVX512-NEXT: vmovq %rbx, %xmm2 +; AVX512-NEXT: vmovq %r11, %xmm3 +; AVX512-NEXT: vmovq %r10, %xmm4 +; AVX512-NEXT: vmovq %r8, %xmm5 +; AVX512-NEXT: vmovq %rdi, %xmm6 +; AVX512-NEXT: vmovq -{{[0-9]+}}(%rsp), %xmm7 # 8-byte Folded Reload +; AVX512-NEXT: # xmm7 = mem[0],zero +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm8 = xmm9[0],xmm8[0] +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm11[0],xmm10[0] +; AVX512-NEXT: vinserti128 $1, %xmm8, %ymm9, %ymm8 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm9 = xmm13[0],xmm12[0] +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm10 = xmm15[0],xmm14[0] +; AVX512-NEXT: vinserti128 $1, %xmm9, %ymm10, %ymm9 +; AVX512-NEXT: vinserti64x4 $1, %ymm8, %zmm9, %zmm8 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm0 = xmm1[0],xmm0[0] +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm3[0],xmm2[0] +; AVX512-NEXT: vinserti128 $1, %xmm0, %ymm1, %ymm0 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm1 = xmm5[0],xmm4[0] +; AVX512-NEXT: vpmovqd %zmm8, %ymm2 +; AVX512-NEXT: vpunpcklqdq {{.*#+}} xmm3 = xmm7[0],xmm6[0] +; AVX512-NEXT: vinserti128 $1, %xmm1, %ymm3, %ymm1 +; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm1, %zmm0 +; AVX512-NEXT: vpmovqd %zmm0, %ymm0 +; AVX512-NEXT: vinserti64x4 $1, %ymm0, %zmm2, %zmm0 +; AVX512-NEXT: vpmovdb %zmm0, (%rax) +; AVX512-NEXT: addq $24, %rsp +; AVX512-NEXT: popq %rbx +; AVX512-NEXT: popq %r12 +; AVX512-NEXT: popq %r13 +; AVX512-NEXT: popq %r14 +; AVX512-NEXT: popq %r15 +; AVX512-NEXT: popq %rbp +; AVX512-NEXT: vzeroupper +; AVX512-NEXT: retq + %1 = load <16 x i8>, <16 x i8>* %a + %2 = load <16 x i8>, <16 x i8>* %b + %3 = zext <16 x i8> %1 to <16 x i128> + %4 = zext <16 x i8> %2 to <16 x i128> + %5 = add nuw nsw <16 x i128> %3, <i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1, i128 -1> + %6 = add nuw nsw <16 x i128> %5, %4 + %7 = lshr <16 x i128> %6, <i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1, i128 1> + %8 = trunc <16 x i128> %7 to <16 x i8> + store <16 x i8> %8, <16 x i8>* undef, align 4 + ret void +} -- 2.11.0
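
Background on the fix, as an illustrative sketch that is not part of the committed patch: the range check in detectAVGPattern() previously went through getZExtValue(), which asserts as soon as the constant needs more than 64 bits, and the new <16 x i128> test feeds it a splat of i128 -1, exactly such a constant. Comparing through the APInt ult/ugt overloads keeps the check at the constant's own width. The helper names below are hypothetical; only llvm/ADT/APInt.h is assumed (compile against LLVM headers, link LLVMSupport).

// Sketch of the before/after range check, using only llvm::APInt.
#include "llvm/ADT/APInt.h"
#include <cstdint>

using llvm::APInt;

// Pre-patch shape: collapse the constant to 64 bits first.
// APInt::getZExtValue() asserts when the value has more than 64 active
// bits, e.g. a 128-bit all-ones element.
static bool inRangeUnsafe(const APInt &Val, uint64_t Min, uint64_t Max) {
  uint64_t V = Val.getZExtValue(); // asserts for wide constants
  return V >= Min && V <= Max;
}

// Post-patch shape: compare at the constant's full bit width.
static bool inRangeSafe(const APInt &Val, uint64_t Min, uint64_t Max) {
  return !(Val.ult(Min) || Val.ugt(Max));
}

int main() {
  APInt WideOnes = APInt::getMaxValue(128); // i128 -1 viewed as unsigned
  // inRangeUnsafe(WideOnes, 1, 1) would assert in a +Asserts build.
  return inRangeSafe(WideOnes, 1, 1) ? 0 : 1; // out of range, so returns 1
}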