    }
  }
+  // fold (A-C1)-C2 -> A-(C1+C2)
+  if (N0.getOpcode() == ISD::SUB &&
+      isConstantOrConstantVector(N1, /* NoOpaques */ true) &&
+      isConstantOrConstantVector(N0.getOperand(1), /* NoOpaques */ true)) {
+    SDValue NewC = DAG.FoldConstantArithmetic(
+        ISD::ADD, DL, VT, N0.getOperand(1).getNode(), N1.getNode());
+    assert(NewC && "Constant folding failed");
+    return DAG.getNode(ISD::SUB, DL, VT, N0.getOperand(0), NewC);
+  }
+
  // fold ((A+(B+or-C))-B) -> A+or-C
  if (N0.getOpcode() == ISD::ADD &&
      (N0.getOperand(1).getOpcode() == ISD::SUB ||
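
The new combine uses the identity (A - C1) - C2 == A - (C1 + C2) to merge the
two constants at compile time; FoldConstantArithmetic cannot fail here because
both operands were just checked to be non-opaque constants, which is what the
assert documents. A minimal IR-level sketch of the intended transform (the
function name and constants are illustrative, not taken from the patch's
tests):

; Before the combine: two dependent subtractions, each with its own constant.
define i32 @example(i32 %a) {
  %t0 = sub i32 %a, 8
  %t1 = sub i32 %t0, 2
  ret i32 %t1
}
; With the fold, instruction selection emits a single subtraction of the
; pre-added constant: %a - 10. The tests below check exactly this effect.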
define <4 x i32> @sub_const_sub_const(<4 x i32> %arg) {
; CHECK-LABEL: sub_const_sub_const:
; CHECK: // %bb.0:
-; CHECK-NEXT: movi v1.4s, #8
-; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: movi v1.4s, #2
+; CHECK-NEXT: movi v1.4s, #10
; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
; CHECK-NEXT: ret
  %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
  %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %t1
}
; CHECK-NEXT: .cfi_def_cfa_offset 32
; CHECK-NEXT: .cfi_offset w30, -16
; CHECK-NEXT: movi v1.4s, #8
-; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
; CHECK-NEXT: str q0, [sp] // 16-byte Folded Spill
+; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
; CHECK-NEXT: bl use
; CHECK-NEXT: ldr q1, [sp] // 16-byte Folded Reload
; CHECK-NEXT: ldr x30, [sp, #16] // 8-byte Folded Reload
-; CHECK-NEXT: movi v0.4s, #2
+; CHECK-NEXT: movi v0.4s, #10
; CHECK-NEXT: sub v0.4s, v1.4s, v0.4s
; CHECK-NEXT: add sp, sp, #32 // =32
; CHECK-NEXT: ret
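
The tests with a call to use keep the intermediate difference alive, so the
fold cannot simply delete it; instead the combine rewrites only the final
value. A sketch of the likely test shape (the IR body is not part of this
excerpt; the function name and declaration below are assumed for
illustration):

declare void @use(<4 x i32>)

define <4 x i32> @extrause_sketch(<4 x i32> %arg) {
  %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
  call void @use(<4 x i32> %t0)
  %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %t1
}

%t1 becomes %arg - <10,10,10,10>, which is why the spill above moves before
the first sub (the original %arg is now the value needed after the call) and
the reloaded register is subtracted by the merged constant #10 instead of #2.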
; CHECK: // %bb.0:
; CHECK-NEXT: adrp x8, .LCPI14_0
; CHECK-NEXT: ldr q1, [x8, :lo12:.LCPI14_0]
-; CHECK-NEXT: adrp x8, .LCPI14_1
-; CHECK-NEXT: ldr q2, [x8, :lo12:.LCPI14_1]
; CHECK-NEXT: sub v0.4s, v0.4s, v1.4s
-; CHECK-NEXT: sub v0.4s, v0.4s, v2.4s
; CHECK-NEXT: ret
  %t0 = sub <4 x i32> %arg, <i32 21, i32 undef, i32 8, i32 8>
  %t1 = sub <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
  ret <4 x i32> %t1
}
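
In the non-splat test the constants contain undef lanes, and
FoldConstantArithmetic combines them lane by lane. Working the addition
through (assuming the usual rule that adding an undef lane yields undef):

;   <i32 21, i32 undef, i32     8, i32  8>
; + <i32  2, i32     3, i32 undef, i32  2>
; = <i32 23, i32 undef, i32 undef, i32 10>

The sum is still a single build vector, so the generated code above needs
only one constant-pool load (.LCPI14_0) and one sub instead of two of each.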
; X86-LABEL: sub_const_sub_const:
; X86: # %bb.0:
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: sub_const_sub_const:
; X64: # %bb.0:
; X64-NEXT: psubd {{.*}}(%rip), %xmm0
-; X64-NEXT: psubd {{.*}}(%rip), %xmm0
; X64-NEXT: retq
  %t0 = sub <4 x i32> %arg, <i32 8, i32 8, i32 8, i32 8>
  %t1 = sub <4 x i32> %t0, <i32 2, i32 2, i32 2, i32 2>
  ret <4 x i32> %t1
}
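
On x86 the win is a constant-pool load rather than an immediate: SSE integer
subtraction has no vector-immediate form, so each psubd reads its constant
from memory. After the fold the pool holds the single pre-added vector:

; <i32 8, i32 8, i32 8, i32 8> + <i32 2, i32 2, i32 2, i32 2>
;   = <i32 10, i32 10, i32 10, i32 10>

which matches the one remaining psubd in both the X86 and X64 check lines.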
; X86: # %bb.0:
; X86-NEXT: subl $28, %esp
; X86-NEXT: .cfi_def_cfa_offset 32
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
; X86-NEXT: movdqu %xmm0, (%esp) # 16-byte Spill
+; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
; X86-NEXT: calll use
; X86-NEXT: movdqu (%esp), %xmm0 # 16-byte Reload
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
; X64: # %bb.0:
; X64-NEXT: subq $24, %rsp
; X64-NEXT: .cfi_def_cfa_offset 32
-; X64-NEXT: psubd {{.*}}(%rip), %xmm0
; X64-NEXT: movdqa %xmm0, (%rsp) # 16-byte Spill
+; X64-NEXT: psubd {{.*}}(%rip), %xmm0
; X64-NEXT: callq use
; X64-NEXT: movdqa (%rsp), %xmm0 # 16-byte Reload
; X64-NEXT: psubd {{.*}}(%rip), %xmm0
; X86-LABEL: sub_const_sub_const_nonsplat:
; X86: # %bb.0:
; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
-; X86-NEXT: psubd {{\.LCPI.*}}, %xmm0
; X86-NEXT: retl
;
; X64-LABEL: sub_const_sub_const_nonsplat:
; X64: # %bb.0:
; X64-NEXT: psubd {{.*}}(%rip), %xmm0
-; X64-NEXT: psubd {{.*}}(%rip), %xmm0
; X64-NEXT: retq
  %t0 = sub <4 x i32> %arg, <i32 21, i32 undef, i32 8, i32 8>
  %t1 = sub <4 x i32> %t0, <i32 2, i32 3, i32 undef, i32 2>
  ret <4 x i32> %t1
}