if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
// Offset the demanded elts by the subvector index.
uint64_t Idx = SubIdx->getZExtValue();
- APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
+ APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
computeKnownBits(Src, Known, DemandedSrc, Depth + 1);
} else {
computeKnownBits(Src, Known, Depth + 1);
if (SubIdx && SubIdx->getAPIntValue().ule(NumSrcElts - NumElts)) {
// Offset the demanded elts by the subvector index.
uint64_t Idx = SubIdx->getZExtValue();
- APInt DemandedSrc = DemandedElts.zext(NumSrcElts).shl(Idx);
+ APInt DemandedSrc = DemandedElts.zextOrSelf(NumSrcElts).shl(Idx);
return ComputeNumSignBits(Src, DemandedSrc, Depth + 1);
}
return ComputeNumSignBits(Src, Depth + 1);
--- /dev/null
+; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
+; RUN: llc -march=amdgcn -mtriple=amdgcn-- -verify-machineinstrs < %s | FileCheck %s
+
+; Test for ICE in SelectionDAG::computeKnownBits when visiting EXTRACT_SUBVECTOR
+; with DemandedElts already as wide as the source vector.
+
+define <3 x i32> @quux() #0 {
+; CHECK-LABEL: quux:
+; CHECK: ; %bb.0: ; %bb
+; CHECK-NEXT: s_waitcnt vmcnt(0) expcnt(0) lgkmcnt(0)
+; CHECK-NEXT: v_mov_b32_e32 v0, 0
+; CHECK-NEXT: v_mov_b32_e32 v1, 1
+; CHECK-NEXT: v_mov_b32_e32 v2, 1
+; CHECK-NEXT: s_setpc_b64 s[30:31]
+bb:
+ ; The <4 x i8> -> <3 x i8> shuffle of a constant vector lowers to an
+ ; EXTRACT_SUBVECTOR; querying known bits on it is the path the
+ ; DemandedElts width fix targets (see the file header NOTE above).
+ %tmp = shufflevector <4 x i8> <i8 1, i8 2, i8 3, i8 4>, <4 x i8> undef, <3 x i32> <i32 0, i32 1, i32 2>
+ ; Extract/zext each lane so the whole <3 x i8> result is demanded when
+ ; the i32 vector below is built.
+ %tmp1 = extractelement <3 x i8> %tmp, i64 0
+ %tmp2 = zext i8 %tmp1 to i32
+ %tmp3 = insertelement <3 x i32> undef, i32 %tmp2, i32 0
+ %tmp4 = extractelement <3 x i8> %tmp, i64 1
+ %tmp5 = zext i8 %tmp4 to i32
+ %tmp6 = insertelement <3 x i32> %tmp3, i32 %tmp5, i32 1
+ %tmp7 = extractelement <3 x i8> %tmp, i64 2
+ %tmp8 = zext i8 %tmp7 to i32
+ %tmp9 = insertelement <3 x i32> %tmp6, i32 %tmp8, i32 2
+ ; The shift forces a computeKnownBits query over the chain; the known
+ ; constants <1,2,3> lshr 1 fold to <0,1,1>, matching the CHECK lines.
+ %tmp10 = lshr <3 x i32> %tmp9, <i32 1, i32 1, i32 1>
+ ret <3 x i32> %tmp10
+}
+
+attributes #0 = { noinline optnone }