setOperationAction(ISD::FP_TO_SINT, MVT::v2i64, Legal);
setOperationAction(ISD::FP_TO_UINT, MVT::v2i64, Legal);
- // Vector operation legalization checks the result type of
- // SIGN_EXTEND_INREG, overall legalization checks the inner type.
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i64, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i32, Legal);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i16, Custom);
- setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::v2i8, Custom);
-
setOperationAction(ISD::FNEG, MVT::v4f32, Legal);
setOperationAction(ISD::FNEG, MVT::v2f64, Legal);
setOperationAction(ISD::FABS, MVT::v4f32, Legal);
return DAG.getMemIntrinsicNode(NodeTy, dl, Tys, Ops, MemVT, MMO);
}
-SDValue PPCTargetLowering::LowerSIGN_EXTEND_INREG(SDValue Op,
- SelectionDAG &DAG) const {
- SDLoc dl(Op);
- // For v2i64 (VSX), we can pattern match the v2i32 case (using fp <-> int
- // instructions), but for smaller types, we need to first extend up to v2i32
- // before going any further.
- if (Op.getValueType() == MVT::v2i64) {
- EVT ExtVT = cast<VTSDNode>(Op.getOperand(1))->getVT();
- if (ExtVT != MVT::v2i32) {
- Op = DAG.getNode(ISD::BITCAST, dl, MVT::v4i32, Op.getOperand(0));
- Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v4i32, Op,
- DAG.getValueType(EVT::getVectorVT(*DAG.getContext(),
- ExtVT.getVectorElementType(), 4)));
- Op = DAG.getNode(ISD::BITCAST, dl, MVT::v2i64, Op);
- Op = DAG.getNode(ISD::SIGN_EXTEND_INREG, dl, MVT::v2i64, Op,
- DAG.getValueType(MVT::v2i32));
- }
-
- return Op;
- }
-
- return SDValue();
-}
-
SDValue PPCTargetLowering::LowerSCALAR_TO_VECTOR(SDValue Op,
SelectionDAG &DAG) const {
SDLoc dl(Op);
case ISD::VECTOR_SHUFFLE: return LowerVECTOR_SHUFFLE(Op, DAG);
case ISD::INTRINSIC_WO_CHAIN: return LowerINTRINSIC_WO_CHAIN(Op, DAG);
case ISD::SCALAR_TO_VECTOR: return LowerSCALAR_TO_VECTOR(Op, DAG);
- case ISD::SIGN_EXTEND_INREG: return LowerSIGN_EXTEND_INREG(Op, DAG);
case ISD::EXTRACT_VECTOR_ELT: return LowerEXTRACT_VECTOR_ELT(Op, DAG);
case ISD::INSERT_VECTOR_ELT: return LowerINSERT_VECTOR_ELT(Op, DAG);
case ISD::MUL: return LowerMUL(Op, DAG);
def : Pat<(v1i128 (bitconvert v2f64:$A)),
(COPY_TO_REGCLASS $A, VRRC)>;
-// sign extension patterns
-// To extend "in place" from v2i32 to v2i64, we have input data like:
-// | undef | i32 | undef | i32 |
-// but xvcvsxwdp expects the input in big-Endian format:
-// | i32 | undef | i32 | undef |
-// so we need to shift everything to the left by one i32 (word) before
-// the conversion.
-def : Pat<(sext_inreg v2i64:$C, v2i32),
- (XVCVDPSXDS (XVCVSXWDP (XXSLDWI $C, $C, 1)))>;
-def : Pat<(v2f64 (sint_to_fp (sext_inreg v2i64:$C, v2i32))),
- (XVCVSXWDP (XXSLDWI $C, $C, 1))>;
-
def : Pat<(v2f64 (PPCsvec2fp v4i32:$C, 0)),
(v2f64 (XVCVSXWDP (v2i64 (XXMRGHW $C, $C))))>;
def : Pat<(v2f64 (PPCsvec2fp v4i32:$C, 1)),
--- /dev/null
+; RUN: llc -verify-machineinstrs < %s -mtriple=powerpc64le-unknown-unknown -mcpu=pwr8 | FileCheck %s
+
+; Ensure this does not crash
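+; Each shl/ashr pair below shifts by the element width minus one, i.e. it
+; sign-extends the low bit of every element (SIGN_EXTEND_INREG from i1).
+; With the custom v2i8/v2i16 lowering removed, these nodes go through the
+; generic expansion; the CHECK lines verify the splat / shift-left /
+; algebraic-shift-right sequence. The literal 2 in the CHECKs is v2, the
+; first vector argument register.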
+
+define <2 x i8> @test1(<2 x i8> %a) {
+ %1 = shl nuw <2 x i8> %a, <i8 7, i8 7>
+ %2 = ashr exact <2 x i8> %1, <i8 7, i8 7>
+ ret <2 x i8> %2
+}
+
+; CHECK-LABEL: @test1
+; CHECK: vspltisb [[REG1:[0-9]+]], 7
+; CHECK: vslb [[REG2:[0-9]+]], 2, [[REG1]]
+; CHECK: vsrab [[REG3:[0-9]+]], [[REG2]], [[REG1]]
+
+define <2 x i16> @test2(<2 x i16> %a) {
+ %1 = shl nuw <2 x i16> %a, <i16 15, i16 15>
+ %2 = ashr exact <2 x i16> %1, <i16 15, i16 15>
+ ret <2 x i16> %2
+}
+
+; CHECK-LABEL: @test2
+; CHECK: vspltish [[REG1:[0-9]+]], 15
+; CHECK: vslh [[REG2:[0-9]+]], 2, [[REG1]]
+; CHECK: vsrah [[REG3:[0-9]+]], [[REG2]], [[REG1]]
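
The removed VSX patterns above also covered sign-extension "in place" from
v2i32 to v2i64 via XXSLDWI + XVCVSXWDP + XVCVDPSXDS. As an illustrative sketch
only (not part of this patch's test file; the function name is made up and no
post-change instruction sequence is asserted), IR of the following shape is the
usual source of that sext_inreg node and could be run through the same llc
invocation:

; Illustrative only: sign-extend the low 32 bits of each i64 element,
; i.e. sext_inreg of v2i64 from v2i32.
define <2 x i64> @sext_in_reg_v2i64(<2 x i64> %a) {
  %shl = shl <2 x i64> %a, <i64 32, i64 32>
  %sext = ashr <2 x i64> %shl, <i64 32, i64 32>
  ret <2 x i64> %sext
}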