From 6e1d1470c2c637ecce5e5ac1da358ecabdc1f7e3 Mon Sep 17 00:00:00 2001
From: Scott Michel
Date: Mon, 16 Mar 2009 18:47:25 +0000
Subject: [PATCH] CellSPU: Incorporate Tilmann's 128-bit operation patch.

Evidently, it gets the llvm-gcc bootstrap a bit further along.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@67048 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/CellSPU/SPUISelLowering.cpp |  34 +++----
 lib/Target/CellSPU/SPUInstrInfo.cpp    |   7 +-
 lib/Target/CellSPU/SPUInstrInfo.td     | 171 ++++++++++++++++++---------
 test/CodeGen/CellSPU/trunc.ll          | 120 +++++++++++++----
 4 files changed, 183 insertions(+), 149 deletions(-)

diff --git a/lib/Target/CellSPU/SPUISelLowering.cpp b/lib/Target/CellSPU/SPUISelLowering.cpp
index 3a8fb5dcab6..e840ee0747f 100644
--- a/lib/Target/CellSPU/SPUISelLowering.cpp
+++ b/lib/Target/CellSPU/SPUISelLowering.cpp
@@ -779,7 +779,7 @@ LowerSTORE(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
 
     result = DAG.getNode(SPUISD::SHUFB, dl, vecVT,
                          vectorizeOp, alignLoadVec,
-                         DAG.getNode(ISD::BIT_CONVERT, dl, 
+                         DAG.getNode(ISD::BIT_CONVERT, dl,
                                      MVT::v4i32, insertEltOp));
 
     result = DAG.getStore(the_chain, dl, result, basePtr,
@@ -1035,7 +1035,7 @@ LowerFORMAL_ARGUMENTS(SDValue Op, SelectionDAG &DAG, int &VarArgsFrameIndex)
      ArgOffset += StackSlotSize;
    }
 
    if (!MemOps.empty())
-     Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other, 
+     Root = DAG.getNode(ISD::TokenFactor, dl, MVT::Other,
                         &MemOps[0], MemOps.size());
  }
@@ -1156,7 +1156,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
   // and flag operands which copy the outgoing args into the appropriate regs.
   SDValue InFlag;
   for (unsigned i = 0, e = RegsToPass.size(); i != e; ++i) {
-    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first, 
+    Chain = DAG.getCopyToReg(Chain, dl, RegsToPass[i].first,
                             RegsToPass[i].second, InFlag);
     InFlag = Chain.getValue(1);
   }
@@ -1239,7 +1239,7 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
   case MVT::Other: break;
   case MVT::i32:
     if (TheCall->getValueType(1) == MVT::i32) {
-      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4, 
+      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R4,
                                  MVT::i32, InFlag).getValue(1);
       ResultVals[0] = Chain.getValue(0);
       Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
@@ -1247,20 +1247,20 @@ LowerCALL(SDValue Op, SelectionDAG &DAG, const SPUSubtarget *ST) {
       ResultVals[1] = Chain.getValue(0);
       NumResults = 2;
     } else {
-      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32, 
+      Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i32,
                                  InFlag).getValue(1);
       ResultVals[0] = Chain.getValue(0);
       NumResults = 1;
     }
     break;
   case MVT::i64:
-    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i64, 
+    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i64,
                                InFlag).getValue(1);
     ResultVals[0] = Chain.getValue(0);
     NumResults = 1;
     break;
   case MVT::i128:
-    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i128, 
+    Chain = DAG.getCopyFromReg(Chain, dl, SPU::R3, MVT::i128,
                                InFlag).getValue(1);
     ResultVals[0] = Chain.getValue(0);
     NumResults = 1;
@@ -1860,7 +1860,7 @@ static SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) {
                       DAG.getTargetConstant(V2Elt, MVT::i32),
                       DAG.getCopyFromReg(InitTempReg, dl, VReg, PtrVT));
     // Use shuffle mask in SHUFB synthetic instruction:
-    return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1, 
+    return DAG.getNode(SPUISD::SHUFB, dl, V1.getValueType(), V2, V1,
                        ShufMaskOp);
   } else if (rotate) {
     int rotamt = (MaxElts - V0Elt) * EltVT.getSizeInBits()/8;
@@ -2401,7 +2401,7 @@ static SDValue LowerCTPOP(SDValue Op, SelectionDAG &DAG) {
    SDValue Comp1 =
      DAG.getNode(ISD::SRL, dl, MVT::i32,
-                 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32), 
+                 DAG.getCopyFromReg(CNTB_rescopy, dl, CNTB_reg, MVT::i32),
                  Shift1);
 
    SDValue Sum1 =
@@ -2588,7 +2588,7 @@ static SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG,
  }
 
  SDValue result =
-    DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect, 
+    DAG.getSetCC(dl, ccResultVT, lhsSelect, rhsSelect,
                 (ISD::CondCode) compareOp);
 
  if ((CC->get() & 0x8) == 0) {
@@ -2649,14 +2649,15 @@ static SDValue LowerSELECT_CC(SDValue Op, SelectionDAG &DAG,
 //! Custom lower ISD::TRUNCATE
 static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
 {
+  // Type to truncate to
   MVT VT = Op.getValueType();
   MVT::SimpleValueType simpleVT = VT.getSimpleVT();
   MVT VecVT = MVT::getVectorVT(VT, (128 / VT.getSizeInBits()));
   DebugLoc dl = Op.getDebugLoc();
 
+  // Type to truncate from
   SDValue Op0 = Op.getOperand(0);
   MVT Op0VT = Op0.getValueType();
-  MVT Op0VecVT = MVT::getVectorVT(Op0VT, (128 / Op0VT.getSizeInBits()));
 
   if (Op0VT.getSimpleVT() == MVT::i128 && simpleVT == MVT::i64) {
     // Create shuffle mask, least significant doubleword of quadword
@@ -2669,15 +2670,10 @@ static SDValue LowerTRUNCATE(SDValue Op, SelectionDAG &DAG)
                            DAG.getConstant(maskHigh, MVT::i32),
                            DAG.getConstant(maskLow, MVT::i32));
 
+    SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, VecVT,
+                                       Op0, Op0, shufMask);
 
-    SDValue PromoteScalar = DAG.getNode(SPUISD::PREFSLOT2VEC, dl,
-                                        Op0VecVT, Op0);
-
-    SDValue truncShuffle = DAG.getNode(SPUISD::SHUFB, dl, Op0VecVT,
-                                       PromoteScalar, PromoteScalar, shufMask);
-
-    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT,
-                       DAG.getNode(ISD::BIT_CONVERT, dl, VecVT, truncShuffle));
+    return DAG.getNode(SPUISD::VEC2PREFSLOT, dl, VT, truncShuffle);
   }
 
   return SDValue();             // Leave the truncate unmolested
diff --git a/lib/Target/CellSPU/SPUInstrInfo.cpp b/lib/Target/CellSPU/SPUInstrInfo.cpp
index 032eb147e13..8623f30112e 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.cpp
+++ b/lib/Target/CellSPU/SPUInstrInfo.cpp
@@ -133,18 +133,22 @@ SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
   case SPU::ORi128_f32:
   case SPU::ORi128_r16:
   case SPU::ORi128_r8:
+*/
   case SPU::ORi128_vec:
+/*
   case SPU::ORr64_i128:
   case SPU::ORf64_i128:
   case SPU::ORr32_i128:
   case SPU::ORf32_i128:
   case SPU::ORr16_i128:
   case SPU::ORr8_i128:
-  case SPU::ORvec_i128: */
+  case SPU::ORvec_i128: /*
   case SPU::ORr16_r32:
   case SPU::ORr8_r32:
+  case SPU::ORf32_r32:
+  case SPU::ORr32_f32:
   case SPU::ORr32_r16:
   case SPU::ORr32_r8:
   case SPU::ORr16_r64:
@@ -177,6 +181,7 @@ SPUInstrInfo::isMoveInstr(const MachineInstr& MI,
   case SPU::ORr16:
   case SPU::ORr32:
   case SPU::ORr64:
+  case SPU::ORr128:
   case SPU::ORf32:
   case SPU::ORf64:
     assert(MI.getNumOperands() == 3 &&
diff --git a/lib/Target/CellSPU/SPUInstrInfo.td b/lib/Target/CellSPU/SPUInstrInfo.td
index 250a57dd128..e1d9228ef93 100644
--- a/lib/Target/CellSPU/SPUInstrInfo.td
+++ b/lib/Target/CellSPU/SPUInstrInfo.td
@@ -1429,9 +1429,6 @@ class ORExtractElt:
 /* class ORCvtRegGPRC<RegisterClass rclass>:
      ORCvtForm<(outs GPRC:$rT), (ins rclass:$rA)>; */
 
-/* class ORCvtVecGPRC:
-     ORCvtForm<(outs GPRC:$rT), (ins VECREG:$rA)>; */
-
 /* class ORCvtGPRCReg<RegisterClass rclass>:
      ORCvtForm<(outs rclass:$rT), (ins GPRC:$rA)>; */
 
@@ -1447,8 +1444,11 @@ class ORCvtFormR64Reg<RegisterClass rclass, list<dag> pattern = [ ]>:
 class ORCvtFormRegR64<RegisterClass rclass, list<dag> pattern = [ ]>:
   ORCvtForm<(outs R64C:$rT), (ins rclass:$rA), pattern>;
 
-/* class ORCvtGPRCVec:
-     ORCvtForm<(outs VECREG:$rT), (ins GPRC:$rA)>; */
+class ORCvtGPRCVec:
+  ORCvtForm<(outs VECREG:$rT), (ins GPRC:$rA)>;
+
+class ORCvtVecGPRC:
+  ORCvtForm<(outs GPRC:$rT), (ins VECREG:$rA)>;
 
 multiclass BitwiseOr
 {
@@ -1496,8 +1496,14 @@ multiclass BitwiseOr
   def f32_v4f32: ORExtractElt<R32FP>;
   def f64_v2f64: ORExtractElt<R64FP>;
 
+  // Conversion from vector to GPRC
+  def i128_vec: ORCvtVecGPRC;
+
+  // Conversion from GPRC to vector
+  def vec_i128: ORCvtGPRCVec;
+
   /*
-  // Conversion from GPRC to register
+  // Conversion from register to GPRC
   def i128_r64: ORCvtRegGPRC<R64C>;
   def i128_f64: ORCvtRegGPRC<R64FP>;
   def i128_r32: ORCvtRegGPRC<R32C>;
@@ -1505,36 +1511,30 @@ multiclass BitwiseOr
   def i128_r16: ORCvtRegGPRC<R16C>;
   def i128_r8:  ORCvtRegGPRC<R8C>;
 
-  // Conversion from GPRC to vector
-  def i128_vec: ORCvtVecGPRC;
-
-  // Conversion from register to GPRC
+  // Conversion from GPRC to register
   def r64_i128: ORCvtGPRCReg<R64C>;
   def f64_i128: ORCvtGPRCReg<R64FP>;
   def r32_i128: ORCvtGPRCReg<R32C>;
   def f32_i128: ORCvtGPRCReg<R32FP>;
   def r16_i128: ORCvtGPRCReg<R16C>;
   def r8_i128:  ORCvtGPRCReg<R8C>;
-
-  // Conversion from vector to GPRC
-  def vec_i128: ORCvtGPRCVec;
   */
 
   /*
   // Conversion from register to R32C:
-  def r16_r32: ORCvtFormRegR32<R16C>;
-  def r8_r32:  ORCvtFormRegR32<R8C>;
+  def r32_r16: ORCvtFormRegR32<R16C>;
+  def r32_r8:  ORCvtFormRegR32<R8C>;
 
   // Conversion from R32C to register
   def r32_r16: ORCvtFormR32Reg<R16C>;
   def r32_r8:  ORCvtFormR32Reg<R8C>;
   */
 
-  // Conversion to register from R64C:
+  // Conversion from R64C to register:
   def r32_r64: ORCvtFormR64Reg<R32C>;
   // def r16_r64: ORCvtFormR64Reg<R16C>;
   // def r8_r64:  ORCvtFormR64Reg<R8C>;
 
-  // Conversion to R64C from register
+  // Conversion to R64C from register:
   def r64_r32: ORCvtFormRegR64<R32C>;
   // def r64_r16: ORCvtFormRegR64<R16C>;
   // def r64_r8:  ORCvtFormRegR64<R8C>;
@@ -1659,6 +1659,7 @@ multiclass BitwiseOrComplement
   def v4i32: ORCVecInst<v4i32>;
   def v2i64: ORCVecInst<v2i64>;
 
+  def r128: ORCRegInst<GPRC>;
   def r64:  ORCRegInst<R64C>;
   def r32:  ORCRegInst<R32C>;
   def r16:  ORCRegInst<R16C>;
@@ -1840,72 +1841,64 @@ def XORIr32:
     [(set R32C:$rT, (xor R32C:$rA, i32ImmSExt10:$val))]>;
 
 // NAND:
-def NANDv16i8:
-    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-           "nand\t$rT, $rA, $rB", IntegerOp,
-           [(set (v16i8 VECREG:$rT), (vnot (and (v16i8 VECREG:$rA),
-                                                (v16i8 VECREG:$rB))))]>;
-
-def NANDv8i16:
-    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-           "nand\t$rT, $rA, $rB", IntegerOp,
-           [(set (v8i16 VECREG:$rT), (vnot (and (v8i16 VECREG:$rA),
-                                                (v8i16 VECREG:$rB))))]>;
-def NANDv4i32:
-    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-           "nand\t$rT, $rA, $rB", IntegerOp,
-           [(set (v4i32 VECREG:$rT), (vnot (and (v4i32 VECREG:$rA),
-                                                (v4i32 VECREG:$rB))))]>;
+class NANDInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b10010011000, OOL, IOL, "nand\t$rT, $rA, $rB",
+           IntegerOp, pattern>;
 
-def NANDr32:
-    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
-           "nand\t$rT, $rA, $rB", IntegerOp,
-           [(set R32C:$rT, (not (and R32C:$rA, R32C:$rB)))]>;
+class NANDVecInst<ValueType vectype>:
+    NANDInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+             [(set (vectype VECREG:$rT), (vnot (and (vectype VECREG:$rA),
+                                                    (vectype VECREG:$rB))))]>;
+class NANDRegInst<RegisterClass rclass>:
+    NANDInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+             [(set rclass:$rT, (not (and rclass:$rA, rclass:$rB)))]>;
 
-def NANDr16:
-    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
-           "nand\t$rT, $rA, $rB", IntegerOp,
-           [(set R16C:$rT, (not (and R16C:$rA, R16C:$rB)))]>;
+multiclass BitwiseNand
+{
+  def v16i8: NANDVecInst<v16i8>;
+  def v8i16: NANDVecInst<v8i16>;
+  def v4i32: NANDVecInst<v4i32>;
+  def v2i64: NANDVecInst<v2i64>;
+
+  def r128: NANDRegInst<GPRC>;
+  def r64:  NANDRegInst<R64C>;
+  def r32:  NANDRegInst<R32C>;
+  def r16:  NANDRegInst<R16C>;
+  def r8:   NANDRegInst<R8C>;
+}
 
-def NANDr8:
-    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
-           "nand\t$rT, $rA, $rB", IntegerOp,
-           [(set R8C:$rT, (not (and R8C:$rA, R8C:$rB)))]>;
+defm NAND : BitwiseNand;
 
 // NOR:
-def NORv16i8:
-    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-           "nor\t$rT, $rA, $rB", IntegerOp,
-           [(set (v16i8 VECREG:$rT), (vnot (or (v16i8 VECREG:$rA),
-                                               (v16i8 VECREG:$rB))))]>;
-
-def NORv8i16:
-    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-           "nor\t$rT, $rA, $rB", IntegerOp,
-           [(set (v8i16 VECREG:$rT), (vnot (or (v8i16 VECREG:$rA),
-                                               (v8i16 VECREG:$rB))))]>;
-def NORv4i32:
-    RRForm<0b10010010000, (outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
-           "nor\t$rT, $rA, $rB", IntegerOp,
-           [(set (v4i32 VECREG:$rT), (vnot (or (v4i32 VECREG:$rA),
-                                               (v4i32 VECREG:$rB))))]>;
+class NORInst<dag OOL, dag IOL, list<dag> pattern>:
+    RRForm<0b10010010000, OOL, IOL, "nor\t$rT, $rA, $rB",
+           IntegerOp, pattern>;
 
-def NORr32:
-    RRForm<0b10010010000, (outs R32C:$rT), (ins R32C:$rA, R32C:$rB),
-           "nor\t$rT, $rA, $rB", IntegerOp,
-           [(set R32C:$rT, (not (or R32C:$rA, R32C:$rB)))]>;
+class NORVecInst<ValueType vectype>:
+    NORInst<(outs VECREG:$rT), (ins VECREG:$rA, VECREG:$rB),
+            [(set (vectype VECREG:$rT), (vnot (or (vectype VECREG:$rA),
+                                                  (vectype VECREG:$rB))))]>;
+class NORRegInst<RegisterClass rclass>:
+    NORInst<(outs rclass:$rT), (ins rclass:$rA, rclass:$rB),
+            [(set rclass:$rT, (not (or rclass:$rA, rclass:$rB)))]>;
 
-def NORr16:
-    RRForm<0b10010010000, (outs R16C:$rT), (ins R16C:$rA, R16C:$rB),
-           "nor\t$rT, $rA, $rB", IntegerOp,
-           [(set R16C:$rT, (not (or R16C:$rA, R16C:$rB)))]>;
+multiclass BitwiseNor
+{
+  def v16i8: NORVecInst<v16i8>;
+  def v8i16: NORVecInst<v8i16>;
+  def v4i32: NORVecInst<v4i32>;
+  def v2i64: NORVecInst<v2i64>;
+
+  def r128: NORRegInst<GPRC>;
+  def r64:  NORRegInst<R64C>;
+  def r32:  NORRegInst<R32C>;
+  def r16:  NORRegInst<R16C>;
+  def r8:   NORRegInst<R8C>;
+}
 
-def NORr8:
-    RRForm<0b10010010000, (outs R8C:$rT), (ins R8C:$rA, R8C:$rB),
-           "nor\t$rT, $rA, $rB", IntegerOp,
-           [(set R8C:$rT, (not (or R8C:$rA, R8C:$rB)))]>;
+defm NOR : BitwiseNor;
 
 // Select bits:
 class SELBInst<dag OOL, dag IOL, list<dag> pattern>:
@@ -4361,8 +4354,7 @@ def LNOP : SPUInstr<(outs), (ins), "lnop", LoadNOP> {
 //===----------------------------------------------------------------------===//
 // Bit conversions (type conversions between vector/packed types)
-// NOTE: Promotions are handled using the XS* instructions. Truncation
-// is not handled.
+// NOTE: Promotions are handled using the XS* instructions.
 //===----------------------------------------------------------------------===//
 def : Pat<(v16i8 (bitconvert (v8i16 VECREG:$src))), (v16i8 VECREG:$src)>;
 def : Pat<(v16i8 (bitconvert (v4i32 VECREG:$src))), (v16i8 VECREG:$src)>;
@@ -4400,8 +4392,31 @@ def : Pat<(v2f64 (bitconvert (v4i32 VECREG:$src))), (v2f64 VECREG:$src)>;
 def : Pat<(v2f64 (bitconvert (v2i64 VECREG:$src))), (v2f64 VECREG:$src)>;
 def : Pat<(v2f64 (bitconvert (v2f64 VECREG:$src))), (v2f64 VECREG:$src)>;
 
-def : Pat<(f32 (bitconvert (i32 R32C:$src))), (f32 R32FP:$src)>;
-def : Pat<(f64 (bitconvert (i64 R64C:$src))), (f64 R64FP:$src)>;
+def : Pat<(i128 (bitconvert (v16i8 VECREG:$src))),
+          (ORi128_vec VECREG:$src)>;
+def : Pat<(i128 (bitconvert (v8i16 VECREG:$src))),
+          (ORi128_vec VECREG:$src)>;
+def : Pat<(i128 (bitconvert (v4i32 VECREG:$src))),
+          (ORi128_vec VECREG:$src)>;
+def : Pat<(i128 (bitconvert (v2i64 VECREG:$src))),
+          (ORi128_vec VECREG:$src)>;
+def : Pat<(i128 (bitconvert (v4f32 VECREG:$src))),
+          (ORi128_vec VECREG:$src)>;
+def : Pat<(i128 (bitconvert (v2f64 VECREG:$src))),
+          (ORi128_vec VECREG:$src)>;
+
+def : Pat<(v16i8 (bitconvert (i128 GPRC:$src))),
+          (v16i8 (ORvec_i128 GPRC:$src))>;
+def : Pat<(v8i16 (bitconvert (i128 GPRC:$src))),
+          (v8i16 (ORvec_i128 GPRC:$src))>;
+def : Pat<(v4i32 (bitconvert (i128 GPRC:$src))),
+          (v4i32 (ORvec_i128 GPRC:$src))>;
+def : Pat<(v2i64 (bitconvert (i128 GPRC:$src))),
+          (v2i64 (ORvec_i128 GPRC:$src))>;
+def : Pat<(v4f32 (bitconvert (i128 GPRC:$src))),
+          (v4f32 (ORvec_i128 GPRC:$src))>;
+def : Pat<(v2f64 (bitconvert (i128 GPRC:$src))),
+          (v2f64 (ORvec_i128 GPRC:$src))>;
 
 //===----------------------------------------------------------------------===//
 // Instruction patterns:
@@ -4598,7 +4613,7 @@ def : Pat<(add (SPUhi tjumptable:$in, 0), (SPUlo tjumptable:$in, 0)),
 def : Pat<(add (SPUhi tconstpool:$in, 0), (SPUlo tconstpool:$in, 0)),
           (IOHLlo (ILHUhi tconstpool:$in), tconstpool:$in)>;
 
-// Instrinsics:
+// Intrinsics:
 include "CellSDKIntrinsics.td"
 
 // Various math operator instruction sequences
 include "SPUMathInstr.td"
diff --git a/test/CodeGen/CellSPU/trunc.ll b/test/CodeGen/CellSPU/trunc.ll
index 1c6e1f6cb14..db22564f434 100644
--- a/test/CodeGen/CellSPU/trunc.ll
+++ b/test/CodeGen/CellSPU/trunc.ll
@@ -1,76 +1,94 @@
 ; RUN: llvm-as -o - %s | llc -march=cellspu > %t1.s
-; RUN: grep shufb %t1.s | count 10
+; RUN: grep shufb %t1.s | count 19
 ; RUN: grep {ilhu.*1799} %t1.s | count 1
-; RUN: grep {ilhu.*771} %t1.s | count 1
+; RUN: grep {ilhu.*771} %t1.s | count 2
 ; RUN: grep {ilhu.*1543} %t1.s | count 1
 ; RUN: grep {ilhu.*1029} %t1.s | count 1
-; RUN: grep {ilhu.*515} %t1.s | count 2
-; RUN: grep xsbh %t1.s | count 2
-; RUN: grep sfh %t1.s | count 1
+; RUN: grep {ilhu.*515} %t1.s | count 1
+; RUN: grep {ilhu.*3855} %t1.s | count 1
+; RUN: grep {ilhu.*3599} %t1.s | count 1
+; RUN: grep {ilhu.*3085} %t1.s | count 1
+; RUN: grep {iohl.*3855} %t1.s | count 1
+; RUN: grep {iohl.*3599} %t1.s | count 2
+; RUN: grep {iohl.*1543} %t1.s | count 2
+; RUN: grep {iohl.*771} %t1.s | count 2
+; RUN: grep {iohl.*515} %t1.s | count 1
+; RUN: grep {iohl.*1799} %t1.s | count 1
+; RUN: grep lqa %t1.s | count 1
+; RUN: grep cbd %t1.s | count 4
+; RUN: grep chd %t1.s | count 3
+; RUN: grep cwd %t1.s | count 1
+; RUN: grep cdd %t1.s | count 1
 
 ; ModuleID = 'trunc.bc'
 target datalayout = "E-p:32:32:128-i1:8:128-i8:8:128-i16:16:128-i32:32:128-i64:32:128-f32:32:128-f64:64:128-v64:64:64-v128:128:128-a0:0:128-s0:128:128"
 target triple = "spu"
 
-; codegen for i128 arguments is not implemented yet on CellSPU
-; once this changes uncomment the functions below
-; and update the expected results accordingly
+define <16 x i8> @trunc_i128_i8(i128 %u, <16 x i8> %v) {
+entry:
+  %0 = trunc i128 %u to i8
+  %tmp1 = insertelement <16 x i8> %v, i8 %0, i32 15
+  ret <16 x i8> %tmp1
+}
 
-;define i8 @trunc_i128_i8(i128 %u) nounwind readnone {
-;entry:
-;  %0 = trunc i128 %u to i8
-;  ret i8 %0
-;}
-;define i16 @trunc_i128_i16(i128 %u) nounwind readnone {
-;entry:
-;  %0 = trunc i128 %u to i16
-;  ret i16 %0
-;}
-;define i32 @trunc_i128_i32(i128 %u) nounwind readnone {
-;entry:
-;  %0 = trunc i128 %u to i32
-;  ret i32 %0
-;}
-;define i64 @trunc_i128_i64(i128 %u) nounwind readnone {
-;entry:
-;  %0 = trunc i128 %u to i64
-;  ret i64 %0
-;}
+define <8 x i16> @trunc_i128_i16(i128 %u, <8 x i16> %v) {
+entry:
+  %0 = trunc i128 %u to i16
+  %tmp1 = insertelement <8 x i16> %v, i16 %0, i32 8
+  ret <8 x i16> %tmp1
+}
 
-define <16 x i8> @trunc_i64_i8(i64 %u, <16 x i8> %v) nounwind readnone {
+define <4 x i32> @trunc_i128_i32(i128 %u, <4 x i32> %v) {
 entry:
-  %0 = trunc i64 %u to i8
-  %tmp1 = insertelement <16 x i8> %v, i8 %0, i32 10
-  ret <16 x i8> %tmp1
+  %0 = trunc i128 %u to i32
+  %tmp1 = insertelement <4 x i32> %v, i32 %0, i32 2
+  ret <4 x i32> %tmp1
 }
-define <8 x i16> @trunc_i64_i16(i64 %u, <8 x i16> %v) nounwind readnone {
+
+define <2 x i64> @trunc_i128_i64(i128 %u, <2 x i64> %v) {
 entry:
-  %0 = trunc i64 %u to i16
-  %tmp1 = insertelement <8 x i16> %v, i16 %0, i32 6
+  %0 = trunc i128 %u to i64
+  %tmp1 = insertelement <2 x i64> %v, i64 %0, i32 1
+  ret <2 x i64> %tmp1
 }
-define i32 @trunc_i64_i32(i64 %u, i32 %v) nounwind readnone {
+
+define <16 x i8> @trunc_i64_i8(i64 %u, <16 x i8> %v) {
 entry:
-  %0 = trunc i64 %u to i32
-  ret i32 %0
+  %0 = trunc i64 %u to i8
+  %tmp1 = insertelement <16 x i8> %v, i8 %0, i32 10
+  ret <16 x i8> %tmp1
 }
-define i8 @trunc_i32_i8(i32 %u, i8 %v) nounwind readnone {
+
+define <8 x i16> @trunc_i64_i16(i64 %u, <8 x i16> %v) {
 entry:
-  %0 = trunc i32 %u to i8
-  %1 = sub i8 %0, %v
-  ret i8 %1
+  %0 = trunc i64 %u to i16
+  %tmp1 = insertelement <8 x i16> %v, i16 %0, i32 6
+  ret <8 x i16> %tmp1
 }
-define <8 x i16> @trunc_i32_i16(i32 %u, <8 x i16> %v) nounwind readnone {
+
+define i32 @trunc_i64_i32(i64 %u) {
+entry:
+  %0 = trunc i64 %u to i32
+  ret i32 %0
+}
+
+define <16 x i8> @trunc_i32_i8(i32 %u, <16 x i8> %v) {
+entry:
+  %0 = trunc i32 %u to i8
+  %tmp1 = insertelement <16 x i8> %v, i8 %0, i32 7
+  ret <16 x i8> %tmp1
+}
+
+define <8 x i16> @trunc_i32_i16(i32 %u, <8 x i16> %v) {
 entry:
-  %0 = trunc i32 %u to i16
-  %tmp1 = insertelement <8 x i16> %v, i16 %0, i32 3
-  ret <8 x i16> %tmp1
+  %0 = trunc i32 %u to i16
+  %tmp1 = insertelement <8 x i16> %v, i16 %0, i32 3
+  ret <8 x i16> %tmp1
 }
-define <16 x i8> @trunc_i16_i8(i16 %u, <16 x i8> %v) nounwind readnone {
+
+define <16 x i8> @trunc_i16_i8(i16 %u, <16 x i8> %v) {
 entry:
-  %0 = trunc i16 %u to i8
-  %tmp1 = insertelement <16 x i8> %v, i8 %0, i32 5
-  ret <16 x i8> %tmp1
+  %0 = trunc i16 %u to i8
+  %tmp1 = insertelement <16 x i8> %v, i8 %0, i32 5
+  ret <16 x i8> %tmp1
 }
-- 
2.11.0
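
Postscript for readers tracing the LowerTRUNCATE change: the rewritten lowering
feeds the i128 source directly to SPUISD::SHUFB (as both shuffle inputs) instead
of first promoting it through PREFSLOT2VEC and bit-converting the result. A
minimal scalar function exercising that path, hypothetical and not part of this
commit or its test file, written in the style of trunc.ll above:

; Illustrative sketch only; assumes the same datalayout/triple as trunc.ll.
; Per the new LowerTRUNCATE, this should lower to a shufb (plus shuffle-mask
; setup) that routes the least-significant doubleword of the big-endian
; quadword (bytes 8-15) into the i64 preferred slot, with no intermediate
; prefslot promotion or bitconvert.
define i64 @trunc_i128_i64_scalar(i128 %u) {
entry:
  %0 = trunc i128 %u to i64
  ret i64 %0
}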