From: Owen Anderson
Date: Wed, 27 Oct 2010 23:56:39 +0000 (+0000)
Subject: Add correct NEON encodings for vext, vtrn, vuzp, and vzip.
X-Git-Tag: android-x86-6.0-r1~1003^2~829
X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=3eff4af42ddbac97807348eadd292ff5f276fe69;p=android-x86%2Fexternal-llvm.git

Add correct NEON encodings for vext, vtrn, vuzp, and vzip.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@117512 91177308-0d34-0410-b5e6-96231b3b80d8
---

diff --git a/lib/Target/ARM/ARMInstrNEON.td b/lib/Target/ARM/ARMInstrNEON.td
index 5208c2502ca..5b02bc8f4a0 100644
--- a/lib/Target/ARM/ARMInstrNEON.td
+++ b/lib/Target/ARM/ARMInstrNEON.td
@@ -3871,14 +3871,20 @@ class VEXTd
         (ins DPR:$lhs, DPR:$rhs, i32imm:$index), NVExtFrm,
         IIC_VEXTD, OpcodeStr, Dt, "$dst, $lhs, $rhs, $index", "",
         [(set DPR:$dst, (Ty (NEONvext (Ty DPR:$lhs),
-                                      (Ty DPR:$rhs), imm:$index)))]>;
+                                      (Ty DPR:$rhs), imm:$index)))]> {
+  bits<4> index;
+  let Inst{11-8} = index{3-0};
+}
 
 class VEXTq
   : N3V<0,1,0b11,{?,?,?,?},1,0, (outs QPR:$dst),
         (ins QPR:$lhs, QPR:$rhs, i32imm:$index), NVExtFrm,
         IIC_VEXTQ, OpcodeStr, Dt, "$dst, $lhs, $rhs, $index", "",
         [(set QPR:$dst, (Ty (NEONvext (Ty QPR:$lhs),
-                                      (Ty QPR:$rhs), imm:$index)))]>;
+                                      (Ty QPR:$rhs), imm:$index)))]> {
+  bits<4> index;
+  let Inst{11-8} = index{3-0};
+}
 
 def VEXTd8 : VEXTd<"vext", "8", v8i8>;
 def VEXTd16 : VEXTd<"vext", "16", v4i16>;
diff --git a/test/MC/ARM/neon-shuffle-encoding.ll b/test/MC/ARM/neon-shuffle-encoding.ll
new file mode 100644
index 00000000000..273a5288599
--- /dev/null
+++ b/test/MC/ARM/neon-shuffle-encoding.ll
@@ -0,0 +1,213 @@
+; RUN: llc -show-mc-encoding -march=arm -mcpu=cortex-a8 -mattr=+neon < %s | FileCheck %s
+
+define <8 x i8> @test_vextd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+; CHECK: vext.8 d16, d17, d16, #3 @ encoding: [0xa0,0x03,0xf1,0xf2]
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10>
+ ret <8 x i8> %tmp3
+}
+
+define <8 x i8> @test_vextRd(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+; CHECK: vext.8 d16, d17, d16, #5 @ encoding: [0xa0,0x05,0xf1,0xf2]
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 13, i32 14, i32 15, i32 0, i32 1, i32 2, i32 3, i32 4>
+ ret <8 x i8> %tmp3
+}
+
+define <16 x i8> @test_vextq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+; CHECK: vext.8 q8, q8, q9, #3 @ encoding: [0xe2,0x03,0xf0,0xf2]
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 3, i32 4, i32 5, i32 6, i32 7, i32 8, i32 9, i32 10, i32 11, i32 12, i32 13, i32 14, i32 15, i32 16, i32 17, i32 18>
+ ret <16 x i8> %tmp3
+}
+
+define <16 x i8> @test_vextRq(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+; CHECK: vext.8 q8, q8, q9, #7 @ encoding: [0xe2,0x07,0xf0,0xf2]
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 23, i32 24, i32 25, i32 26, i32 27, i32 28, i32 29, i32 30, i32 31, i32 0, i32 1, i32 2, i32 3, i32 4, i32 5, i32 6>
+ ret <16 x i8> %tmp3
+}
+
+define <4 x i16> @test_vextd16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+; CHECK: vext.16 d16, d17, d16, #3 @ encoding: [0xa0,0x03,0xf1,0xf2]
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i16> %tmp3
+}
+
+define <4 x i32> @test_vextq32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+; CHECK: vext.32 q8, q8, q9, #3 @ encoding: [0xe2,0x03,0xf0,0xf2]
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 3, i32 4, i32 5, i32 6>
+ ret <4 x i32> %tmp3
+}
+
+define <8 x i8> @vtrni8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+; CHECK: vtrn.8 d17, d16 @ encoding: [0xa0,0x10,0xf2,0xf3]
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @vtrni16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+; CHECK: vtrn.16 d17, d16 @ encoding: [0xa0,0x10,0xf6,0xf3]
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+define <2 x i32> @vtrni32(<2 x i32>* %A, <2 x i32>* %B) nounwind {
+ %tmp1 = load <2 x i32>* %A
+ %tmp2 = load <2 x i32>* %B
+; CHECK: vtrn.32 d17, d16 @ encoding: [0xa0,0x10,0xfa,0xf3]
+ %tmp3 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 0, i32 2>
+ %tmp4 = shufflevector <2 x i32> %tmp1, <2 x i32> %tmp2, <2 x i32> <i32 1, i32 3>
+ %tmp5 = add <2 x i32> %tmp3, %tmp4
+ ret <2 x i32> %tmp5
+}
+
+define <16 x i8> @vtrnQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+; CHECK: vadd.i8 q8, q9, q8 @ encoding: [0xe0,0x08,0x42,0xf2]
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 2, i32 18, i32 4, i32 20, i32 6, i32 22, i32 8, i32 24, i32 10, i32 26, i32 12, i32 28, i32 14, i32 30>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 17, i32 3, i32 19, i32 5, i32 21, i32 7, i32 23, i32 9, i32 25, i32 11, i32 27, i32 13, i32 29, i32 15, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @vtrnQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+; CHECK: vadd.i16 q8, q9, q8 @ encoding: [0xe0,0x08,0x52,0xf2]
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 2, i32 10, i32 4, i32 12, i32 6, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 9, i32 3, i32 11, i32 5, i32 13, i32 7, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @vtrnQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+; CHECK: vtrn.32 q9, q8 @ encoding: [0xe0,0x20,0xfa,0xf3]
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 2, i32 6>
+ %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 5, i32 3, i32 7>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <8 x i8> @vuzpi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+; CHECK: vuzp.8 d17, d16 @ encoding: [0x20,0x11,0xf2,0xf3]
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @vuzpi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+; CHECK: vuzp.16 d17, d16 @ encoding: [0x20,0x11,0xf6,0xf3]
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+; VUZP.32 is equivalent to VTRN.32 for 64-bit vectors.
+
+define <16 x i8> @vuzpQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+; CHECK: vuzp.8 q9, q8 @ encoding: [0x60,0x21,0xf2,0xf3]
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14, i32 16, i32 18, i32 20, i32 22, i32 24, i32 26, i32 28, i32 30>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15, i32 17, i32 19, i32 21, i32 23, i32 25, i32 27, i32 29, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @vuzpQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+; CHECK: vuzp.16 q9, q8 @ encoding: [0x60,0x21,0xf6,0xf3]
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 2, i32 4, i32 6, i32 8, i32 10, i32 12, i32 14>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 1, i32 3, i32 5, i32 7, i32 9, i32 11, i32 13, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @vuzpQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+; CHECK: vuzp.32 q9, q8 @ encoding: [0x60,0x21,0xfa,0xf3]
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 2, i32 4, i32 6>
+ %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 1, i32 3, i32 5, i32 7>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
+
+define <8 x i8> @vzipi8(<8 x i8>* %A, <8 x i8>* %B) nounwind {
+ %tmp1 = load <8 x i8>* %A
+ %tmp2 = load <8 x i8>* %B
+; CHECK: vzip.8 d17, d16 @ encoding: [0xa0,0x11,0xf2,0xf3]
+ %tmp3 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %tmp4 = shufflevector <8 x i8> %tmp1, <8 x i8> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %tmp5 = add <8 x i8> %tmp3, %tmp4
+ ret <8 x i8> %tmp5
+}
+
+define <4 x i16> @vzipi16(<4 x i16>* %A, <4 x i16>* %B) nounwind {
+ %tmp1 = load <4 x i16>* %A
+ %tmp2 = load <4 x i16>* %B
+; CHECK: vzip.16 d17, d16 @ encoding: [0xa0,0x11,0xf6,0xf3]
+ %tmp3 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %tmp4 = shufflevector <4 x i16> %tmp1, <4 x i16> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %tmp5 = add <4 x i16> %tmp3, %tmp4
+ ret <4 x i16> %tmp5
+}
+
+; VZIP.32 is equivalent to VTRN.32 for 64-bit vectors.
+
+define <16 x i8> @vzipQi8(<16 x i8>* %A, <16 x i8>* %B) nounwind {
+ %tmp1 = load <16 x i8>* %A
+ %tmp2 = load <16 x i8>* %B
+; CHECK: vzip.8 q9, q8 @ encoding: [0xe0,0x21,0xf2,0xf3]
+ %tmp3 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 0, i32 16, i32 1, i32 17, i32 2, i32 18, i32 3, i32 19, i32 4, i32 20, i32 5, i32 21, i32 6, i32 22, i32 7, i32 23>
+ %tmp4 = shufflevector <16 x i8> %tmp1, <16 x i8> %tmp2, <16 x i32> <i32 8, i32 24, i32 9, i32 25, i32 10, i32 26, i32 11, i32 27, i32 12, i32 28, i32 13, i32 29, i32 14, i32 30, i32 15, i32 31>
+ %tmp5 = add <16 x i8> %tmp3, %tmp4
+ ret <16 x i8> %tmp5
+}
+
+define <8 x i16> @vzipQi16(<8 x i16>* %A, <8 x i16>* %B) nounwind {
+ %tmp1 = load <8 x i16>* %A
+ %tmp2 = load <8 x i16>* %B
+; CHECK: vzip.16 q9, q8 @ encoding: [0xe0,0x21,0xf6,0xf3]
+ %tmp3 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 0, i32 8, i32 1, i32 9, i32 2, i32 10, i32 3, i32 11>
+ %tmp4 = shufflevector <8 x i16> %tmp1, <8 x i16> %tmp2, <8 x i32> <i32 4, i32 12, i32 5, i32 13, i32 6, i32 14, i32 7, i32 15>
+ %tmp5 = add <8 x i16> %tmp3, %tmp4
+ ret <8 x i16> %tmp5
+}
+
+define <4 x i32> @vzipQi32(<4 x i32>* %A, <4 x i32>* %B) nounwind {
+ %tmp1 = load <4 x i32>* %A
+ %tmp2 = load <4 x i32>* %B
+; CHECK: vzip.32 q9, q8 @ encoding: [0xe0,0x21,0xfa,0xf3]
+ %tmp3 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 0, i32 4, i32 1, i32 5>
+ %tmp4 = shufflevector <4 x i32> %tmp1, <4 x i32> %tmp2, <4 x i32> <i32 2, i32 6, i32 3, i32 7>
+ %tmp5 = add <4 x i32> %tmp3, %tmp4
+ ret <4 x i32> %tmp5
+}
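
As a quick cross-check of the new `let Inst{11-8} = index{3-0}` field, the VEXT immediate can be read back out of the byte lists in the CHECK lines above: the bytes are written least-significant first, so bits 11-8 of the assembled word should equal the #imm operand. The snippet below is a minimal sketch and not part of the commit; the helper name vext_imm_from_encoding is invented for illustration, and the expected values are taken directly from the CHECK lines.

# Minimal sketch (Python): confirm that bits 11-8 of each VEXT encoding
# hold the immediate, matching the TableGen change above.
def vext_imm_from_encoding(enc_bytes):
    # Bytes in the CHECK lines are listed least-significant first.
    word = sum(b << (8 * i) for i, b in enumerate(enc_bytes))
    return (word >> 8) & 0xF  # Inst{11-8}

assert vext_imm_from_encoding([0xa0, 0x03, 0xf1, 0xf2]) == 3  # vext.8 d16, d17, d16, #3
assert vext_imm_from_encoding([0xa0, 0x05, 0xf1, 0xf2]) == 5  # vext.8 d16, d17, d16, #5
assert vext_imm_from_encoding([0xe2, 0x07, 0xf0, 0xf2]) == 7  # vext.8 q8, q8, q9, #7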