From 6eebe31eee3b90e1da13da102151e3dcca615342 Mon Sep 17 00:00:00 2001 From: Krzysztof Parzyszek Date: Fri, 10 Feb 2017 23:46:45 +0000 Subject: [PATCH] [Hexagon] Introduce Hexagon V62 git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@294805 91177308-0d34-0410-b5e6-96231b3b80d8 --- include/llvm/IR/IntrinsicsHexagon.td | 837 ++++- include/llvm/Support/ELF.h | 2 + lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp | 9 +- .../Hexagon/Disassembler/HexagonDisassembler.cpp | 36 +- lib/Target/Hexagon/Hexagon.td | 3 + lib/Target/Hexagon/HexagonDepArch.h | 2 +- lib/Target/Hexagon/HexagonDepArch.td | 2 + lib/Target/Hexagon/HexagonDepITypes.h | 1 + lib/Target/Hexagon/HexagonDepITypes.td | 1 + lib/Target/Hexagon/HexagonDepInstrInfo.td | 3300 ++++++++++++++++++++ lib/Target/Hexagon/HexagonIICHVX.td | 102 + lib/Target/Hexagon/HexagonIICScalar.td | 164 + lib/Target/Hexagon/HexagonMapAsm2IntrinV62.gen.td | 204 ++ lib/Target/Hexagon/HexagonRegisterInfo.cpp | 32 +- lib/Target/Hexagon/HexagonRegisterInfo.td | 80 +- lib/Target/Hexagon/HexagonSchedule.td | 9 + lib/Target/Hexagon/HexagonScheduleV62.td | 129 + lib/Target/Hexagon/HexagonSubtarget.cpp | 1 + lib/Target/Hexagon/HexagonSubtarget.h | 3 + .../Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp | 15 +- test/MC/Hexagon/elf-flags.s | 2 + test/MC/Hexagon/v62_all.s | 552 ++++ test/MC/Hexagon/v62_jumps.s | 13 + test/MC/Hexagon/v62a.s | 19 + test/MC/Hexagon/v62a_regs.s | 44 + 25 files changed, 5479 insertions(+), 83 deletions(-) create mode 100644 lib/Target/Hexagon/HexagonIICHVX.td create mode 100644 lib/Target/Hexagon/HexagonIICScalar.td create mode 100644 lib/Target/Hexagon/HexagonMapAsm2IntrinV62.gen.td create mode 100644 lib/Target/Hexagon/HexagonScheduleV62.td create mode 100644 test/MC/Hexagon/v62_all.s create mode 100644 test/MC/Hexagon/v62_jumps.s create mode 100644 test/MC/Hexagon/v62a.s create mode 100644 test/MC/Hexagon/v62a_regs.s diff --git a/include/llvm/IR/IntrinsicsHexagon.td b/include/llvm/IR/IntrinsicsHexagon.td index 6519f051dee..17586dabf06 100644 --- a/include/llvm/IR/IntrinsicsHexagon.td +++ b/include/llvm/IR/IntrinsicsHexagon.td @@ -5659,22 +5659,6 @@ class Hexagon_v2048v2048v1024v1024i_Intrinsic [IntrNoMem]>; // -// Hexagon_LLiLLiLLi_Intrinsic -// tag : M6_vabsdiffb -class Hexagon_LLiLLiLLi_Intrinsic - : Hexagon_Intrinsic; - -// -// Hexagon_LLii_Intrinsic -// tag : S6_vsplatrbp -class Hexagon_LLii_Intrinsic - : Hexagon_Intrinsic; - -// // BUILTIN_INFO(HEXAGON.S6_rol_i_r,SI_ftype_SISI,2) // tag : S6_rol_i_r def int_hexagon_S6_rol_i_r : @@ -9342,6 +9326,274 @@ Hexagon_v1024v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_oracc">; def int_hexagon_V6_vlutvwh_oracc_128B : Hexagon_v2048v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_oracc_128B">; +/// +/// HexagonV62 intrinsics +/// + +// +// Hexagon_LLiLLiLLi_Intrinsic +// tag : M6_vabsdiffb +class Hexagon_LLiLLiLLi_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_LLii_Intrinsic +// tag : S6_vsplatrbp +class Hexagon_LLii_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v512v512i_Intrinsic +// tag : V6_vlsrb +class Hexagon_V62_v512v512i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v1024i_Intrinsic +// tag : V6_vlsrb_128B +class Hexagon_V62_v1024v1024i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v512v512v512i_Intrinsic +// tag : V6_vasrwuhrndsat +class Hexagon_V62_v512v512v512i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v1024v1024i_Intrinsic +// tag : V6_vasrwuhrndsat_128B +class Hexagon_V62_v1024v1024v1024i_Intrinsic + : Hexagon_Intrinsic; + +// 
+// Hexagon_V62_v512v512v512_Intrinsic +// tag : V6_vrounduwuh +class Hexagon_V62_v512v512v512_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v1024v1024_Intrinsic +// tag : V6_vrounduwuh_128B +class Hexagon_V62_v1024v1024v1024_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v2048v2048v2048_Intrinsic +// tag : V6_vadduwsat_dv_128B +class Hexagon_V62_v2048v2048v2048_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v1024v512v512_Intrinsic +// tag : V6_vaddhw_acc +class Hexagon_V62_v1024v1024v512v512_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v2048v2048v1024v1024_Intrinsic +// tag : V6_vaddhw_acc_128B +class Hexagon_V62_v2048v2048v1024v1024_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v512v512_Intrinsic +// tag : V6_vmpyewuh_64 +class Hexagon_V62_v1024v512v512_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v2048v1024v1024_Intrinsic +// tag : V6_vmpyewuh_64_128B +class Hexagon_V62_v2048v1024v1024_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v2048v2048i_Intrinsic +// tag : V6_vmpauhb_128B +class Hexagon_V62_v2048v2048i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v2048v2048v2048i_Intrinsic +// tag : V6_vmpauhb_acc_128B +class Hexagon_V62_v2048v2048v2048i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v512v64ii_Intrinsic +// tag : V6_vandnqrt +class Hexagon_V62_v512v64ii_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v128ii_Intrinsic +// tag : V6_vandnqrt_128B +class Hexagon_V62_v1024v128ii_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v512v512v64ii_Intrinsic +// tag : V6_vandnqrt_acc +class Hexagon_V62_v512v512v64ii_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v1024v128ii_Intrinsic +// tag : V6_vandnqrt_acc_128B +class Hexagon_V62_v1024v1024v128ii_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v512v64iv512_Intrinsic +// tag : V6_vandvqv +class Hexagon_V62_v512v64iv512_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v128iv1024_Intrinsic +// tag : V6_vandvqv_128B +class Hexagon_V62_v1024v128iv1024_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v64ii_Intrinsic +// tag : V6_pred_scalar2v2 +class Hexagon_V62_v64ii_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v128ii_Intrinsic +// tag : V6_pred_scalar2v2_128B +class Hexagon_V62_v128ii_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v64iv64iv64i_Intrinsic +// tag : V6_shuffeqw +class Hexagon_V62_v64iv64iv64i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v128iv128iv128i_Intrinsic +// tag : V6_shuffeqw_128B +class Hexagon_V62_v128iv128iv128i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v512i_Intrinsic +// tag : V6_lvsplath +class Hexagon_V62_v512i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024i_Intrinsic +// tag : V6_lvsplath_128B +class Hexagon_V62_v1024i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v512v512v512v512i_Intrinsic +// tag : V6_vlutvvb_oracci +class Hexagon_V62_v512v512v512v512i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v1024v1024v1024i_Intrinsic +// tag : V6_vlutvvb_oracci_128B +class Hexagon_V62_v1024v1024v1024v1024i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v512v512i_Intrinsic +// tag : V6_vlutvwhi +class Hexagon_V62_v1024v512v512i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v2048v1024v1024i_Intrinsic +// tag : V6_vlutvwhi_128B +class Hexagon_V62_v2048v1024v1024i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v1024v1024v512v512i_Intrinsic +// tag : V6_vlutvwh_oracci 
+class Hexagon_V62_v1024v1024v512v512i_Intrinsic + : Hexagon_Intrinsic; + +// +// Hexagon_V62_v2048v2048v1024v1024i_Intrinsic +// tag : V6_vlutvwh_oracci_128B +class Hexagon_V62_v2048v2048v1024v1024i_Intrinsic + : Hexagon_Intrinsic; + // // BUILTIN_INFO(HEXAGON.M6_vabsdiffb,DI_ftype_DIDI,2) // tag : M6_vabsdiffb @@ -9355,12 +9607,6 @@ def int_hexagon_M6_vabsdiffub : Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_M6_vabsdiffub">; // -// BUILTIN_INFO(HEXAGON.S6_vsplatrbp,DI_ftype_SI,1) -// tag : S6_vsplatrbp -def int_hexagon_S6_vsplatrbp : -Hexagon_LLii_Intrinsic<"HEXAGON_S6_vsplatrbp">; - -// // BUILTIN_INFO(HEXAGON.S6_vtrunehb_ppp,DI_ftype_DIDI,2) // tag : S6_vtrunehb_ppp def int_hexagon_S6_vtrunehb_ppp : @@ -9371,3 +9617,550 @@ Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_S6_vtrunehb_ppp">; // tag : S6_vtrunohb_ppp def int_hexagon_S6_vtrunohb_ppp : Hexagon_LLiLLiLLi_Intrinsic<"HEXAGON_S6_vtrunohb_ppp">; + +// +// BUILTIN_INFO(HEXAGON.S6_vsplatrbp,DI_ftype_SI,1) +// tag : S6_vsplatrbp +def int_hexagon_S6_vsplatrbp : +Hexagon_LLii_Intrinsic<"HEXAGON_S6_vsplatrbp">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlsrb,VI_ftype_VISI,2) +// tag : V6_vlsrb +def int_hexagon_V6_vlsrb : +Hexagon_V62_v512v512i_Intrinsic<"HEXAGON_V6_vlsrb">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlsrb_128B,VI_ftype_VISI,2) +// tag : V6_vlsrb_128B +def int_hexagon_V6_vlsrb_128B : +Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vlsrb_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vasrwuhrndsat,VI_ftype_VIVISI,3) +// tag : V6_vasrwuhrndsat +def int_hexagon_V6_vasrwuhrndsat : +Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrwuhrndsat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vasrwuhrndsat_128B,VI_ftype_VIVISI,3) +// tag : V6_vasrwuhrndsat_128B +def int_hexagon_V6_vasrwuhrndsat_128B : +Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrwuhrndsat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vasruwuhrndsat,VI_ftype_VIVISI,3) +// tag : V6_vasruwuhrndsat +def int_hexagon_V6_vasruwuhrndsat : +Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasruwuhrndsat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vasruwuhrndsat_128B,VI_ftype_VIVISI,3) +// tag : V6_vasruwuhrndsat_128B +def int_hexagon_V6_vasruwuhrndsat_128B : +Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasruwuhrndsat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vasrhbsat,VI_ftype_VIVISI,3) +// tag : V6_vasrhbsat +def int_hexagon_V6_vasrhbsat : +Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vasrhbsat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vasrhbsat_128B,VI_ftype_VIVISI,3) +// tag : V6_vasrhbsat_128B +def int_hexagon_V6_vasrhbsat_128B : +Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vasrhbsat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vrounduwuh,VI_ftype_VIVI,2) +// tag : V6_vrounduwuh +def int_hexagon_V6_vrounduwuh : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vrounduwuh">; + +// +// BUILTIN_INFO(HEXAGON.V6_vrounduwuh_128B,VI_ftype_VIVI,2) +// tag : V6_vrounduwuh_128B +def int_hexagon_V6_vrounduwuh_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrounduwuh_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vrounduhub,VI_ftype_VIVI,2) +// tag : V6_vrounduhub +def int_hexagon_V6_vrounduhub : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vrounduhub">; + +// +// BUILTIN_INFO(HEXAGON.V6_vrounduhub_128B,VI_ftype_VIVI,2) +// tag : V6_vrounduhub_128B +def int_hexagon_V6_vrounduhub_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vrounduhub_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vadduwsat,VI_ftype_VIVI,2) +// tag : V6_vadduwsat +def int_hexagon_V6_vadduwsat : 
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vadduwsat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vadduwsat_128B,VI_ftype_VIVI,2) +// tag : V6_vadduwsat_128B +def int_hexagon_V6_vadduwsat_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduwsat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vadduwsat_dv,VD_ftype_VDVD,2) +// tag : V6_vadduwsat_dv +def int_hexagon_V6_vadduwsat_dv : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vadduwsat_dv">; + +// +// BUILTIN_INFO(HEXAGON.V6_vadduwsat_dv_128B,VD_ftype_VDVD,2) +// tag : V6_vadduwsat_dv_128B +def int_hexagon_V6_vadduwsat_dv_128B : +Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vadduwsat_dv_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubuwsat,VI_ftype_VIVI,2) +// tag : V6_vsubuwsat +def int_hexagon_V6_vsubuwsat : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubuwsat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_128B,VI_ftype_VIVI,2) +// tag : V6_vsubuwsat_128B +def int_hexagon_V6_vsubuwsat_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuwsat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_dv,VD_ftype_VDVD,2) +// tag : V6_vsubuwsat_dv +def int_hexagon_V6_vsubuwsat_dv : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubuwsat_dv">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubuwsat_dv_128B,VD_ftype_VDVD,2) +// tag : V6_vsubuwsat_dv_128B +def int_hexagon_V6_vsubuwsat_dv_128B : +Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubuwsat_dv_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddbsat,VI_ftype_VIVI,2) +// tag : V6_vaddbsat +def int_hexagon_V6_vaddbsat : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddbsat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddbsat_128B,VI_ftype_VIVI,2) +// tag : V6_vaddbsat_128B +def int_hexagon_V6_vaddbsat_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddbsat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddbsat_dv,VD_ftype_VDVD,2) +// tag : V6_vaddbsat_dv +def int_hexagon_V6_vaddbsat_dv : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddbsat_dv">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddbsat_dv_128B,VD_ftype_VDVD,2) +// tag : V6_vaddbsat_dv_128B +def int_hexagon_V6_vaddbsat_dv_128B : +Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vaddbsat_dv_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubbsat,VI_ftype_VIVI,2) +// tag : V6_vsubbsat +def int_hexagon_V6_vsubbsat : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubbsat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubbsat_128B,VI_ftype_VIVI,2) +// tag : V6_vsubbsat_128B +def int_hexagon_V6_vsubbsat_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubbsat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubbsat_dv,VD_ftype_VDVD,2) +// tag : V6_vsubbsat_dv +def int_hexagon_V6_vsubbsat_dv : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubbsat_dv">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubbsat_dv_128B,VD_ftype_VDVD,2) +// tag : V6_vsubbsat_dv_128B +def int_hexagon_V6_vsubbsat_dv_128B : +Hexagon_V62_v2048v2048v2048_Intrinsic<"HEXAGON_V6_vsubbsat_dv_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddububb_sat,VI_ftype_VIVI,2) +// tag : V6_vaddububb_sat +def int_hexagon_V6_vaddububb_sat : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddububb_sat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddububb_sat_128B,VI_ftype_VIVI,2) +// tag : V6_vaddububb_sat_128B +def int_hexagon_V6_vaddububb_sat_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddububb_sat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubububb_sat,VI_ftype_VIVI,2) +// tag : V6_vsubububb_sat +def int_hexagon_V6_vsubububb_sat : 
+Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsubububb_sat">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsubububb_sat_128B,VI_ftype_VIVI,2) +// tag : V6_vsubububb_sat_128B +def int_hexagon_V6_vsubububb_sat_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsubububb_sat_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddhw_acc,VD_ftype_VDVIVI,3) +// tag : V6_vaddhw_acc +def int_hexagon_V6_vaddhw_acc : +Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vaddhw_acc">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddhw_acc_128B,VD_ftype_VDVIVI,3) +// tag : V6_vaddhw_acc_128B +def int_hexagon_V6_vaddhw_acc_128B : +Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddhw_acc_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vadduhw_acc,VD_ftype_VDVIVI,3) +// tag : V6_vadduhw_acc +def int_hexagon_V6_vadduhw_acc : +Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vadduhw_acc">; + +// +// BUILTIN_INFO(HEXAGON.V6_vadduhw_acc_128B,VD_ftype_VDVIVI,3) +// tag : V6_vadduhw_acc_128B +def int_hexagon_V6_vadduhw_acc_128B : +Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vadduhw_acc_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddubh_acc,VD_ftype_VDVIVI,3) +// tag : V6_vaddubh_acc +def int_hexagon_V6_vaddubh_acc : +Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vaddubh_acc">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddubh_acc_128B,VD_ftype_VDVIVI,3) +// tag : V6_vaddubh_acc_128B +def int_hexagon_V6_vaddubh_acc_128B : +Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vaddubh_acc_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_64,VD_ftype_VIVI,2) +// tag : V6_vmpyewuh_64 +def int_hexagon_V6_vmpyewuh_64 : +Hexagon_V62_v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyewuh_64">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpyewuh_64_128B,VD_ftype_VIVI,2) +// tag : V6_vmpyewuh_64_128B +def int_hexagon_V6_vmpyewuh_64_128B : +Hexagon_V62_v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyewuh_64_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpyowh_64_acc,VD_ftype_VDVIVI,3) +// tag : V6_vmpyowh_64_acc +def int_hexagon_V6_vmpyowh_64_acc : +Hexagon_V62_v1024v1024v512v512_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpyowh_64_acc_128B,VD_ftype_VDVIVI,3) +// tag : V6_vmpyowh_64_acc_128B +def int_hexagon_V6_vmpyowh_64_acc_128B : +Hexagon_V62_v2048v2048v1024v1024_Intrinsic<"HEXAGON_V6_vmpyowh_64_acc_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpauhb,VD_ftype_VDSI,2) +// tag : V6_vmpauhb +def int_hexagon_V6_vmpauhb : +Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpauhb">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpauhb_128B,VD_ftype_VDSI,2) +// tag : V6_vmpauhb_128B +def int_hexagon_V6_vmpauhb_128B : +Hexagon_V62_v2048v2048i_Intrinsic<"HEXAGON_V6_vmpauhb_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpauhb_acc,VD_ftype_VDVDSI,3) +// tag : V6_vmpauhb_acc +def int_hexagon_V6_vmpauhb_acc : +Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpauhb_acc">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpauhb_acc_128B,VD_ftype_VDVDSI,3) +// tag : V6_vmpauhb_acc_128B +def int_hexagon_V6_vmpauhb_acc_128B : +Hexagon_V62_v2048v2048v2048i_Intrinsic<"HEXAGON_V6_vmpauhb_acc_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpyiwub,VI_ftype_VISI,2) +// tag : V6_vmpyiwub +def int_hexagon_V6_vmpyiwub : +Hexagon_V62_v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwub">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_128B,VI_ftype_VISI,2) +// tag : V6_vmpyiwub_128B +def int_hexagon_V6_vmpyiwub_128B : +Hexagon_V62_v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwub_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_acc,VI_ftype_VIVISI,3) +// tag : V6_vmpyiwub_acc +def 
int_hexagon_V6_vmpyiwub_acc : +Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vmpyiwub_acc">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmpyiwub_acc_128B,VI_ftype_VIVISI,3) +// tag : V6_vmpyiwub_acc_128B +def int_hexagon_V6_vmpyiwub_acc_128B : +Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vmpyiwub_acc_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vandnqrt,VI_ftype_QVSI,2) +// tag : V6_vandnqrt +def int_hexagon_V6_vandnqrt : +Hexagon_V62_v512v64ii_Intrinsic<"HEXAGON_V6_vandnqrt">; + +// +// BUILTIN_INFO(HEXAGON.V6_vandnqrt_128B,VI_ftype_QVSI,2) +// tag : V6_vandnqrt_128B +def int_hexagon_V6_vandnqrt_128B : +Hexagon_V62_v1024v128ii_Intrinsic<"HEXAGON_V6_vandnqrt_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vandnqrt_acc,VI_ftype_VIQVSI,3) +// tag : V6_vandnqrt_acc +def int_hexagon_V6_vandnqrt_acc : +Hexagon_V62_v512v512v64ii_Intrinsic<"HEXAGON_V6_vandnqrt_acc">; + +// +// BUILTIN_INFO(HEXAGON.V6_vandnqrt_acc_128B,VI_ftype_VIQVSI,3) +// tag : V6_vandnqrt_acc_128B +def int_hexagon_V6_vandnqrt_acc_128B : +Hexagon_V62_v1024v1024v128ii_Intrinsic<"HEXAGON_V6_vandnqrt_acc_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vandvqv,VI_ftype_QVVI,2) +// tag : V6_vandvqv +def int_hexagon_V6_vandvqv : +Hexagon_V62_v512v64iv512_Intrinsic<"HEXAGON_V6_vandvqv">; + +// +// BUILTIN_INFO(HEXAGON.V6_vandvqv_128B,VI_ftype_QVVI,2) +// tag : V6_vandvqv_128B +def int_hexagon_V6_vandvqv_128B : +Hexagon_V62_v1024v128iv1024_Intrinsic<"HEXAGON_V6_vandvqv_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vandvnqv,VI_ftype_QVVI,2) +// tag : V6_vandvnqv +def int_hexagon_V6_vandvnqv : +Hexagon_V62_v512v64iv512_Intrinsic<"HEXAGON_V6_vandvnqv">; + +// +// BUILTIN_INFO(HEXAGON.V6_vandvnqv_128B,VI_ftype_QVVI,2) +// tag : V6_vandvnqv_128B +def int_hexagon_V6_vandvnqv_128B : +Hexagon_V62_v1024v128iv1024_Intrinsic<"HEXAGON_V6_vandvnqv_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_pred_scalar2v2,QV_ftype_SI,1) +// tag : V6_pred_scalar2v2 +def int_hexagon_V6_pred_scalar2v2 : +Hexagon_V62_v64ii_Intrinsic<"HEXAGON_V6_pred_scalar2v2">; + +// +// BUILTIN_INFO(HEXAGON.V6_pred_scalar2v2_128B,QV_ftype_SI,1) +// tag : V6_pred_scalar2v2_128B +def int_hexagon_V6_pred_scalar2v2_128B : +Hexagon_V62_v128ii_Intrinsic<"HEXAGON_V6_pred_scalar2v2_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_shuffeqw,QV_ftype_QVQV,2) +// tag : V6_shuffeqw +def int_hexagon_V6_shuffeqw : +Hexagon_V62_v64iv64iv64i_Intrinsic<"HEXAGON_V6_shuffeqw">; + +// +// BUILTIN_INFO(HEXAGON.V6_shuffeqw_128B,QV_ftype_QVQV,2) +// tag : V6_shuffeqw_128B +def int_hexagon_V6_shuffeqw_128B : +Hexagon_V62_v128iv128iv128i_Intrinsic<"HEXAGON_V6_shuffeqw_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_shuffeqh,QV_ftype_QVQV,2) +// tag : V6_shuffeqh +def int_hexagon_V6_shuffeqh : +Hexagon_V62_v64iv64iv64i_Intrinsic<"HEXAGON_V6_shuffeqh">; + +// +// BUILTIN_INFO(HEXAGON.V6_shuffeqh_128B,QV_ftype_QVQV,2) +// tag : V6_shuffeqh_128B +def int_hexagon_V6_shuffeqh_128B : +Hexagon_V62_v128iv128iv128i_Intrinsic<"HEXAGON_V6_shuffeqh_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmaxb,VI_ftype_VIVI,2) +// tag : V6_vmaxb +def int_hexagon_V6_vmaxb : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vmaxb">; + +// +// BUILTIN_INFO(HEXAGON.V6_vmaxb_128B,VI_ftype_VIVI,2) +// tag : V6_vmaxb_128B +def int_hexagon_V6_vmaxb_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vmaxb_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vminb,VI_ftype_VIVI,2) +// tag : V6_vminb +def int_hexagon_V6_vminb : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vminb">; + +// +// BUILTIN_INFO(HEXAGON.V6_vminb_128B,VI_ftype_VIVI,2) +// tag : V6_vminb_128B +def 
int_hexagon_V6_vminb_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vminb_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsatuwuh,VI_ftype_VIVI,2) +// tag : V6_vsatuwuh +def int_hexagon_V6_vsatuwuh : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vsatuwuh">; + +// +// BUILTIN_INFO(HEXAGON.V6_vsatuwuh_128B,VI_ftype_VIVI,2) +// tag : V6_vsatuwuh_128B +def int_hexagon_V6_vsatuwuh_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vsatuwuh_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_lvsplath,VI_ftype_SI,1) +// tag : V6_lvsplath +def int_hexagon_V6_lvsplath : +Hexagon_V62_v512i_Intrinsic<"HEXAGON_V6_lvsplath">; + +// +// BUILTIN_INFO(HEXAGON.V6_lvsplath_128B,VI_ftype_SI,1) +// tag : V6_lvsplath_128B +def int_hexagon_V6_lvsplath_128B : +Hexagon_V62_v1024i_Intrinsic<"HEXAGON_V6_lvsplath_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_lvsplatb,VI_ftype_SI,1) +// tag : V6_lvsplatb +def int_hexagon_V6_lvsplatb : +Hexagon_V62_v512i_Intrinsic<"HEXAGON_V6_lvsplatb">; + +// +// BUILTIN_INFO(HEXAGON.V6_lvsplatb_128B,VI_ftype_SI,1) +// tag : V6_lvsplatb_128B +def int_hexagon_V6_lvsplatb_128B : +Hexagon_V62_v1024i_Intrinsic<"HEXAGON_V6_lvsplatb_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddclbw,VI_ftype_VIVI,2) +// tag : V6_vaddclbw +def int_hexagon_V6_vaddclbw : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddclbw">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddclbw_128B,VI_ftype_VIVI,2) +// tag : V6_vaddclbw_128B +def int_hexagon_V6_vaddclbw_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddclbw_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddclbh,VI_ftype_VIVI,2) +// tag : V6_vaddclbh +def int_hexagon_V6_vaddclbh : +Hexagon_V62_v512v512v512_Intrinsic<"HEXAGON_V6_vaddclbh">; + +// +// BUILTIN_INFO(HEXAGON.V6_vaddclbh_128B,VI_ftype_VIVI,2) +// tag : V6_vaddclbh_128B +def int_hexagon_V6_vaddclbh_128B : +Hexagon_V62_v1024v1024v1024_Intrinsic<"HEXAGON_V6_vaddclbh_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvvbi,VI_ftype_VIVISI,3) +// tag : V6_vlutvvbi +def int_hexagon_V6_vlutvvbi : +Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvbi">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvvbi_128B,VI_ftype_VIVISI,3) +// tag : V6_vlutvvbi_128B +def int_hexagon_V6_vlutvvbi_128B : +Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvbi_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracci,VI_ftype_VIVIVISI,4) +// tag : V6_vlutvvb_oracci +def int_hexagon_V6_vlutvvb_oracci : +Hexagon_V62_v512v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_oracci">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvvb_oracci_128B,VI_ftype_VIVIVISI,4) +// tag : V6_vlutvvb_oracci_128B +def int_hexagon_V6_vlutvvb_oracci_128B : +Hexagon_V62_v1024v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_oracci_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvwhi,VD_ftype_VIVISI,3) +// tag : V6_vlutvwhi +def int_hexagon_V6_vlutvwhi : +Hexagon_V62_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwhi">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvwhi_128B,VD_ftype_VIVISI,3) +// tag : V6_vlutvwhi_128B +def int_hexagon_V6_vlutvwhi_128B : +Hexagon_V62_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwhi_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracci,VD_ftype_VDVIVISI,4) +// tag : V6_vlutvwh_oracci +def int_hexagon_V6_vlutvwh_oracci : +Hexagon_V62_v1024v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_oracci">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvwh_oracci_128B,VD_ftype_VDVIVISI,4) +// tag : V6_vlutvwh_oracci_128B +def int_hexagon_V6_vlutvwh_oracci_128B : +Hexagon_V62_v2048v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_oracci_128B">; + +// +// 
BUILTIN_INFO(HEXAGON.V6_vlutvvb_nm,VI_ftype_VIVISI,3) +// tag : V6_vlutvvb_nm +def int_hexagon_V6_vlutvvb_nm : +Hexagon_V62_v512v512v512i_Intrinsic<"HEXAGON_V6_vlutvvb_nm">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvvb_nm_128B,VI_ftype_VIVISI,3) +// tag : V6_vlutvvb_nm_128B +def int_hexagon_V6_vlutvvb_nm_128B : +Hexagon_V62_v1024v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvvb_nm_128B">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvwh_nm,VD_ftype_VIVISI,3) +// tag : V6_vlutvwh_nm +def int_hexagon_V6_vlutvwh_nm : +Hexagon_V62_v1024v512v512i_Intrinsic<"HEXAGON_V6_vlutvwh_nm">; + +// +// BUILTIN_INFO(HEXAGON.V6_vlutvwh_nm_128B,VD_ftype_VIVISI,3) +// tag : V6_vlutvwh_nm_128B +def int_hexagon_V6_vlutvwh_nm_128B : +Hexagon_V62_v2048v1024v1024i_Intrinsic<"HEXAGON_V6_vlutvwh_nm_128B">; + diff --git a/include/llvm/Support/ELF.h b/include/llvm/Support/ELF.h index 9bbec86a45c..489d821b749 100644 --- a/include/llvm/Support/ELF.h +++ b/include/llvm/Support/ELF.h @@ -556,6 +556,7 @@ enum { EF_HEXAGON_MACH_V5 = 0x00000004, // Hexagon V5 EF_HEXAGON_MACH_V55 = 0x00000005, // Hexagon V55 EF_HEXAGON_MACH_V60 = 0x00000060, // Hexagon V60 + EF_HEXAGON_MACH_V62 = 0x00000062, // Hexagon V62 // Highest ISA version flags EF_HEXAGON_ISA_MACH = 0x00000000, // Same as specified in bits[11:0] @@ -566,6 +567,7 @@ enum { EF_HEXAGON_ISA_V5 = 0x00000040, // Hexagon V5 ISA EF_HEXAGON_ISA_V55 = 0x00000050, // Hexagon V55 ISA EF_HEXAGON_ISA_V60 = 0x00000060, // Hexagon V60 ISA + EF_HEXAGON_ISA_V62 = 0x00000062, // Hexagon V62 ISA }; // Hexagon-specific section indexes for common small data diff --git a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp index 60dd8ebe448..1aab5a00021 100644 --- a/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp +++ b/lib/Target/Hexagon/AsmParser/HexagonAsmParser.cpp @@ -907,6 +907,9 @@ bool HexagonAsmParser::ParseDirectiveComm(bool IsLocal, SMLoc Loc) { // validate register against architecture bool HexagonAsmParser::RegisterMatchesArch(unsigned MatchNum) const { + if (HexagonMCRegisterClasses[Hexagon::V62RegsRegClassID].contains(MatchNum)) + if (!getSTI().getFeatureBits()[Hexagon::ArchV62]) + return false; return true; } @@ -1012,11 +1015,15 @@ bool HexagonAsmParser::parseOperand(OperandVector &Operands) { bool HexagonAsmParser::isLabel(AsmToken &Token) { MCAsmLexer &Lexer = getLexer(); AsmToken const &Second = Lexer.getTok(); - AsmToken Third = Lexer.peekTok(); + AsmToken Third = Lexer.peekTok(); StringRef String = Token.getString(); if (Token.is(AsmToken::TokenKind::LCurly) || Token.is(AsmToken::TokenKind::RCurly)) return false; + // special case for parsing vwhist256:sat + if (String.lower() == "vwhist256" && Second.is(AsmToken::Colon) && + Third.getString().lower() == "sat") + return false; if (!Token.is(AsmToken::TokenKind::Identifier)) return true; if (!matchRegister(String.lower())) diff --git a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp index 694da86b546..cf9d8bf0cd0 100644 --- a/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp +++ b/lib/Target/Hexagon/Disassembler/HexagonDisassembler.cpp @@ -551,19 +551,23 @@ static DecodeStatus DecodeVecPredRegsRegisterClass(MCInst &Inst, unsigned RegNo, static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo, uint64_t /*Address*/, const void *Decoder) { + using namespace Hexagon; static const MCPhysReg CtrlRegDecoderTable[] = { - Hexagon::SA0, Hexagon::LC0, Hexagon::SA1, - Hexagon::LC1, Hexagon::P3_0, Hexagon::C5, - 
Hexagon::C6, Hexagon::C7, Hexagon::USR, - Hexagon::PC, Hexagon::UGP, Hexagon::GP, - Hexagon::CS0, Hexagon::CS1, Hexagon::UPCL, - Hexagon::UPC + /* 0 */ SA0, LC0, SA1, LC1, + /* 4 */ P3_0, C5, C6, C7, + /* 8 */ USR, PC, UGP, GP, + /* 12 */ CS0, CS1, UPCL, UPCH, + /* 16 */ FRAMELIMIT, FRAMEKEY, PKTCOUNTLO, PKTCOUNTHI, + /* 20 */ 0, 0, 0, 0, + /* 24 */ 0, 0, 0, 0, + /* 28 */ 0, 0, UTIMERLO, UTIMERHI }; if (RegNo >= array_lengthof(CtrlRegDecoderTable)) return MCDisassembler::Fail; - if (CtrlRegDecoderTable[RegNo] == Hexagon::NoRegister) + static_assert(NoRegister == 0, "Expecting NoRegister to be 0"); + if (CtrlRegDecoderTable[RegNo] == NoRegister) return MCDisassembler::Fail; unsigned Register = CtrlRegDecoderTable[RegNo]; @@ -574,19 +578,23 @@ static DecodeStatus DecodeCtrRegsRegisterClass(MCInst &Inst, unsigned RegNo, static DecodeStatus DecodeCtrRegs64RegisterClass(MCInst &Inst, unsigned RegNo, uint64_t /*Address*/, const void *Decoder) { + using namespace Hexagon; static const MCPhysReg CtrlReg64DecoderTable[] = { - Hexagon::C1_0, Hexagon::NoRegister, Hexagon::C3_2, - Hexagon::NoRegister, - Hexagon::C7_6, Hexagon::NoRegister, Hexagon::C9_8, - Hexagon::NoRegister, Hexagon::C11_10, Hexagon::NoRegister, - Hexagon::CS, Hexagon::NoRegister, Hexagon::UPC, - Hexagon::NoRegister + /* 0 */ C1_0, 0, C3_2, 0, + /* 4 */ C5_4, 0, C7_6, 0, + /* 8 */ C9_8, 0, C11_10, 0, + /* 12 */ CS, 0, UPC, 0, + /* 16 */ C17_16, 0, PKTCOUNT, 0, + /* 20 */ 0, 0, 0, 0, + /* 24 */ 0, 0, 0, 0, + /* 28 */ 0, 0, UTIMER, 0 }; if (RegNo >= array_lengthof(CtrlReg64DecoderTable)) return MCDisassembler::Fail; - if (CtrlReg64DecoderTable[RegNo] == Hexagon::NoRegister) + static_assert(NoRegister == 0, "Expecting NoRegister to be 0"); + if (CtrlReg64DecoderTable[RegNo] == NoRegister) return MCDisassembler::Fail; unsigned Register = CtrlReg64DecoderTable[RegNo]; diff --git a/lib/Target/Hexagon/Hexagon.td b/lib/Target/Hexagon/Hexagon.td index 25388ccf590..4767165141a 100644 --- a/lib/Target/Hexagon/Hexagon.td +++ b/lib/Target/Hexagon/Hexagon.td @@ -252,6 +252,7 @@ include "HexagonPatterns.td" include "HexagonDepMappings.td" include "HexagonIntrinsics.td" include "HexagonIntrinsicsDerived.td" +include "HexagonMapAsm2IntrinV62.gen.td" def HexagonInstrInfo : InstrInfo; @@ -271,6 +272,8 @@ def : Proc<"hexagonv55", HexagonModelV55, [ArchV4, ArchV5, ArchV55]>; def : Proc<"hexagonv60", HexagonModelV60, [ArchV4, ArchV5, ArchV55, ArchV60, ExtensionHVX]>; +def : Proc<"hexagonv62", HexagonModelV62, + [ArchV4, ArchV5, ArchV55, ArchV60, ArchV62, ExtensionHVX]>; //===----------------------------------------------------------------------===// // Declare the target which we are implementing diff --git a/lib/Target/Hexagon/HexagonDepArch.h b/lib/Target/Hexagon/HexagonDepArch.h index 82265535ce8..1009aa39cef 100644 --- a/lib/Target/Hexagon/HexagonDepArch.h +++ b/lib/Target/Hexagon/HexagonDepArch.h @@ -7,4 +7,4 @@ // //===----------------------------------------------------------------------===// -enum HexagonArchEnum { V4,V5,V55,V60 }; +enum HexagonArchEnum { V4,V5,V55,V60,V62 }; diff --git a/lib/Target/Hexagon/HexagonDepArch.td b/lib/Target/Hexagon/HexagonDepArch.td index d1c08453c25..5b1d02c136f 100644 --- a/lib/Target/Hexagon/HexagonDepArch.td +++ b/lib/Target/Hexagon/HexagonDepArch.td @@ -7,6 +7,8 @@ // //===----------------------------------------------------------------------===// +def ArchV62: SubtargetFeature<"v62", "HexagonArchVersion", "V62", "Enable Hexagon V62 architecture">; +def HasV62T : Predicate<"HST->hasV62TOps()">, 
AssemblerPredicate<"ArchV62">; def ArchV60: SubtargetFeature<"v60", "HexagonArchVersion", "V60", "Enable Hexagon V60 architecture">; def HasV60T : Predicate<"HST->hasV60TOps()">, AssemblerPredicate<"ArchV60">; def ArchV55: SubtargetFeature<"v55", "HexagonArchVersion", "V55", "Enable Hexagon V55 architecture">; diff --git a/lib/Target/Hexagon/HexagonDepITypes.h b/lib/Target/Hexagon/HexagonDepITypes.h index e827c7ae79b..f8ae39a3799 100644 --- a/lib/Target/Hexagon/HexagonDepITypes.h +++ b/lib/Target/Hexagon/HexagonDepITypes.h @@ -15,6 +15,7 @@ enum Type { TypeALU32_ADDI = 2, TypeALU64 = 3, TypeCJ = 4, + TypeCOPROC_VMEM = 5, TypeCR = 7, TypeCVI_HIST = 10, TypeCVI_VA = 16, diff --git a/lib/Target/Hexagon/HexagonDepITypes.td b/lib/Target/Hexagon/HexagonDepITypes.td index 00524e33b15..f1d689ce12f 100644 --- a/lib/Target/Hexagon/HexagonDepITypes.td +++ b/lib/Target/Hexagon/HexagonDepITypes.td @@ -13,6 +13,7 @@ def TypeALU32_3op : IType<1>; def TypeALU32_ADDI : IType<2>; def TypeALU64 : IType<3>; def TypeCJ : IType<4>; +def TypeCOPROC_VMEM : IType<5>; def TypeCR : IType<7>; def TypeCVI_HIST : IType<10>; def TypeCVI_VA : IType<16>; diff --git a/lib/Target/Hexagon/HexagonDepInstrInfo.td b/lib/Target/Hexagon/HexagonDepInstrInfo.td index 7a2450b5c1a..4c4a2788855 100644 --- a/lib/Target/Hexagon/HexagonDepInstrInfo.td +++ b/lib/Target/Hexagon/HexagonDepInstrInfo.td @@ -3247,6 +3247,17 @@ let opNewValue = 0; let prefersSlot3 = 1; let Defs = [USR_OVF]; } +def A6_vminub_RdP : HInst< +(outs DoubleRegs:$Rdd32, PredRegs:$Pe4), +(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32), +"$Rdd32,$Pe4 = vminub($Rtt32,$Rss32)", +M_tc_2_SLOT23, TypeM>, Enc_766909, Requires<[HasV62T]> { +let Inst{7-7} = 0b0; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b11101010111; +let isPredicateLate = 1; +let prefersSlot3 = 1; +} def C2_all8 : HInst< (outs PredRegs:$Pd4), (ins PredRegs:$Ps4), @@ -16939,6 +16950,26 @@ let Inst{13-13} = 0b0; let Inst{31-21} = 0b11101000100; let prefersSlot3 = 1; } +def M6_vabsdiffb : HInst< +(outs DoubleRegs:$Rdd32), +(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32), +"$Rdd32 = vabsdiffb($Rtt32,$Rss32)", +M_tc_2_SLOT23, TypeM>, Enc_11687333, Requires<[HasV62T]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b11101000111; +let prefersSlot3 = 1; +} +def M6_vabsdiffub : HInst< +(outs DoubleRegs:$Rdd32), +(ins DoubleRegs:$Rtt32, DoubleRegs:$Rss32), +"$Rdd32 = vabsdiffub($Rtt32,$Rss32)", +M_tc_2_SLOT23, TypeM>, Enc_11687333, Requires<[HasV62T]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b11101000101; +let prefersSlot3 = 1; +} def PS_loadrbabs : HInst< (outs IntRegs:$Rd32), (ins u32_0Imm:$Ii), @@ -24687,6 +24718,32 @@ let opNewValue = 0; let prefersSlot3 = 1; let Constraints = "$Rx32 = $Rx32in"; } +def S6_vsplatrbp : HInst< +(outs DoubleRegs:$Rdd32), +(ins IntRegs:$Rs32), +"$Rdd32 = vsplatb($Rs32)", +S_2op_tc_1_SLOT23, TypeS_2op>, Enc_4030179, Requires<[HasV62T]> { +let Inst{13-5} = 0b000000100; +let Inst{31-21} = 0b10000100010; +} +def S6_vtrunehb_ppp : HInst< +(outs DoubleRegs:$Rdd32), +(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32), +"$Rdd32 = vtrunehb($Rss32,$Rtt32)", +S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157, Requires<[HasV62T]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b11000001100; +} +def S6_vtrunohb_ppp : HInst< +(outs DoubleRegs:$Rdd32), +(ins DoubleRegs:$Rss32, DoubleRegs:$Rtt32), +"$Rdd32 = vtrunohb($Rss32,$Rtt32)", +S_3op_tc_1_SLOT23, TypeS_3op>, Enc_8333157, Requires<[HasV62T]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let 
Inst{31-21} = 0b11000001100; +} def SA1_addi : HInst< (outs GeneralSubRegs:$Rx16), (ins IntRegs:$Rx16in, s32_0Imm:$Ii), @@ -25867,6 +25924,52 @@ let isPseudo = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_lvsplatb : HInst< +(outs VectorRegs:$Vd32), +(ins IntRegs:$Rt32), +"$Vd32.b = vsplat($Rt32)", +CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> { +let Inst{13-5} = 0b000000010; +let Inst{31-21} = 0b00011001110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_lvsplatb_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins IntRegs:$Rt32), +"$Vd32.b = vsplat($Rt32)", +CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> { +let Inst{13-5} = 0b000000010; +let Inst{31-21} = 0b00011001110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_lvsplath : HInst< +(outs VectorRegs:$Vd32), +(ins IntRegs:$Rt32), +"$Vd32.h = vsplat($Rt32)", +CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> { +let Inst{13-5} = 0b000000001; +let Inst{31-21} = 0b00011001110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_lvsplath_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins IntRegs:$Rt32), +"$Vd32.h = vsplat($Rt32)", +CVI_VX, TypeCVI_VX>, Enc_9768377, Requires<[HasV62T,UseHVX]> { +let Inst{13-5} = 0b000000001; +let Inst{31-21} = 0b00011001110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_lvsplatw : HInst< (outs VectorRegs:$Vd32), (ins IntRegs:$Rt32), @@ -26046,6 +26149,29 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_pred_scalar2v2 : HInst< +(outs VecPredRegs:$Qd4), +(ins IntRegs:$Rt32), +"$Qd4 = vsetq2($Rt32)", +CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV62T,UseHVX]> { +let Inst{13-2} = 0b000000010011; +let Inst{31-21} = 0b00011001101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_pred_scalar2v2_128B : HInst< +(outs VecPredRegs128B:$Qd4), +(ins IntRegs:$Rt32), +"$Qd4 = vsetq2($Rt32)", +CVI_VP_LONG, TypeCVI_VP>, Enc_12781442, Requires<[HasV62T,UseHVX]> { +let Inst{13-2} = 0b000000010011; +let Inst{31-21} = 0b00011001101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_pred_xor : HInst< (outs VecPredRegs:$Qd4), (ins VecPredRegs:$Qs4, VecPredRegs:$Qt4), @@ -26073,6 +26199,60 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_shuffeqh : HInst< +(outs VecPredRegs:$Qd4), +(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4), +"$Qd4.b = vshuffe($Qs4.h,$Qt4.h)", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> { +let Inst{7-2} = 0b000110; +let Inst{13-10} = 0b0000; +let Inst{21-16} = 0b000011; +let Inst{31-24} = 0b00011110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_shuffeqh_128B : HInst< +(outs VecPredRegs128B:$Qd4), +(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4), +"$Qd4.b = vshuffe($Qs4.h,$Qt4.h)", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> { +let Inst{7-2} = 0b000110; +let Inst{13-10} = 0b0000; +let Inst{21-16} = 0b000011; +let Inst{31-24} = 0b00011110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_shuffeqw : HInst< +(outs VecPredRegs:$Qd4), +(ins VecPredRegs:$Qs4, VecPredRegs:$Qt4), +"$Qd4.h = 
vshuffe($Qs4.w,$Qt4.w)", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> { +let Inst{7-2} = 0b000111; +let Inst{13-10} = 0b0000; +let Inst{21-16} = 0b000011; +let Inst{31-24} = 0b00011110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_shuffeqw_128B : HInst< +(outs VecPredRegs128B:$Qd4), +(ins VecPredRegs128B:$Qs4, VecPredRegs128B:$Qt4), +"$Qd4.h = vshuffe($Qs4.w,$Qt4.w)", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_6091631, Requires<[HasV62T,UseHVX]> { +let Inst{7-2} = 0b000111; +let Inst{13-10} = 0b0000; +let Inst{21-16} = 0b000011; +let Inst{31-24} = 0b00011110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_st0 : HInst< (outs), (ins IntRegs:$Rt32, VectorRegs:$Vs32), @@ -26531,6 +26711,117 @@ let mayLoad = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vL32b_cur_npred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_cur_npred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_cur_npred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_cur_npred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_cur_npred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000101; +let Inst{31-21} = 0b00101011100; +let 
isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_cur_npred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000101; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_cur_pi : HInst< (outs VectorRegs:$Vd32, IntRegs:$Rx32), (ins IntRegs:$Rx32in, s3_6Imm:$Ii), @@ -26599,6 +26890,222 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Rx32 = $Rx32in"; } +def V6_vL32b_cur_pred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_cur_pred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_cur_pred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii)", +CVI_VM_CUR_LD, TypeCOPROC_VMEM>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_cur_pred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii)", +CVI_VM_CUR_LD, TypeCOPROC_VMEM>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_cur_pred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_3158657, 
Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000100; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_cur_pred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2)", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000100; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_npred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_npred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_npred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_npred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_npred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000011; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 
1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_npred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000011; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_nt_ai : HInst< (outs VectorRegs:$Vd32), (ins IntRegs:$Rt32, s4_6Imm:$Ii), @@ -26671,6 +27178,123 @@ let mayLoad = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vL32b_nt_cur_npred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_nt_cur_npred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if (!$Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_nt_cur_npred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_cur_npred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if (!$Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_cur_npred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, 
IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000101; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_cur_npred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000101; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_nt_cur_pi : HInst< (outs VectorRegs:$Vd32, IntRegs:$Rx32), (ins IntRegs:$Rx32in, s3_6Imm:$Ii), @@ -26743,6 +27367,234 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Rx32 = $Rx32in"; } +def V6_vL32b_nt_cur_pred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_nt_cur_pred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if ($Pv4) $Vd32.cur = vmem($Rt32+#$Ii):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_nt_cur_pred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_cur_pred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if ($Pv4) $Vd32.cur = vmem($Rx32++#$Ii):nt", +CVI_VM_CUR_LD, TypeCVI_VM_CUR_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = 
Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_cur_pred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt", +CVI_VM_CUR_LD, TypeCOPROC_VMEM>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000100; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_cur_pred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32.cur = vmem($Rx32++$Mu2):nt", +CVI_VM_CUR_LD, TypeCOPROC_VMEM>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000100; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_npred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_nt_npred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if (!$Pv4) $Vd32 = vmem($Rt32+#$Ii):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_nt_npred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_npred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if (!$Pv4) $Vd32 = vmem($Rx32++#$Ii):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode 
= PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_npred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000011; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_npred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32 = vmem($Rx32++$Mu2):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000011; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_nt_pi : HInst< (outs VectorRegs:$Vd32, IntRegs:$Rx32), (ins IntRegs:$Rx32in, s3_6Imm:$Ii), @@ -26819,6 +27671,117 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Rx32 = $Rx32in"; } +def V6_vL32b_nt_pred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_nt_pred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_nt_pred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_pred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, 
Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_pred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000010; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_pred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2):nt", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000010; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_nt_tmp_ai : HInst< (outs VectorRegs:$Vd32), (ins IntRegs:$Rt32, s4_6Imm:$Ii), @@ -26854,6 +27817,123 @@ let mayLoad = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vL32b_nt_tmp_npred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b111; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_nt_tmp_npred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b111; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_nt_tmp_npred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b111; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def 
V6_vL32b_nt_tmp_npred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b111; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_tmp_npred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000111; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_tmp_npred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000111; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_nt_tmp_pi : HInst< (outs VectorRegs:$Vd32, IntRegs:$Rx32), (ins IntRegs:$Rx32in, s3_6Imm:$Ii), @@ -26926,6 +28006,117 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Rx32 = $Rx32in"; } +def V6_vL32b_nt_tmp_pred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_nt_tmp_pred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{31-21} = 0b00101000110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_nt_tmp_pred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; 
+let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_tmp_pred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_tmp_pred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000110; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_nt_tmp_pred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2):nt", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000110; +let Inst{31-21} = 0b00101011110; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let isNonTemporal = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_pi : HInst< (outs VectorRegs:$Vd32, IntRegs:$Rx32), (ins IntRegs:$Rx32in, s3_6Imm:$Ii), @@ -26998,6 +28189,111 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Rx32 = $Rx32in"; } +def V6_vL32b_pred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_pred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if ($Pv4) $Vd32 = vmem($Rt32+#$Ii)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_pred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii)", +CVI_VM_LD, TypeCVI_VM_LD>, 
Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_pred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if ($Pv4) $Vd32 = vmem($Rx32++#$Ii)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_pred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000010; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_pred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32 = vmem($Rx32++$Mu2)", +CVI_VM_LD, TypeCVI_VM_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000010; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_tmp_ai : HInst< (outs VectorRegs:$Vd32), (ins IntRegs:$Rt32, s4_6Imm:$Ii), @@ -27031,6 +28327,117 @@ let mayLoad = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vL32b_tmp_npred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b111; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_tmp_npred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if (!$Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b111; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_tmp_npred_pi : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)", +CVI_VM_TMP_LD, 
TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b111; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_tmp_npred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if (!$Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b111; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_tmp_npred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000111; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_tmp_npred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if (!$Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000111; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let isPredicatedFalse = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vL32b_tmp_pi : HInst< (outs VectorRegs:$Vd32, IntRegs:$Rx32), (ins IntRegs:$Rx32in, s3_6Imm:$Ii), @@ -27099,6 +28506,111 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Rx32 = $Rx32in"; } +def V6_vL32b_tmp_pred_ai : HInst< +(outs VectorRegs:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_6Imm:$Ii), +"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_13338314, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vL32b_tmp_pred_ai_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins PredRegs:$Pv4, IntRegs:$Rt32, s4_7Imm:$Ii), +"if ($Pv4) $Vd32.tmp = vmem($Rt32+#$Ii)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_738356, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{31-21} = 0b00101000100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = BaseImmOffset; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vL32b_tmp_pred_pi : HInst< +(outs 
VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_6Imm:$Ii), +"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_14560494, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_tmp_pred_pi_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, s3_7Imm:$Ii), +"if ($Pv4) $Vd32.tmp = vmem($Rx32++#$Ii)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_15560488, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00101001100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_tmp_pred_ppu : HInst< +(outs VectorRegs:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000110; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector64Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Rx32 = $Rx32in"; +} +def V6_vL32b_tmp_pred_ppu_128B : HInst< +(outs VectorRegs128B:$Vd32, IntRegs:$Rx32), +(ins PredRegs:$Pv4, IntRegs:$Rx32in, ModRegs:$Mu2), +"if ($Pv4) $Vd32.tmp = vmem($Rx32++$Mu2)", +CVI_VM_TMP_LD, TypeCVI_VM_TMP_LD>, Enc_3158657, Requires<[HasV62T,UseHVX]> { +let Inst{10-5} = 0b000110; +let Inst{31-21} = 0b00101011100; +let isPredicated = 1; +let hasNewValue = 1; +let opNewValue = 0; +let addrMode = PostInc; +let accessSize = Vector128Access; +let isCVLoad = 1; +let mayLoad = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Rx32 = $Rx32in"; +} def V6_vS32Ub_ai : HInst< (outs), (ins IntRegs:$Rt32, s4_6Imm:$Ii, VectorRegs:$Vs32), @@ -29642,6 +31154,183 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Vx32 = $Vx32in"; } +def V6_vaddbsat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.b = vadd($Vu32.b,$Vv32.b):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vaddbsat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.b = vadd($Vu32.b,$Vv32.b):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vaddbsat_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vaddb($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = 
"EXT_mmvec"; +} +def V6_vaddbsat_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vaddb($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vaddbsat_dv : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32), +"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b):sat", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vaddbsat_dv_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32), +"$Vdd32.b = vadd($Vuu32.b,$Vvv32.b):sat", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vaddbsat_dv_alt : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32), +"$Vdd32 = vaddb($Vuu32,$Vvv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vaddbsat_dv_alt_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32), +"$Vdd32 = vaddb($Vuu32,$Vvv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vaddcarry : HInst< +(outs VectorRegs:$Vd32, VecPredRegs:$Qx4), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, VecPredRegs:$Qx4in), +"$Vd32.w = vadd($Vu32.w,$Vv32.w,$Qx4):carry", +CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> { +let Inst{7-7} = 0b0; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100101; +let hasNewValue = 1; +let opNewValue = 0; +let hasNewValue2 = 1; +let opNewValue2 = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Qx4 = $Qx4in"; +} +def V6_vaddcarry_128B : HInst< +(outs VectorRegs128B:$Vd32, VecPredRegs128B:$Qx4), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, VecPredRegs128B:$Qx4in), +"$Vd32.w = vadd($Vu32.w,$Vv32.w,$Qx4):carry", +CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> { +let Inst{7-7} = 0b0; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100101; +let hasNewValue = 1; +let opNewValue = 0; +let hasNewValue2 = 1; +let opNewValue2 = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Qx4 = $Qx4in"; +} +def V6_vaddclbh : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.h = vadd(vclb($Vu32.h),$Vv32.h)", +CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011111000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vaddclbh_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.h = vadd(vclb($Vu32.h),$Vv32.h)", +CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011111000; +let 
hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vaddclbw : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.w = vadd(vclb($Vu32.w),$Vv32.w)", +CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011111000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vaddclbw_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.w = vadd(vclb($Vu32.w),$Vv32.w)", +CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011111000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vaddh : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -29975,6 +31664,62 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vaddhw_acc : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vxx32.w += vadd($Vu32.h,$Vv32.h)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100001; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vaddhw_acc_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vxx32.w += vadd($Vu32.h,$Vv32.h)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100001; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vaddhw_acc_alt : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vxx32 += vaddh($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vaddhw_acc_alt_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vxx32 += vaddh($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} def V6_vaddhw_alt : HInst< (outs VecDblRegs:$Vdd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -30023,6 +31768,62 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vaddubh_acc : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vxx32.h += vadd($Vu32.ub,$Vv32.ub)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100010; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def 
V6_vaddubh_acc_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vxx32.h += vadd($Vu32.ub,$Vv32.ub)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100010; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vaddubh_acc_alt : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vxx32 += vaddub($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vaddubh_acc_alt_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vxx32 += vaddub($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} def V6_vaddubh_alt : HInst< (outs VecDblRegs:$Vdd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -30142,6 +31943,31 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vaddububb_sat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.ub = vadd($Vu32.ub,$Vv32.b):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vaddububb_sat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.ub = vadd($Vu32.ub,$Vv32.b):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vadduhsat : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -30263,6 +32089,62 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vadduhw_acc : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vxx32.w += vadd($Vu32.uh,$Vv32.uh)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100010; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vadduhw_acc_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vxx32.w += vadd($Vu32.uh,$Vv32.uh)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100010; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vadduhw_acc_alt : HInst< +(outs 
VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vxx32 += vadduh($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vadduhw_acc_alt_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vxx32 += vadduh($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} def V6_vadduhw_alt : HInst< (outs VecDblRegs:$Vdd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -30286,6 +32168,102 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vadduwsat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.uw = vadd($Vu32.uw,$Vv32.uw):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111011; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vadduwsat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.uw = vadd($Vu32.uw,$Vv32.uw):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111011; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vadduwsat_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vadduw($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vadduwsat_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vadduw($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vadduwsat_dv : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32), +"$Vdd32.uw = vadd($Vuu32.uw,$Vvv32.uw):sat", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vadduwsat_dv_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32), +"$Vdd32.uw = vadd($Vuu32.uw,$Vvv32.uw):sat", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vadduwsat_dv_alt : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32), +"$Vdd32 = vadduw($Vuu32,$Vvv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let 
DecoderNamespace = "EXT_mmvec"; +} +def V6_vadduwsat_dv_alt_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32), +"$Vdd32 = vadduw($Vuu32,$Vvv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vaddw : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -30667,6 +32645,110 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vandnqrt : HInst< +(outs VectorRegs:$Vd32), +(ins VecPredRegs:$Qu4, IntRegs:$Rt32), +"$Vd32 = vand(!$Qu4,$Rt32)", +CVI_VX, TypeCVI_VX>, Enc_4711514, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-10} = 0b0001; +let Inst{31-21} = 0b00011001101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vandnqrt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VecPredRegs128B:$Qu4, IntRegs:$Rt32), +"$Vd32 = vand(!$Qu4,$Rt32)", +CVI_VX, TypeCVI_VX>, Enc_4711514, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-10} = 0b0001; +let Inst{31-21} = 0b00011001101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vandnqrt_acc : HInst< +(outs VectorRegs:$Vx32), +(ins VectorRegs:$Vx32in, VecPredRegs:$Qu4, IntRegs:$Rt32), +"$Vx32 |= vand(!$Qu4,$Rt32)", +CVI_VX, TypeCVI_VX>, Enc_4944558, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-10} = 0b1001; +let Inst{31-21} = 0b00011001011; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vandnqrt_acc_128B : HInst< +(outs VectorRegs128B:$Vx32), +(ins VectorRegs128B:$Vx32in, VecPredRegs128B:$Qu4, IntRegs:$Rt32), +"$Vx32 |= vand(!$Qu4,$Rt32)", +CVI_VX, TypeCVI_VX>, Enc_4944558, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-10} = 0b1001; +let Inst{31-21} = 0b00011001011; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vandnqrt_acc_alt : HInst< +(outs VectorRegs:$Vx32), +(ins VectorRegs:$Vx32in, VecPredRegs:$Qu4, IntRegs:$Rt32), +"$Vx32.ub |= vand(!$Qu4.ub,$Rt32.ub)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vandnqrt_acc_alt_128B : HInst< +(outs VectorRegs128B:$Vx32), +(ins VectorRegs128B:$Vx32in, VecPredRegs128B:$Qu4, IntRegs:$Rt32), +"$Vx32.ub |= vand(!$Qu4.ub,$Rt32.ub)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vandnqrt_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VecPredRegs:$Qu4, IntRegs:$Rt32), +"$Vd32.ub = vand(!$Qu4.ub,$Rt32.ub)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vandnqrt_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VecPredRegs128B:$Qu4, IntRegs:$Rt32), +"$Vd32.ub = 
vand(!$Qu4.ub,$Rt32.ub)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vandqrt : HInst< (outs VectorRegs:$Vd32), (ins VecPredRegs:$Qu4, IntRegs:$Rt32), @@ -30771,6 +32853,60 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vandvnqv : HInst< +(outs VectorRegs:$Vd32), +(ins VecPredRegs:$Qv4, VectorRegs:$Vu32), +"$Vd32 = vand(!$Qv4,$Vu32)", +CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b1; +let Inst{21-16} = 0b000011; +let Inst{31-24} = 0b00011110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vandvnqv_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vu32), +"$Vd32 = vand(!$Qv4,$Vu32)", +CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b1; +let Inst{21-16} = 0b000011; +let Inst{31-24} = 0b00011110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vandvqv : HInst< +(outs VectorRegs:$Vd32), +(ins VecPredRegs:$Qv4, VectorRegs:$Vu32), +"$Vd32 = vand($Qv4,$Vu32)", +CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b1; +let Inst{21-16} = 0b000011; +let Inst{31-24} = 0b00011110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vandvqv_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VecPredRegs128B:$Qv4, VectorRegs128B:$Vu32), +"$Vd32 = vand($Qv4,$Vu32)", +CVI_VA, TypeCVI_VA>, Enc_1220199, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b1; +let Inst{21-16} = 0b000011; +let Inst{31-24} = 0b00011110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vandvrt : HInst< (outs VecPredRegs:$Qd4), (ins VectorRegs:$Vu32, IntRegs:$Rt32), @@ -31206,6 +33342,31 @@ let opNewValue = 0; let isPseudo = 1; let isCodeGenOnly = 1; } +def V6_vasrhbsat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), +"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):sat", +CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vasrhbsat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8), +"$Vd32.b = vasr($Vu32.h,$Vv32.h,$Rt8):sat", +CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b000; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vasrhubrndsat : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), @@ -31324,6 +33485,31 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vasruwuhrndsat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), +"$Vd32.uh = vasr($Vu32.uw,$Vv32.uw,$Rt8):rnd:sat", +CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; 
+let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vasruwuhrndsat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8), +"$Vd32.uh = vasr($Vu32.uw,$Vv32.uw,$Rt8):rnd:sat", +CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vasrw : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, IntRegs:$Rt32), @@ -31533,6 +33719,31 @@ let opNewValue = 0; let isPseudo = 1; let isCodeGenOnly = 1; } +def V6_vasrwuhrndsat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), +"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat", +CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vasrwuhrndsat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8), +"$Vd32.uh = vasr($Vu32.w,$Vv32.w,$Rt8):rnd:sat", +CVI_VS, TypeCVI_VS>, Enc_11083408, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vasrwuhsat : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), @@ -34596,6 +36807,31 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vlsrb : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, IntRegs:$Rt32), +"$Vd32.ub = vlsr($Vu32.ub,$Rt32)", +CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vlsrb_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, IntRegs:$Rt32), +"$Vd32.ub = vlsr($Vu32.ub,$Rt32)", +CVI_VS, TypeCVI_VS>, Enc_16214129, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vlsrh : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, IntRegs:$Rt32), @@ -34813,6 +37049,31 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vlutvvb_nm : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), +"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8):nomatch", +CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vlutvvb_nm_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8), +"$Vd32.b = vlut32($Vu32.b,$Vv32.b,$Rt8):nomatch", +CVI_VP_LONG, TypeCVI_VP>, Enc_11083408, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vlutvvb_oracc : HInst< 
(outs VectorRegs:$Vx32), (ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), @@ -34842,6 +37103,56 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Vx32 = $Vx32in"; } +def V6_vlutvvb_oracci : HInst< +(outs VectorRegs:$Vx32), +(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii), +"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,#$Ii)", +CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8280533, Requires<[HasV62T,UseHVX]> { +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100110; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vlutvvb_oracci_128B : HInst< +(outs VectorRegs128B:$Vx32), +(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii), +"$Vx32.b |= vlut32($Vu32.b,$Vv32.b,#$Ii)", +CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_8280533, Requires<[HasV62T,UseHVX]> { +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100110; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vlutvvbi : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii), +"$Vd32.b = vlut32($Vu32.b,$Vv32.b,#$Ii)", +CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV62T,UseHVX]> { +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vlutvvbi_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii), +"$Vd32.b = vlut32($Vu32.b,$Vv32.b,#$Ii)", +CVI_VP_LONG, TypeCVI_VP>, Enc_7171569, Requires<[HasV62T,UseHVX]> { +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vlutvwh : HInst< (outs VecDblRegs:$Vdd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), @@ -34867,6 +37178,31 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vlutvwh_nm : HInst< +(outs VecDblRegs:$Vdd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), +"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8):nomatch", +CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vlutvwh_nm_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, IntRegsLow8:$Rt8), +"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,$Rt8):nomatch", +CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_14767681, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-24} = 0b00011000; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vlutvwh_oracc : HInst< (outs VecDblRegs:$Vxx32), (ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, IntRegsLow8:$Rt8), @@ -34896,6 +37232,104 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Vxx32 = $Vxx32in"; } +def V6_vlutvwh_oracci : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii), +"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,#$Ii)", +CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_3457570, Requires<[HasV62T,UseHVX]> { +let Inst{13-13} = 
0b1; +let Inst{31-21} = 0b00011100111; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vlutvwh_oracci_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii), +"$Vxx32.h |= vlut16($Vu32.b,$Vv32.h,#$Ii)", +CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_3457570, Requires<[HasV62T,UseHVX]> { +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100111; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vlutvwhi : HInst< +(outs VecDblRegs:$Vdd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, u3_0Imm:$Ii), +"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,#$Ii)", +CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_13261538, Requires<[HasV62T,UseHVX]> { +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110011; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vlutvwhi_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, u3_0Imm:$Ii), +"$Vdd32.h = vlut16($Vu32.b,$Vv32.h,#$Ii)", +CVI_VP_VS_LONG, TypeCVI_VP_VS>, Enc_13261538, Requires<[HasV62T,UseHVX]> { +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110011; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vmaxb : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.b = vmax($Vu32.b,$Vv32.b)", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vmaxb_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.b = vmax($Vu32.b,$Vv32.b)", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vmaxb_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vmaxb($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vmaxb_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vmaxb($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vmaxh : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -35088,6 +37522,54 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vminb : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.b = vmin($Vu32.b,$Vv32.b)", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vminb_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.b = 
vmin($Vu32.b,$Vv32.b)", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vminb_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vminb($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vminb_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vminb($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vminh : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -35584,6 +38066,110 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vmpauhb : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, IntRegs:$Rt32), +"$Vdd32.w = vmpa($Vuu32.uh,$Rt32.b)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vmpauhb_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32), +"$Vdd32.w = vmpa($Vuu32.uh,$Rt32.b)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5023792, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vmpauhb_acc : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32), +"$Vxx32.w += vmpa($Vuu32.uh,$Rt32.b)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vmpauhb_acc_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32), +"$Vxx32.w += vmpa($Vuu32.uh,$Rt32.b)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_4327792, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vmpauhb_acc_alt : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VecDblRegs:$Vuu32, IntRegs:$Rt32), +"$Vxx32 += vmpauhb($Vuu32,$Rt32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vmpauhb_acc_alt_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VecDblRegs128B:$Vuu32, IntRegs:$Rt32), +"$Vxx32 += vmpauhb($Vuu32,$Rt32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let 
isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vmpauhb_alt : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, IntRegs:$Rt32), +"$Vdd32 = vmpauhb($Vuu32,$Rt32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vmpauhb_alt_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, IntRegs:$Rt32), +"$Vdd32 = vmpauhb($Vuu32,$Rt32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vmpybus : HInst< (outs VecDblRegs:$Vdd32), (ins VectorRegs:$Vu32, IntRegs:$Rt32), @@ -35921,6 +38507,31 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vmpyewuh_64 : HInst< +(outs VecDblRegs:$Vdd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vdd32 = vmpye($Vu32.w,$Vv32.uh)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vmpyewuh_64_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vdd32 = vmpye($Vu32.w,$Vv32.uh)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_15290236, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vmpyewuh_alt : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -37049,6 +39660,110 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vmpyiwub : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, IntRegs:$Rt32), +"$Vd32.w = vmpyi($Vu32.w,$Rt32.ub)", +CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vmpyiwub_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, IntRegs:$Rt32), +"$Vd32.w = vmpyi($Vu32.w,$Rt32.ub)", +CVI_VX_LONG, TypeCVI_VX>, Enc_16214129, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vmpyiwub_acc : HInst< +(outs VectorRegs:$Vx32), +(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32), +"$Vx32.w += vmpyi($Vu32.w,$Rt32.ub)", +CVI_VX_LONG, TypeCVI_VX>, Enc_10058269, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vmpyiwub_acc_128B : HInst< +(outs VectorRegs128B:$Vx32), +(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32), +"$Vx32.w += vmpyi($Vu32.w,$Rt32.ub)", +CVI_VX_LONG, TypeCVI_VX>, Enc_10058269, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b1; +let Inst{31-21} = 
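The _acc forms in this stretch (vmpauhb, vmpyiwub, vmpyowh_64) read and rewrite their destination, which is why each def lists both $Vxx32in and $Vxx32 tied together by the Constraints string and sets isAccumulator. A scalar sketch of that read-modify-write shape follows, with the lane widths chosen for illustration rather than taken from the ISA manual.

#include <cstddef>
#include <cstdint>
#include <vector>

// Illustrative only: an accumulating multiply where the destination vector is
// both an input and an output, mirroring the "$Vxx32 = $Vxx32in" tied-operand
// constraint and the isAccumulator flag on the _acc instructions above. The
// uh-by-b lane pairing is an assumption for the sketch, not the vmpa spec.
void mulAccumulate(std::vector<int32_t> &Acc,        // plays the Vxx32in/Vxx32 role
                   const std::vector<uint16_t> &U,   // .uh lanes
                   const std::vector<int8_t> &B) {   // .b lanes
  for (std::size_t I = 0; I != Acc.size(); ++I)
    Acc[I] += static_cast<int32_t>(U[I]) * B[I];     // the "+=" in the asm string
}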
0b00011001100; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vmpyiwub_acc_alt : HInst< +(outs VectorRegs:$Vx32), +(ins VectorRegs:$Vx32in, VectorRegs:$Vu32, IntRegs:$Rt32), +"$Vx32 += vmpyiwub($Vu32,$Rt32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vmpyiwub_acc_alt_128B : HInst< +(outs VectorRegs128B:$Vx32), +(ins VectorRegs128B:$Vx32in, VectorRegs128B:$Vu32, IntRegs:$Rt32), +"$Vx32 += vmpyiwub($Vu32,$Rt32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vx32 = $Vx32in"; +} +def V6_vmpyiwub_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, IntRegs:$Rt32), +"$Vd32 = vmpyiwub($Vu32,$Rt32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vmpyiwub_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, IntRegs:$Rt32), +"$Vd32 = vmpyiwub($Vu32,$Rt32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vmpyowh : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -37074,6 +39789,35 @@ let opNewValue = 0; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vmpyowh_64_acc : HInst< +(outs VecDblRegs:$Vxx32), +(ins VecDblRegs:$Vxx32in, VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vxx32 += vmpyo($Vu32.w,$Vv32.h)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100001; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Vxx32 = $Vxx32in"; +} +def V6_vmpyowh_64_acc_128B : HInst< +(outs VecDblRegs128B:$Vxx32), +(ins VecDblRegs128B:$Vxx32in, VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vxx32 += vmpyo($Vu32.w,$Vv32.h)", +CVI_VX_DV_LONG, TypeCVI_VX_DV>, Enc_5972412, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100001; +let hasNewValue = 1; +let opNewValue = 0; +let isAccumulator = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Vxx32 = $Vxx32in"; +} def V6_vmpyowh_alt : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -39348,6 +42092,102 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vrounduhub : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.ub = vround($Vu32.uh,$Vv32.uh):sat", +CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111111; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vrounduhub_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.ub = 
vround($Vu32.uh,$Vv32.uh):sat", +CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111111; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vrounduhub_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vrounduhub($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vrounduhub_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vrounduhub($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vrounduwuh : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.uh = vround($Vu32.uw,$Vv32.uw):sat", +CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111111; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vrounduwuh_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.uh = vround($Vu32.uw,$Vv32.uw):sat", +CVI_VS, TypeCVI_VS>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111111; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vrounduwuh_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vrounduwuh($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vrounduwuh_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vrounduwuh($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vroundwh : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -39596,6 +42436,54 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vsatuwuh : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.uh = vsat($Vu32.uw,$Vv32.uw)", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsatuwuh_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.uh = vsat($Vu32.uw,$Vv32.uw)", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b110; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vsatuwuh_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vsatuwuh($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, 
Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsatuwuh_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vsatuwuh($Vu32,$Vv32)", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vsatwh : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -40384,6 +43272,133 @@ let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; let Constraints = "$Vx32 = $Vx32in"; } +def V6_vsubbsat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.b = vsub($Vu32.b,$Vv32.b):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubbsat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.b = vsub($Vu32.b,$Vv32.b):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b010; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111001; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vsubbsat_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vsubb($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubbsat_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vsubb($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vsubbsat_dv : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32), +"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b):sat", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubbsat_dv_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32), +"$Vdd32.b = vsub($Vuu32.b,$Vvv32.b):sat", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b001; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vsubbsat_dv_alt : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32), +"$Vdd32 = vsubb($Vuu32,$Vvv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubbsat_dv_alt_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32), +"$Vdd32 = vsubb($Vuu32,$Vvv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; 
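The :sat byte subtracts above (vsubbsat and its _dv double-vector pair) clamp each lane instead of wrapping. A per-lane model of just that saturation step, assuming .b lanes behave as signed 8-bit values:

#include <algorithm>
#include <cstdint>

// Compute in a wider type, then clamp to the signed 8-bit range. This models
// only the ":sat" behaviour of a single lane; lane count and packing are not
// shown, and the signed-byte reading of ".b" is an assumption of the sketch.
int8_t subSatB(int8_t A, int8_t B) {
  int16_t Wide = static_cast<int16_t>(A) - static_cast<int16_t>(B);
  Wide = std::clamp<int16_t>(Wide, INT8_MIN, INT8_MAX);
  return static_cast<int8_t>(Wide);
}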
+let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vsubcarry : HInst< +(outs VectorRegs:$Vd32, VecPredRegs:$Qx4), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32, VecPredRegs:$Qx4in), +"$Vd32.w = vsub($Vu32.w,$Vv32.w,$Qx4):carry", +CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> { +let Inst{7-7} = 0b1; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100101; +let hasNewValue = 1; +let opNewValue = 0; +let hasNewValue2 = 1; +let opNewValue2 = 1; +let DecoderNamespace = "EXT_mmvec"; +let Constraints = "$Qx4 = $Qx4in"; +} +def V6_vsubcarry_128B : HInst< +(outs VectorRegs128B:$Vd32, VecPredRegs128B:$Qx4), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32, VecPredRegs128B:$Qx4in), +"$Vd32.w = vsub($Vu32.w,$Vv32.w,$Qx4):carry", +CVI_VA, TypeCVI_VA>, Enc_13691337, Requires<[HasV62T,UseHVX]> { +let Inst{7-7} = 0b1; +let Inst{13-13} = 0b1; +let Inst{31-21} = 0b00011100101; +let hasNewValue = 1; +let opNewValue = 0; +let hasNewValue2 = 1; +let opNewValue2 = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +let Constraints = "$Qx4 = $Qx4in"; +} def V6_vsubh : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -40876,6 +43891,31 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vsubububb_sat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.ub = vsub($Vu32.ub,$Vv32.b):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubububb_sat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.ub = vsub($Vu32.ub,$Vv32.b):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b101; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vsubuhsat : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -41020,6 +44060,102 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vsubuwsat : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32.uw = vsub($Vu32.uw,$Vv32.uw):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubuwsat_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32.uw = vsub($Vu32.uw,$Vv32.uw):sat", +CVI_VA, TypeCVI_VA>, Enc_6223403, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b100; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011111110; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vsubuwsat_alt : HInst< +(outs VectorRegs:$Vd32), +(ins VectorRegs:$Vu32, VectorRegs:$Vv32), +"$Vd32 = vsubuw($Vu32,$Vv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubuwsat_alt_128B : HInst< +(outs VectorRegs128B:$Vd32), +(ins VectorRegs128B:$Vu32, VectorRegs128B:$Vv32), +"$Vd32 = vsubuw($Vu32,$Vv32):sat", +PSEUDO, 
TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vsubuwsat_dv : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32), +"$Vdd32.uw = vsub($Vuu32.uw,$Vvv32.uw):sat", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubuwsat_dv_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32), +"$Vdd32.uw = vsub($Vuu32.uw,$Vvv32.uw):sat", +CVI_VA_DV, TypeCVI_VA_DV>, Enc_13211717, Requires<[HasV62T,UseHVX]> { +let Inst{7-5} = 0b011; +let Inst{13-13} = 0b0; +let Inst{31-21} = 0b00011110101; +let hasNewValue = 1; +let opNewValue = 0; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vsubuwsat_dv_alt : HInst< +(outs VecDblRegs:$Vdd32), +(ins VecDblRegs:$Vuu32, VecDblRegs:$Vvv32), +"$Vdd32 = vsubuw($Vuu32,$Vvv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vsubuwsat_dv_alt_128B : HInst< +(outs VecDblRegs128B:$Vdd32), +(ins VecDblRegs128B:$Vuu32, VecDblRegs128B:$Vvv32), +"$Vdd32 = vsubuw($Vuu32,$Vvv32):sat", +PSEUDO, TypeMAPPING>, Requires<[HasV62T,UseHVX]> { +let hasNewValue = 1; +let opNewValue = 0; +let isPseudo = 1; +let isCodeGenOnly = 1; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vsubw : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), @@ -41988,6 +45124,170 @@ let isCodeGenOnly = 1; let DecoderNamespace = "EXT_mmvec"; let isCodeGenOnly = 1; } +def V6_vwhist128 : HInst< +(outs), +(ins), +"vwhist128", +CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10010010000000; +let Inst{31-16} = 0b0001111000000000; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vwhist128_128B : HInst< +(outs), +(ins), +"vwhist128", +CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10010010000000; +let Inst{31-16} = 0b0001111000000000; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vwhist128m : HInst< +(outs), +(ins u1_0Imm:$Ii), +"vwhist128(#$Ii)", +CVI_HIST, TypeCVI_HIST>, Enc_1291652, Requires<[HasV62T,UseHVX]> { +let Inst{7-0} = 0b10000000; +let Inst{13-9} = 0b10011; +let Inst{31-16} = 0b0001111000000000; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vwhist128m_128B : HInst< +(outs), +(ins u1_0Imm:$Ii), +"vwhist128(#$Ii)", +CVI_HIST, TypeCVI_HIST>, Enc_1291652, Requires<[HasV62T,UseHVX]> { +let Inst{7-0} = 0b10000000; +let Inst{13-9} = 0b10011; +let Inst{31-16} = 0b0001111000000000; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vwhist128q : HInst< +(outs), +(ins VecPredRegs:$Qv4), +"vwhist128($Qv4)", +CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10010010000000; +let Inst{21-16} = 0b000010; +let Inst{31-24} = 0b00011110; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vwhist128q_128B : HInst< +(outs), +(ins VecPredRegs128B:$Qv4), +"vwhist128($Qv4)", +CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10010010000000; +let Inst{21-16} = 0b000010; +let Inst{31-24} = 0b00011110; +let DecoderNamespace = "EXT_mmvec"; 
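The vwhist128/vwhist256 group that starts here is the histogram family, with plain, immediate, and predicate-qualified forms. As a reference point, the scalar job they accelerate is binned counting; the HVX counter layout inside vector registers is deliberately not modeled in the sketch below.

#include <array>
#include <cstddef>
#include <cstdint>

// Scalar reference for the kind of work the vwhist* instructions speed up:
// count byte values into bins. Treat the flat 256-bin array as an
// illustrative stand-in, not the actual HVX register/bank layout.
void byteHistogram(const uint8_t *Data, std::size_t N,
                   std::array<uint32_t, 256> &Bins) {
  for (std::size_t I = 0; I != N; ++I)
    ++Bins[Data[I]];
}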
+let isCodeGenOnly = 1; +} +def V6_vwhist128qm : HInst< +(outs), +(ins VecPredRegs:$Qv4, u1_0Imm:$Ii), +"vwhist128($Qv4,#$Ii)", +CVI_HIST, TypeCVI_HIST>, Enc_7978128, Requires<[HasV62T,UseHVX]> { +let Inst{7-0} = 0b10000000; +let Inst{13-9} = 0b10011; +let Inst{21-16} = 0b000010; +let Inst{31-24} = 0b00011110; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vwhist128qm_128B : HInst< +(outs), +(ins VecPredRegs128B:$Qv4, u1_0Imm:$Ii), +"vwhist128($Qv4,#$Ii)", +CVI_HIST, TypeCVI_HIST>, Enc_7978128, Requires<[HasV62T,UseHVX]> { +let Inst{7-0} = 0b10000000; +let Inst{13-9} = 0b10011; +let Inst{21-16} = 0b000010; +let Inst{31-24} = 0b00011110; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vwhist256 : HInst< +(outs), +(ins), +"vwhist256", +CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10001010000000; +let Inst{31-16} = 0b0001111000000000; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vwhist256_128B : HInst< +(outs), +(ins), +"vwhist256", +CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10001010000000; +let Inst{31-16} = 0b0001111000000000; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vwhist256_sat : HInst< +(outs), +(ins), +"vwhist256:sat", +CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10001110000000; +let Inst{31-16} = 0b0001111000000000; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vwhist256_sat_128B : HInst< +(outs), +(ins), +"vwhist256:sat", +CVI_HIST, TypeCVI_HIST>, Enc_0, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10001110000000; +let Inst{31-16} = 0b0001111000000000; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vwhist256q : HInst< +(outs), +(ins VecPredRegs:$Qv4), +"vwhist256($Qv4)", +CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10001010000000; +let Inst{21-16} = 0b000010; +let Inst{31-24} = 0b00011110; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vwhist256q_128B : HInst< +(outs), +(ins VecPredRegs128B:$Qv4), +"vwhist256($Qv4)", +CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10001010000000; +let Inst{21-16} = 0b000010; +let Inst{31-24} = 0b00011110; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} +def V6_vwhist256q_sat : HInst< +(outs), +(ins VecPredRegs:$Qv4), +"vwhist256($Qv4):sat", +CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10001110000000; +let Inst{21-16} = 0b000010; +let Inst{31-24} = 0b00011110; +let DecoderNamespace = "EXT_mmvec"; +} +def V6_vwhist256q_sat_128B : HInst< +(outs), +(ins VecPredRegs128B:$Qv4), +"vwhist256($Qv4):sat", +CVI_HIST, TypeCVI_HIST>, Enc_4109168, Requires<[HasV62T,UseHVX]> { +let Inst{13-0} = 0b10001110000000; +let Inst{21-16} = 0b000010; +let Inst{31-24} = 0b00011110; +let DecoderNamespace = "EXT_mmvec"; +let isCodeGenOnly = 1; +} def V6_vxor : HInst< (outs VectorRegs:$Vd32), (ins VectorRegs:$Vu32, VectorRegs:$Vv32), diff --git a/lib/Target/Hexagon/HexagonIICHVX.td b/lib/Target/Hexagon/HexagonIICHVX.td new file mode 100644 index 00000000000..4081a225832 --- /dev/null +++ b/lib/Target/Hexagon/HexagonIICHVX.td @@ -0,0 +1,102 @@ +//===--- HexagonIICHVX.td -------------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +// +// Though all these itinerary classes exist for V60 onwards, they are being +// listed here as 'HVXV62Itin' because itinerary class description prior to V62 +// doesn't include operand cycle info. In future, I plan to merge them +// together and call it 'HVXItin'. +// +class HVXV62Itin { + list HVXV62Itin_list = [ + InstrItinData], + [3, 1, 1, 1]>, + InstrItinData], + [3, 1, 1, 1]>, + InstrItinData], + [3, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLANE,CVI_SHIFT, + CVI_MPY0, CVI_MPY1]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLSHF, CVI_MPY01]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_MPY0, CVI_MPY1]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_MPY0, CVI_MPY1]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_MPY0, CVI_MPY1]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_MPY01]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLSHF]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_SHIFT]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_XLANE, CVI_SHIFT, + CVI_MPY0, CVI_MPY1]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_LD], 0>, + InstrStage<1, [CVI_XLANE, CVI_SHIFT, + CVI_MPY0, CVI_MPY1]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_LD]>],[1, 1, 1, 1, 10]>, + InstrItinData, + InstrStage<1, [CVI_LD], 0>, + InstrStage<1, [CVI_XLANE, CVI_SHIFT, + CVI_MPY0, CVI_MPY1]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SLOT1], 0>, + InstrStage<1, [CVI_LD], 0>, + InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_ST], 0>, + InstrStage<1, [CVI_XLANE, CVI_SHIFT, + CVI_MPY0, CVI_MPY1]>], + [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_ST]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [SLOT1], 0>, + InstrStage<1, [CVI_ST], 0>, + InstrStage<1, [CVI_XLANE]>], [1, 1, 1, 1]>, + InstrItinData, + InstrStage<1, [CVI_ALL]>], [1, 1, 1, 1]>]; +} diff --git a/lib/Target/Hexagon/HexagonIICScalar.td b/lib/Target/Hexagon/HexagonIICScalar.td new file mode 100644 index 00000000000..e69cfbdad68 --- /dev/null +++ b/lib/Target/Hexagon/HexagonIICScalar.td @@ -0,0 +1,164 @@ +//===--- HexagonIICScalar.td ----------------------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// + +// These itinerary class descriptions are based on the instruction timing +// classes as per V62. Curretnly, they are just extracted from +// HexagonScheduleV62.td but will soon be auto-generated by HexagonGen.py. 
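The itinerary entries in these two new files attach operand-cycle lists such as [3, 1, 1, 1] to each timing class, which is exactly the information the pre-V62 descriptions lacked. One way to read such a list, stated here as an assumption for illustration: the leading entries give the cycle at which each result is defined, the trailing entries the cycle at which each source is read, so a consumer's stall is roughly the difference.

#include <algorithm>
#include <vector>

// Toy model of an operand-cycle list such as [3, 1, 1, 1]: defs first, then
// uses. The def-minus-use rule below is an assumed simplification for
// illustration, not the precise formula used by LLVM's itinerary machinery.
struct ItinEntry {
  std::vector<unsigned> OperandCycles; // def cycles first, then use cycles
  unsigned NumDefs;
};

int producerConsumerLatency(const ItinEntry &Producer, unsigned DefIdx,
                            const ItinEntry &Consumer, unsigned UseIdx) {
  unsigned DefCycle = Producer.OperandCycles[DefIdx];
  unsigned UseCycle = Consumer.OperandCycles[Consumer.NumDefs + UseIdx];
  return std::max(0, static_cast<int>(DefCycle) - static_cast<int>(UseCycle));
}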
+ +class ScalarItin { + list ScalarItin_list = [ + InstrItinData], [1, 1, 1]>, + InstrItinData], [2, 1, 1]>, + InstrItinData], [1, 1, 1]>, + InstrItinData], [2, 1, 1]>, + InstrItinData], [2, 1, 1]>, + InstrItinData], [1, 1, 1]>, + + // ALU64 + InstrItinData], + [1, 1, 1]>, + InstrItinData], + [2, 1, 1]>, + InstrItinData], + [2, 1, 1]>, + InstrItinData], + [3, 1, 1]>, + + // CR -> System + InstrItinData], [2, 1, 1]>, + InstrItinData], [2, 1, 1]>, + InstrItinData], [3, 1, 1]>, + + // Jump (conditional/unconditional/return etc) + InstrItinData], + [2, 1, 1, 1]>, + InstrItinData], + [3, 1, 1, 1]>, + InstrItinData], + [1, 1, 1, 1]>, + InstrItinData], + [2, 1, 1, 1]>, + InstrItinData], + [2, 1, 1, 1]>, + InstrItinData], [2, 1, 1, 1]>, + + // JR + InstrItinData], [2, 1, 1]>, + InstrItinData], [3, 1, 1]>, + + // Extender + InstrItinData], [2, 1, 1, 1]>, + + // Load + InstrItinData], + [3, 1]>, + InstrItinData], + [3, 1]>, + InstrItinData], [4, 1]>, + InstrItinData], [3, 1]>, + + // M + InstrItinData], + [1, 1, 1]>, + InstrItinData], + [2, 1, 1]>, + InstrItinData], + [2, 1, 1]>, + InstrItinData], + [3, 1, 1]>, + InstrItinData], + [3, 1, 1]>, + InstrItinData], + [3, 1, 1, 1]>, + InstrItinData], + [4, 1, 1]>, + InstrItinData], + [4, 1, 1]>, + InstrItinData], + [3, 1, 1]>, + + // Store + InstrItinData], + [1, 1, 1]>, + InstrItinData], + [1, 1, 1]>, + InstrItinData], [3, 1, 1]>, + InstrItinData], [3, 1, 1]>, + InstrItinData], [1, 1, 1]>, + InstrItinData], [1, 1, 1]>, + + // S + InstrItinData], + [1, 1, 1]>, + InstrItinData], + [2, 1, 1]>, + InstrItinData], + [2, 1, 1]>, + // The S_2op_tc_3x_SLOT23 slots are 4 cycles on v60. + InstrItinData], + [4, 1, 1]>, + InstrItinData], + [1, 1, 1]>, + InstrItinData], + [2, 1, 1]>, + InstrItinData], + [2, 1, 1]>, + InstrItinData], + [3, 1, 1]>, + InstrItinData], + [3, 1, 1]>, + InstrItinData], + [3, 1, 1]>, + + // New Value Compare Jump + InstrItinData], + [3, 1, 1, 1]>, + + // Mem ops + InstrItinData], + [1, 1, 1, 1]>, + InstrItinData], + [2, 1, 1, 1]>, + InstrItinData], + [1, 1, 1, 1]>, + InstrItinData], + [1, 1, 1, 1]>, + InstrItinData], + [3, 1, 1, 1]>, + InstrItinData], + [1, 1, 1, 1]>, + + // Endloop + InstrItinData], + [2]>, + InstrItinData], + [1, 1, 1, 1]>, + + // Duplex and Compound + InstrItinData], [1, 1, 1]>, + InstrItinData], [1, 1, 1]>, + InstrItinData], [1, 1, 1]>, + // Misc + InstrItinData], + [1, 1, 1]>, + InstrItinData], + [1, 1, 1]>, + InstrItinData, + InstrStage<1, [SLOT2, SLOT3]>], [1, 1, 1]>]; +} diff --git a/lib/Target/Hexagon/HexagonMapAsm2IntrinV62.gen.td b/lib/Target/Hexagon/HexagonMapAsm2IntrinV62.gen.td new file mode 100644 index 00000000000..0b4ac14c7a4 --- /dev/null +++ b/lib/Target/Hexagon/HexagonMapAsm2IntrinV62.gen.td @@ -0,0 +1,204 @@ +//===--- HexagonMapAsm2IntrinV62.gen.td -----------------------------------===// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
+// +//===----------------------------------------------------------------------===// + +multiclass T_VR_HVX_gen_pat { + def: Pat<(IntID VectorRegs:$src1, IntRegs:$src2), + (MI VectorRegs:$src1, IntRegs:$src2)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VectorRegs128B:$src1, IntRegs:$src2), + (!cast(MI#"_128B") VectorRegs128B:$src1, IntRegs:$src2)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_VVL_HVX_gen_pat { + def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, IntRegsLow8:$src3), + (MI VectorRegs:$src1, VectorRegs:$src2, IntRegsLow8:$src3)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, IntRegsLow8:$src3), + (!cast(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, IntRegsLow8:$src3)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_VV_HVX_gen_pat { + def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2), + (MI VectorRegs:$src1, VectorRegs:$src2)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2), + (!cast(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_WW_HVX_gen_pat { + def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2), + (MI VecDblRegs:$src1, VecDblRegs:$src2)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VecDblRegs128B:$src1, VecDblRegs128B:$src2), + (!cast(MI#"_128B") VecDblRegs128B:$src1, VecDblRegs128B:$src2)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_WVV_HVX_gen_pat { + def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3), + (MI VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VecDblRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3), + (!cast(MI#"_128B") VecDblRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_WR_HVX_gen_pat { + def: Pat<(IntID VecDblRegs:$src1, IntRegs:$src2), + (MI VecDblRegs:$src1, IntRegs:$src2)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VecDblRegs128B:$src1, IntRegs:$src2), + (!cast(MI#"_128B") VecDblRegs128B:$src1, IntRegs:$src2)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_WWR_HVX_gen_pat { + def: Pat<(IntID VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3), + (MI VecDblRegs:$src1, VecDblRegs:$src2, IntRegs:$src3)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VecDblRegs128B:$src1, VecDblRegs128B:$src2, IntRegs:$src3), + (!cast(MI#"_128B") VecDblRegs128B:$src1, VecDblRegs128B:$src2, IntRegs:$src3)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_VVR_HVX_gen_pat { + def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, IntRegs:$src3), + (MI VectorRegs:$src1, VectorRegs:$src2, IntRegs:$src3)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, IntRegs:$src3), + (!cast(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, IntRegs:$src3)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_ZR_HVX_gen_pat { + def: Pat<(IntID VecPredRegs:$src1, IntRegs:$src2), + (MI VecPredRegs:$src1, IntRegs:$src2)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VecPredRegs128B:$src1, IntRegs:$src2), + (!cast(MI#"_128B") VecPredRegs128B:$src1, IntRegs:$src2)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_VZR_HVX_gen_pat { + def: Pat<(IntID VectorRegs:$src1, VecPredRegs:$src2, IntRegs:$src3), + (MI VectorRegs:$src1, VecPredRegs:$src2, IntRegs:$src3)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VectorRegs128B:$src1, VecPredRegs128B:$src2, 
IntRegs:$src3), + (!cast(MI#"_128B") VectorRegs128B:$src1, VecPredRegs128B:$src2, IntRegs:$src3)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_ZV_HVX_gen_pat { + def: Pat<(IntID VecPredRegs:$src1, VectorRegs:$src2), + (MI VecPredRegs:$src1, VectorRegs:$src2)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VecPredRegs128B:$src1, VectorRegs128B:$src2), + (!cast(MI#"_128B") VecPredRegs128B:$src1, VectorRegs128B:$src2)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_R_HVX_gen_pat { + def: Pat<(IntID IntRegs:$src1), + (MI IntRegs:$src1)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") IntRegs:$src1), + (!cast(MI#"_128B") IntRegs:$src1)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_ZZ_HVX_gen_pat { + def: Pat<(IntID VecPredRegs:$src1, VecPredRegs:$src2), + (MI VecPredRegs:$src1, VecPredRegs:$src2)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VecPredRegs128B:$src1, VecPredRegs128B:$src2), + (!cast(MI#"_128B") VecPredRegs128B:$src1, VecPredRegs128B:$src2)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_VVI_HVX_gen_pat { + def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, imm:$src3), + (MI VectorRegs:$src1, VectorRegs:$src2, imm:$src3)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, imm:$src3), + (!cast(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, imm:$src3)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_VVVI_HVX_gen_pat { + def: Pat<(IntID VectorRegs:$src1, VectorRegs:$src2, VectorRegs:$src3, imm:$src4), + (MI VectorRegs:$src1, VectorRegs:$src2, VectorRegs:$src3, imm:$src4)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3, imm:$src4), + (!cast(MI#"_128B") VectorRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3, imm:$src4)>, + Requires<[UseHVXDbl]>; +} + +multiclass T_WVVI_HVX_gen_pat { + def: Pat<(IntID VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3, imm:$src4), + (MI VecDblRegs:$src1, VectorRegs:$src2, VectorRegs:$src3, imm:$src4)>, + Requires<[UseHVXSgl]>; + def: Pat<(!cast(IntID#"_128B") VecDblRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3, imm:$src4), + (!cast(MI#"_128B") VecDblRegs128B:$src1, VectorRegs128B:$src2, VectorRegs128B:$src3, imm:$src4)>, + Requires<[UseHVXDbl]>; +} + +def : T_R_pat ; +def : T_PP_pat ; +def : T_PP_pat ; +def : T_PP_pat ; +def : T_PP_pat ; + +defm : T_VR_HVX_gen_pat ; +defm : T_VR_HVX_gen_pat ; +defm : T_VVL_HVX_gen_pat ; +defm : T_VVL_HVX_gen_pat ; +defm : T_VVL_HVX_gen_pat ; +defm : T_VVL_HVX_gen_pat ; +defm : T_VVL_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_VV_HVX_gen_pat ; +defm : T_WW_HVX_gen_pat ; +defm : T_WW_HVX_gen_pat ; +defm : T_WW_HVX_gen_pat ; +defm : T_WW_HVX_gen_pat ; +defm : T_WVV_HVX_gen_pat ; +defm : T_WVV_HVX_gen_pat ; +defm : T_WVV_HVX_gen_pat ; +defm : T_WVV_HVX_gen_pat ; +defm : T_WR_HVX_gen_pat ; +defm : T_WWR_HVX_gen_pat ; +defm : T_VVR_HVX_gen_pat ; +defm : T_ZR_HVX_gen_pat ; +defm : T_VZR_HVX_gen_pat ; +defm : T_ZV_HVX_gen_pat ; +defm : T_ZV_HVX_gen_pat ; +defm : T_R_HVX_gen_pat ; +defm : T_R_HVX_gen_pat ; +defm : T_R_HVX_gen_pat ; +defm : T_ZZ_HVX_gen_pat ; +defm : T_ZZ_HVX_gen_pat ; +defm : T_VVI_HVX_gen_pat ; 
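Each multiclass above instantiates the same intrinsic-to-instruction pattern twice: once for the 64-byte single HVX mode (UseHVXSgl) and once for the 128-byte double mode (UseHVXDbl), which is where the _128B twins come from. A minimal sketch of that pairing, with a plain per-byte add standing in for whatever each individual intrinsic actually computes:

#include <array>
#include <cstddef>
#include <cstdint>

// The same operation parameterized by vector length, echoing the Sgl/Dbl
// split in the patterns above. The wrapping add is a placeholder; saturation
// and the real lane semantics are not modeled here.
template <std::size_t VecBytes>
std::array<uint8_t, VecBytes> addBytes(const std::array<uint8_t, VecBytes> &A,
                                       const std::array<uint8_t, VecBytes> &B) {
  std::array<uint8_t, VecBytes> R{};
  for (std::size_t I = 0; I != VecBytes; ++I)
    R[I] = static_cast<uint8_t>(A[I] + B[I]);
  return R;
}

void demo() {
  std::array<uint8_t, 64> A64{}, B64{};
  std::array<uint8_t, 128> A128{}, B128{};
  auto R64 = addBytes(A64, B64);     // 64-byte mode (UseHVXSgl)
  auto R128 = addBytes(A128, B128);  // 128-byte mode (UseHVXDbl, the _128B twins)
  (void)R64; (void)R128;
}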
+defm : T_VVI_HVX_gen_pat ; +defm : T_VVVI_HVX_gen_pat ; +defm : T_WVVI_HVX_gen_pat ; diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.cpp b/lib/Target/Hexagon/HexagonRegisterInfo.cpp index a09ae25384e..42568db4a13 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.cpp +++ b/lib/Target/Hexagon/HexagonRegisterInfo.cpp @@ -125,6 +125,7 @@ HexagonRegisterInfo::getCalleeSavedRegs(const MachineFunction *MF) const { case HexagonSubtarget::V5: case HexagonSubtarget::V55: case HexagonSubtarget::V60: + case HexagonSubtarget::V62: return HasEHReturn ? CalleeSavedRegsV3EHReturn : CalleeSavedRegsV3; } @@ -139,17 +140,26 @@ BitVector HexagonRegisterInfo::getReservedRegs(const MachineFunction &MF) Reserved.set(Hexagon::R29); Reserved.set(Hexagon::R30); Reserved.set(Hexagon::R31); - Reserved.set(Hexagon::SA0); // C0 - Reserved.set(Hexagon::LC0); // C1 - Reserved.set(Hexagon::SA1); // C2 - Reserved.set(Hexagon::LC1); // C3 - Reserved.set(Hexagon::USR); // C8 - Reserved.set(Hexagon::PC); // C9 - Reserved.set(Hexagon::UGP); // C10 - Reserved.set(Hexagon::GP); // C11 - Reserved.set(Hexagon::CS0); // C12 - Reserved.set(Hexagon::CS1); // C13 - + // Control registers. + Reserved.set(Hexagon::SA0); // C0 + Reserved.set(Hexagon::LC0); // C1 + Reserved.set(Hexagon::SA1); // C2 + Reserved.set(Hexagon::LC1); // C3 + Reserved.set(Hexagon::P3_0); // C4 + Reserved.set(Hexagon::USR); // C8 + Reserved.set(Hexagon::PC); // C9 + Reserved.set(Hexagon::UGP); // C10 + Reserved.set(Hexagon::GP); // C11 + Reserved.set(Hexagon::CS0); // C12 + Reserved.set(Hexagon::CS1); // C13 + Reserved.set(Hexagon::UPCL); // C14 + Reserved.set(Hexagon::UPCH); // C15 + Reserved.set(Hexagon::FRAMELIMIT); // C16 + Reserved.set(Hexagon::FRAMEKEY); // C17 + Reserved.set(Hexagon::PKTCOUNTLO); // C18 + Reserved.set(Hexagon::PKTCOUNTHI); // C19 + Reserved.set(Hexagon::UTIMERLO); // C30 + Reserved.set(Hexagon::UTIMERHI); // C31 // Out of the control registers, only C8 is explicitly defined in // HexagonRegisterInfo.td. If others are defined, make sure to add // them here as well. diff --git a/lib/Target/Hexagon/HexagonRegisterInfo.td b/lib/Target/Hexagon/HexagonRegisterInfo.td index 3ab0fb9c7fd..5f813aafe58 100644 --- a/lib/Target/Hexagon/HexagonRegisterInfo.td +++ b/lib/Target/Hexagon/HexagonRegisterInfo.td @@ -140,43 +140,54 @@ let Namespace = "Hexagon" in { } // Control registers. - def SA0 : Rc<0, "sa0", ["c0"]>, DwarfRegNum<[67]>; - def LC0 : Rc<1, "lc0", ["c1"]>, DwarfRegNum<[68]>; - def SA1 : Rc<2, "sa1", ["c2"]>, DwarfRegNum<[69]>; - def LC1 : Rc<3, "lc1", ["c3"]>, DwarfRegNum<[70]>; - def P3_0 : Rc<4, "p3:0", ["c4"], [P0, P1, P2, P3]>, - DwarfRegNum<[71]>; + def SA0: Rc<0, "sa0", ["c0"]>, DwarfRegNum<[67]>; + def LC0: Rc<1, "lc0", ["c1"]>, DwarfRegNum<[68]>; + def SA1: Rc<2, "sa1", ["c2"]>, DwarfRegNum<[69]>; + def LC1: Rc<3, "lc1", ["c3"]>, DwarfRegNum<[70]>; + def P3_0: Rc<4, "p3:0", ["c4"], [P0, P1, P2, P3]>, + DwarfRegNum<[71]>; // When defining more Cn registers, make sure to explicitly mark them // as reserved in HexagonRegisterInfo.cpp. - def C5 : Rc<5, "c5", ["c5"]>, DwarfRegNum<[72]>; // future use - def C6 : Rc<6, "c6", [], [M0]>, DwarfRegNum<[73]>; - def C7 : Rc<7, "c7", [], [M1]>, DwarfRegNum<[74]>; + def C5: Rc<5, "c5", ["c5"]>, DwarfRegNum<[72]>; + def C6: Rc<6, "c6", [], [M0]>, DwarfRegNum<[73]>; + def C7: Rc<7, "c7", [], [M1]>, DwarfRegNum<[74]>; // Define C8 separately and make it aliased with USR. // The problem is that USR has subregisters (e.g. overflow). 
If USR was // specified as a subregister of C9_8, it would imply that subreg_overflow // and isub_lo can be composed, which leads to all kinds of issues // with lane masks. - def C8 : Rc<8, "c8", [], [USR]>, DwarfRegNum<[75]>; - def PC : Rc<9, "pc">, DwarfRegNum<[76]>; - def UGP : Rc<10, "ugp", ["c10"]>, DwarfRegNum<[77]>; - def GP : Rc<11, "gp", ["c11"]>, DwarfRegNum<[78]>; - def CS0 : Rc<12, "cs0", ["c12"]>, DwarfRegNum<[79]>; - def CS1 : Rc<13, "cs1", ["c13"]>, DwarfRegNum<[80]>; - def UPCL : Rc<14, "upcyclelo", ["c14"]>, DwarfRegNum<[81]>; - def UPCH : Rc<15, "upcyclehi", ["c15"]>, DwarfRegNum<[82]>; + def C8: Rc<8, "c8", [], [USR]>, DwarfRegNum<[75]>; + def PC: Rc<9, "pc">, DwarfRegNum<[76]>; + def UGP: Rc<10, "ugp", ["c10"]>, DwarfRegNum<[77]>; + def GP: Rc<11, "gp", ["c11"]>, DwarfRegNum<[78]>; + def CS0: Rc<12, "cs0", ["c12"]>, DwarfRegNum<[79]>; + def CS1: Rc<13, "cs1", ["c13"]>, DwarfRegNum<[80]>; + def UPCL: Rc<14, "upcyclelo", ["c14"]>, DwarfRegNum<[81]>; + def UPCH: Rc<15, "upcyclehi", ["c15"]>, DwarfRegNum<[82]>; + def FRAMELIMIT: Rc<16, "framelimit", ["c16"]>, DwarfRegNum<[83]>; + def FRAMEKEY: Rc<17, "framekey", ["c17"]>, DwarfRegNum<[84]>; + def PKTCOUNTLO: Rc<18, "pktcountlo", ["c18"]>, DwarfRegNum<[85]>; + def PKTCOUNTHI: Rc<19, "pktcounthi", ["c19"]>, DwarfRegNum<[86]>; + def UTIMERLO: Rc<30, "utimerlo", ["c30"]>, DwarfRegNum<[97]>; + def UTIMERHI: Rc<31, "utimerhi", ["c31"]>, DwarfRegNum<[98]>; } // Control registers pairs. let SubRegIndices = [isub_lo, isub_hi], CoveredBySubRegs = 1 in { - def C1_0 : Rcc<0, "c1:0", [SA0, LC0], ["lc0:sa0"]>, DwarfRegNum<[67]>; - def C3_2 : Rcc<2, "c3:2", [SA1, LC1], ["lc1:sa1"]>, DwarfRegNum<[69]>; - def C5_4 : Rcc<4, "c5:4", [P3_0, C5]>, DwarfRegNum<[71]>; - def C7_6 : Rcc<6, "c7:6", [C6, C7], ["m1:0"]>, DwarfRegNum<[72]>; + def C1_0: Rcc<0, "c1:0", [SA0, LC0], ["lc0:sa0"]>, DwarfRegNum<[67]>; + def C3_2: Rcc<2, "c3:2", [SA1, LC1], ["lc1:sa1"]>, DwarfRegNum<[69]>; + def C5_4: Rcc<4, "c5:4", [P3_0, C5]>, DwarfRegNum<[71]>; + def C7_6: Rcc<6, "c7:6", [C6, C7], ["m1:0"]>, DwarfRegNum<[72]>; // Use C8 instead of USR as a subregister of C9_8. 
- def C9_8 : Rcc<8, "c9:8", [C8, PC]>, DwarfRegNum<[74]>; - def C11_10 : Rcc<10, "c11:10", [UGP, GP]>, DwarfRegNum<[76]>; - def CS : Rcc<12, "c13:12", [CS0, CS1], ["cs1:0"]>, DwarfRegNum<[78]>; - def UPC : Rcc<14, "c15:14", [UPCL, UPCH]>, DwarfRegNum<[80]>; + def C9_8: Rcc<8, "c9:8", [C8, PC]>, DwarfRegNum<[74]>; + def C11_10: Rcc<10, "c11:10", [UGP, GP]>, DwarfRegNum<[76]>; + def CS: Rcc<12, "c13:12", [CS0, CS1], ["cs1:0"]>, DwarfRegNum<[78]>; + def UPC: Rcc<14, "c15:14", [UPCL, UPCH]>, DwarfRegNum<[80]>; + def C17_16: Rcc<16, "c17:16", [FRAMELIMIT, FRAMEKEY]>, DwarfRegNum<[83]>; + def PKTCOUNT: Rcc<18, "c19:18", [PKTCOUNTLO, PKTCOUNTHI], ["pktcount"]>, + DwarfRegNum<[85]>; + def UTIMER: Rcc<30, "c31:30", [UTIMERLO, UTIMERHI], ["utimer"]>, + DwarfRegNum<[97]>; } foreach i = 0-31 in { @@ -269,17 +280,26 @@ def ModRegs : RegisterClass<"Hexagon", [i32], 32, (add M0, M1)>; let Size = 32, isAllocatable = 0 in def CtrRegs : RegisterClass<"Hexagon", [i32], 32, - (add LC0, SA0, LC1, SA1, - P3_0, C5, - M0, M1, C6, C7, C8, CS0, CS1, UPCL, UPCH, - USR, UGP, GP, PC)>; + (add LC0, SA0, LC1, SA1, P3_0, C5, C6, C7, + C8, PC, UGP, GP, CS0, CS1, UPCL, UPCH, + FRAMELIMIT, FRAMEKEY, PKTCOUNTLO, PKTCOUNTHI, UTIMERLO, UTIMERHI, + M0, M1, USR)>; let isAllocatable = 0 in def UsrBits : RegisterClass<"Hexagon", [i1], 0, (add USR_OVF)>; let Size = 64, isAllocatable = 0 in def CtrRegs64 : RegisterClass<"Hexagon", [i64], 64, - (add C1_0, C3_2, C7_6, C9_8, C11_10, CS, UPC)>; + (add C1_0, C3_2, C5_4, C7_6, C9_8, C11_10, CS, UPC, C17_16, + PKTCOUNT, UTIMER)>; + +// These registers are new for v62 and onward. +// The function RegisterMatchesArch() uses this list for validation. +let isAllocatable = 0 in +def V62Regs : RegisterClass<"Hexagon", [i32], 32, + (add FRAMELIMIT, FRAMEKEY, C17_16, + PKTCOUNTLO, PKTCOUNTHI, PKTCOUNT, + UTIMERLO, UTIMERHI, UTIMER)>; def VolatileV3 { list Regs = [D0, D1, D2, D3, D4, D5, D6, D7, diff --git a/lib/Target/Hexagon/HexagonSchedule.td b/lib/Target/Hexagon/HexagonSchedule.td index bce090fb9cf..9b5fbea04d1 100644 --- a/lib/Target/Hexagon/HexagonSchedule.td +++ b/lib/Target/Hexagon/HexagonSchedule.td @@ -21,3 +21,12 @@ include "HexagonScheduleV55.td" //===----------------------------------------------------------------------===// include "HexagonScheduleV60.td" +include "HexagonIICScalar.td" +include "HexagonIICHVX.td" + +//===----------------------------------------------------------------------===// +// V62 Machine Info + +//===----------------------------------------------------------------------===// + +include "HexagonScheduleV62.td" + diff --git a/lib/Target/Hexagon/HexagonScheduleV62.td b/lib/Target/Hexagon/HexagonScheduleV62.td new file mode 100644 index 00000000000..0758788a600 --- /dev/null +++ b/lib/Target/Hexagon/HexagonScheduleV62.td @@ -0,0 +1,129 @@ +//=-HexagonScheduleV62.td - HexagonV62 Scheduling Definitions *- tablegen -*-=// +// +// The LLVM Compiler Infrastructure +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. 
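The register-file changes above define the V62 control registers (framelimit/framekey in c16/c17, pktcount in c18/c19, utimer in c30/c31), mark them reserved in getReservedRegs, and group them in V62Regs so RegisterMatchesArch() can reject them on older cores. A compact sketch of that reservation bookkeeping, keyed by the Cn numbers from the comments in the diff; everything else is illustrative.

#include <bitset>

// Reserved-register bookkeeping as a bit per control-register number. The
// Cn indices follow the comments in the diff; the helper itself is only a
// stand-in for the BitVector logic in HexagonRegisterInfo.cpp.
std::bitset<32> reservedControlRegs() {
  std::bitset<32> Reserved;
  // Pre-existing reservations: sa0/lc0 (c0/c1), sa1/lc1 (c2/c3), p3:0 (c4),
  // usr (c8), pc (c9), ugp (c10), gp (c11), cs0/cs1 (c12/c13).
  for (unsigned C = 0; C <= 4; ++C) Reserved.set(C);
  for (unsigned C = 8; C <= 13; ++C) Reserved.set(C);
  // Added here: upcycle (c14/c15), framelimit/framekey (c16/c17),
  // pktcount (c18/c19), utimer (c30/c31).
  for (unsigned C = 14; C <= 19; ++C) Reserved.set(C);
  Reserved.set(30);
  Reserved.set(31);
  return Reserved;
}

bool isReservedControlReg(unsigned C) { return reservedControlRegs().test(C); }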
+// +//===----------------------------------------------------------------------===// + +// V62 follows the same schedule as V60 with following exceptions: +// Following instructions are permissible on any slot on V62: +// V4_J4_cmpeq_fp0_jump_nt +// V4_J4_cmpeq_fp0_jump_t +// V4_J4_cmpeq_fp1_jump_nt +// V4_J4_cmpeq_fp1_jump_t +// V4_J4_cmpeq_tp0_jump_nt +// V4_J4_cmpeq_tp0_jump_t +// V4_J4_cmpeq_tp1_jump_nt +// V4_J4_cmpeq_tp1_jump_t +// V4_J4_cmpeqi_fp0_jump_nt +// V4_J4_cmpeqi_fp0_jump_t +// V4_J4_cmpeqi_fp1_jump_nt +// V4_J4_cmpeqi_fp1_jump_t +// V4_J4_cmpeqi_tp0_jump_nt +// V4_J4_cmpeqi_tp0_jump_t +// V4_J4_cmpeqi_tp1_jump_nt +// V4_J4_cmpeqi_tp1_jump_t +// V4_J4_cmpeqn1_fp0_jump_nt +// V4_J4_cmpeqn1_fp0_jump_t +// V4_J4_cmpeqn1_fp1_jump_nt +// V4_J4_cmpeqn1_fp1_jump_t +// V4_J4_cmpeqn1_tp0_jump_nt +// V4_J4_cmpeqn1_tp0_jump_t +// V4_J4_cmpeqn1_tp1_jump_nt +// V4_J4_cmpeqn1_tp1_jump_t +// V4_J4_cmpgt_fp0_jump_nt +// V4_J4_cmpgt_fp0_jump_t +// V4_J4_cmpgt_fp1_jump_nt +// V4_J4_cmpgt_fp1_jump_t +// V4_J4_cmpgt_tp0_jump_nt +// V4_J4_cmpgt_tp0_jump_t +// V4_J4_cmpgt_tp1_jump_nt +// V4_J4_cmpgt_tp1_jump_t +// V4_J4_cmpgti_fp0_jump_nt +// V4_J4_cmpgti_fp0_jump_t +// V4_J4_cmpgti_fp1_jump_nt +// V4_J4_cmpgti_fp1_jump_t +// V4_J4_cmpgti_tp0_jump_nt +// V4_J4_cmpgti_tp0_jump_t +// V4_J4_cmpgti_tp1_jump_nt +// V4_J4_cmpgti_tp1_jump_t +// V4_J4_cmpgtn1_fp0_jump_nt +// V4_J4_cmpgtn1_fp0_jump_t +// V4_J4_cmpgtn1_fp1_jump_nt +// V4_J4_cmpgtn1_fp1_jump_t +// V4_J4_cmpgtn1_tp0_jump_nt +// V4_J4_cmpgtn1_tp0_jump_t +// V4_J4_cmpgtn1_tp1_jump_nt +// V4_J4_cmpgtn1_tp1_jump_t +// V4_J4_cmpgtu_fp0_jump_nt +// V4_J4_cmpgtu_fp0_jump_t +// V4_J4_cmpgtu_fp1_jump_nt +// V4_J4_cmpgtu_fp1_jump_t +// V4_J4_cmpgtu_tp0_jump_nt +// V4_J4_cmpgtu_tp0_jump_t +// V4_J4_cmpgtu_tp1_jump_nt +// V4_J4_cmpgtu_tp1_jump_t +// V4_J4_cmpgtui_fp0_jump_nt +// V4_J4_cmpgtui_fp0_jump_t +// V4_J4_cmpgtui_fp1_jump_nt +// V4_J4_cmpgtui_fp1_jump_t +// V4_J4_cmpgtui_tp0_jump_nt +// V4_J4_cmpgtui_tp0_jump_t +// V4_J4_cmpgtui_tp1_jump_nt +// V4_J4_cmpgtui_tp1_jump_t +// V4_J4_tstbit0_fp0_jump_nt +// V4_J4_tstbit0_fp0_jump_t +// V4_J4_tstbit0_fp1_jump_nt +// V4_J4_tstbit0_fp1_jump_t +// V4_J4_tstbit0_tp0_jump_nt +// V4_J4_tstbit0_tp0_jump_t +// V4_J4_tstbit0_tp1_jump_nt +// V4_J4_tstbit0_tp1_jump_t +// JMP +// JMPEXT +// JMPEXT_f +// JMPEXT_fnew_nt +// JMPEXT_fnew_t +// JMPEXT_t +// JMPEXT_tnew_nt +// JMPEXT_tnew_t +// JMPNOTEXT +// JMPNOTEXT_f +// JMPNOTEXT_fnew_nt +// JMPNOTEXT_fnew_t +// JMPNOTEXT_t +// JMPNOTEXT_tnew_nt +// JMPNOTEXT_tnew_t +// JMP_f +// JMP_fnew_nt +// JMP_fnew_t +// JMP_t +// JMP_tnew_nt +// JMP_tnew_t +// RESTORE_DEALLOC_RET_JMP_V4 +// RESTORE_DEALLOC_RET_JMP_V4_EXT + +def HexagonV62ItinList : ScalarItin, HVXV62Itin { + list ItinList = + !listconcat(ScalarItin_list, HVXV62Itin_list); +} + +def HexagonItinerariesV62 : + ProcessorItineraries<[SLOT0, SLOT1, SLOT2, SLOT3, SLOT_ENDLOOP, + CVI_ST, CVI_XLANE, CVI_SHIFT, CVI_MPY0, CVI_MPY1, + CVI_LD, CVI_XLSHF, CVI_MPY01, CVI_ALL], + [], HexagonV62ItinList.ItinList>; + +def HexagonModelV62 : SchedMachineModel { + // Max issue per cycle == bundle width. 
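HexagonModelV62 pins the machine model to the four-slot packet structure that the SLOT0..SLOT3 itinerary units describe: the comment's "max issue per cycle == bundle width" is what IssueWidth = 4 encodes just below. A deliberately naive packer that enforces only that width limit (real packetization also checks slot legality and CVI resource conflicts):

#include <algorithm>
#include <cstddef>
#include <vector>

// Split an instruction stream (here just opaque IDs) into packets of at most
// IssueWidth entries. Only the width limit from the scheduling model is
// enforced; slot assignment and resource checks are intentionally left out.
std::vector<std::vector<unsigned>> packByWidth(const std::vector<unsigned> &Insts,
                                               std::size_t IssueWidth = 4) {
  std::vector<std::vector<unsigned>> Packets;
  for (std::size_t I = 0; I < Insts.size(); I += IssueWidth)
    Packets.emplace_back(Insts.begin() + I,
                         Insts.begin() + std::min(I + IssueWidth, Insts.size()));
  return Packets;
}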
+ let IssueWidth = 4; + let Itineraries = HexagonItinerariesV62; + let LoadLatency = 1; + let CompleteModel = 0; +} + +//===----------------------------------------------------------------------===// +// Hexagon V62 Resource Definitions - +//===----------------------------------------------------------------------===// diff --git a/lib/Target/Hexagon/HexagonSubtarget.cpp b/lib/Target/Hexagon/HexagonSubtarget.cpp index 8c23a2465dd..033b93fc910 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.cpp +++ b/lib/Target/Hexagon/HexagonSubtarget.cpp @@ -88,6 +88,7 @@ HexagonSubtarget::initializeSubtargetDependencies(StringRef CPU, StringRef FS) { { "hexagonv5", V5 }, { "hexagonv55", V55 }, { "hexagonv60", V60 }, + { "hexagonv62", V62 }, }; auto foundIt = CpuTable.find(CPUString); diff --git a/lib/Target/Hexagon/HexagonSubtarget.h b/lib/Target/Hexagon/HexagonSubtarget.h index a9a11ca2f74..6a3e7f13be4 100644 --- a/lib/Target/Hexagon/HexagonSubtarget.h +++ b/lib/Target/Hexagon/HexagonSubtarget.h @@ -96,6 +96,9 @@ public: bool hasV55TOpsOnly() const { return getHexagonArchVersion() == V55; } bool hasV60TOps() const { return getHexagonArchVersion() >= V60; } bool hasV60TOpsOnly() const { return getHexagonArchVersion() == V60; } + bool hasV62TOps() const { return getHexagonArchVersion() >= V62; } + bool hasV62TOpsOnly() const { return getHexagonArchVersion() == V62; } + bool modeIEEERndNear() const { return ModeIEEERndNear; } bool useHVXOps() const { return UseHVXOps; } bool useHVXDblOps() const { return UseHVXOps && UseHVXDblOps; } diff --git a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp index 52c61eb6e5d..23bffb9bc01 100644 --- a/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp +++ b/lib/Target/Hexagon/MCTargetDesc/HexagonMCTargetDesc.cpp @@ -67,6 +67,9 @@ static cl::opt HexagonV55ArchVariant("mv55", cl::Hidden, cl::init(false), static cl::opt HexagonV60ArchVariant("mv60", cl::Hidden, cl::init(false), cl::desc("Build for Hexagon V60")); +static cl::opt HexagonV62ArchVariant("mv62", cl::Hidden, cl::init(false), + cl::desc("Build for Hexagon V62")); + static StringRef DefaultArch = "hexagonv60"; static StringRef HexagonGetArchVariant() { @@ -78,6 +81,8 @@ static StringRef HexagonGetArchVariant() { return "hexagonv55"; if (HexagonV60ArchVariant) return "hexagonv60"; + if (HexagonV62ArchVariant) + return "hexagonv62"; return ""; } @@ -247,7 +252,7 @@ static bool LLVM_ATTRIBUTE_UNUSED checkFeature(MCSubtargetInfo* STI, uint64_t F) StringRef Hexagon_MC::ParseHexagonTriple(const Triple &TT, StringRef CPU) { StringRef CPUName = Hexagon_MC::selectHexagonCPU(TT, CPU); StringRef FS = ""; - if (CPUName.equals_lower("hexagonv60")) + if (CPUName.equals_lower("hexagonv60") || CPUName.equals_lower("hexagonv62")) FS = "+hvx"; return FS; } @@ -260,6 +265,7 @@ static bool isCPUValid(std::string CPU) "hexagonv5", "hexagonv55", "hexagonv60", + "hexagonv62", }; return std::find(table.begin(), table.end(), CPU) != table.end(); @@ -270,9 +276,9 @@ MCSubtargetInfo *Hexagon_MC::createHexagonMCSubtargetInfo(const Triple &TT, StringRef FS) { StringRef ArchFS = (FS.size()) ? 
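The MC-layer changes in this hunk follow one pattern: "hexagonv62" joins the -mcpu allow-list, gains HVX by default alongside hexagonv60, and maps to a new EF_HEXAGON_MACH_V62 ELF flag. A stand-in for that name-to-flag lookup, with the numeric values taken from the elf-flags.s expectations in this patch rather than from the ELF.h enum:

#include <cstdint>
#include <map>
#include <optional>
#include <string>

// Sketch of the CPU-name handling: an allow-list with an attached ELF machine
// flag. Unknown names are rejected, which is the role isCPUValid() plays in
// the real code; the flag values mirror the CHECK lines in elf-flags.s.
std::optional<uint32_t> hexagonElfFlag(const std::string &CPU) {
  static const std::map<std::string, uint32_t> Flags = {
      {"hexagonv4", 0x3},   {"hexagonv5", 0x4},  {"hexagonv55", 0x5},
      {"hexagonv60", 0x60}, {"hexagonv62", 0x62},
  };
  auto It = Flags.find(CPU);
  if (It == Flags.end())
    return std::nullopt;
  return It->second;
}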
FS : Hexagon_MC::ParseHexagonTriple(TT, CPU);
   StringRef CPUName = Hexagon_MC::selectHexagonCPU(TT, CPU);
-  if (!isCPUValid(CPUName.str()))
-    {
-      errs() << "error: invalid CPU \"" << CPUName.str().c_str() << "\" specified\n";
+  if (!isCPUValid(CPUName.str())) {
+    errs() << "error: invalid CPU \"" << CPUName.str().c_str()
+           << "\" specified\n";
     return nullptr;
   }
@@ -290,6 +296,7 @@ unsigned Hexagon_MC::GetELFFlags(const MCSubtargetInfo &STI) {
     {"hexagonv5", ELF::EF_HEXAGON_MACH_V5},
     {"hexagonv55", ELF::EF_HEXAGON_MACH_V55},
     {"hexagonv60", ELF::EF_HEXAGON_MACH_V60},
+    {"hexagonv62", ELF::EF_HEXAGON_MACH_V62},
   };
 
   auto F = ElfFlags.find(STI.getCPU());
diff --git a/test/MC/Hexagon/elf-flags.s b/test/MC/Hexagon/elf-flags.s
index 94dce815214..0d2f007cb3d 100644
--- a/test/MC/Hexagon/elf-flags.s
+++ b/test/MC/Hexagon/elf-flags.s
@@ -2,8 +2,10 @@
 # RUN: llvm-mc -arch=hexagon -mcpu=hexagonv5 --filetype=obj %s -o - | llvm-readobj -file-headers -elf-output-style=GNU | FileCheck --check-prefix=CHECK-V5 %s
 # RUN: llvm-mc -arch=hexagon -mcpu=hexagonv55 --filetype=obj %s -o - | llvm-readobj -file-headers -elf-output-style=GNU | FileCheck --check-prefix=CHECK-V55 %s
 # RUN: llvm-mc -arch=hexagon -mcpu=hexagonv60 --filetype=obj %s -o - | llvm-readobj -file-headers -elf-output-style=GNU | FileCheck --check-prefix=CHECK-V60 %s
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 --filetype=obj %s -o - | llvm-readobj -file-headers -elf-output-style=GNU | FileCheck --check-prefix=CHECK-V62 %s
 
 # CHECK-V4: Flags: 0x3
 # CHECK-V5: Flags: 0x4
 # CHECK-V55: Flags: 0x5
 # CHECK-V60: Flags: 0x60
+# CHECK-V62: Flags: 0x62
diff --git a/test/MC/Hexagon/v62_all.s b/test/MC/Hexagon/v62_all.s
new file mode 100644
index 00000000000..6effdc0caba
--- /dev/null
+++ b/test/MC/Hexagon/v62_all.s
@@ -0,0 +1,552 @@
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 -filetype=obj %s | llvm-objdump -arch=hexagon -mcpu=hexagonv62 -d - | FileCheck %s
+
+// V6_lvsplatb
+// Vd32.b=vsplat(Rt32)
+ V0.b=vsplat(R0)
+# CHECK: 19c0c040 { v0.b = vsplat(r0) }
+
+// V6_lvsplath
+// Vd32.h=vsplat(Rt32)
+ V0.h=vsplat(R0)
+# CHECK: 19c0c020 { v0.h = vsplat(r0) }
+
+// V6_pred_scalar2v2
+// Qd4=vsetq2(Rt32)
+ Q0=vsetq2(R0)
+# CHECK: 19a0c04c { q0 = vsetq2(r0) }
+
+// V6_shuffeqh
+// Qd4.b=vshuffe(Qs4.h,Qt4.h)
+ Q0.b=vshuffe(Q0.h,Q0.h)
+# CHECK: 1e03c018 { q0.b = vshuffe(q0.h,q0.h) }
+
+// V6_shuffeqw
+// Qd4.h=vshuffe(Qs4.w,Qt4.w)
+ Q0.h=vshuffe(Q0.w,Q0.w)
+# CHECK: 1e03c01c { q0.h = vshuffe(q0.w,q0.w) }
+
+// V6_vaddbsat
+// Vd32.b=vadd(Vu32.b,Vv32.b):sat
+ V0.b=vadd(V0.b,V0.b):sat
+# CHECK: 1f00c000 { v0.b = vadd(v0.b,v0.b):sat }
+
+// V6_vaddbsat_dv
+// Vdd32.b=vadd(Vuu32.b,Vvv32.b):sat
+ V1:0.b=vadd(V1:0.b,V1:0.b):sat
+# CHECK: 1ea0c000 { v1:0.b = vadd(v1:0.b,v1:0.b):sat }
+
+// V6_vaddcarry
+// Vd32.w=vadd(Vu32.w,Vv32.w,Qx4):carry
+ V0.w=vadd(V0.w,V0.w,Q0):carry
+# CHECK: 1ca0e000 { v0.w = vadd(v0.w,v0.w,q0):carry }
+
+// V6_vaddclbh
+// $Vd.h=vadd(vclb($Vu.h),$Vv.h)
+ V0.h=vadd(vclb(V0.h),V0.h)
+# CHECK: 1f00e000 { v0.h = vadd(vclb(v0.h),v0.h) }
+
+// V6_vaddclbw
+// $Vd.w=vadd(vclb($Vu.w),$Vv.w)
+ V0.w=vadd(vclb(V0.w),V0.w)
+# CHECK: 1f00e020 { v0.w = vadd(vclb(v0.w),v0.w) }
+
+// V6_vaddhw_acc
+// Vxx32.w+=vadd(Vu32.h,Vv32.h)
+ V1:0.w+=vadd(V0.h,V0.h)
+# CHECK: 1c20e040 { v1:0.w += vadd(v0.h,v0.h) }
+
+// V6_vaddubh_acc
+// Vxx32.h+=vadd(Vu32.ub,Vv32.ub)
+ V1:0.h+=vadd(V0.ub,V0.ub)
+# CHECK: 1c40e0a0 { v1:0.h += vadd(v0.ub,v0.ub) }
+
+// V6_vaddububb_sat
+// Vd32.ub=vadd(Vu32.ub,Vv32.b):sat
+ V0.ub=vadd(V0.ub,V0.b):sat
+# CHECK: 1ea0c080 { v0.ub = vadd(v0.ub,v0.b):sat }
+
+// V6_vadduhw_acc
+// Vxx32.w+=vadd(Vu32.uh,Vv32.uh)
+ V1:0.w+=vadd(V0.uh,V0.uh)
+# CHECK: 1c40e080 { v1:0.w += vadd(v0.uh,v0.uh) }
+
+// V6_vadduwsat
+// Vd32.uw=vadd(Vu32.uw,Vv32.uw):sat
+ V0.uw=vadd(V0.uw,V0.uw):sat
+# CHECK: 1f60c020 { v0.uw = vadd(v0.uw,v0.uw):sat }
+
+// V6_vadduwsat_dv
+// Vdd32.uw=vadd(Vuu32.uw,Vvv32.uw):sat
+ V1:0.uw=vadd(V1:0.uw,V1:0.uw):sat
+# CHECK: 1ea0c040 { v1:0.uw = vadd(v1:0.uw,v1:0.uw):sat }
+
+// V6_vandnqrt
+// Vd32=vand(!Qu4,Rt32)
+ V0=vand(!Q0,R0)
+# CHECK: 19a0c4a0 { v0 = vand(!q0,r0) }
+
+// V6_vandnqrt_acc
+// Vx32|=vand(!Qu4,Rt32)
+ V0|=vand(!Q0,R0)
+# CHECK: 1960e460 { v0 |= vand(!q0,r0) }
+
+// V6_vandvnqv
+// Vd32=vand(!Qv4,Vu32)
+ V0=vand(!Q0,V0)
+# CHECK: 1e03e020 { v0 = vand(!q0,v0) }
+
+// V6_vandvqv
+// Vd32=vand(Qv4,Vu32)
+ V0=vand(Q0,V0)
+# CHECK: 1e03e000 { v0 = vand(q0,v0) }
+
+// V6_vasrhbsat
+// Vd32.b=vasr(Vu32.h,Vv32.h,Rt8):sat
+ V0.b=vasr(V0.h,V0.h,R0):sat
+# CHECK: 1800c000 { v0.b = vasr(v0.h,v0.h,r0):sat }
+
+// V6_vasruwuhrndsat
+// Vd32.uh=vasr(Vu32.uw,Vv32.uw,Rt8):rnd:sat
+ V0.uh=vasr(V0.uw,V0.uw,R0):rnd:sat
+# CHECK: 1800c020 { v0.uh = vasr(v0.uw,v0.uw,r0):rnd:sat }
+
+// V6_vasrwuhrndsat
+// Vd32.uh=vasr(Vu32.w,Vv32.w,Rt8):rnd:sat
+ V0.uh=vasr(V0.w,V0.w,R0):rnd:sat
+# CHECK: 1800c040 { v0.uh = vasr(v0.w,v0.w,r0):rnd:sat }
+
+// V6_vL32b_cur_npred_ai
+// if (!Pv4) Vd32.cur=vmem(Rt32+#s4)
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0+#04)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2880c4a0 if (!p0) v0.cur = vmem(r0+#4) }
+
+// V6_vL32b_cur_npred_pi
+// if (!Pv4) Vd32.cur=vmem(Rx32++#s3)
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0++#03)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2980c3a0 if (!p0) v0.cur = vmem(r0++#3) }
+
+// V6_vL32b_cur_npred_ppu
+// if (!Pv4) Vd32.cur=vmem(Rx32++Mu2)
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0++M0)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2b80c0a0 if (!p0) v0.cur = vmem(r0++m0) }
+
+// V6_vL32b_cur_pred_ai
+// if (Pv4) Vd32.cur=vmem(Rt32+#s4)
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0+#04)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2880c480 if (p0) v0.cur = vmem(r0+#4) }
+
+// V6_vL32b_cur_pred_pi
+// if (Pv4) Vd32.cur=vmem(Rx32++#s3)
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0++#03)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2980c380 if (p0) v0.cur = vmem(r0++#3) }
+
+// V6_vL32b_cur_pred_ppu
+// if (Pv4) Vd32.cur=vmem(Rx32++Mu2)
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0++M0)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2b80c080 if (p0) v0.cur = vmem(r0++m0) }
+
+// V6_vL32b_npred_ai
+// if (!Pv4) Vd32=vmem(Rt32+#s4)
+ if (!P0) V0=vmem(R0+#04)
+# CHECK: 2880c460 { if (!p0) v0 = vmem(r0+#4) }
+
+// V6_vL32b_npred_pi
+// if (!Pv4) Vd32=vmem(Rx32++#s3)
+ if (!P0) V0=vmem(R0++#03)
+# CHECK: 2980c360 { if (!p0) v0 = vmem(r0++#3) }
+
+// V6_vL32b_npred_ppu
+// if (!Pv4) Vd32=vmem(Rx32++Mu2)
+ if (!P0) V0=vmem(R0++M0)
+# CHECK: 2b80c060 { if (!p0) v0 = vmem(r0++m0) }
+
+// V6_vL32b_nt_cur_npred_ai
+// if (!Pv4) Vd32.cur=vmem(Rt32+#s4):nt
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0+#04):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 28c0c4a0 if (!p0) v0.cur = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_cur_npred_pi
+// if (!Pv4) Vd32.cur=vmem(Rx32++#s3):nt
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0++#03):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 29c0c3a0 if (!p0) v0.cur = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_cur_npred_ppu
+// if (!Pv4) Vd32.cur=vmem(Rx32++Mu2):nt
+ {
+ v1=v0
+ if (!P0) V0.cur=vmem(R0++M0):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2bc0c0a0 if (!p0) v0.cur = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_cur_pred_ai
+// if (Pv4) Vd32.cur=vmem(Rt32+#s4):nt
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0+#04):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 28c0c480 if (p0) v0.cur = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_cur_pred_pi
+// if (Pv4) Vd32.cur=vmem(Rx32++#s3):nt
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0++#03):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 29c0c380 if (p0) v0.cur = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_cur_pred_ppu
+// if (Pv4) Vd32.cur=vmem(Rx32++Mu2):nt
+ {
+ v1=v0
+ if (P0) V0.cur=vmem(R0++M0):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2bc0c080 if (p0) v0.cur = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_npred_ai
+// if (!Pv4) Vd32=vmem(Rt32+#s4):nt
+ if (!P0) V0=vmem(R0+#04):nt
+# CHECK: 28c0c460 { if (!p0) v0 = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_npred_pi
+// if (!Pv4) Vd32=vmem(Rx32++#s3):nt
+ if (!P0) V0=vmem(R0++#03):nt
+# CHECK: 29c0c360 { if (!p0) v0 = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_npred_ppu
+// if (!Pv4) Vd32=vmem(Rx32++Mu2):nt
+ if (!P0) V0=vmem(R0++M0):nt
+# CHECK: 2bc0c060 { if (!p0) v0 = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_pred_ai
+// if (Pv4) Vd32=vmem(Rt32+#s4):nt
+ if (P0) V0=vmem(R0+#04):nt
+# CHECK: 28c0c440 { if (p0) v0 = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_pred_pi
+// if (Pv4) Vd32=vmem(Rx32++#s3):nt
+ if (P0) V0=vmem(R0++#03):nt
+# CHECK: 29c0c340 { if (p0) v0 = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_pred_ppu
+// if (Pv4) Vd32=vmem(Rx32++Mu2):nt
+ if (P0) V0=vmem(R0++M0):nt
+# CHECK: 2bc0c040 { if (p0) v0 = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_tmp_npred_ai
+// if (!Pv4) Vd32.tmp=vmem(Rt32+#s4):nt
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0+#04):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 28c0c4e0 if (!p0) v0.tmp = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_tmp_npred_pi
+// if (!Pv4) Vd32.tmp=vmem(Rx32++#s3):nt
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0++#03):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 29c0c3e0 if (!p0) v0.tmp = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_tmp_npred_ppu
+// if (!Pv4) Vd32.tmp=vmem(Rx32++Mu2):nt
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0++M0):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2bc0c0e0 if (!p0) v0.tmp = vmem(r0++m0):nt }
+
+// V6_vL32b_nt_tmp_pred_ai
+// if (Pv4) Vd32.tmp=vmem(Rt32+#s4):nt
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0+#04):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 28c0c4c0 if (p0) v0.tmp = vmem(r0+#4):nt }
+
+// V6_vL32b_nt_tmp_pred_pi
+// if (Pv4) Vd32.tmp=vmem(Rx32++#s3):nt
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0++#03):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 29c0c3c0 if (p0) v0.tmp = vmem(r0++#3):nt }
+
+// V6_vL32b_nt_tmp_pred_ppu
+// if (Pv4) Vd32.tmp=vmem(Rx32++Mu2):nt
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0++M0):nt
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2bc0c0c0 if (p0) v0.tmp = vmem(r0++m0):nt }
+
+// V6_vL32b_pred_ai
+// if (Pv4) Vd32=vmem(Rt32+#s4)
+ if (P0) V0=vmem(R0+#04)
+# CHECK: 2880c440 { if (p0) v0 = vmem(r0+#4) }
+
+// V6_vL32b_pred_pi
+// if (Pv4) Vd32=vmem(Rx32++#s3)
+ if (P0) V0=vmem(R0++#03)
+# CHECK: 2980c340 { if (p0) v0 = vmem(r0++#3) }
+
+// V6_vL32b_pred_ppu
+// if (Pv4) Vd32=vmem(Rx32++Mu2)
+ if (P0) V0=vmem(R0++M0)
+# CHECK: 2b80c040 { if (p0) v0 = vmem(r0++m0) }
+
+// V6_vL32b_tmp_npred_ai
+// if (!Pv4) Vd32.tmp=vmem(Rt32+#s4)
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0+#04)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2880c4e0 if (!p0) v0.tmp = vmem(r0+#4) }
+
+// V6_vL32b_tmp_npred_pi
+// if (!Pv4) Vd32.tmp=vmem(Rx32++#s3)
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0++#03)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2980c3e0 if (!p0) v0.tmp = vmem(r0++#3) }
+
+// V6_vL32b_tmp_npred_ppu
+// if (!Pv4) Vd32.tmp=vmem(Rx32++Mu2)
+ {
+ v1=v0
+ if (!P0) V0.tmp=vmem(R0++M0)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2b80c0e0 if (!p0) v0.tmp = vmem(r0++m0) }
+
+// V6_vL32b_tmp_pred_ai
+// if (Pv4) Vd32.tmp=vmem(Rt32+#s4)
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0+#04)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2880c4c0 if (p0) v0.tmp = vmem(r0+#4) }
+
+// V6_vL32b_tmp_pred_pi
+// if (Pv4) Vd32.tmp=vmem(Rx32++#s3)
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0++#03)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2980c3c0 if (p0) v0.tmp = vmem(r0++#3) }
+
+// V6_vL32b_tmp_pred_ppu
+// if (Pv4) Vd32.tmp=vmem(Rx32++Mu2)
+ {
+ v1=v0
+ if (P0) V0.tmp=vmem(R0++M0)
+ }
+# CHECK: 1e0360e1 { v1 = v0
+# CHECK: 2b80c0c0 if (p0) v0.tmp = vmem(r0++m0) }
+
+// V6_vlsrb
+// Vd32.ub=vlsr(Vu32.ub,Rt32)
+ V0.ub=vlsr(V0.ub,R0)
+# CHECK: 1980c060 { v0.ub = vlsr(v0.ub,r0) }
+
+// V6_vlutvvbi
+// Vd32.b=vlut32(Vu32.b,Vv32.b,#u3)
+ V0.b=vlut32(V0.b,V0.b,#03)
+# CHECK: 1e20c060 { v0.b = vlut32(v0.b,v0.b,#3) }
+
+// V6_vlutvvb_nm
+// Vd32.b=vlut32(Vu32.b,Vv32.b,Rt8):nomatch
+ V0.b=vlut32(V0.b,V0.b,R0):nomatch
+# CHECK: 1800c060 { v0.b = vlut32(v0.b,v0.b,r0):nomatch }
+
+// V6_vlutvvb_oracci
+// Vx32.b|=vlut32(Vu32.b,Vv32.b,#u3)
+ V0.b|=vlut32(V0.b,V0.b,#03)
+# CHECK: 1cc0e060 { v0.b |= vlut32(v0.b,v0.b,#3) }
+
+// V6_vlutvwhi
+// Vdd32.h=vlut16(Vu32.b,Vv32.h,#u3)
+ V1:0.h=vlut16(V0.b,V0.h,#03)
+# CHECK: 1e60c060 { v1:0.h = vlut16(v0.b,v0.h,#3) }
+
+// V6_vlutvwh_nm
+// Vdd32.h=vlut16(Vu32.b,Vv32.h,Rt8):nomatch
+ V1:0.h=vlut16(V0.b,V0.h,R0):nomatch
+# CHECK: 1800c080 { v1:0.h = vlut16(v0.b,v0.h,r0):nomatch }
+
+// V6_vlutvwh_oracci
+// Vxx32.h|=vlut16(Vu32.b,Vv32.h,#u3)
+ V1:0.h|=vlut16(V0.b,V0.h,#03)
+# CHECK: 1ce0e060 { v1:0.h |= vlut16(v0.b,v0.h,#3) }
+
+// V6_vmaxb
+// Vd32.b=vmax(Vu32.b,Vv32.b)
+ V0.b=vmax(V0.b,V0.b)
+# CHECK: 1f20c0a0 { v0.b = vmax(v0.b,v0.b) }
+
+// V6_vminb
+// Vd32.b=vmin(Vu32.b,Vv32.b)
+ V0.b=vmin(V0.b,V0.b)
+# CHECK: 1f20c080 { v0.b = vmin(v0.b,v0.b) }
+
+// V6_vmpauhb
+// Vdd32.w=vmpa(Vuu32.uh,Rt32.b)
+ V1:0.w=vmpa(V1:0.uh,R0.b)
+# CHECK: 1980c0a0 { v1:0.w = vmpa(v1:0.uh,r0.b) }
+
+// V6_vmpauhb_acc
+// Vxx32.w+=vmpa(Vuu32.uh,Rt32.b)
+ V1:0.w+=vmpa(V1:0.uh,R0.b)
+# CHECK: 1980e040 { v1:0.w += vmpa(v1:0.uh,r0.b) }
+
+// V6_vmpyewuh_64
+// Vdd32=vmpye(Vu32.w,Vv32.uh)
+ V1:0=vmpye(V0.w,V0.uh)
+# CHECK: 1ea0c0c0 { v1:0 = vmpye(v0.w,v0.uh) }
+
+// V6_vmpyiwub
+// Vd32.w=vmpyi(Vu32.w,Rt32.ub)
+ V0.w=vmpyi(V0.w,R0.ub)
+# CHECK: 1980c0c0 { v0.w = vmpyi(v0.w,r0.ub) }
+
+// V6_vmpyiwub_acc
+// Vx32.w+=vmpyi(Vu32.w,Rt32.ub)
+ V0.w+=vmpyi(V0.w,R0.ub)
+# CHECK: 1980e020 { v0.w += vmpyi(v0.w,r0.ub) }
+
+// V6_vmpyowh_64_acc
+// Vxx32+=vmpyo(Vu32.w,Vv32.h)
+ V1:0+=vmpyo(V0.w,V0.h)
+# CHECK: 1c20e060 { v1:0 += vmpyo(v0.w,v0.h) }
+
+// V6_vrounduhub
+// Vd32.ub=vround(Vu32.uh,Vv32.uh):sat
+ V0.ub=vround(V0.uh,V0.uh):sat
+# CHECK: 1fe0c060 { v0.ub = vround(v0.uh,v0.uh):sat }
+
+// V6_vrounduwuh
+// Vd32.uh=vround(Vu32.uw,Vv32.uw):sat
+ V0.uh=vround(V0.uw,V0.uw):sat
+# CHECK: 1fe0c080 { v0.uh = vround(v0.uw,v0.uw):sat }
+
+// V6_vsatuwuh
+// Vd32.uh=vsat(Vu32.uw,Vv32.uw)
+ V0.uh=vsat(V0.uw,V0.uw)
+# CHECK: 1f20c0c0 { v0.uh = vsat(v0.uw,v0.uw) }
+
+// V6_vsubbsat
+// Vd32.b=vsub(Vu32.b,Vv32.b):sat
+ V0.b=vsub(V0.b,V0.b):sat
+# CHECK: 1f20c040 { v0.b = vsub(v0.b,v0.b):sat }
+
+// V6_vsubbsat_dv
+// Vdd32.b=vsub(Vuu32.b,Vvv32.b):sat
+ V1:0.b=vsub(V1:0.b,V1:0.b):sat
+# CHECK: 1ea0c020 { v1:0.b = vsub(v1:0.b,v1:0.b):sat }
+
+// V6_vsubcarry
+// Vd32.w=vsub(Vu32.w,Vv32.w,Qx4):carry
+ V0.w=vsub(V0.w,V0.w,Q0):carry
+# CHECK: 1ca0e080 { v0.w = vsub(v0.w,v0.w,q0):carry }
+
+// V6_vsubububb_sat
+// Vd32.ub=vsub(Vu32.ub,Vv32.b):sat
+ V0.ub=vsub(V0.ub,V0.b):sat
+# CHECK: 1ea0c0a0 { v0.ub = vsub(v0.ub,v0.b):sat }
+
+// V6_vsubuwsat
+// Vd32.uw=vsub(Vu32.uw,Vv32.uw):sat
+ V0.uw=vsub(V0.uw,V0.uw):sat
+# CHECK: 1fc0c080 { v0.uw = vsub(v0.uw,v0.uw):sat }
+
+// V6_vsubuwsat_dv
+// Vdd32.uw=vsub(Vuu32.uw,Vvv32.uw):sat
+ V1:0.uw=vsub(V1:0.uw,V1:0.uw):sat
+# CHECK: 1ea0c060 { v1:0.uw = vsub(v1:0.uw,v1:0.uw):sat }
+
+// V6_vwhist128
+// vwhist128
+ vwhist128
+# CHECK: 1e00e480 { vwhist128 }
+
+// V6_vwhist128m
+// vwhist128(#u1)
+ vwhist128(#01)
+# CHECK: 1e00e780 { vwhist128(#1) }
+
+// V6_vwhist128q
+// vwhist128(Qv4)
+ vwhist128(Q0)
+# CHECK: 1e02e480 { vwhist128(q0) }
+
+// V6_vwhist128qm
+// vwhist128(Qv4,#u1)
+ vwhist128(Q0,#01)
+# CHECK: 1e02e780 { vwhist128(q0,#1) }
+
+// V6_vwhist256
+// vwhist256
+ vwhist256
+# CHECK: 1e00e280 { vwhist256 }
+
+// V6_vwhist256q
+// vwhist256(Qv4)
+ vwhist256(Q0)
+# CHECK: 1e02e280 { vwhist256(q0) }
+
+// V6_vwhist256q_sat
+// vwhist256(Qv4):sat
+ vwhist256(Q0):sat
+# CHECK: 1e02e380 { vwhist256(q0):sat }
+
+// V6_vwhist256_sat
+// vwhist256:sat
+ vwhist256:sat
+# CHECK: 1e00e380 { vwhist256:sat }
diff --git a/test/MC/Hexagon/v62_jumps.s b/test/MC/Hexagon/v62_jumps.s
new file mode 100644
index 00000000000..0197ecdd232
--- /dev/null
+++ b/test/MC/Hexagon/v62_jumps.s
@@ -0,0 +1,13 @@
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 -filetype=obj %s | llvm-objdump -arch=hexagon -mcpu=hexagonv62 -d - | FileCheck %s
+
+# verify compound is split into single instructions if needed
+{
+ p0=cmp.eq(R1:0,R3:2)
+ if (!p0.new) jump:nt ltmp
+ r0=r1 ; jump ltmp
+}
+
+# CHECK: 5c204800 { if (!p0.new) jump:nt
+# CHECK: d2804200 p0 = cmp.eq(r1:0,r3:2)
+# CHECK: 58004000 jump
+# CHECK: 7061c000 r0 = r1 }
diff --git a/test/MC/Hexagon/v62a.s b/test/MC/Hexagon/v62a.s
new file mode 100644
index 00000000000..4cc6302f6fa
--- /dev/null
+++ b/test/MC/Hexagon/v62a.s
@@ -0,0 +1,19 @@
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 -filetype=obj -o - %s | llvm-objdump -arch=hexagon -arch=hexagon -mcpu=hexagonv62 -d - | FileCheck %s
+
+ r31:30=vabsdiffb(r29:28, r27:26)
+# CHECK: e8fadc1e { r31:30 = vabsdiffb(r29:28,r27:26)
+
+ r25:24=vabsdiffub(r23:22, r21:20)
+# CHECK: e8b4d618 { r25:24 = vabsdiffub(r23:22,r21:20)
+
+ r19:18,p3=vminub(r17:16, r15:14)
+# CHECK: eaeed072 { r19:18,p3 = vminub(r17:16,r15:14)
+
+ r13:12=vtrunehb(r11:10, r9:8)
+# CHECK: c18ac86c { r13:12 = vtrunehb(r11:10,r9:8)
+
+ r7:6=vtrunohb(r5:4, r3:2)
+# CHECK: c184c2a6 { r7:6 = vtrunohb(r5:4,r3:2)
+
+ r1:0=vsplatb(r31)
+# CHECK: 845fc080 { r1:0 = vsplatb(r31)
diff --git a/test/MC/Hexagon/v62a_regs.s b/test/MC/Hexagon/v62a_regs.s
new file mode 100644
index 00000000000..2d31b837afd
--- /dev/null
+++ b/test/MC/Hexagon/v62a_regs.s
@@ -0,0 +1,44 @@
+# RUN: llvm-mc -arch=hexagon -mcpu=hexagonv62 -filetype=obj %s | llvm-objdump -d - | FileCheck %s --check-prefix=CHECK-V62
+# RUN: not llvm-mc -arch=hexagon -mcpu=hexagonv60 -filetype=asm %s 2>%t; FileCheck -check-prefix=CHECK-NOV62 %s < %t
+#
+
+# Assure that v62 added registers are understood
+
+r0=framelimit
+r0=framekey
+r1:0=c17:16
+
+# CHECK-V62: 6a10c000 { r0 = framelimit }
+# CHECK-V62: 6a11c000 { r0 = framekey }
+# CHECK-V62: 6810c000 { r1:0 = c17:16 }
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+
+r0=pktcountlo
+r0=pktcounthi
+r1:0=c19:18
+r1:0=pktcount
+
+# CHECK-V62: 6a12c000 { r0 = pktcountlo }
+# CHECK-V62: 6a13c000 { r0 = pktcounthi }
+# CHECK-V62: 6812c000 { r1:0 = c19:18 }
+# CHECK-V62: 6812c000 { r1:0 = c19:18 }
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+
+r0=utimerlo
+r0=utimerhi
+r1:0=c31:30
+r1:0=UTIMER
+
+# CHECK-V62: 6a1ec000 { r0 = utimerlo }
+# CHECK-V62: 6a1fc000 { r0 = utimerhi }
+# CHECK-V62: 681ec000 { r1:0 = c31:30 }
+# CHECK-V62: 681ec000 { r1:0 = c31:30 }
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
+# CHECK-NOV62: rror: invalid operand for instruction
-- 
2.11.0