case TargetOpcode::G_LOAD:
case TargetOpcode::G_STORE:
case TargetOpcode::G_ZEXTLOAD:
- case TargetOpcode::G_SEXTLOAD:
+ case TargetOpcode::G_SEXTLOAD: {
+ LLT PtrTy = MRI->getType(MI->getOperand(1).getReg());
+ if (!PtrTy.isPointer())
+ report("Generic memory instruction must access a pointer", MI);
+
// Generic loads and stores must have a single MachineMemOperand
// describing that access.
if (!MI->hasOneMemOperand()) {
}
break;
+ }
case TargetOpcode::G_PHI: {
LLT DstTy = MRI->getType(MI->getOperand(0).getReg());
if (!DstTy.isValid() ||
%1:vgpr(s32) = COPY $vgpr0
; GCN: [[VGPR1:%[0-9]+]]:vgpr_32 = COPY $vgpr1
%2:vgpr(s32) = COPY $vgpr1
- %3:vgpr(s64) = COPY $vgpr3_vgpr4
+ %3:vgpr(p1) = COPY $vgpr3_vgpr4
; cvt_pkrtz vs
; GCN: V_CVT_PKRTZ_F16_F32_e64 0, [[VGPR0]], 0, [[SGPR0]]
%7:vgpr(s32) = G_BITCAST %4
%8:vgpr(s32) = G_BITCAST %5
%9:vgpr(s32) = G_BITCAST %6
- G_STORE %7, %3 :: (store 4 into %ir.global0)
- G_STORE %8, %3 :: (store 4 into %ir.global0)
- G_STORE %9, %3 :: (store 4 into %ir.global0)
+ G_STORE %7, %3 :: (store 4 into %ir.global0, addrspace 1)
+ G_STORE %8, %3 :: (store 4 into %ir.global0, addrspace 1)
+ G_STORE %9, %3 :: (store 4 into %ir.global0, addrspace 1)
...
---
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:vgpr(s32) = COPY $vgpr0
- %3:vgpr(s64) = COPY $vgpr3_vgpr4
+ %3:vgpr(p1) = COPY $vgpr3_vgpr4
; GCN: [[C1:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 1
; GCN: [[C4096:%[0-9]+]]:sreg_32_xm0 = S_MOV_B32 4096
%17:vgpr(s32) = G_ASHR %16, %5
- G_STORE %17, %3 :: (store 4 into %ir.global0)
+ G_STORE %17, %3 :: (store 4 into %ir.global0, addrspace 1)
...
---
bb.0:
liveins: $sgpr0, $vgpr3_vgpr4
%0:vgpr(s32) = COPY $vgpr0
- %1:vgpr(s64) = COPY $vgpr3_vgpr4
+ %1:vgpr(p1) = COPY $vgpr3_vgpr4
%2:vgpr(<2 x s16>) = G_BITCAST %0
%3:vgpr(s32) = G_BITCAST %2
- G_STORE %3, %1 :: (store 4 into %ir.global0)
+ G_STORE %3, %1 :: (store 4 into %ir.global0, addrspace 1)
...
---
bb.0:
liveins: $vgpr0_vgpr1, $vgpr2_vgpr3
; GCN-LABEL: name: constant
- %0:vgpr(s64) = COPY $vgpr0_vgpr1
- %1:vgpr(s64) = COPY $vgpr2_vgpr3
+ %0:vgpr(p1) = COPY $vgpr0_vgpr1
+ %1:vgpr(p1) = COPY $vgpr2_vgpr3
; GCN: %{{[0-9]+}}:sreg_32 = S_MOV_B32 1
%2:sreg_32(s32) = G_CONSTANT i32 1
; GCN: %{{[0-9]+}}:vreg_64 = REG_SEQUENCE [[LO3]], %subreg.sub0, [[HI3]], %subreg.sub1
%9:vgpr(s64) = G_FCONSTANT double 1.0
- G_STORE %2, %0 :: (volatile store 4 into %ir.global0)
- G_STORE %4, %0 :: (volatile store 4 into %ir.global0)
- G_STORE %6, %0 :: (volatile store 4 into %ir.global0)
- G_STORE %8, %0 :: (volatile store 4 into %ir.global0)
- G_STORE %3, %1 :: (volatile store 8 into %ir.global1)
- G_STORE %5, %1 :: (volatile store 8 into %ir.global1)
- G_STORE %7, %1 :: (volatile store 8 into %ir.global1)
- G_STORE %9, %1 :: (volatile store 8 into %ir.global1)
+ G_STORE %2, %0 :: (volatile store 4 into %ir.global0, addrspace 1)
+ G_STORE %4, %0 :: (volatile store 4 into %ir.global0, addrspace 1)
+ G_STORE %6, %0 :: (volatile store 4 into %ir.global0, addrspace 1)
+ G_STORE %8, %0 :: (volatile store 4 into %ir.global0, addrspace 1)
+ G_STORE %3, %1 :: (volatile store 8 into %ir.global1, addrspace 1)
+ G_STORE %5, %1 :: (volatile store 8 into %ir.global1, addrspace 1)
+ G_STORE %7, %1 :: (volatile store 8 into %ir.global1, addrspace 1)
+ G_STORE %9, %1 :: (volatile store 8 into %ir.global1, addrspace 1)
...
---
; GCN: [[COPY1:%[0-9]+]]:vreg_64 = COPY [[COPY]]
; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN: FLAT_STORE_DWORD [[COPY1]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr
- %0:sgpr(s64) = COPY $sgpr2_sgpr3
- %1:vgpr(s64) = COPY %0
+ %0:sgpr(p1) = COPY $sgpr2_sgpr3
+ %1:vgpr(p1) = COPY %0
%2:vgpr(s32) = G_IMPLICIT_DEF
G_STORE %2, %1 :: (store 4 into %ir.global0)
...
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
%2:vgpr(s32) = COPY $vgpr1
- %3:vgpr(s64) = COPY $vgpr3_vgpr4
+ %3:vgpr(p1) = COPY $vgpr3_vgpr4
; fadd vs
; GCN: V_ADD_F32_e64
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
%2:vgpr(s32) = COPY $vgpr1
- %3:vgpr(s64) = COPY $vgpr3_vgpr4
+ %3:vgpr(p1) = COPY $vgpr3_vgpr4
; fmul vs
; GCN: V_MUL_F32_e64
; GCN: V_MUL_F32_e64
%6:vgpr(s32) = G_FMUL %1, %2
- G_STORE %4, %3 :: (store 4 into %ir.global0)
- G_STORE %5, %3 :: (store 4 into %ir.global0)
- G_STORE %6, %3 :: (store 4 into %ir.global0)
+ G_STORE %4, %3 :: (store 4 into %ir.global0, addrspace 1)
+ G_STORE %5, %3 :: (store 4 into %ir.global0, addrspace 1)
+ G_STORE %6, %3 :: (store 4 into %ir.global0, addrspace 1)
...
---
; GCN: [[VGPR:%[0-9]+]]:vgpr_32 = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr0
- %2:vgpr(s64) = COPY $vgpr3_vgpr4
+ %2:vgpr(p1) = COPY $vgpr3_vgpr4
; fptoui s
; GCN: V_CVT_U32_F32_e64 0, [[SGPR]], 0, 0
; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
; GCN: [[DEF:%[0-9]+]]:vgpr_32 = IMPLICIT_DEF
; GCN: FLAT_STORE_DWORD [[COPY]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr
- %0:vgpr(s64) = COPY $vgpr3_vgpr4
+ %0:vgpr(p1) = COPY $vgpr3_vgpr4
%1:vgpr(s32) = G_IMPLICIT_DEF
- G_STORE %1, %0 :: (store 4)
+ G_STORE %1, %0 :: (store 4, addrspace 1)
...
---
; GCN: [[COPY:%[0-9]+]]:vreg_64 = COPY $vgpr3_vgpr4
; GCN: [[DEF:%[0-9]+]]:vreg_64 = IMPLICIT_DEF
; GCN: FLAT_STORE_DWORDX2 [[COPY]], [[DEF]], 0, 0, 0, implicit $exec, implicit $flat_scr
- %0:vgpr(s64) = COPY $vgpr3_vgpr4
+ %0:vgpr(p1) = COPY $vgpr3_vgpr4
%1:vgpr(s64) = G_IMPLICIT_DEF
- G_STORE %1, %0 :: (store 8)
+ G_STORE %1, %0 :: (store 8, addrspace 1)
---
---
; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr
%0:vgpr(p1) = G_IMPLICIT_DEF
%1:vgpr(s32) = G_CONSTANT 4
- G_STORE %1, %0 :: (store 4)
+ G_STORE %1, %0 :: (store 4, addrspace 1)
...
---
; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr
%0:vgpr(p3) = G_IMPLICIT_DEF
%1:vgpr(s32) = G_CONSTANT 4
- G_STORE %1, %0 :: (store 4)
+ G_STORE %1, %0 :: (store 4, addrspace 1)
...
---
; GCN: FLAT_STORE_DWORD [[DEF]], [[V_MOV_B32_e32_]], 0, 0, 0, implicit $exec, implicit $flat_scr
%0:vgpr(p4) = G_IMPLICIT_DEF
%1:vgpr(s32) = G_CONSTANT 4
- G_STORE %1, %0 :: (store 4)
+ G_STORE %1, %0 :: (store 4, addrspace 1)
...
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
%2:vgpr(s32) = COPY $vgpr1
- %3:vgpr(s64) = COPY $vgpr3_vgpr4
+ %3:vgpr(p1) = COPY $vgpr3_vgpr4
; GCN: [[SGPR64_0:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
; GCN: [[VGPR64_0:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
%0:sgpr(s32) = COPY $sgpr0
%1:vgpr(s32) = COPY $vgpr0
%2:vgpr(s32) = COPY $vgpr1
- %3:vgpr(s64) = COPY $vgpr3_vgpr4
+ %3:vgpr(p1) = COPY $vgpr3_vgpr4
; GCN: [[SGPR64_0:%[0-9]+]]:sreg_64_xexec = COPY $sgpr10_sgpr11
; GCN: [[VGPR64_0:%[0-9]+]]:vreg_64 = COPY $vgpr10_vgpr11
%0:sgpr(s32) = COPY $sgpr0
%1:sgpr(s32) = COPY $sgpr1
%2:vgpr(s32) = COPY $vgpr0
- %3:vgpr(s64) = COPY $vgpr3_vgpr4
+ %3:vgpr(p1) = COPY $vgpr3_vgpr4
%4:sgpr(s32) = G_CONSTANT i32 1
%5:sgpr(s32) = G_CONSTANT i32 4096
; GCN: [[VV:%[0-9]+]]:vgpr_32 = V_OR_B32_e32 [[SV]], [[VGPR0]]
%9:vgpr(s32) = G_OR %8, %2
- G_STORE %9, %3 :: (store 4 into %ir.global0)
+ G_STORE %9, %3 :: (store 4 into %ir.global0, addrspace 1)
...
---
; GCN: [[VGPR:%[0-9]+]]:vgpr_32 = COPY $vgpr0
%1:vgpr(s32) = COPY $vgpr0
- %2:vgpr(s64) = COPY $vgpr3_vgpr4
+ %2:vgpr(p1) = COPY $vgpr3_vgpr4
; sitofp s
; GCN: V_CVT_F32_I32_e64 [[SGPR]], 0, 0
+++ /dev/null
-# RUN: not llc -mtriple=aarch64-none-linux-gnu -run-pass none -o - %s 2>&1 | FileCheck %s
-
-# CHECK: *** Bad machine code: Generic extload must have a narrower memory type ***
-# CHECK: *** Bad machine code: Generic extload must have a narrower memory type ***
-# CHECK: *** Bad machine code: Generic extload must have a narrower memory type ***
-# CHECK: *** Bad machine code: Generic extload must have a narrower memory type ***
-# CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand ***
-# CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand ***
-
----
-name: invalid_extload_memory_sizes
-body: |
- bb.0:
-
- %0:_(p0) = COPY $x0
- %1:_(s64) = G_ZEXTLOAD %0(p0) :: (load 8)
- %2:_(s64) = G_ZEXTLOAD %0(p0) :: (load 16)
- %3:_(s64) = G_SEXTLOAD %0(p0) :: (load 8)
- %4:_(s64) = G_SEXTLOAD %0(p0) :: (load 16)
- %5:_(s64) = G_ZEXTLOAD %0(p0)
- %6:_(s64) = G_SEXTLOAD %0(p0)
-
-...
--- /dev/null
+# RUN: not llc -mtriple=aarch64-- -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: global-isel, aarch64-registered-target
+
+---
+name: test_load
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+
+ ; CHECK: Bad machine code: Generic memory instruction must access a pointer
+ %0:_(s64) = G_CONSTANT i32 0
+ %1:_(s32) = G_LOAD %0 :: (load 4)
+
+...
--- /dev/null
+# RUN: not llc -mtriple=aarch64-- -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: global-isel, aarch64-registered-target
+
+---
+name: test_sextload
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+
+ ; CHECK: Bad machine code: Generic memory instruction must access a pointer
+ %0:_(s64) = G_CONSTANT i32 0
+ %1:_(s32) = G_SEXTLOAD %0 :: (load 1)
+
+ ; CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand ***
+ %2:_(p0) = G_IMPLICIT_DEF
+ %3:_(s64) = G_SEXTLOAD %2
+
+ ; CHECK: Bad machine code: Generic extload must have a narrower memory type
+ ; CHECK: Bad machine code: Generic extload must have a narrower memory type
+
+ %4:_(s64) = G_SEXTLOAD %2 :: (load 8)
+ %5:_(s64) = G_SEXTLOAD %2 :: (load 16)
+
+...
--- /dev/null
+# RUN: not llc -mtriple=aarch64-- -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: global-isel, aarch64-registered-target
+
+---
+name: test_store
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+
+ ; CHECK: Bad machine code: Generic memory instruction must access a pointer
+ %0:_(s64) = G_CONSTANT i32 0
+ %1:_(s32) = G_CONSTANT i32 1
+ G_STORE %1, %0 :: (store 4)
+
+...
--- /dev/null
+# RUN: not llc -mtriple=aarch64-- -o - -global-isel -run-pass=none -verify-machineinstrs %s 2>&1 | FileCheck %s
+# REQUIRES: global-isel, aarch64-registered-target
+
+---
+name: test_zextload
+legalized: true
+regBankSelected: false
+selected: false
+tracksRegLiveness: true
+liveins:
+body: |
+ bb.0:
+
+ ; CHECK: Bad machine code: Generic memory instruction must access a pointer
+ %0:_(s64) = G_CONSTANT i32 0
+ %1:_(s32) = G_ZEXTLOAD %0 :: (load 1)
+
+ ; CHECK: *** Bad machine code: Generic instruction accessing memory must have one mem operand ***
+ %2:_(p0) = G_IMPLICIT_DEF
+ %3:_(s64) = G_ZEXTLOAD %2
+
+ ; CHECK: Bad machine code: Generic extload must have a narrower memory type
+ ; CHECK: Bad machine code: Generic extload must have a narrower memory type
+
+ %4:_(s64) = G_ZEXTLOAD %2 :: (load 8)
+ %5:_(s64) = G_ZEXTLOAD %2 :: (load 16)
+
+...