From: Matt Arsenault Date: Sat, 28 May 2016 00:51:06 +0000 (+0000) Subject: AMDGPU: Cleanup vector insert/extract tests X-Git-Tag: android-x86-7.1-r4~32642 X-Git-Url: http://git.osdn.net/view?a=commitdiff_plain;h=c3eeba0f4cff659160a0a2b2b4124ae7f7c85859;p=android-x86%2Fexternal-llvm.git AMDGPU: Cleanup vector insert/extract tests This mostly makes sure that 3-vector dynamic inserts and extracts are covered. git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@271082 91177308-0d34-0410-b5e6-96231b3b80d8 --- diff --git a/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll b/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll new file mode 100644 index 00000000000..d0b19c825ee --- /dev/null +++ b/test/CodeGen/AMDGPU/extract_vector_elt-f64.ll @@ -0,0 +1,29 @@ +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s + +; GCN-LABEL: {{^}}extract_vector_elt_v3f64_2: +; GCN: buffer_load_dwordx4 +; GCN: buffer_load_dwordx2 +; GCN: buffer_store_dwordx2 +define void @extract_vector_elt_v3f64_2(double addrspace(1)* %out, <3 x double> addrspace(1)* %in) #0 { + %ld = load volatile <3 x double>, <3 x double> addrspace(1)* %in + %elt = extractelement <3 x double> %ld, i32 2 + store volatile double %elt, double addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3f64: +define void @dyn_extract_vector_elt_v3f64(double addrspace(1)* %out, <3 x double> %foo, i32 %elt) #0 { + %dynelt = extractelement <3 x double> %foo, i32 %elt + store volatile double %dynelt, double addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4f64: +define void @dyn_extract_vector_elt_v4f64(double addrspace(1)* %out, <4 x double> %foo, i32 %elt) #0 { + %dynelt = extractelement <4 x double> %foo, i32 %elt + store volatile double %dynelt, double addrspace(1)* %out + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll b/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll new file mode 100644 index 00000000000..e012cb07163 --- /dev/null +++ b/test/CodeGen/AMDGPU/extract_vector_elt-i16.ll @@ -0,0 +1,86 @@ +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s +; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s + +; FUNC-LABEL: {{^}}extract_vector_elt_v2i16: +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort +; GCN: buffer_store_short +; GCN: buffer_store_short +define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) #0 { + %p0 = extractelement <2 x i16> %foo, i32 0 + %p1 = extractelement <2 x i16> %foo, i32 1 + %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 10 + store i16 %p1, i16 addrspace(1)* %out, align 2 + store i16 %p0, i16 addrspace(1)* %out1, align 2 + ret void +} + +; FUNC-LABEL: {{^}}extract_vector_elt_v3i16: +; GCN: buffer_load_ushort +; GCN: buffer_store_short +; GCN: buffer_store_short +define void @extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo) #0 { + %p0 = extractelement <3 x i16> %foo, i32 0 + %p1 = extractelement <3 x i16> %foo, i32 2 + %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1 + store i16 %p1, i16 addrspace(1)* %out, align 2 + store i16 %p0, i16 addrspace(1)* %out1, align 2 + ret void +} + +; FUNC-LABEL: {{^}}extract_vector_elt_v4i16: +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort +; GCN: buffer_store_short +; GCN: 
buffer_store_short +define void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) #0 { + %p0 = extractelement <4 x i16> %foo, i32 0 + %p1 = extractelement <4 x i16> %foo, i32 2 + %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 10 + store i16 %p1, i16 addrspace(1)* %out, align 2 + store i16 %p0, i16 addrspace(1)* %out1, align 2 + ret void +} + + +; FUNC-LABEL: {{^}}dynamic_extract_vector_elt_v3i16: +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort + +; GCN: buffer_store_short +; GCN: buffer_store_short +; GCN: buffer_store_short + +; GCN: buffer_store_short +; GCN: buffer_load_ushort +; GCN: buffer_store_short +define void @dynamic_extract_vector_elt_v3i16(i16 addrspace(1)* %out, <3 x i16> %foo, i32 %idx) #0 { + %p0 = extractelement <3 x i16> %foo, i32 %idx + %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1 + store i16 %p0, i16 addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}dynamic_extract_vector_elt_v4i16: +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort + +; GCN: buffer_store_short +; GCN: buffer_store_short +; GCN: buffer_store_short +; GCN: buffer_store_short + +; GCN: buffer_store_short +; GCN: buffer_load_ushort +; GCN: buffer_store_short +define void @dynamic_extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo, i32 %idx) #0 { + %p0 = extractelement <4 x i16> %foo, i32 %idx + %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1 + store i16 %p0, i16 addrspace(1)* %out + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll b/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll similarity index 61% rename from test/CodeGen/AMDGPU/extract-vector-elt-i64.ll rename to test/CodeGen/AMDGPU/extract_vector_elt-i64.ll index e3255913962..0a51c39f026 100644 --- a/test/CodeGen/AMDGPU/extract-vector-elt-i64.ll +++ b/test/CodeGen/AMDGPU/extract_vector_elt-i64.ll @@ -1,4 +1,5 @@ ; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s +; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=GCN %s ; How the replacement of i64 stores with v2i32 stores resulted in ; breaking other users of the bitcast if they already existed @@ -7,7 +8,7 @@ ; GCN: buffer_store_dword ; GCN: buffer_store_dword ; GCN: buffer_store_dwordx2 -define void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %val) nounwind { +define void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspace(1)* %in, i64 %val) #0 { %vec = bitcast i64 %val to <2 x i32> %elt0 = extractelement <2 x i32> %vec, i32 0 %elt1 = extractelement <2 x i32> %vec, i32 1 @@ -18,8 +19,8 @@ define void @extract_vector_elt_select_error(i32 addrspace(1)* %out, i64 addrspa ret void } - -define void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) nounwind { +; GCN-LABEL: {{^}}extract_vector_elt_v2i64: +define void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) #0 { %p0 = extractelement <2 x i64> %foo, i32 0 %p1 = extractelement <2 x i64> %foo, i32 1 %out1 = getelementptr i64, i64 addrspace(1)* %out, i32 1 @@ -28,16 +29,34 @@ define void @extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo) no ret void } -define void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x i64> %foo, i32 %elt) nounwind { +; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64: +define void @dyn_extract_vector_elt_v2i64(i64 addrspace(1)* %out, <2 x 
i64> %foo, i32 %elt) #0 { %dynelt = extractelement <2 x i64> %foo, i32 %elt store volatile i64 %dynelt, i64 addrspace(1)* %out ret void } -define void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %foo, i32 %elt, <2 x i64> %arst) nounwind { +; GCN-LABEL: {{^}}dyn_extract_vector_elt_v2i64_2: +define void @dyn_extract_vector_elt_v2i64_2(i64 addrspace(1)* %out, <2 x i64> addrspace(1)* %foo, i32 %elt, <2 x i64> %arst) #0 { %load = load volatile <2 x i64>, <2 x i64> addrspace(1)* %foo %or = or <2 x i64> %load, %arst %dynelt = extractelement <2 x i64> %or, i32 %elt store volatile i64 %dynelt, i64 addrspace(1)* %out ret void } + +; GCN-LABEL: {{^}}dyn_extract_vector_elt_v3i64: +define void @dyn_extract_vector_elt_v3i64(i64 addrspace(1)* %out, <3 x i64> %foo, i32 %elt) #0 { + %dynelt = extractelement <3 x i64> %foo, i32 %elt + store volatile i64 %dynelt, i64 addrspace(1)* %out + ret void +} + +; GCN-LABEL: {{^}}dyn_extract_vector_elt_v4i64: +define void @dyn_extract_vector_elt_v4i64(i64 addrspace(1)* %out, <4 x i64> %foo, i32 %elt) #0 { + %dynelt = extractelement <4 x i64> %foo, i32 %elt + store volatile i64 %dynelt, i64 addrspace(1)* %out + ret void +} + +attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll b/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll similarity index 57% rename from test/CodeGen/AMDGPU/extract-vector-elt-i8.ll rename to test/CodeGen/AMDGPU/extract_vector_elt-i8.ll index ff1006b80ef..9005bfa07c2 100644 --- a/test/CodeGen/AMDGPU/extract-vector-elt-i8.ll +++ b/test/CodeGen/AMDGPU/extract_vector_elt-i8.ll @@ -1,9 +1,9 @@ -; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s -; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s +; RUN: llc -march=amdgcn -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s +; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs < %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s ; FUNC-LABEL: {{^}}extract_vector_elt_v1i8: -; SI: buffer_load_ubyte -; SI: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte define void @extract_vector_elt_v1i8(i8 addrspace(1)* %out, <1 x i8> %foo) #0 { %p0 = extractelement <1 x i8> %foo, i32 0 store i8 %p0, i8 addrspace(1)* %out @@ -11,10 +11,10 @@ define void @extract_vector_elt_v1i8(i8 addrspace(1)* %out, <1 x i8> %foo) #0 { } ; FUNC-LABEL: {{^}}extract_vector_elt_v2i8: -; SI: buffer_load_ubyte -; SI: buffer_load_ubyte -; SI: buffer_store_byte -; SI: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +; GCN: buffer_store_byte define void @extract_vector_elt_v2i8(i8 addrspace(1)* %out, <2 x i8> %foo) #0 { %p0 = extractelement <2 x i8> %foo, i32 0 %p1 = extractelement <2 x i8> %foo, i32 1 @@ -25,10 +25,10 @@ define void @extract_vector_elt_v2i8(i8 addrspace(1)* %out, <2 x i8> %foo) #0 { } ; FUNC-LABEL: {{^}}extract_vector_elt_v3i8: -; SI: buffer_load_ubyte -; SI: buffer_load_ubyte -; SI: buffer_store_byte -; SI: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +; GCN: buffer_store_byte define void @extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo) #0 { %p0 = extractelement <3 x i8> %foo, i32 0 %p1 = extractelement <3 x i8> %foo, i32 2 @@ -39,10 +39,10 @@ define void @extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo) #0 { } ; FUNC-LABEL: {{^}}extract_vector_elt_v4i8: -; 
SI: buffer_load_ubyte -; SI: buffer_load_ubyte -; SI: buffer_store_byte -; SI: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +; GCN: buffer_store_byte define void @extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo) #0 { %p0 = extractelement <4 x i8> %foo, i32 0 %p1 = extractelement <4 x i8> %foo, i32 2 @@ -53,10 +53,10 @@ define void @extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo) #0 { } ; FUNC-LABEL: {{^}}extract_vector_elt_v8i8: -; SI: buffer_load_ubyte -; SI: buffer_load_ubyte -; SI: buffer_store_byte -; SI: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +; GCN: buffer_store_byte define void @extract_vector_elt_v8i8(i8 addrspace(1)* %out, <8 x i8> %foo) #0 { %p0 = extractelement <8 x i8> %foo, i32 0 %p1 = extractelement <8 x i8> %foo, i32 2 @@ -67,10 +67,10 @@ define void @extract_vector_elt_v8i8(i8 addrspace(1)* %out, <8 x i8> %foo) #0 { } ; FUNC-LABEL: {{^}}extract_vector_elt_v16i8: -; SI: buffer_load_ubyte -; SI: buffer_load_ubyte -; SI: buffer_store_byte -; SI: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +; GCN: buffer_store_byte define void @extract_vector_elt_v16i8(i8 addrspace(1)* %out, <16 x i8> %foo) #0 { %p0 = extractelement <16 x i8> %foo, i32 0 %p1 = extractelement <16 x i8> %foo, i32 2 @@ -81,10 +81,10 @@ define void @extract_vector_elt_v16i8(i8 addrspace(1)* %out, <16 x i8> %foo) #0 } ; FUNC-LABEL: {{^}}extract_vector_elt_v32i8: -; SI: buffer_load_ubyte -; SI: buffer_load_ubyte -; SI: buffer_store_byte -; SI: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +; GCN: buffer_store_byte define void @extract_vector_elt_v32i8(i8 addrspace(1)* %out, <32 x i8> %foo) #0 { %p0 = extractelement <32 x i8> %foo, i32 0 %p1 = extractelement <32 x i8> %foo, i32 2 @@ -95,10 +95,10 @@ define void @extract_vector_elt_v32i8(i8 addrspace(1)* %out, <32 x i8> %foo) #0 } ; FUNC-LABEL: {{^}}extract_vector_elt_v64i8: -; SI: buffer_load_ubyte -; SI: buffer_load_ubyte -; SI: buffer_store_byte -; SI: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +; GCN: buffer_store_byte define void @extract_vector_elt_v64i8(i8 addrspace(1)* %out, <64 x i8> %foo) #0 { %p0 = extractelement <64 x i8> %foo, i32 0 %p1 = extractelement <64 x i8> %foo, i32 2 @@ -108,4 +108,44 @@ define void @extract_vector_elt_v64i8(i8 addrspace(1)* %out, <64 x i8> %foo) #0 ret void } +; FUNC-LABEL: {{^}}dynamic_extract_vector_elt_v3i8: +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte + +; GCN: buffer_store_byte +; GCN: buffer_store_byte +; GCN: buffer_store_byte + +; GCN: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +define void @dynamic_extract_vector_elt_v3i8(i8 addrspace(1)* %out, <3 x i8> %foo, i32 %idx) #0 { + %p0 = extractelement <3 x i8> %foo, i32 %idx + %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1 + store i8 %p0, i8 addrspace(1)* %out + ret void +} + +; FUNC-LABEL: {{^}}dynamic_extract_vector_elt_v4i8: +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte + +; GCN: buffer_store_byte +; GCN: buffer_store_byte +; GCN: buffer_store_byte +; GCN: buffer_store_byte + +; GCN: buffer_store_byte +; GCN: buffer_load_ubyte +; GCN: buffer_store_byte +define void @dynamic_extract_vector_elt_v4i8(i8 addrspace(1)* %out, <4 x i8> %foo, i32 %idx) #0 { + %p0 = 
extractelement <4 x i8> %foo, i32 %idx + %out1 = getelementptr i8, i8 addrspace(1)* %out, i32 1 + store i8 %p0, i8 addrspace(1)* %out + ret void +} + attributes #0 = { nounwind } diff --git a/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll b/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll deleted file mode 100644 index c7572efc6f5..00000000000 --- a/test/CodeGen/AMDGPU/extract_vector_elt_i16.ll +++ /dev/null @@ -1,30 +0,0 @@ -; RUN: llc -march=amdgcn -mcpu=SI -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s -; RUN: llc -march=amdgcn -mcpu=tonga -verify-machineinstrs< %s | FileCheck -check-prefix=SI -check-prefix=FUNC %s - -; FUNC-LABEL: {{^}}extract_vector_elt_v2i16: -; SI: buffer_load_ushort -; SI: buffer_load_ushort -; SI: buffer_store_short -; SI: buffer_store_short -define void @extract_vector_elt_v2i16(i16 addrspace(1)* %out, <2 x i16> %foo) nounwind { - %p0 = extractelement <2 x i16> %foo, i32 0 - %p1 = extractelement <2 x i16> %foo, i32 1 - %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1 - store i16 %p1, i16 addrspace(1)* %out, align 2 - store i16 %p0, i16 addrspace(1)* %out1, align 2 - ret void -} - -; FUNC-LABEL: {{^}}extract_vector_elt_v4i16: -; SI: buffer_load_ushort -; SI: buffer_load_ushort -; SI: buffer_store_short -; SI: buffer_store_short -define void @extract_vector_elt_v4i16(i16 addrspace(1)* %out, <4 x i16> %foo) nounwind { - %p0 = extractelement <4 x i16> %foo, i32 0 - %p1 = extractelement <4 x i16> %foo, i32 2 - %out1 = getelementptr i16, i16 addrspace(1)* %out, i32 1 - store i16 %p1, i16 addrspace(1)* %out, align 2 - store i16 %p0, i16 addrspace(1)* %out1, align 2 - ret void -} diff --git a/test/CodeGen/AMDGPU/insert_vector_elt.ll b/test/CodeGen/AMDGPU/insert_vector_elt.ll index 179bf771d6c..1965c47520d 100644 --- a/test/CodeGen/AMDGPU/insert_vector_elt.ll +++ b/test/CodeGen/AMDGPU/insert_vector_elt.ll @@ -1,5 +1,5 @@ -; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=SI %s -; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=SI %s +; RUN: llc -verify-machineinstrs -march=amdgcn -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s +; RUN: llc -verify-machineinstrs -march=amdgcn -mcpu=tonga -mattr=+max-private-element-size-16 < %s | FileCheck -check-prefix=GCN -check-prefix=SI %s ; FIXME: Broken on evergreen ; FIXME: For some reason the 8 and 16 vectors are being stored as @@ -9,168 +9,294 @@ ; FIXME: Why is the constant moved into the intermediate register and ; not just directly into the vector component? 
-; SI-LABEL: {{^}}insertelement_v4f32_0: -; s_load_dwordx4 s{{[}}[[LOW_REG:[0-9]+]]: -; v_mov_b32_e32 -; v_mov_b32_e32 [[CONSTREG:v[0-9]+]], 5.000000e+00 -; v_mov_b32_e32 v[[LOW_REG]], [[CONSTREG]] -; buffer_store_dwordx4 v{{[}}[[LOW_REG]]: +; GCN-LABEL: {{^}}insertelement_v4f32_0: +; GCN: s_load_dwordx4 s{{\[}}[[LOW_REG:[0-9]+]]: +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} +; GCN-DAG: v_mov_b32_e32 [[CONSTREG:v[0-9]+]], 0x40a00000 +; GCN-DAG: v_mov_b32_e32 v[[LOW_REG]], [[CONSTREG]] +; GCN: buffer_store_dwordx4 v{{\[}}[[LOW_REG]]: define void @insertelement_v4f32_0(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind { %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 0 store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16 ret void } -; SI-LABEL: {{^}}insertelement_v4f32_1: +; GCN-LABEL: {{^}}insertelement_v4f32_1: define void @insertelement_v4f32_1(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind { %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 1 store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16 ret void } -; SI-LABEL: {{^}}insertelement_v4f32_2: +; GCN-LABEL: {{^}}insertelement_v4f32_2: define void @insertelement_v4f32_2(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind { %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 2 store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16 ret void } -; SI-LABEL: {{^}}insertelement_v4f32_3: +; GCN-LABEL: {{^}}insertelement_v4f32_3: define void @insertelement_v4f32_3(<4 x float> addrspace(1)* %out, <4 x float> %a) nounwind { %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 3 store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16 ret void } -; SI-LABEL: {{^}}insertelement_v4i32_0: +; GCN-LABEL: {{^}}insertelement_v4i32_0: define void @insertelement_v4i32_0(<4 x i32> addrspace(1)* %out, <4 x i32> %a) nounwind { %vecins = insertelement <4 x i32> %a, i32 999, i32 0 store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v2f32: -; SI: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000 -; SI: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]] -; SI: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]: +; GCN-LABEL: {{^}}insertelement_v3f32_1: +define void @insertelement_v3f32_1(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind { + %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 1 + store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16 + ret void +} + +; GCN-LABEL: {{^}}insertelement_v3f32_2: +define void @insertelement_v3f32_2(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind { + %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 2 + store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16 + ret void +} + +; GCN-LABEL: {{^}}insertelement_v3f32_3: +define void @insertelement_v3f32_3(<3 x float> addrspace(1)* %out, <3 x float> %a) nounwind { + %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 3 + store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16 + ret void +} + +; GCN-LABEL: {{^}}dynamic_insertelement_v2f32: +; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000 +; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]] +; GCN: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]: define void 
@dynamic_insertelement_v2f32(<2 x float> addrspace(1)* %out, <2 x float> %a, i32 %b) nounwind { %vecins = insertelement <2 x float> %a, float 5.000000e+00, i32 %b store <2 x float> %vecins, <2 x float> addrspace(1)* %out, align 8 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v4f32: -; SI: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000 -; SI: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]] -; SI: buffer_store_dwordx4 {{v\[}}[[LOW_RESULT_REG]]: +; GCN-LABEL: {{^}}dynamic_insertelement_v3f32: +; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000 +; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]] +; GCN-DAG: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]: +; GCN-DAG: buffer_store_dword v +define void @dynamic_insertelement_v3f32(<3 x float> addrspace(1)* %out, <3 x float> %a, i32 %b) nounwind { + %vecins = insertelement <3 x float> %a, float 5.000000e+00, i32 %b + store <3 x float> %vecins, <3 x float> addrspace(1)* %out, align 16 + ret void +} + +; GCN-LABEL: {{^}}dynamic_insertelement_v4f32: +; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 0x40a00000 +; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]] +; GCN: buffer_store_dwordx4 {{v\[}}[[LOW_RESULT_REG]]: define void @dynamic_insertelement_v4f32(<4 x float> addrspace(1)* %out, <4 x float> %a, i32 %b) nounwind { %vecins = insertelement <4 x float> %a, float 5.000000e+00, i32 %b store <4 x float> %vecins, <4 x float> addrspace(1)* %out, align 16 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v8f32: -; SI: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}} -; SI: buffer_store_dwordx4 -; SI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}dynamic_insertelement_v8f32: +; GCN: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}} +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 define void @dynamic_insertelement_v8f32(<8 x float> addrspace(1)* %out, <8 x float> %a, i32 %b) nounwind { %vecins = insertelement <8 x float> %a, float 5.000000e+00, i32 %b store <8 x float> %vecins, <8 x float> addrspace(1)* %out, align 32 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v16f32: -; SI: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}} -; SI: buffer_store_dwordx4 -; SI: buffer_store_dwordx4 -; SI: buffer_store_dwordx4 -; SI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}dynamic_insertelement_v16f32: +; GCN: v_movreld_b32_e32 v{{[0-9]+}}, v{{[0-9]+}} +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 define void @dynamic_insertelement_v16f32(<16 x float> addrspace(1)* %out, <16 x float> %a, i32 %b) nounwind { %vecins = insertelement <16 x float> %a, float 5.000000e+00, i32 %b store <16 x float> %vecins, <16 x float> addrspace(1)* %out, align 64 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v2i32: -; SI: buffer_store_dwordx2 +; GCN-LABEL: {{^}}dynamic_insertelement_v2i32: +; GCN: v_movreld_b32 +; GCN: buffer_store_dwordx2 define void @dynamic_insertelement_v2i32(<2 x i32> addrspace(1)* %out, <2 x i32> %a, i32 %b) nounwind { %vecins = insertelement <2 x i32> %a, i32 5, i32 %b store <2 x i32> %vecins, <2 x i32> addrspace(1)* %out, align 8 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v4i32: -; SI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}dynamic_insertelement_v3i32: +; GCN: v_mov_b32_e32 [[CONST:v[0-9]+]], 5 +; GCN: v_movreld_b32_e32 v[[LOW_RESULT_REG:[0-9]+]], [[CONST]] +; GCN-DAG: buffer_store_dwordx2 {{v\[}}[[LOW_RESULT_REG]]: +; GCN-DAG: buffer_store_dword v +define void @dynamic_insertelement_v3i32(<3 x i32> addrspace(1)* %out, <3 x i32> %a, i32 %b) nounwind { + %vecins = 
insertelement <3 x i32> %a, i32 5, i32 %b + store <3 x i32> %vecins, <3 x i32> addrspace(1)* %out, align 16 + ret void +} + +; GCN-LABEL: {{^}}dynamic_insertelement_v4i32: +; GCN: v_movreld_b32 +; GCN: buffer_store_dwordx4 define void @dynamic_insertelement_v4i32(<4 x i32> addrspace(1)* %out, <4 x i32> %a, i32 %b) nounwind { %vecins = insertelement <4 x i32> %a, i32 5, i32 %b store <4 x i32> %vecins, <4 x i32> addrspace(1)* %out, align 16 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v8i32: -; FIXMESI: buffer_store_dwordx4 -; FIXMESI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}dynamic_insertelement_v8i32: +; GCN: v_movreld_b32 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 define void @dynamic_insertelement_v8i32(<8 x i32> addrspace(1)* %out, <8 x i32> %a, i32 %b) nounwind { %vecins = insertelement <8 x i32> %a, i32 5, i32 %b store <8 x i32> %vecins, <8 x i32> addrspace(1)* %out, align 32 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v16i32: -; FIXMESI: buffer_store_dwordx4 -; FIXMESI: buffer_store_dwordx4 -; FIXMESI: buffer_store_dwordx4 -; FIXMESI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}dynamic_insertelement_v16i32: +; GCN: v_movreld_b32 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 define void @dynamic_insertelement_v16i32(<16 x i32> addrspace(1)* %out, <16 x i32> %a, i32 %b) nounwind { %vecins = insertelement <16 x i32> %a, i32 5, i32 %b store <16 x i32> %vecins, <16 x i32> addrspace(1)* %out, align 64 ret void } - -; SI-LABEL: {{^}}dynamic_insertelement_v2i16: -; FIXMESI: buffer_store_dwordx2 +; GCN-LABEL: {{^}}dynamic_insertelement_v2i16: define void @dynamic_insertelement_v2i16(<2 x i16> addrspace(1)* %out, <2 x i16> %a, i32 %b) nounwind { %vecins = insertelement <2 x i16> %a, i16 5, i32 %b store <2 x i16> %vecins, <2 x i16> addrspace(1)* %out, align 8 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v4i16: -; FIXMESI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}dynamic_insertelement_v3i16: +define void @dynamic_insertelement_v3i16(<3 x i16> addrspace(1)* %out, <3 x i16> %a, i32 %b) nounwind { + %vecins = insertelement <3 x i16> %a, i16 5, i32 %b + store <3 x i16> %vecins, <3 x i16> addrspace(1)* %out, align 8 + ret void +} + +; GCN-LABEL: {{^}}dynamic_insertelement_v4i16: +; GCN: buffer_load_ushort v{{[0-9]+}}, off +; GCN: buffer_load_ushort v{{[0-9]+}}, off +; GCN: buffer_load_ushort v{{[0-9]+}}, off +; GCN: buffer_load_ushort v{{[0-9]+}}, off + +; GCN-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:6 +; GCN-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:4 +; GCN-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:2 +; GCN-DAG: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} +; GCN: buffer_store_short v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} + +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort +; GCN: buffer_load_ushort + +; GCN: buffer_store_short v{{[0-9]+}}, off +; GCN: buffer_store_short v{{[0-9]+}}, off +; GCN: buffer_store_short v{{[0-9]+}}, off +; GCN: buffer_store_short v{{[0-9]+}}, off define void @dynamic_insertelement_v4i16(<4 x i16> addrspace(1)* %out, <4 x i16> %a, i32 %b) nounwind { %vecins = insertelement <4 x i16> %a, i16 5, i32 %b - store <4 x i16> %vecins, <4 x i16> addrspace(1)* %out, align 16 + store <4 x i16> %vecins, <4 x 
i16> addrspace(1)* %out, align 8 ret void } +; GCN-LABEL: {{^}}dynamic_insertelement_v2i8: +; GCN: buffer_load_ubyte v{{[0-9]+}}, off +; GCN: buffer_load_ubyte v{{[0-9]+}}, off -; SI-LABEL: {{^}}dynamic_insertelement_v2i8: -; FIXMESI: BUFFER_STORE_USHORT +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:1 +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} + +; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} + +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte + +; GCN: buffer_store_byte v{{[0-9]+}}, off +; GCN: buffer_store_byte v{{[0-9]+}}, off define void @dynamic_insertelement_v2i8(<2 x i8> addrspace(1)* %out, <2 x i8> %a, i32 %b) nounwind { %vecins = insertelement <2 x i8> %a, i8 5, i32 %b store <2 x i8> %vecins, <2 x i8> addrspace(1)* %out, align 8 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v4i8: -; FIXMESI: buffer_store_dword +; GCN-LABEL: {{^}}dynamic_insertelement_v3i8: +; GCN: buffer_load_ubyte v{{[0-9]+}}, off +; GCN: buffer_load_ubyte v{{[0-9]+}}, off +; GCN: buffer_load_ubyte v{{[0-9]+}}, off + +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:2 +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:1 +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} + +; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} + +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte + +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, off +; GCN-DAG: buffer_store_short v{{[0-9]+}}, off +define void @dynamic_insertelement_v3i8(<3 x i8> addrspace(1)* %out, <3 x i8> %a, i32 %b) nounwind { + %vecins = insertelement <3 x i8> %a, i8 5, i32 %b + store <3 x i8> %vecins, <3 x i8> addrspace(1)* %out, align 4 + ret void +} + +; GCN-LABEL: {{^}}dynamic_insertelement_v4i8: +; GCN: buffer_load_ubyte v{{[0-9]+}}, off +; GCN: buffer_load_ubyte v{{[0-9]+}}, off +; GCN: buffer_load_ubyte v{{[0-9]+}}, off +; GCN: buffer_load_ubyte v{{[0-9]+}}, off + +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:3 +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:2 +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen offset:1 +; GCN-DAG: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} + +; GCN: buffer_store_byte v{{[0-9]+}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, s{{[0-9]+}} offen{{$}} + +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte +; GCN: buffer_load_ubyte + +; GCN: buffer_store_byte v{{[0-9]+}}, off +; GCN: buffer_store_byte v{{[0-9]+}}, off +; GCN: buffer_store_byte v{{[0-9]+}}, off +; GCN: buffer_store_byte v{{[0-9]+}}, off define void @dynamic_insertelement_v4i8(<4 x i8> addrspace(1)* %out, <4 x i8> %a, i32 %b) nounwind { %vecins = insertelement <4 x i8> %a, i8 5, i32 %b - store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 16 + store <4 x i8> %vecins, <4 x i8> addrspace(1)* %out, align 4 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v8i8: -; FIXMESI: buffer_store_dwordx2 +; GCN-LABEL: {{^}}dynamic_insertelement_v8i8: define void @dynamic_insertelement_v8i8(<8 x i8> addrspace(1)* %out, <8 x i8> %a, i32 %b) nounwind 
{ %vecins = insertelement <8 x i8> %a, i8 5, i32 %b - store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 16 + store <8 x i8> %vecins, <8 x i8> addrspace(1)* %out, align 8 ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v16i8: -; FIXMESI: buffer_store_dwordx4 +; GCN-LABEL: {{^}}dynamic_insertelement_v16i8: define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> %a, i32 %b) nounwind { %vecins = insertelement <16 x i8> %a, i8 5, i32 %b store <16 x i8> %vecins, <16 x i8> addrspace(1)* %out, align 16 @@ -179,7 +305,7 @@ define void @dynamic_insertelement_v16i8(<16 x i8> addrspace(1)* %out, <16 x i8> ; This test requires handling INSERT_SUBREG in SIFixSGPRCopies. Check that ; the compiler doesn't crash. -; SI-LABEL: {{^}}insert_split_bb: +; GCN-LABEL: {{^}}insert_split_bb: define void @insert_split_bb(<2 x i32> addrspace(1)* %out, i32 addrspace(1)* %in, i32 %a, i32 %b) { entry: %0 = insertelement <2 x i32> undef, i32 %a, i32 0 @@ -203,30 +329,30 @@ endif: ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v2f64: -; SI: s_load_dword [[IDX:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x11|0x44}}{{$}} -; SI-DAG: s_lshl_b32 [[SCALEDIDX:s[0-9]+]], [[IDX]], 1{{$}} -; SI-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 0{{$}} +; GCN-LABEL: {{^}}dynamic_insertelement_v2f64: +; GCN: s_load_dword [[IDX:s[0-9]+]], s{{\[[0-9]+:[0-9]+\]}}, {{0x11|0x44}}{{$}} +; GCN-DAG: s_lshl_b32 [[SCALEDIDX:s[0-9]+]], [[IDX]], 1{{$}} +; GCN-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 0{{$}} -; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} -; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} -; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} -; SI-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} +; GCN-DAG: v_mov_b32_e32 v{{[0-9]+}}, s{{[0-9]+}} -; SI: s_mov_b32 m0, [[SCALEDIDX]] -; SI: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT0]] +; GCN: s_mov_b32 m0, [[SCALEDIDX]] +; GCN: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT0]] ; Increment to next element. ; FIXME: Should be able to manipulate m0 directly instead of add and ; copy. -; SI: s_or_b32 [[IDX1:s[0-9]+]], [[SCALEDIDX]], 1 -; SI-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0x40200000 -; SI-DAG: s_mov_b32 m0, [[IDX1]] -; SI: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT1]] +; GCN: s_or_b32 [[IDX1:s[0-9]+]], [[SCALEDIDX]], 1 +; GCN-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0x40200000 +; GCN-DAG: s_mov_b32 m0, [[IDX1]] +; GCN: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT1]] -; SI: buffer_store_dwordx4 -; SI: s_endpgm +; GCN: buffer_store_dwordx4 +; GCN: s_endpgm define void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x double> %a, i32 %b) nounwind { %vecins = insertelement <2 x double> %a, double 8.0, i32 %b store <2 x double> %vecins, <2 x double> addrspace(1)* %out, align 16 @@ -234,44 +360,52 @@ define void @dynamic_insertelement_v2f64(<2 x double> addrspace(1)* %out, <2 x d } ; FIXME: Inline immediate should be folded into v_movreld_b32. 
-; SI-LABEL: {{^}}dynamic_insertelement_v2i64: +; GCN-LABEL: {{^}}dynamic_insertelement_v2i64: -; SI-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 5{{$}} -; SI-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0{{$}} +; GCN-DAG: v_mov_b32_e32 [[ELT0:v[0-9]+]], 5{{$}} +; GCN-DAG: v_mov_b32_e32 [[ELT1:v[0-9]+]], 0{{$}} -; SI-DAG: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT0]] -; SI-DAG: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT1]] +; GCN-DAG: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT0]] +; GCN-DAG: v_movreld_b32_e32 v{{[0-9]+}}, [[ELT1]] -; SI: buffer_store_dwordx4 -; SI: s_endpgm +; GCN: buffer_store_dwordx4 +; GCN: s_endpgm define void @dynamic_insertelement_v2i64(<2 x i64> addrspace(1)* %out, <2 x i64> %a, i32 %b) nounwind { %vecins = insertelement <2 x i64> %a, i64 5, i32 %b store <2 x i64> %vecins, <2 x i64> addrspace(1)* %out, align 8 ret void } +; GCN-LABEL: {{^}}dynamic_insertelement_v3i64: +define void @dynamic_insertelement_v3i64(<3 x i64> addrspace(1)* %out, <3 x i64> %a, i32 %b) nounwind { + %vecins = insertelement <3 x i64> %a, i64 5, i32 %b + store <3 x i64> %vecins, <3 x i64> addrspace(1)* %out, align 32 + ret void +} + ; FIXME: Should be able to do without stack access. The used stack ; space is also 2x what should be required. -; SI-LABEL: {{^}}dynamic_insertelement_v4f64: -; SI: SCRATCH_RSRC_DWORD +; GCN-LABEL: {{^}}dynamic_insertelement_v4f64: +; GCN: SCRATCH_RSRC_DWORD ; Stack store -; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} -; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} + +; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} +; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} ; Write element -; SI: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} +; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} ; Stack reload -; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} -; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} +; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} +; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} ; Store result -; SI: buffer_store_dwordx4 -; SI: buffer_store_dwordx4 -; SI: s_endpgm -; SI: ScratchSize: 64 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 +; GCN: s_endpgm +; GCN: ScratchSize: 64 define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x double> %a, i32 %b) nounwind { %vecins = insertelement <4 x double> %a, double 8.0, i32 %b @@ -279,27 +413,27 @@ define void @dynamic_insertelement_v4f64(<4 x double> addrspace(1)* %out, <4 x d ret void } -; SI-LABEL: {{^}}dynamic_insertelement_v8f64: -; SI: SCRATCH_RSRC_DWORD +; GCN-LABEL: {{^}}dynamic_insertelement_v8f64: +; GCN: SCRATCH_RSRC_DWORD -; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} -; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} -; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, 
v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:32{{$}} -; SI-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:48{{$}} +; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} +; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} +; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:32{{$}} +; GCN-DAG: buffer_store_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:48{{$}} -; SI: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} +; GCN: buffer_store_dwordx2 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} -; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} -; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} -; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} -; SI-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} +; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} +; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} +; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen offset:16{{$}} +; GCN-DAG: buffer_load_dwordx4 v{{\[[0-9]+:[0-9]+\]}}, v{{[0-9]+}}, s{{\[[0-9]+:[0-9]+\]}}, {{s[0-9]+}} offen{{$}} -; SI: buffer_store_dwordx4 -; SI: buffer_store_dwordx4 -; SI: buffer_store_dwordx4 -; SI: buffer_store_dwordx4 -; SI: s_endpgm -; SI: ScratchSize: 128 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 +; GCN: buffer_store_dwordx4 +; GCN: s_endpgm +; GCN: ScratchSize: 128 define void @dynamic_insertelement_v8f64(<8 x double> addrspace(1)* %out, <8 x double> %a, i32 %b) nounwind { %vecins = insertelement <8 x double> %a, double 8.0, i32 %b store <8 x double> %vecins, <8 x double> addrspace(1)* %out, align 16
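For reference, lit expands %s in each RUN line to the path of the test file itself, so one of the new tests added here can be exercised by hand roughly as below. This is only a sketch of what lit does with the RUN line; it assumes the llc and FileCheck binaries from the build are on PATH and that the command is run from the llvm source tree root.

  llc -march=amdgcn -verify-machineinstrs < test/CodeGen/AMDGPU/extract_vector_elt-f64.ll \
    | FileCheck -check-prefix=GCN test/CodeGen/AMDGPU/extract_vector_elt-f64.ll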