From 6bd5df3900d97112aa35f6457e4ab8e66a08afe3 Mon Sep 17 00:00:00 2001
From: Daniel Sanders
Date: Thu, 26 Nov 2015 10:26:18 +0000
Subject: [PATCH] [mips][ias] Replace anchor comments with anchor instructions
 in tests.

Summary: This is because IAS will delete the comments. NFC at the moment
but it will prevent a failure once IAS is the default.

Reviewers: vkalintiris

Subscribers: llvm-commits, dsanders

Differential Revision: http://reviews.llvm.org/D14704

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@254147 91177308-0d34-0410-b5e6-96231b3b80d8
---
 test/CodeGen/Mips/cconv/arguments-varargs.ll | 72 ++++++++++++++--------------
 test/CodeGen/Mips/no-odd-spreg-msa.ll        | 16 +++----
 2 files changed, 44 insertions(+), 44 deletions(-)

diff --git a/test/CodeGen/Mips/cconv/arguments-varargs.ll b/test/CodeGen/Mips/cconv/arguments-varargs.ll
index af217c92dab..d1a196738ae 100644
--- a/test/CodeGen/Mips/cconv/arguments-varargs.ll
+++ b/test/CodeGen/Mips/cconv/arguments-varargs.ll
@@ -55,7 +55,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -89,7 +89,7 @@ entry:
 
 ; ALL-DAG: sh [[ARG1]], 2([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -117,12 +117,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i16
   %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
   store volatile i16 %arg1, i16* %e1, align 2
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i16
   %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
   store volatile i16 %arg2, i16* %e2, align 2
@@ -173,7 +173,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -207,7 +207,7 @@ entry:
 
 ; ALL-DAG: sw [[ARG1]], 4([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -235,12 +235,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i32
   %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
   store volatile i32 %arg1, i32* %e1, align 4
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i32
   %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
   store volatile i32 %arg2, i32* %e2, align 4
@@ -291,7 +291,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]] (and realign pointer for O32)
 ; O32: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -328,7 +328,7 @@ entry:
 ; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
 ; NEW-DAG: sd [[ARG1]], 8([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; FIXME: We're still aligned from the last one but CodeGen doesn't spot that.
@@ -362,12 +362,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i64
   %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
   store volatile i64 %arg1, i64* %e1, align 8
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i64
   %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
   store volatile i64 %arg2, i64* %e2, align 8
@@ -418,7 +418,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -452,7 +452,7 @@ entry:
 
 ; ALL-DAG: sh [[ARG1]], 2([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -480,12 +480,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i16
   %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
   store volatile i16 %arg1, i16* %e1, align 2
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i16
   %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
   store volatile i16 %arg2, i16* %e2, align 2
@@ -536,7 +536,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -570,7 +570,7 @@ entry:
 
 ; ALL-DAG: sw [[ARG1]], 4([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -598,12 +598,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i32
   %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
   store volatile i32 %arg1, i32* %e1, align 4
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i32
   %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
   store volatile i32 %arg2, i32* %e2, align 4
@@ -654,7 +654,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]] (and realign pointer for O32)
 ; O32: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -691,7 +691,7 @@ entry:
 ; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
 ; NEW-DAG: sd [[ARG1]], 8([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; FIXME: We're still aligned from the last one but CodeGen doesn't spot that.
@@ -725,12 +725,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i64
   %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
   store volatile i64 %arg1, i64* %e1, align 8
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i64
   %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
   store volatile i64 %arg2, i64* %e2, align 8
@@ -780,7 +780,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -814,7 +814,7 @@ entry:
 
 ; ALL-DAG: sh [[ARG1]], 2([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -842,12 +842,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i16
   %e1 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 1
   store volatile i16 %arg1, i16* %e1, align 2
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i16
   %e2 = getelementptr [3 x i16], [3 x i16]* @hwords, i32 0, i32 2
   store volatile i16 %arg2, i16* %e2, align 2
@@ -897,7 +897,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]]
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -931,7 +931,7 @@ entry:
 
 ; ALL-DAG: sw [[ARG1]], 4([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; O32-DAG: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -959,12 +959,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i32
   %e1 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 1
   store volatile i32 %arg1, i32* %e1, align 4
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i32
   %e2 = getelementptr [3 x i32], [3 x i32]* @words, i32 0, i32 2
   store volatile i32 %arg2, i32* %e2, align 4
@@ -1014,7 +1014,7 @@ entry:
 ; Store [[VA]]
 ; O32-DAG: sw [[VA]], 0([[SP]])
 
-; ALL: # ANCHOR1
+; ALL: teqi $zero, 1
 
 ; Increment [[VA]] (and realign pointer for O32)
 ; O32: lw [[VA:\$[0-9]+]], 0([[SP]])
@@ -1051,7 +1051,7 @@ entry:
 ; NEW-DAG: ld [[ARG1:\$[0-9]+]], 0([[VA]])
 ; NEW-DAG: sd [[ARG1]], 8([[GV]])
 
-; ALL: # ANCHOR2
+; ALL: teqi $zero, 2
 
 ; Increment [[VA]] again.
 ; FIXME: We're still aligned from the last one but CodeGen doesn't spot that.
@@ -1085,12 +1085,12 @@ entry:
   %ap2 = bitcast i8** %ap to i8*
   call void @llvm.va_start(i8* %ap2)
 
-  call void asm sideeffect "# ANCHOR1", ""()
+  call void asm sideeffect "teqi $$zero, 1", ""()
   %arg1 = va_arg i8** %ap, i64
   %e1 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 1
   store volatile i64 %arg1, i64* %e1, align 8
 
-  call void asm sideeffect "# ANCHOR2", ""()
+  call void asm sideeffect "teqi $$zero, 2", ""()
   %arg2 = va_arg i8** %ap, i64
   %e2 = getelementptr [3 x i64], [3 x i64]* @dwords, i32 0, i32 2
   store volatile i64 %arg2, i64* %e2, align 8
diff --git a/test/CodeGen/Mips/no-odd-spreg-msa.ll b/test/CodeGen/Mips/no-odd-spreg-msa.ll
index cf79557cc97..a96f4cc0fd6 100644
--- a/test/CodeGen/Mips/no-odd-spreg-msa.ll
+++ b/test/CodeGen/Mips/no-odd-spreg-msa.ll
@@ -19,7 +19,7 @@ entry:
   ; On the other hand, if odd single precision registers are not permitted, it
   ; must copy $f13 to an even-numbered register before inserting into the
   ; vector.
-  call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
   %1 = insertelement <4 x float> %0, float %b, i32 0
   store <4 x float> %1, <4 x float>* @v4f32
   ret void
@@ -32,7 +32,7 @@ entry:
 ; NOODDSPREG: mov.s $f[[F0:[0-9]+]], $f13
 ; NOODDSPREG: insve.w $w[[W0]][0], $w[[F0]][0]
 ; ODDSPREG: insve.w $w[[W0]][0], $w13[0]
-; ALL: # Clobber
+; ALL: teqi $zero, 1
 ; ALL-NOT: sdc1
 ; ALL-NOT: ldc1
 ; ALL: st.w $w[[W0]], 0($[[R0]])
@@ -53,7 +53,7 @@ entry:
   ; On the other hand, if odd single precision registers are not permitted, it
   ; must copy $f13 to an even-numbered register before inserting into the
   ; vector.
-  call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
   %1 = insertelement <4 x float> %0, float %b, i32 1
   store <4 x float> %1, <4 x float>* @v4f32
   ret void
@@ -66,7 +66,7 @@ entry:
 ; NOODDSPREG: mov.s $f[[F0:[0-9]+]], $f13
 ; NOODDSPREG: insve.w $w[[W0]][1], $w[[F0]][0]
 ; ODDSPREG: insve.w $w[[W0]][1], $w13[0]
-; ALL: # Clobber
+; ALL: teqi $zero, 1
 ; ALL-NOT: sdc1
 ; ALL-NOT: ldc1
 ; ALL: st.w $w[[W0]], 0($[[R0]])
@@ -83,7 +83,7 @@ entry:
   ;
   ; On the other hand, if odd single precision registers are not permitted, it
   ; must move it to $f12/$w12.
-  call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
   %2 = extractelement <4 x float> %1, i32 0
   ret float %2
 }
@@ -94,7 +94,7 @@ entry:
 ; ALL: ld.w $w12, 0($[[R0]])
 ; ALL: move.v $w[[W0:13]], $w12
 ; NOODDSPREG: move.v $w[[W0:12]], $w13
-; ALL: # Clobber
+; ALL: teqi $zero, 1
 ; ALL-NOT: st.w
 ; ALL-NOT: ld.w
 ; ALL: mov.s $f0, $f[[W0]]
@@ -111,7 +111,7 @@ entry:
   ;
   ; On the other hand, if odd single precision registers are not permitted, it
   ; must be spilled.
-  call void asm sideeffect "# Clobber", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
+  call void asm sideeffect "teqi $$zero, 1", "~{$f0},~{$f1},~{$f2},~{$f3},~{$f4},~{$f5},~{$f6},~{$f7},~{$f8},~{$f9},~{$f10},~{$f11},~{$f12},~{$f14},~{$f15},~{$f16},~{$f17},~{$f18},~{$f19},~{$f20},~{$f21},~{$f22},~{$f23},~{$f24},~{$f25},~{$f26},~{$f27},~{$f28},~{$f29},~{$f30},~{$f31}"()
   %2 = extractelement <4 x float> %1, i32 1
   ret float %2
 }
@@ -124,7 +124,7 @@ entry:
 ; NOODDSPREG: st.w $w[[W0]], 0($sp)
 ; ODDSPREG-NOT: st.w
 ; ODDSPREG-NOT: ld.w
-; ALL: # Clobber
+; ALL: teqi $zero, 1
 ; ODDSPREG-NOT: st.w
 ; ODDSPREG-NOT: ld.w
 ; NOODDSPREG: ld.w $w0, 0($sp)
-- 
2.11.0
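
For context on the pattern the patch adopts: "teqi $zero, 1" traps only if
$zero equals 1, which can never happen, so it behaves as a no-op. Unlike a
"# ANCHOR1" comment, though, it is a real instruction that the integrated
assembler keeps, so FileCheck still has a stable line to anchor on. A minimal
standalone sketch of the idea follows (illustrative only, not part of the
patch; the names @anchored and @sink are hypothetical):

@sink = global i32 0

define void @anchored(i32 %x) {
entry:
  ; A "# ..." comment anchor would be discarded by IAS; this never-firing
  ; trap survives assembly, so a directive such as "; CHECK: teqi $zero, 1"
  ; can still match in the emitted code.
  call void asm sideeffect "teqi $$zero, 1", ""()
  store volatile i32 %x, i32* @sink, align 4
  ret void
}

Note the doubled "$$" inside the inline-asm string: LLVM IR uses "$" for
operand substitution, so a literal "$zero" must be written "$$zero".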