From 723c191bb90bea346d5a906afcae6c2a4bc3b77b Mon Sep 17 00:00:00 2001
From: Craig Topper
Date: Sat, 11 May 2019 04:00:27 +0000
Subject: [PATCH] [X86] Add a test case for idempotent atomic operations with
 speculative load hardening. Fix an additional issue found by the test.

This test covers the fix from r360475 as well.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@360511 91177308-0d34-0410-b5e6-96231b3b80d8
---
 lib/Target/X86/X86SpeculativeLoadHardening.cpp |  4 +++-
 test/CodeGen/X86/speculative-load-hardening.ll | 21 +++++++++++++++++++++
 2 files changed, 24 insertions(+), 1 deletion(-)

diff --git a/lib/Target/X86/X86SpeculativeLoadHardening.cpp b/lib/Target/X86/X86SpeculativeLoadHardening.cpp
index 02f07d88afc..7b043378819 100644
--- a/lib/Target/X86/X86SpeculativeLoadHardening.cpp
+++ b/lib/Target/X86/X86SpeculativeLoadHardening.cpp
@@ -1719,9 +1719,11 @@ void X86SpeculativeLoadHardeningPass::tracePredStateThroughBlocksAndHarden(
 
       // If we have at least one (non-frame-index, non-RIP) register operand,
       // and neither operand is load-dependent, we need to check the load.
+      // Also handle explicit references to RSP as used by idempotent atomic
+      // or with 0.
       unsigned BaseReg = 0, IndexReg = 0;
       if (!BaseMO.isFI() && BaseMO.getReg() != X86::RIP &&
-          BaseMO.getReg() != X86::NoRegister)
+          BaseMO.getReg() != X86::RSP && BaseMO.getReg() != X86::NoRegister)
         BaseReg = BaseMO.getReg();
       if (IndexMO.getReg() != X86::NoRegister)
         IndexReg = IndexMO.getReg();
diff --git a/test/CodeGen/X86/speculative-load-hardening.ll b/test/CodeGen/X86/speculative-load-hardening.ll
index 54cde2c124e..5599b88a791 100644
--- a/test/CodeGen/X86/speculative-load-hardening.ll
+++ b/test/CodeGen/X86/speculative-load-hardening.ll
@@ -1142,3 +1142,24 @@ entry:
   call void @sink(i32 %e7)
   ret void
 }
+
+; Make sure we don't crash on idempotent atomic operations which have a
+; hardcoded reference to RSP+offset.
+define void @idempotent_atomic(i32* %x) speculative_load_hardening {
+; X64-LABEL: idempotent_atomic:
+; X64:       # %bb.0:
+; X64-NEXT:    movq %rsp, %rax
+; X64-NEXT:    movq $-1, %rcx
+; X64-NEXT:    sarq $63, %rax
+; X64-NEXT:    lock orl $0, (%rsp)
+; X64-NEXT:    shlq $47, %rax
+; X64-NEXT:    orq %rax, %rsp
+; X64-NEXT:    retq
+;
+; X64-LFENCE-LABEL: idempotent_atomic:
+; X64-LFENCE:       # %bb.0:
+; X64-LFENCE-NEXT:    lock orl $0, (%rsp)
+; X64-LFENCE-NEXT:    retq
+  %tmp = atomicrmw or i32* %x, i32 0 seq_cst
+  ret void
+}
-- 
2.11.0
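
For reference, a note that is not part of the upstream patch: the
"lock orl $0, (%rsp)" in the CHECK lines above is how the X86 backend lowers an
idempotent atomic RMW (an or with 0 whose result is unused); a locked no-op on
the top of the stack is cheaper than an mfence, but it leaves an explicit RSP
base register on the load, which is what the hardening pass previously
mishandled. A minimal source-level sketch that can produce this pattern (the
function name is illustrative, and clang's -mspeculative-load-hardening flag
is assumed to be in effect):

  #include <atomic>

  // Or-with-zero is idempotent: it leaves x unchanged but retains its
  // seq_cst ordering. With the result unused, the X86 backend may lower
  // this to "lock orl $0, (%rsp)" rather than an mfence, producing the
  // explicit RSP reference exercised by the test above.
  void fence_via_idempotent_rmw(std::atomic<int> &x) {
    x.fetch_or(0, std::memory_order_seq_cst);
  }

  // Illustrative build: clang++ -O2 -mspeculative-load-hardening -S f.cpp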