From eefe9f5a5457340fee5e51f9042f69993b0be895 Mon Sep 17 00:00:00 2001
From: Tim Northover
Date: Tue, 10 Nov 2015 00:44:23 +0000
Subject: [PATCH] AArch64: add experimental support for address tagging.

AArch64 has the ability to use the top 8 bits of an "address" for extra
information, with the memory subsystem automatically masking them off for
loads and stores. When that is happening, we can sometimes skip masks on
memory operations in the compiler.

However, this requires the host OS and support stack to preserve those bits,
so it can't be enabled everywhere. In principle iOS 8.0 and above take the
required precautions, but we'll put it under a flag for now.

git-svn-id: https://llvm.org/svn/llvm-project/llvm/trunk@252573 91177308-0d34-0410-b5e6-96231b3b80d8
---
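[Note, not part of the applied diff: a minimal sketch of the pointer tagging
this enables. The helper names below (setTag, clearTag, loadThroughTagged) are
hypothetical and only illustrate the idea: with TBI the hardware ignores the
top byte of an address on loads and stores, so the explicit untagging mask
compiles down to exactly the "and x" that the new combine can drop when
-aarch64-use-tbi is passed.]

  #include <cstdint>

  // Hypothetical helpers: stash a one-byte tag in the (ignored) top byte.
  template <typename T> T *setTag(T *Ptr, uint8_t Tag) {
    uint64_t Bits = reinterpret_cast<uint64_t>(Ptr);
    Bits = (Bits & 0x00ffffffffffffffULL) | (uint64_t(Tag) << 56);
    return reinterpret_cast<T *>(Bits);
  }

  template <typename T> T *clearTag(T *Ptr) {
    // Redundant under TBI; the DAG combine added here can remove this mask.
    uint64_t Bits = reinterpret_cast<uint64_t>(Ptr) & 0x00ffffffffffffffULL;
    return reinterpret_cast<T *>(Bits);
  }

  int loadThroughTagged(int *P) {
    int *Tagged = setTag(P, 0x2a);
    return *clearTag(Tagged); // lowers to "and" + load, as in tbi.ll below
  }

The corresponding IR is the and + inttoptr + load pattern exercised by
test/CodeGen/AArch64/tbi.ll.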
 lib/Target/AArch64/AArch64ISelLowering.cpp |  47 +++++++++++--
 lib/Target/AArch64/AArch64Subtarget.cpp    |  18 +++++
 lib/Target/AArch64/AArch64Subtarget.h      |   4 ++
 test/CodeGen/AArch64/tbi.ll                | 102 +++++++++++++++++++++++++++++
 4 files changed, 166 insertions(+), 5 deletions(-)
 create mode 100644 test/CodeGen/AArch64/tbi.ll

diff --git a/lib/Target/AArch64/AArch64ISelLowering.cpp b/lib/Target/AArch64/AArch64ISelLowering.cpp
index 54ede3e2d04..14d2f6fb61a 100644
--- a/lib/Target/AArch64/AArch64ISelLowering.cpp
+++ b/lib/Target/AArch64/AArch64ISelLowering.cpp
@@ -489,6 +489,8 @@ AArch64TargetLowering::AArch64TargetLowering(const TargetMachine &TM,
   setTargetDAGCombine(ISD::BITCAST);
   setTargetDAGCombine(ISD::CONCAT_VECTORS);
   setTargetDAGCombine(ISD::STORE);
+  if (Subtarget->supportsAddressTopByteIgnored())
+    setTargetDAGCombine(ISD::LOAD);
 
   setTargetDAGCombine(ISD::MUL);
 
@@ -8555,10 +8557,9 @@ static SDValue replaceSplatVectorStore(SelectionDAG &DAG, StoreSDNode *St) {
   return NewST1;
 }
 
-static SDValue performSTORECombine(SDNode *N,
-                                   TargetLowering::DAGCombinerInfo &DCI,
-                                   SelectionDAG &DAG,
-                                   const AArch64Subtarget *Subtarget) {
+static SDValue split16BStores(SDNode *N, TargetLowering::DAGCombinerInfo &DCI,
+                              SelectionDAG &DAG,
+                              const AArch64Subtarget *Subtarget) {
   if (!DCI.isBeforeLegalize())
     return SDValue();
 
@@ -8720,7 +8721,39 @@ static SDValue performPostLD1Combine(SDNode *N,
   return SDValue();
 }
 
-/// This function handles the log2-shuffle pattern produced by the
+/// Simplify \Addr given that the top byte of it is ignored by HW during
+/// address translation.
+static bool performTBISimplification(SDValue Addr,
+                                     TargetLowering::DAGCombinerInfo &DCI,
+                                     SelectionDAG &DAG) {
+  APInt DemandedMask = APInt::getLowBitsSet(64, 56);
+  APInt KnownZero, KnownOne;
+  TargetLowering::TargetLoweringOpt TLO(DAG, DCI.isBeforeLegalize(),
+                                        DCI.isBeforeLegalizeOps());
+  const TargetLowering &TLI = DAG.getTargetLoweringInfo();
+  if (TLI.SimplifyDemandedBits(Addr, DemandedMask, KnownZero, KnownOne, TLO)) {
+    DCI.CommitTargetLoweringOpt(TLO);
+    return true;
+  }
+  return false;
+}
+
+static SDValue performSTORECombine(SDNode *N,
+                                   TargetLowering::DAGCombinerInfo &DCI,
+                                   SelectionDAG &DAG,
+                                   const AArch64Subtarget *Subtarget) {
+  SDValue Split = split16BStores(N, DCI, DAG, Subtarget);
+  if (Split.getNode())
+    return Split;
+
+  if (Subtarget->supportsAddressTopByteIgnored() &&
+      performTBISimplification(N->getOperand(2), DCI, DAG))
+    return SDValue(N, 0);
+
+  return SDValue();
+}
+
+/// This function handles the log2-shuffle pattern produced by the
 /// LoopVectorizer for the across vector reduction. It consists of
 /// log2(NumVectorElements) steps and, in each step, 2^(s) elements
 /// are reduced, where s is an induction variable from 0 to
@@ -9575,6 +9608,10 @@ SDValue AArch64TargetLowering::PerformDAGCombine(SDNode *N,
   }
   case ISD::VSELECT:
     return performVSelectCombine(N, DCI.DAG);
+  case ISD::LOAD:
+    if (performTBISimplification(N->getOperand(1), DCI, DAG))
+      return SDValue(N, 0);
+    break;
   case ISD::STORE:
     return performSTORECombine(N, DCI, DAG, Subtarget);
   case AArch64ISD::BRCOND:
diff --git a/lib/Target/AArch64/AArch64Subtarget.cpp b/lib/Target/AArch64/AArch64Subtarget.cpp
index e6ef6dc80e2..88af9602322 100644
--- a/lib/Target/AArch64/AArch64Subtarget.cpp
+++ b/lib/Target/AArch64/AArch64Subtarget.cpp
@@ -31,6 +31,11 @@ static cl::opt<bool>
 EnableEarlyIfConvert("aarch64-early-ifcvt", cl::desc("Enable the early if "
                      "converter pass"), cl::init(true), cl::Hidden);
 
+// If OS supports TBI, use this flag to enable it.
+static cl::opt<bool>
+UseAddressTopByteIgnored("aarch64-use-tbi", cl::desc("Assume that top byte of "
+                         "an address is ignored"), cl::init(false), cl::Hidden);
+
 AArch64Subtarget &
 AArch64Subtarget::initializeSubtargetDependencies(StringRef FS) {
   // Determine default and user-specified characteristics
@@ -125,6 +130,19 @@ bool AArch64Subtarget::enableEarlyIfConversion() const {
   return EnableEarlyIfConvert;
 }
 
+bool AArch64Subtarget::supportsAddressTopByteIgnored() const {
+  if (!UseAddressTopByteIgnored)
+    return false;
+
+  if (TargetTriple.isiOS()) {
+    unsigned Major, Minor, Micro;
+    TargetTriple.getiOSVersion(Major, Minor, Micro);
+    return Major >= 8;
+  }
+
+  return false;
+}
+
 std::unique_ptr<PBQPRAConstraint>
 AArch64Subtarget::getCustomPBQPConstraints() const {
   if (!isCortexA57())
diff --git a/lib/Target/AArch64/AArch64Subtarget.h b/lib/Target/AArch64/AArch64Subtarget.h
index 78af28829e8..af617fe7220 100644
--- a/lib/Target/AArch64/AArch64Subtarget.h
+++ b/lib/Target/AArch64/AArch64Subtarget.h
@@ -115,6 +115,10 @@ public:
   bool hasNEON() const { return HasNEON; }
   bool hasCrypto() const { return HasCrypto; }
   bool hasCRC() const { return HasCRC; }
+  /// CPU has TBI (top byte of addresses is ignored during HW address
+  /// translation) and OS enables it.
+  bool supportsAddressTopByteIgnored() const;
+
   bool hasPerfMon() const { return HasPerfMon; }
 
   bool isLittleEndian() const { return IsLittle; }
diff --git a/test/CodeGen/AArch64/tbi.ll b/test/CodeGen/AArch64/tbi.ll
new file mode 100644
index 00000000000..ab2d31b7cac
--- /dev/null
+++ b/test/CodeGen/AArch64/tbi.ll
@@ -0,0 +1,102 @@
+; RUN: llc -aarch64-use-tbi -mtriple=arm64-apple-ios8.0.0 < %s \
+; RUN:     | FileCheck --check-prefix=TBI --check-prefix=BOTH %s
+; RUN: llc -aarch64-use-tbi -mtriple=arm64-apple-ios7.1.0 < %s \
+; RUN:     | FileCheck --check-prefix=NO_TBI --check-prefix=BOTH %s
+
+; BOTH-LABEL:ld_and32:
+; TBI-NOT: and x
+; NO_TBI: and x
+define i32 @ld_and32(i64 %p) {
+  %and = and i64 %p, 72057594037927935
+  %cast = inttoptr i64 %and to i32*
+  %load = load i32, i32* %cast
+  ret i32 %load
+}
+
+; load (r & MASK) + 4
+; BOTH-LABEL:ld_and_plus_offset:
+; TBI-NOT: and x
+; NO_TBI: and x
+define i32 @ld_and_plus_offset(i64 %p) {
+  %and = and i64 %p, 72057594037927935
+  %cast = inttoptr i64 %and to i32*
+  %gep = getelementptr i32, i32* %cast, i64 4
+  %load = load i32, i32* %gep
+  ret i32 %load
+}
+
+; load (r & WIDER_MASK)
+; BOTH-LABEL:ld_and32_wider:
+; TBI-NOT: and x
+; NO_TBI: and x
+define i32 @ld_and32_wider(i64 %p) {
+  %and = and i64 %p, 1152921504606846975
+  %cast = inttoptr i64 %and to i32*
+  %load = load i32, i32* %cast
+  ret i32 %load
+}
+
+; BOTH-LABEL:ld_and64:
+; TBI-NOT: and x
+; NO_TBI: and x
+define i64 @ld_and64(i64 %p) {
+  %and = and i64 %p, 72057594037927935
+  %cast = inttoptr i64 %and to i64*
+  %load = load i64, i64* %cast
+  ret i64 %load
+}
+
+; BOTH-LABEL:st_and32:
+; TBI-NOT: and x
+; NO_TBI: and x
+define void @st_and32(i64 %p, i32 %v) {
+  %and = and i64 %p, 72057594037927935
+  %cast = inttoptr i64 %and to i32*
+  store i32 %v, i32* %cast
+  ret void
+}
+
+; load (x1 + x2) & MASK
+; BOTH-LABEL:ld_ro:
+; TBI-NOT: and x
+; NO_TBI: and x
+define i32 @ld_ro(i64 %a, i64 %b) {
+  %p = add i64 %a, %b
+  %and = and i64 %p, 72057594037927935
+  %cast = inttoptr i64 %and to i32*
+  %load = load i32, i32* %cast
+  ret i32 %load
+}
+
+; load (r1 & MASK) + r2
+; BOTH-LABEL:ld_ro2:
+; TBI-NOT: and x
+; NO_TBI: and x
+define i32 @ld_ro2(i64 %a, i64 %b) {
+  %and = and i64 %a, 72057594037927935
+  %p = add i64 %and, %b
+  %cast = inttoptr i64 %p to i32*
+  %load = load i32, i32* %cast
+  ret i32 %load
+}
+
+; load (r1 & MASK) | r2
+; BOTH-LABEL:ld_indirect_and:
+; TBI-NOT: and x
+; NO_TBI: and x
+define i32 @ld_indirect_and(i64 %r1, i64 %r2) {
+  %and = and i64 %r1, 72057594037927935
+  %p = or i64 %and, %r2
+  %cast = inttoptr i64 %p to i32*
+  %load = load i32, i32* %cast
+  ret i32 %load
+}
+
+; BOTH-LABEL:ld_and32_narrower:
+; BOTH: and x
+define i32 @ld_and32_narrower(i64 %p) {
+  %and = and i64 %p, 36028797018963967
+  %cast = inttoptr i64 %and to i32*
+  %load = load i32, i32* %cast
+  ret i32 %load
+}
-- 
2.11.0