From 5ffe6408ffb6ad8642598ab03de04d58b4980e81 Mon Sep 17 00:00:00 2001
From: Roman Lebedev <lebedev.ri@gmail.com>
Date: Thu, 23 Jan 2020 22:50:24 +0300
Subject: [PATCH] [Codegen] If reasonable, materialize clang's
 `AllocAlignAttr` as llvm's Alignment Attribute on call-site function return
 value

Summary:
Much like with the previous patch (D73005), which handled
`AssumeAlignedAttr`, this results in mildly more readable IR and will
improve test coverage in an upcoming patch.

Note that in `AllocAlignAttr`'s case, there is no requirement for the
alignment parameter to end up being an integer constant expression (I-C-E).

Reviewers: erichkeane, jdoerfert, hfinkel, aaron.ballman, rsmith

Reviewed By: erichkeane

Subscribers: cfe-commits

Tags: #clang

Differential Revision: https://reviews.llvm.org/D73006
---
 clang/lib/CodeGen/CGCall.cpp                       | 28 +++++++++++++++-------
 .../assume-aligned-and-alloc-align-attributes.c    | 14 +----------
 ...ssumption-attribute-alloc_align-on-function.cpp | 17 ++++++-------
 3 files changed, 30 insertions(+), 29 deletions(-)

diff --git a/clang/lib/CodeGen/CGCall.cpp b/clang/lib/CodeGen/CGCall.cpp
index cc276600db8..3a50e2b103f 100644
--- a/clang/lib/CodeGen/CGCall.cpp
+++ b/clang/lib/CodeGen/CGCall.cpp
@@ -3891,6 +3891,22 @@ public:
   }
 };
 
+/// Helper data structure to emit `AllocAlignAttr`.
+class AllocAlignAttrEmitter final
+    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
+public:
+  AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
+                        const CallArgList &CallArgs)
+      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
+    if (!AA)
+      return;
+    // Alignment may or may not be a constant, and that is okay.
+    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
+                    .getRValue(CGF)
+                    .getScalarVal();
+  }
+};
+
 } // namespace
 
 RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
@@ -4487,6 +4503,9 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
   Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
 
+  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
+  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
+
   // Emit the actual call/invoke instruction.
   llvm::CallBase *CI;
   if (!InvokeDest) {
@@ -4706,14 +4725,7 @@ RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
   // Emit the assume_aligned check on the return value.
   if (Ret.isScalar() && TargetDecl) {
     AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
-
-    if (const auto *AA = TargetDecl->getAttr<AllocAlignAttr>()) {
-      llvm::Value *AlignmentVal = CallArgs[AA->getParamIndex().getLLVMIndex()]
-                                      .getRValue(*this)
-                                      .getScalarVal();
-      EmitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc, AA->getLocation(),
-                              AlignmentVal);
-    }
+    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
   }
 
   // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
diff --git a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
index 02aaf9abfab..fa4ee8db12e 100644
--- a/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
+++ b/clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c
@@ -6,10 +6,6 @@ void *my_aligned_alloc(int size, int alignment) __attribute__((assume_aligned(32
 // CHECK-LABEL: @t0_immediate0(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 16)
-// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
-// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 15
-// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
 // CHECK-NEXT:    ret i8* [[CALL]]
 //
 void *t0_immediate0() {
@@ -19,10 +15,6 @@ void *t0_immediate0() {
 // CHECK-LABEL: @t1_immediate1(
 // CHECK-NEXT:  entry:
 // CHECK-NEXT:    [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 32)
-// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
-// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 31
-// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
 // CHECK-NEXT:    ret i8* [[CALL]]
 //
 void *t1_immediate1() {
@@ -31,11 +23,7 @@ void *t1_immediate1() {
 
 // CHECK-LABEL: @t2_immediate2(
 // CHECK-NEXT:  entry:
-// CHECK-NEXT:    [[CALL:%.*]] = call align 32 i8* @my_aligned_alloc(i32 320, i32 64)
-// CHECK-NEXT:    [[PTRINT:%.*]] = ptrtoint i8* [[CALL]] to i64
-// CHECK-NEXT:    [[MASKEDPTR:%.*]] = and i64 [[PTRINT]], 63
-// CHECK-NEXT:    [[MASKCOND:%.*]] = icmp eq i64 [[MASKEDPTR]], 0
-// CHECK-NEXT:    call void @llvm.assume(i1 [[MASKCOND]])
+// CHECK-NEXT:    [[CALL:%.*]] = call align 64 i8* @my_aligned_alloc(i32 320, i32 64)
 // CHECK-NEXT:    ret i8* [[CALL]]
 //
 void *t2_immediate2() {
diff --git a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
index 90682638f79..a41357933f9 100644
--- a/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
+++ b/clang/test/CodeGen/catch-alignment-assumption-attribute-alloc_align-on-function.cpp
@@ -26,21 +26,22 @@ char **caller(char **x) {
   // CHECK-NEXT: %[[X_ADDR:.*]] = alloca i8**, align 8
   // CHECK-NEXT: store i8** %[[X]], i8*** %[[X_ADDR]], align 8
   // CHECK-NEXT: %[[X_RELOADED:.*]] = load i8**, i8*** %[[X_ADDR]], align 8
-  // CHECK-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 2147483648)
-  // CHECK-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
-  // CHECK-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 2147483647
-  // CHECK-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
+  // CHECK-NOSANITIZE-NEXT: %[[X_RETURNED:.*]] = call align 128 i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 128)
+  // CHECK-SANITIZE-NEXT: %[[X_RETURNED:.*]] = call i8** @[[PASSTHROUGH]](i8** %[[X_RELOADED]], i64 128)
+  // CHECK-SANITIZE-NEXT: %[[PTRINT:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64
+  // CHECK-SANITIZE-NEXT: %[[MASKEDPTR:.*]] = and i64 %[[PTRINT]], 127
+  // CHECK-SANITIZE-NEXT: %[[MASKCOND:.*]] = icmp eq i64 %[[MASKEDPTR]], 0
   // CHECK-SANITIZE-NEXT: %[[PTRINT_DUP:.*]] = ptrtoint i8** %[[X_RETURNED]] to i64, !nosanitize
   // CHECK-SANITIZE-NEXT: br i1 %[[MASKCOND]], label %[[CONT:.*]], label %[[HANDLER_ALIGNMENT_ASSUMPTION:[^,]+]],{{.*}} !nosanitize
   // CHECK-SANITIZE: [[HANDLER_ALIGNMENT_ASSUMPTION]]:
-  // CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_alignment_assumption_abort(i8* bitcast ({ {{{.*}}}, {{{.*}}}, {{{.*}}}* }* @[[LINE_100_ALIGNMENT_ASSUMPTION]] to i8*), i64 %[[PTRINT_DUP]], i64 2147483648, i64 0){{.*}}, !nosanitize
-  // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_alignment_assumption(i8* bitcast ({ {{{.*}}}, {{{.*}}}, {{{.*}}}* }* @[[LINE_100_ALIGNMENT_ASSUMPTION]] to i8*), i64 %[[PTRINT_DUP]], i64 2147483648, i64 0){{.*}}, !nosanitize
+  // CHECK-SANITIZE-NORECOVER-NEXT: call void @__ubsan_handle_alignment_assumption_abort(i8* bitcast ({ {{{.*}}}, {{{.*}}}, {{{.*}}}* }* @[[LINE_100_ALIGNMENT_ASSUMPTION]] to i8*), i64 %[[PTRINT_DUP]], i64 128, i64 0){{.*}}, !nosanitize
+  // CHECK-SANITIZE-RECOVER-NEXT: call void @__ubsan_handle_alignment_assumption(i8* bitcast ({ {{{.*}}}, {{{.*}}}, {{{.*}}}* }* @[[LINE_100_ALIGNMENT_ASSUMPTION]] to i8*), i64 %[[PTRINT_DUP]], i64 128, i64 0){{.*}}, !nosanitize
   // CHECK-SANITIZE-TRAP-NEXT: call void @llvm.trap(){{.*}}, !nosanitize
   // CHECK-SANITIZE-UNREACHABLE-NEXT: unreachable, !nosanitize
   // CHECK-SANITIZE: [[CONT]]:
-  // CHECK-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
+  // CHECK-SANITIZE-NEXT: call void @llvm.assume(i1 %[[MASKCOND]])
   // CHECK-NEXT: ret i8** %[[X_RETURNED]]
   // CHECK-NEXT: }
 #line 100
-  return passthrough(x, 0x80000000);
+  return passthrough(x, 128);
 }
-- 
2.11.0
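
Note: for illustration, here is roughly what this change does, distilled
from clang/test/CodeGen/assume-aligned-and-alloc-align-attributes.c above.
The declaration is reconstructed from the git-truncated hunk header; the
`alloc_align(2)` part is an assumption consistent with the checked IR,
which treats the second argument as the returned pointer's alignment:

    void *my_aligned_alloc(int size, int alignment)
        __attribute__((assume_aligned(32), alloc_align(2)));

    void *t2_immediate2() {
      return my_aligned_alloc(320, 64);
    }

With this patch, when the alloc_align argument is a constant (64 here),
clang encodes it as an `align` attribute on the call-site return value
instead of the former ptrtoint/and/icmp/llvm.assume sequence:

    %call = call align 64 i8* @my_aligned_alloc(i32 320, i32 64)
    ret i8* %call

When the argument is not a constant (the "no requirement ... to end up
being an integer constant expression" case from the commit message), no
attribute can be attached, and the alignment is still emitted as an
assumption on the returned pointer, as before.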