From c393d63aa2b8f6984672fdd4de631bbeff14b6a2 Mon Sep 17 00:00:00 2001
From: Alexandre Rames
Date: Fri, 15 Apr 2016 11:54:06 +0100
Subject: [PATCH] Fix: correctly destruct VIXL labels.

(cherry picked from commit c01a66465a398ad15da90ab2bdc35b7f4a609b17)

Bug: 27505766
Change-Id: I077465e3d308f4331e7a861902e05865f9d99835
---
 compiler/optimizing/code_generator.cc       |  3 ++-
 compiler/optimizing/code_generator.h        |  9 ++++---
 compiler/optimizing/code_generator_arm.h    |  2 +-
 compiler/optimizing/code_generator_arm64.cc |  7 +++--
 compiler/optimizing/code_generator_arm64.h  | 21 ++++++++-------
 compiler/optimizing/code_generator_mips.h   |  2 +-
 compiler/optimizing/code_generator_mips64.h |  2 +-
 compiler/optimizing/code_generator_x86.h    |  2 +-
 compiler/optimizing/code_generator_x86_64.h |  2 +-
 compiler/utils/arm64/assembler_arm64.cc     |  9 +++----
 compiler/utils/arm64/assembler_arm64.h      | 42 ++++++++++++++---------------
 11 files changed, 51 insertions(+), 50 deletions(-)

diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index a771cc156..e7fa4e472 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -187,7 +187,8 @@ class DisassemblyScope {
 
 void CodeGenerator::GenerateSlowPaths() {
   size_t code_start = 0;
-  for (SlowPathCode* slow_path : slow_paths_) {
+  for (const std::unique_ptr<SlowPathCode>& slow_path_unique_ptr : slow_paths_) {
+    SlowPathCode* slow_path = slow_path_unique_ptr.get();
     current_slow_path_ = slow_path;
     if (disasm_info_ != nullptr) {
       code_start = GetAssembler()->CodeSize();
diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 87832a2d9..d69c41055 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -67,7 +67,7 @@ class CodeAllocator {
   DISALLOW_COPY_AND_ASSIGN(CodeAllocator);
 };
 
-class SlowPathCode : public ArenaObject<kArenaAllocSlowPaths> {
+class SlowPathCode : public DeletableArenaObject<kArenaAllocSlowPaths> {
  public:
   explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {
     for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
@@ -205,7 +205,7 @@ class CodeGenerator : public DeletableArenaObject<kArenaAllocCodeGenerator> {
   virtual const Assembler& GetAssembler() const = 0;
   virtual size_t GetWordSize() const = 0;
   virtual size_t GetFloatingPointSpillSlotSize() const = 0;
-  virtual uintptr_t GetAddressOf(HBasicBlock* block) const = 0;
+  virtual uintptr_t GetAddressOf(HBasicBlock* block) = 0;
   void InitializeCodeGeneration(size_t number_of_spill_slots,
                                 size_t maximum_number_of_live_core_registers,
                                 size_t maximum_number_of_live_fpu_registers,
@@ -298,8 +298,9 @@
   // save live registers, which may be needed by the runtime to set catch phis.
   bool IsImplicitNullCheckAllowed(HNullCheck* null_check) const;
 
+  // TODO: Avoid creating the `std::unique_ptr` here.
   void AddSlowPath(SlowPathCode* slow_path) {
-    slow_paths_.push_back(slow_path);
+    slow_paths_.push_back(std::unique_ptr<SlowPathCode>(slow_path));
   }
 
   void BuildStackMaps(MemoryRegion region, const DexFile::CodeItem& code_item);
@@ -617,7 +618,7 @@
   HGraph* const graph_;
   const CompilerOptions& compiler_options_;
 
-  ArenaVector<SlowPathCode*> slow_paths_;
+  ArenaVector<std::unique_ptr<SlowPathCode>> slow_paths_;
 
   // The current slow-path that we're generating code for.
   SlowPathCode* current_slow_path_;
diff --git a/compiler/optimizing/code_generator_arm.h b/compiler/optimizing/code_generator_arm.h
index 144d58d85..0020f7b4f 100644
--- a/compiler/optimizing/code_generator_arm.h
+++ b/compiler/optimizing/code_generator_arm.h
@@ -339,7 +339,7 @@ class CodeGeneratorARM : public CodeGenerator {
     return assembler_;
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
     return GetLabelOf(block)->Position();
   }
 
diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index efe4c06d3..e8e6b6897 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -899,7 +899,7 @@ CodeGeneratorARM64::CodeGeneratorARM64(HGraph* graph,
                     callee_saved_fp_registers.list(),
                     compiler_options,
                     stats),
-      block_labels_(nullptr),
+      block_labels_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       jump_tables_(graph->GetArena()->Adapter(kArenaAllocCodeGenerator)),
       location_builder_(graph, this),
       instruction_visitor_(graph, this),
@@ -928,7 +928,7 @@
 #define __ GetVIXLAssembler()->
 
 void CodeGeneratorARM64::EmitJumpTables() {
-  for (auto jump_table : jump_tables_) {
+  for (auto&& jump_table : jump_tables_) {
     jump_table->EmitTable(this);
   }
 }
@@ -4784,8 +4784,7 @@ void InstructionCodeGeneratorARM64::VisitPackedSwitch(HPackedSwitch* switch_instr) {
       __ B(codegen_->GetLabelOf(default_block));
     }
   } else {
-    JumpTableARM64* jump_table = new (GetGraph()->GetArena()) JumpTableARM64(switch_instr);
-    codegen_->AddJumpTable(jump_table);
+    JumpTableARM64* jump_table = codegen_->CreateJumpTable(switch_instr);
 
     UseScratchRegisterScope temps(codegen_->GetVIXLAssembler());
diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h
index ec46a3461..422963e7d 100644
--- a/compiler/optimizing/code_generator_arm64.h
+++ b/compiler/optimizing/code_generator_arm64.h
@@ -83,7 +83,7 @@ class SlowPathCodeARM64 : public SlowPathCode {
   DISALLOW_COPY_AND_ASSIGN(SlowPathCodeARM64);
 };
 
-class JumpTableARM64 : public ArenaObject<kArenaAllocSwitchTable> {
+class JumpTableARM64 : public DeletableArenaObject<kArenaAllocSwitchTable> {
  public:
   explicit JumpTableARM64(HPackedSwitch* switch_instr)
     : switch_instr_(switch_instr), table_start_() {}
@@ -352,8 +352,9 @@ class CodeGeneratorARM64 : public CodeGenerator {
 
   void Bind(HBasicBlock* block) OVERRIDE;
 
-  vixl::Label* GetLabelOf(HBasicBlock* block) const {
-    return CommonGetLabelOf<vixl::Label>(block_labels_, block);
+  vixl::Label* GetLabelOf(HBasicBlock* block) {
+    block = FirstNonEmptyBlock(block);
+    return &(block_labels_[block->GetBlockId()]);
   }
 
   size_t GetWordSize() const OVERRIDE {
@@ -365,7 +366,7 @@
     return kArm64WordSize;
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
     vixl::Label* block_entry_label = GetLabelOf(block);
     DCHECK(block_entry_label->IsBound());
     return block_entry_label->location();
@@ -413,11 +414,12 @@
   }
 
   void Initialize() OVERRIDE {
-    block_labels_ = CommonInitializeLabels<vixl::Label>();
+    block_labels_.resize(GetGraph()->GetBlocks().size());
   }
 
-  void AddJumpTable(JumpTableARM64* jump_table) {
-    jump_tables_.push_back(jump_table);
+  JumpTableARM64* CreateJumpTable(HPackedSwitch* switch_instr) {
+    jump_tables_.emplace_back(new (GetGraph()->GetArena()) JumpTableARM64(switch_instr));
+    return jump_tables_.back().get();
   }
 
   void Finalize(CodeAllocator* allocator) OVERRIDE;
@@ -616,9 +618,10 @@
   void EmitJumpTables();
 
   // Labels for each block that will be compiled.
-  vixl::Label* block_labels_;  // Indexed by block id.
+  // We use a deque so that the `vixl::Label` objects do not move in memory.
+  ArenaDeque<vixl::Label> block_labels_;  // Indexed by block id.
   vixl::Label frame_entry_label_;
-  ArenaVector<JumpTableARM64*> jump_tables_;
+  ArenaVector<std::unique_ptr<JumpTableARM64>> jump_tables_;
 
   LocationsBuilderARM64 location_builder_;
   InstructionCodeGeneratorARM64 instruction_visitor_;
diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 5e6fec8cf..435a86936 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -275,7 +275,7 @@ class CodeGeneratorMIPS : public CodeGenerator {
 
   size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMipsDoublewordSize; }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
     return assembler_.GetLabelLocation(GetLabelOf(block));
   }
 
diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index 4e15cdd7b..9785a2e8a 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -271,7 +271,7 @@ class CodeGeneratorMIPS64 : public CodeGenerator {
 
   size_t GetFloatingPointSpillSlotSize() const OVERRIDE { return kMips64DoublewordSize; }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
     return assembler_.GetLabelLocation(GetLabelOf(block));
   }
 
diff --git a/compiler/optimizing/code_generator_x86.h b/compiler/optimizing/code_generator_x86.h
index 69a625306..1739eec4c 100644
--- a/compiler/optimizing/code_generator_x86.h
+++ b/compiler/optimizing/code_generator_x86.h
@@ -361,7 +361,7 @@ class CodeGeneratorX86 : public CodeGenerator {
     return assembler_;
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
     return GetLabelOf(block)->Position();
   }
 
diff --git a/compiler/optimizing/code_generator_x86_64.h b/compiler/optimizing/code_generator_x86_64.h
index d7ce7c649..3a211c502 100644
--- a/compiler/optimizing/code_generator_x86_64.h
+++ b/compiler/optimizing/code_generator_x86_64.h
@@ -346,7 +346,7 @@ class CodeGeneratorX86_64 : public CodeGenerator {
     return &move_resolver_;
   }
 
-  uintptr_t GetAddressOf(HBasicBlock* block) const OVERRIDE {
+  uintptr_t GetAddressOf(HBasicBlock* block) OVERRIDE {
     return GetLabelOf(block)->Position();
   }
 
diff --git a/compiler/utils/arm64/assembler_arm64.cc b/compiler/utils/arm64/assembler_arm64.cc
index eb851f953..eb5112b46 100644
--- a/compiler/utils/arm64/assembler_arm64.cc
+++ b/compiler/utils/arm64/assembler_arm64.cc
@@ -32,8 +32,8 @@ namespace arm64 {
 #endif
 
 void Arm64Assembler::FinalizeCode() {
-  for (Arm64Exception* exception : exception_blocks_) {
-    EmitExceptionPoll(exception);
+  for (const std::unique_ptr<Arm64Exception>& exception : exception_blocks_) {
+    EmitExceptionPoll(exception.get());
   }
   ___ FinalizeCode();
 }
@@ -611,10 +611,9 @@ void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
 void Arm64Assembler::ExceptionPoll(ManagedRegister m_scratch, size_t stack_adjust) {
   CHECK_ALIGNED(stack_adjust, kStackAlignment);
   Arm64ManagedRegister scratch = m_scratch.AsArm64();
-  Arm64Exception *current_exception = new Arm64Exception(scratch, stack_adjust);
-  exception_blocks_.push_back(current_exception);
+  exception_blocks_.emplace_back(new Arm64Exception(scratch, stack_adjust));
   LoadFromOffset(scratch.AsXRegister(), TR, Thread::ExceptionOffset<8>().Int32Value());
-  ___ Cbnz(reg_x(scratch.AsXRegister()), current_exception->Entry());
+  ___ Cbnz(reg_x(scratch.AsXRegister()), exception_blocks_.back()->Entry());
 }
 
 void Arm64Assembler::EmitExceptionPoll(Arm64Exception *exception) {
diff --git a/compiler/utils/arm64/assembler_arm64.h b/compiler/utils/arm64/assembler_arm64.h
index 03ae99695..c4e5de7a6 100644
--- a/compiler/utils/arm64/assembler_arm64.h
+++ b/compiler/utils/arm64/assembler_arm64.h
@@ -62,7 +62,25 @@ enum StoreOperandType {
   kStoreDWord
 };
 
-class Arm64Exception;
+class Arm64Exception {
+ private:
+  Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
+      : scratch_(scratch), stack_adjust_(stack_adjust) {
+  }
+
+  vixl::Label* Entry() { return &exception_entry_; }
+
+  // Register used for passing Thread::Current()->exception_ .
+  const Arm64ManagedRegister scratch_;
+
+  // Stack adjust for ExceptionPool.
+  const size_t stack_adjust_;
+
+  vixl::Label exception_entry_;
+
+  friend class Arm64Assembler;
+  DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
+};
 
 class Arm64Assembler FINAL : public Assembler {
  public:
@@ -253,7 +271,7 @@ class Arm64Assembler FINAL : public Assembler {
   void AddConstant(XRegister rd, XRegister rn, int32_t value, vixl::Condition cond = vixl::al);
 
   // List of exception blocks to generate at the end of the code cache.
-  ArenaVector<Arm64Exception*> exception_blocks_;
+  ArenaVector<std::unique_ptr<Arm64Exception>> exception_blocks_;
 
  public:
   // Vixl assembler.
@@ -263,26 +281,6 @@
   friend class Arm64ManagedRegister_VixlRegisters_Test;
 };
 
-class Arm64Exception {
- private:
-  Arm64Exception(Arm64ManagedRegister scratch, size_t stack_adjust)
-      : scratch_(scratch), stack_adjust_(stack_adjust) {
-  }
-
-  vixl::Label* Entry() { return &exception_entry_; }
-
-  // Register used for passing Thread::Current()->exception_ .
-  const Arm64ManagedRegister scratch_;
-
-  // Stack adjust for ExceptionPool.
-  const size_t stack_adjust_;
-
-  vixl::Label exception_entry_;
-
-  friend class Arm64Assembler;
-  DISALLOW_COPY_AND_ASSIGN(Arm64Exception);
-};
-
 }  // namespace arm64
 }  // namespace art
-- 
2.11.0
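The idea behind the change, sketched outside of ART: VIXL labels perform consistency checks in their destructor (in debug builds), so objects that hold them cannot simply be leaked into an arena. They must be owned through std::unique_ptr, so that their destructors actually run, and the labels themselves must sit in a container that never moves its elements once branches refer to them. Below is a minimal standalone sketch of that scheme in plain standard C++; Label, JumpTable, and CodeGenerator are simplified stand-ins, not the ART or VIXL classes.

  #include <cassert>
  #include <cstddef>
  #include <deque>
  #include <memory>
  #include <vector>

  // Stand-in for vixl::Label: complains in its destructor if it was
  // referenced by a branch (linked) but never bound to a position.
  class Label {
   public:
    ~Label() { assert(!linked_ || bound_); }
    void Link() { linked_ = true; }
    void Bind() { bound_ = true; }

   private:
    bool linked_ = false;
    bool bound_ = false;
  };

  // Stand-in for a jump table that owns a label.
  class JumpTable {
   public:
    Label* Entry() { return &entry_; }

   private:
    Label entry_;
  };

  class CodeGenerator {
   public:
    // One label per basic block, stored in a deque: growing a deque at the
    // back never moves existing elements, so pointers stay valid.
    explicit CodeGenerator(std::size_t num_blocks) : block_labels_(num_blocks) {}

    Label* GetLabelOf(std::size_t block_id) { return &block_labels_[block_id]; }

    // Jump tables are owned through unique_ptr, so their destructors (and
    // the destructors of the labels inside them) run when the generator dies.
    JumpTable* CreateJumpTable() {
      jump_tables_.emplace_back(new JumpTable());
      return jump_tables_.back().get();
    }

   private:
    std::deque<Label> block_labels_;
    std::vector<std::unique_ptr<JumpTable>> jump_tables_;
  };

  int main() {
    CodeGenerator codegen(/*num_blocks=*/2);
    Label* label = codegen.GetLabelOf(0);
    label->Link();  // a branch refers to the label...
    label->Bind();  // ...and it is eventually bound, so the destructor check passes
    codegen.CreateJumpTable()->Entry()->Bind();
    return 0;
  }  // All Label and JumpTable destructors run here.

The patched code previously kept raw pointers in its containers, so these destructors never ran; wrapping the elements in std::unique_ptr is what the patch does uniformly for SlowPathCode, JumpTableARM64, and Arm64Exception, and switching block_labels_ to an ArenaDeque keeps the labels at stable addresses.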