From 9cd6d378bd573cdc14d049d32bdd22a97fa4d84a Mon Sep 17 00:00:00 2001
From: David Srbecky
Date: Tue, 9 Feb 2016 15:24:47 +0000
Subject: [PATCH] Associate slow paths with the instruction that they belong to.

Almost all slow paths already know the instruction they belong to;
this CL just moves that knowledge to the base class as well.

This is needed to be able to get the corresponding dex pc for a slow
path, which allows us to generate better native line numbers, which in
turn fixes some native debugging stepping issues.

Change-Id: I568dbe78a7cea6a43a4a71a014b3ad135782c270
---
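The whole change is one pattern repeated per backend: every slow path hands
the HInstruction it belongs to up to the SlowPathCode base class, which can
then answer GetDexPc() for native debug info. A minimal sketch of that
pattern, with simplified illustrative stand-in types (HInstruction, kNoDexPc
and GetDexPc mirror the real ART names; everything else here is invented for
the sketch and is not the actual ART code):

  #include <cstdint>

  static constexpr uint32_t kNoDexPc = static_cast<uint32_t>(-1);

  struct HInstruction {
    explicit HInstruction(uint32_t dex_pc) : dex_pc_(dex_pc) {}
    uint32_t GetDexPc() const { return dex_pc_; }
   private:
    uint32_t dex_pc_;
  };

  class SlowPathCode {
   public:
    // Every slow path remembers the instruction it was created for
    // (nullptr for the rare slow paths with no associated instruction).
    explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {}
    virtual ~SlowPathCode() {}

    // Dex pc of the owning instruction; GenerateSlowPaths() feeds this to
    // MaybeRecordNativeDebugInfo() before emitting the slow path body.
    uint32_t GetDexPc() const {
      return instruction_ != nullptr ? instruction_->GetDexPc() : kNoDexPc;
    }

   protected:
    HInstruction* instruction_;
  };

  // A derived slow path then only forwards the instruction to the base class:
  class NullCheckSlowPath : public SlowPathCode {
   public:
    explicit NullCheckSlowPath(HInstruction* instruction) : SlowPathCode(instruction) {}
  };

The per-backend hunks below are this same constructor-forwarding edit applied
to every slow path class, plus deletion of the now-redundant derived-class
instruction_ members.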
 compiler/optimizing/code_generator.cc        |  2 ++
 compiler/optimizing/code_generator.h         |  8 ++++-
 compiler/optimizing/code_generator_arm.cc    | 41 ++++++++---------------
 compiler/optimizing/code_generator_arm64.cc  | 42 ++++++++----------------
 compiler/optimizing/code_generator_arm64.h   |  3 +-
 compiler/optimizing/code_generator_mips.cc   | 30 ++++++-----------
 compiler/optimizing/code_generator_mips.h    |  3 +-
 compiler/optimizing/code_generator_mips64.cc | 30 ++++++-----------
 compiler/optimizing/code_generator_mips64.h  |  3 +-
 compiler/optimizing/code_generator_x86.cc    | 49 ++++++++++------------------
 compiler/optimizing/code_generator_x86_64.cc | 48 ++++++++++-----------------
 compiler/optimizing/intrinsics_arm64.cc      |  3 +-
 compiler/optimizing/intrinsics_mips.cc       |  2 +-
 compiler/optimizing/intrinsics_mips64.cc     |  3 +-
 compiler/optimizing/intrinsics_utils.h       |  2 +-
 15 files changed, 104 insertions(+), 165 deletions(-)

diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index c67efc06c..967d156cf 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -195,6 +195,8 @@ void CodeGenerator::GenerateSlowPaths() {
     if (disasm_info_ != nullptr) {
       code_start = GetAssembler()->CodeSize();
     }
+    // Record the dex pc at start of slow path (required for java line number mapping).
+    MaybeRecordNativeDebugInfo(nullptr /* instruction */, slow_path->GetDexPc());
     slow_path->EmitNativeCode(this);
     if (disasm_info_ != nullptr) {
       disasm_info_->AddSlowPathInterval(slow_path, code_start, GetAssembler()->CodeSize());

diff --git a/compiler/optimizing/code_generator.h b/compiler/optimizing/code_generator.h
index 789bf4019..9297fc956 100644
--- a/compiler/optimizing/code_generator.h
+++ b/compiler/optimizing/code_generator.h
@@ -69,7 +69,7 @@ class CodeAllocator {
 class SlowPathCode : public ArenaObject {
  public:
-  SlowPathCode() {
+  explicit SlowPathCode(HInstruction* instruction) : instruction_(instruction) {
     for (size_t i = 0; i < kMaximumNumberOfExpectedRegisters; ++i) {
       saved_core_stack_offsets_[i] = kRegisterNotSaved;
       saved_fpu_stack_offsets_[i] = kRegisterNotSaved;
@@ -106,9 +106,15 @@ class SlowPathCode : public ArenaObject {
   Label* GetEntryLabel() { return &entry_label_; }
   Label* GetExitLabel() { return &exit_label_; }

+  uint32_t GetDexPc() const {
+    return instruction_ != nullptr ? instruction_->GetDexPc() : kNoDexPc;
+  }
+
  protected:
   static constexpr size_t kMaximumNumberOfExpectedRegisters = 32;
   static constexpr uint32_t kRegisterNotSaved = -1;
+  // The instruction where this slow path is happening.
+  HInstruction* instruction_;
   uint32_t saved_core_stack_offsets_[kMaximumNumberOfExpectedRegisters];
   uint32_t saved_fpu_stack_offsets_[kMaximumNumberOfExpectedRegisters];

diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc
index f60c5e9d8..cdbb9c31a 100644
--- a/compiler/optimizing/code_generator_arm.cc
+++ b/compiler/optimizing/code_generator_arm.cc
@@ -64,7 +64,7 @@ static constexpr uint32_t kPackedSwitchCompareJumpThreshold = 7;
 class NullCheckSlowPathARM : public SlowPathCode {
  public:
-  explicit NullCheckSlowPathARM(HNullCheck* instruction) : instruction_(instruction) {}
+  explicit NullCheckSlowPathARM(HNullCheck* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -83,13 +83,12 @@ class NullCheckSlowPathARM : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM"; }

  private:
-  HNullCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM);
 };

 class DivZeroCheckSlowPathARM : public SlowPathCode {
  public:
-  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathARM(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -108,14 +107,13 @@ class DivZeroCheckSlowPathARM : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathARM"; }

  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM);
 };

 class SuspendCheckSlowPathARM : public SlowPathCode {
  public:
   SuspendCheckSlowPathARM(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCode(instruction), successor_(successor) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -144,7 +142,6 @@ class SuspendCheckSlowPathARM : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM"; }

  private:
-  HSuspendCheck* const instruction_;
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;
@@ -157,7 +154,7 @@ class BoundsCheckSlowPathARM : public SlowPathCode {
  public:
   explicit BoundsCheckSlowPathARM(HBoundsCheck* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -188,8 +185,6 @@ class BoundsCheckSlowPathARM : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM"; }

  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM);
 };

@@ -199,7 +194,7 @@ class LoadClassSlowPathARM : public SlowPathCode {
               HInstruction* at,
               uint32_t dex_pc,
               bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }

@@ -253,7 +248,7 @@ class LoadStringSlowPathARM : public SlowPathCode {
  public:
-  explicit LoadStringSlowPathARM(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathARM(HLoadString* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -264,7 +259,8 @@ class LoadStringSlowPathARM : public SlowPathCode {
     SaveLiveRegisters(codegen, locations);

     InvokeRuntimeCallingConvention calling_convention;
-    __ LoadImmediate(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ LoadImmediate(calling_convention.GetRegisterAt(0), string_index);
     arm_codegen->InvokeRuntime(
         QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this);
     CheckEntrypointTypes();
@@ -277,15 +273,13 @@ class LoadStringSlowPathARM : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM"; }

  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM);
 };

 class TypeCheckSlowPathARM : public SlowPathCode {
  public:
   TypeCheckSlowPathARM(HInstruction* instruction, bool is_fatal)
-      : instruction_(instruction), is_fatal_(is_fatal) {}
+      : SlowPathCode(instruction), is_fatal_(is_fatal) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -340,7 +334,6 @@ class TypeCheckSlowPathARM : public SlowPathCode {
   bool IsFatal() const OVERRIDE { return is_fatal_; }

  private:
-  HInstruction* const instruction_;
   const bool is_fatal_;

   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM);
@@ -349,7 +342,7 @@ class DeoptimizationSlowPathARM : public SlowPathCode {
  public:
   explicit DeoptimizationSlowPathARM(HDeoptimize* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM* arm_codegen = down_cast<CodeGeneratorARM*>(codegen);
@@ -365,13 +358,12 @@ class DeoptimizationSlowPathARM : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM"; }

  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM);
 };

 class ArraySetSlowPathARM : public SlowPathCode {
  public:
-  explicit ArraySetSlowPathARM(HInstruction* instruction) : instruction_(instruction) {}
+  explicit ArraySetSlowPathARM(HInstruction* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -410,8 +402,6 @@ class ArraySetSlowPathARM : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM"; }

  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM);
 };

@@ -419,7 +409,7 @@ class ReadBarrierMarkSlowPathARM : public SlowPathCode {
  public:
   ReadBarrierMarkSlowPathARM(HInstruction* instruction, Location out, Location obj)
-      : instruction_(instruction), out_(out), obj_(obj) {
+      : SlowPathCode(instruction), out_(out), obj_(obj) {
     DCHECK(kEmitCompilerReadBarrier);
   }

@@ -458,7 +448,6 @@ class ReadBarrierMarkSlowPathARM : public SlowPathCode {
   }

  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location obj_;

@@ -474,7 +463,7 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
                                          Location obj,
                                          uint32_t offset,
                                          Location index)
-      : instruction_(instruction),
+      : SlowPathCode(instruction),
         out_(out),
         ref_(ref),
         obj_(obj),
@@ -629,7 +618,6 @@ class ReadBarrierForHeapReferenceSlowPathARM : public SlowPathCode {
     UNREACHABLE();
   }

-  HInstruction* const instruction_;
   const Location out_;
   const Location ref_;
   const Location obj_;
@@ -646,7 +634,7 @@ class ReadBarrierForRootSlowPathARM : public SlowPathCode {
  public:
   ReadBarrierForRootSlowPathARM(HInstruction* instruction, Location out, Location root)
-      : instruction_(instruction), out_(out), root_(root) {
+      : SlowPathCode(instruction), out_(out), root_(root) {
     DCHECK(kEmitCompilerReadBarrier);
   }

@@ -679,7 +667,6 @@ class ReadBarrierForRootSlowPathARM : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathARM"; }

  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location root_;

diff --git a/compiler/optimizing/code_generator_arm64.cc b/compiler/optimizing/code_generator_arm64.cc
index 0c2e9cf37..814f8b4d5 100644
--- a/compiler/optimizing/code_generator_arm64.cc
+++ b/compiler/optimizing/code_generator_arm64.cc
@@ -219,7 +219,7 @@ void SlowPathCodeARM64::RestoreLiveRegisters(CodeGenerator* codegen, LocationSum
 class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : instruction_(instruction) {}
+  explicit BoundsCheckSlowPathARM64(HBoundsCheck* instruction) : SlowPathCodeARM64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -246,14 +246,12 @@ class BoundsCheckSlowPathARM64 : public SlowPathCodeARM64 {
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathARM64"; }

  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathARM64);
 };

 class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathARM64(HDivZeroCheck* instruction) : SlowPathCodeARM64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -272,7 +270,6 @@ class DivZeroCheckSlowPathARM64 : public SlowPathCodeARM64 {
"DivZeroCheckSlowPathARM64"; } private: - HDivZeroCheck* const instruction_; DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathARM64); }; @@ -282,7 +279,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 { HInstruction* at, uint32_t dex_pc, bool do_clinit) - : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) { + : SlowPathCodeARM64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) { DCHECK(at->IsLoadClass() || at->IsClinitCheck()); } @@ -337,7 +334,7 @@ class LoadClassSlowPathARM64 : public SlowPathCodeARM64 { class LoadStringSlowPathARM64 : public SlowPathCodeARM64 { public: - explicit LoadStringSlowPathARM64(HLoadString* instruction) : instruction_(instruction) {} + explicit LoadStringSlowPathARM64(HLoadString* instruction) : SlowPathCodeARM64(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -348,7 +345,8 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 { SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; - __ Mov(calling_convention.GetRegisterAt(0).W(), instruction_->GetStringIndex()); + const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex(); + __ Mov(calling_convention.GetRegisterAt(0).W(), string_index); arm64_codegen->InvokeRuntime( QUICK_ENTRY_POINT(pResolveString), instruction_, instruction_->GetDexPc(), this); CheckEntrypointTypes(); @@ -362,14 +360,12 @@ class LoadStringSlowPathARM64 : public SlowPathCodeARM64 { const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathARM64"; } private: - HLoadString* const instruction_; - DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathARM64); }; class NullCheckSlowPathARM64 : public SlowPathCodeARM64 { public: - explicit NullCheckSlowPathARM64(HNullCheck* instr) : instruction_(instr) {} + explicit NullCheckSlowPathARM64(HNullCheck* instr) : SlowPathCodeARM64(instr) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM64* arm64_codegen = down_cast(codegen); @@ -388,15 +384,13 @@ class NullCheckSlowPathARM64 : public SlowPathCodeARM64 { const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathARM64"; } private: - HNullCheck* const instruction_; - DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathARM64); }; class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 { public: SuspendCheckSlowPathARM64(HSuspendCheck* instruction, HBasicBlock* successor) - : instruction_(instruction), successor_(successor) {} + : SlowPathCodeARM64(instruction), successor_(successor) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorARM64* arm64_codegen = down_cast(codegen); @@ -425,7 +419,6 @@ class SuspendCheckSlowPathARM64 : public SlowPathCodeARM64 { const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathARM64"; } private: - HSuspendCheck* const instruction_; // If not null, the block to branch to after the suspend check. 
   HBasicBlock* const successor_;
@@ -438,7 +431,7 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   TypeCheckSlowPathARM64(HInstruction* instruction, bool is_fatal)
-      : instruction_(instruction), is_fatal_(is_fatal) {}
+      : SlowPathCodeARM64(instruction), is_fatal_(is_fatal) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -487,7 +480,6 @@ class TypeCheckSlowPathARM64 : public SlowPathCodeARM64 {
   bool IsFatal() const { return is_fatal_; }

  private:
-  HInstruction* const instruction_;
   const bool is_fatal_;

   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathARM64);
@@ -496,7 +488,7 @@ class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   explicit DeoptimizationSlowPathARM64(HDeoptimize* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCodeARM64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorARM64* arm64_codegen = down_cast<CodeGeneratorARM64*>(codegen);
@@ -512,13 +504,12 @@ class DeoptimizationSlowPathARM64 : public SlowPathCodeARM64 {
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathARM64"; }

  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathARM64);
 };

 class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit ArraySetSlowPathARM64(HInstruction* instruction) : instruction_(instruction) {}
+  explicit ArraySetSlowPathARM64(HInstruction* instruction) : SlowPathCodeARM64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -557,8 +548,6 @@ class ArraySetSlowPathARM64 : public SlowPathCodeARM64 {
   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathARM64"; }

  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathARM64);
 };

@@ -588,7 +577,7 @@ void JumpTableARM64::EmitTable(CodeGeneratorARM64* codegen) {
 class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   ReadBarrierMarkSlowPathARM64(HInstruction* instruction, Location out, Location obj)
-      : instruction_(instruction), out_(out), obj_(obj) {
+      : SlowPathCodeARM64(instruction), out_(out), obj_(obj) {
     DCHECK(kEmitCompilerReadBarrier);
   }

@@ -627,7 +616,6 @@ class ReadBarrierMarkSlowPathARM64 : public SlowPathCodeARM64 {
   }

  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location obj_;

@@ -643,7 +631,7 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
                                            Location obj,
                                            uint32_t offset,
                                            Location index)
-      : instruction_(instruction),
+      : SlowPathCodeARM64(instruction),
         out_(out),
         ref_(ref),
         obj_(obj),
@@ -804,7 +792,6 @@ class ReadBarrierForHeapReferenceSlowPathARM64 : public SlowPathCodeARM64 {
     UNREACHABLE();
   }

-  HInstruction* const instruction_;
   const Location out_;
   const Location ref_;
   const Location obj_;
@@ -821,7 +808,7 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
  public:
   ReadBarrierForRootSlowPathARM64(HInstruction* instruction, Location out, Location root)
-      : instruction_(instruction), out_(out), root_(root) {
+      : SlowPathCodeARM64(instruction), out_(out), root_(root) {
     DCHECK(kEmitCompilerReadBarrier);
   }

@@ -865,7 +852,6 @@ class ReadBarrierForRootSlowPathARM64 : public SlowPathCodeARM64 {
{ return "ReadBarrierForRootSlowPathARM64"; } private: - HInstruction* const instruction_; const Location out_; const Location root_; diff --git a/compiler/optimizing/code_generator_arm64.h b/compiler/optimizing/code_generator_arm64.h index fea87ab6d..352726183 100644 --- a/compiler/optimizing/code_generator_arm64.h +++ b/compiler/optimizing/code_generator_arm64.h @@ -66,7 +66,8 @@ Location ARM64ReturnLocation(Primitive::Type return_type); class SlowPathCodeARM64 : public SlowPathCode { public: - SlowPathCodeARM64() : entry_label_(), exit_label_() {} + explicit SlowPathCodeARM64(HInstruction* instruction) + : SlowPathCode(instruction), entry_label_(), exit_label_() {} vixl::Label* GetEntryLabel() { return &entry_label_; } vixl::Label* GetExitLabel() { return &exit_label_; } diff --git a/compiler/optimizing/code_generator_mips.cc b/compiler/optimizing/code_generator_mips.cc index 9dd7c519a..a47cf9370 100644 --- a/compiler/optimizing/code_generator_mips.cc +++ b/compiler/optimizing/code_generator_mips.cc @@ -149,7 +149,7 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type) class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS { public: - explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : instruction_(instruction) {} + explicit BoundsCheckSlowPathMIPS(HBoundsCheck* instruction) : SlowPathCodeMIPS(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -181,14 +181,12 @@ class BoundsCheckSlowPathMIPS : public SlowPathCodeMIPS { const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS"; } private: - HBoundsCheck* const instruction_; - DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS); }; class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS { public: - explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : instruction_(instruction) {} + explicit DivZeroCheckSlowPathMIPS(HDivZeroCheck* instruction) : SlowPathCodeMIPS(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { CodeGeneratorMIPS* mips_codegen = down_cast(codegen); @@ -210,7 +208,6 @@ class DivZeroCheckSlowPathMIPS : public SlowPathCodeMIPS { const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS"; } private: - HDivZeroCheck* const instruction_; DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS); }; @@ -220,7 +217,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS { HInstruction* at, uint32_t dex_pc, bool do_clinit) - : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) { + : SlowPathCodeMIPS(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) { DCHECK(at->IsLoadClass() || at->IsClinitCheck()); } @@ -279,7 +276,7 @@ class LoadClassSlowPathMIPS : public SlowPathCodeMIPS { class LoadStringSlowPathMIPS : public SlowPathCodeMIPS { public: - explicit LoadStringSlowPathMIPS(HLoadString* instruction) : instruction_(instruction) {} + explicit LoadStringSlowPathMIPS(HLoadString* instruction) : SlowPathCodeMIPS(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -290,7 +287,8 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS { SaveLiveRegisters(codegen, locations); InvokeRuntimeCallingConvention calling_convention; - __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex()); + const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex(); + __ LoadConst32(calling_convention.GetRegisterAt(0), 
+    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
     mips_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                 instruction_,
                                 instruction_->GetDexPc(),
@@ -309,14 +307,12 @@ class LoadStringSlowPathMIPS : public SlowPathCodeMIPS {
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS"; }

  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS);
 };

 class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : instruction_(instr) {}
+  explicit NullCheckSlowPathMIPS(HNullCheck* instr) : SlowPathCodeMIPS(instr) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
@@ -338,15 +334,13 @@ class NullCheckSlowPathMIPS : public SlowPathCodeMIPS {
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS"; }

  private:
-  HNullCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS);
 };

 class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
  public:
   SuspendCheckSlowPathMIPS(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCodeMIPS(instruction), successor_(successor) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
@@ -374,7 +368,6 @@ class SuspendCheckSlowPathMIPS : public SlowPathCodeMIPS {
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS"; }

  private:
-  HSuspendCheck* const instruction_;
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;

@@ -386,7 +379,7 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : instruction_(instruction) {}
+  explicit TypeCheckSlowPathMIPS(HInstruction* instruction) : SlowPathCodeMIPS(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -437,15 +430,13 @@ class TypeCheckSlowPathMIPS : public SlowPathCodeMIPS {
   const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS"; }

  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS);
 };

 class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
  public:
   explicit DeoptimizationSlowPathMIPS(HDeoptimize* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCodeMIPS(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS* mips_codegen = down_cast<CodeGeneratorMIPS*>(codegen);
@@ -462,7 +453,6 @@ class DeoptimizationSlowPathMIPS : public SlowPathCodeMIPS {
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS"; }

  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS);
 };

diff --git a/compiler/optimizing/code_generator_mips.h b/compiler/optimizing/code_generator_mips.h
index 49c958335..605c79442 100644
--- a/compiler/optimizing/code_generator_mips.h
+++ b/compiler/optimizing/code_generator_mips.h
@@ -152,7 +152,8 @@ class ParallelMoveResolverMIPS : public ParallelMoveResolverWithSwap {
 class SlowPathCodeMIPS : public SlowPathCode {
  public:
-  SlowPathCodeMIPS() : entry_label_(), exit_label_() {}
+  explicit SlowPathCodeMIPS(HInstruction* instruction)
+      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

   MipsLabel* GetEntryLabel() { return &entry_label_; }
   MipsLabel* GetExitLabel() { return &exit_label_; }

diff --git a/compiler/optimizing/code_generator_mips64.cc b/compiler/optimizing/code_generator_mips64.cc
index 2c0ae9ba9..dea6322e5 100644
--- a/compiler/optimizing/code_generator_mips64.cc
+++ b/compiler/optimizing/code_generator_mips64.cc
@@ -110,7 +110,7 @@ Location InvokeRuntimeCallingConvention::GetReturnLocation(Primitive::Type type)
 class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : instruction_(instruction) {}
+  explicit BoundsCheckSlowPathMIPS64(HBoundsCheck* instruction) : SlowPathCodeMIPS64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -141,14 +141,12 @@ class BoundsCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathMIPS64"; }

  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathMIPS64);
 };

 class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathMIPS64(HDivZeroCheck* instruction) : SlowPathCodeMIPS64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -169,7 +167,6 @@ class DivZeroCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathMIPS64"; }

  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathMIPS64);
 };

@@ -179,7 +176,7 @@ class LoadClassSlowPathMIPS64 : public SlowPathCodeMIPS64 {
                           HInstruction* at,
                           uint32_t dex_pc,
                           bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCodeMIPS64(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }

@@ -234,7 +231,7 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathMIPS64(HLoadString* instruction) : SlowPathCodeMIPS64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -245,7 +242,8 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
     SaveLiveRegisters(codegen, locations);

     InvokeRuntimeCallingConvention calling_convention;
-    __ LoadConst32(calling_convention.GetRegisterAt(0), instruction_->GetStringIndex());
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ LoadConst32(calling_convention.GetRegisterAt(0), string_index);
     mips64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                   instruction_,
                                   instruction_->GetDexPc(),
@@ -263,14 +261,12 @@ class LoadStringSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathMIPS64"; }

  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathMIPS64);
 };

 class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : instruction_(instr) {}
+  explicit NullCheckSlowPathMIPS64(HNullCheck* instr) : SlowPathCodeMIPS64(instr) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -291,15 +287,13 @@ class NullCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathMIPS64"; }

  private:
-  HNullCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathMIPS64);
 };

 class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
   SuspendCheckSlowPathMIPS64(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCodeMIPS64(instruction), successor_(successor) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -326,7 +320,6 @@ class SuspendCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathMIPS64"; }

  private:
-  HSuspendCheck* const instruction_;
   // If not null, the block to branch to after the suspend check.
   HBasicBlock* const successor_;

@@ -338,7 +331,7 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : instruction_(instruction) {}
+  explicit TypeCheckSlowPathMIPS64(HInstruction* instruction) : SlowPathCodeMIPS64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -384,15 +377,13 @@ class TypeCheckSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   const char* GetDescription() const OVERRIDE { return "TypeCheckSlowPathMIPS64"; }

  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathMIPS64);
 };

 class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
   explicit DeoptimizationSlowPathMIPS64(HDeoptimize* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCodeMIPS64(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorMIPS64* mips64_codegen = down_cast<CodeGeneratorMIPS64*>(codegen);
@@ -408,7 +399,6 @@ class DeoptimizationSlowPathMIPS64 : public SlowPathCodeMIPS64 {
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathMIPS64"; }

  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathMIPS64);
 };

diff --git a/compiler/optimizing/code_generator_mips64.h b/compiler/optimizing/code_generator_mips64.h
index c298097a4..ba9eaff46 100644
--- a/compiler/optimizing/code_generator_mips64.h
+++ b/compiler/optimizing/code_generator_mips64.h
@@ -152,7 +152,8 @@ class ParallelMoveResolverMIPS64 : public ParallelMoveResolverWithSwap {
 class SlowPathCodeMIPS64 : public SlowPathCode {
  public:
-  SlowPathCodeMIPS64() : entry_label_(), exit_label_() {}
+  explicit SlowPathCodeMIPS64(HInstruction* instruction)
+      : SlowPathCode(instruction), entry_label_(), exit_label_() {}

   Mips64Label* GetEntryLabel() { return &entry_label_; }
   Mips64Label* GetExitLabel() { return &exit_label_; }

diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc
index 236dea1bb..88e42f3fa 100644
--- a/compiler/optimizing/code_generator_x86.cc
+++ b/compiler/optimizing/code_generator_x86.cc
@@ -52,7 +52,7 @@ static constexpr int kFakeReturnRegister = Register(8);
 class NullCheckSlowPathX86 : public SlowPathCode {
  public:
-  explicit NullCheckSlowPathX86(HNullCheck* instruction) : instruction_(instruction) {}
+  explicit NullCheckSlowPathX86(HNullCheck* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -73,13 +73,12 @@ class NullCheckSlowPathX86 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86"; }

  private:
-  HNullCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86);
 };

 class DivZeroCheckSlowPathX86 : public SlowPathCode {
  public:
-  explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathX86(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -100,13 +99,13 @@ class DivZeroCheckSlowPathX86 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86"; }

  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86);
 };

 class DivRemMinusOneSlowPathX86 : public SlowPathCode {
  public:
-  DivRemMinusOneSlowPathX86(Register reg, bool is_div) : reg_(reg), is_div_(is_div) {}
+  DivRemMinusOneSlowPathX86(HInstruction* instruction, Register reg, bool is_div)
+      : SlowPathCode(instruction), reg_(reg), is_div_(is_div) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
@@ -128,7 +127,7 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
  public:
-  explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : instruction_(instruction) {}
+  explicit BoundsCheckSlowPathX86(HBoundsCheck* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -160,15 +159,13 @@ class BoundsCheckSlowPathX86 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86"; }

  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86);
 };

 class SuspendCheckSlowPathX86 : public SlowPathCode {
  public:
   SuspendCheckSlowPathX86(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCode(instruction), successor_(successor) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -199,7 +196,6 @@ class SuspendCheckSlowPathX86 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86"; }

  private:
-  HSuspendCheck* const instruction_;
   HBasicBlock* const successor_;
   Label return_label_;

@@ -208,7 +204,7 @@ class LoadStringSlowPathX86 : public SlowPathCode {
  public:
-  explicit LoadStringSlowPathX86(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathX86(HLoadString* instruction): SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -219,7 +215,8 @@ class LoadStringSlowPathX86 : public SlowPathCode {
     SaveLiveRegisters(codegen, locations);

     InvokeRuntimeCallingConvention calling_convention;
-    __ movl(calling_convention.GetRegisterAt(0), Immediate(instruction_->GetStringIndex()));
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ movl(calling_convention.GetRegisterAt(0), Immediate(string_index));
     x86_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                instruction_,
                                instruction_->GetDexPc(),
@@ -234,8 +231,6 @@ class LoadStringSlowPathX86 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86"; }

  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86);
 };

@@ -245,7 +240,7 @@ class LoadClassSlowPathX86 : public SlowPathCode {
                       HInstruction* at,
                       uint32_t dex_pc,
                       bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }

@@ -299,7 +294,7 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
  public:
   TypeCheckSlowPathX86(HInstruction* instruction, bool is_fatal)
-      : instruction_(instruction), is_fatal_(is_fatal) {}
+      : SlowPathCode(instruction), is_fatal_(is_fatal) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -356,7 +351,6 @@ class TypeCheckSlowPathX86 : public SlowPathCode {
   bool IsFatal() const OVERRIDE { return is_fatal_; }

  private:
-  HInstruction* const instruction_;
   const bool is_fatal_;

   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86);
@@ -365,7 +359,7 @@ class DeoptimizationSlowPathX86 : public SlowPathCode {
  public:
   explicit DeoptimizationSlowPathX86(HDeoptimize* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86* x86_codegen = down_cast<CodeGeneratorX86*>(codegen);
@@ -381,13 +375,12 @@ class DeoptimizationSlowPathX86 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "DeoptimizationSlowPathX86"; }

  private:
-  HDeoptimize* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86);
 };

 class ArraySetSlowPathX86 : public SlowPathCode {
  public:
-  explicit ArraySetSlowPathX86(HInstruction* instruction) : instruction_(instruction) {}
+  explicit ArraySetSlowPathX86(HInstruction* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -426,8 +419,6 @@ class ArraySetSlowPathX86 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86"; }

  private:
-  HInstruction* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86);
 };

@@ -435,7 +426,7 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
  public:
   ReadBarrierMarkSlowPathX86(HInstruction* instruction, Location out, Location obj)
-      : instruction_(instruction), out_(out), obj_(obj) {
+      : SlowPathCode(instruction), out_(out), obj_(obj) {
     DCHECK(kEmitCompilerReadBarrier);
   }

@@ -474,7 +465,6 @@ class ReadBarrierMarkSlowPathX86 : public SlowPathCode {
   }

  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location obj_;

@@ -490,7 +480,7 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
                                          Location obj,
                                          uint32_t offset,
                                          Location index)
-      : instruction_(instruction),
+      : SlowPathCode(instruction),
         out_(out),
         ref_(ref),
         obj_(obj),
@@ -645,7 +635,6 @@ class ReadBarrierForHeapReferenceSlowPathX86 : public SlowPathCode {
     UNREACHABLE();
   }

-  HInstruction* const instruction_;
   const Location out_;
   const Location ref_;
   const Location obj_;
@@ -662,7 +651,7 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
  public:
   ReadBarrierForRootSlowPathX86(HInstruction* instruction, Location out, Location root)
-      : instruction_(instruction), out_(out), root_(root) {
+      : SlowPathCode(instruction), out_(out), root_(root) {
     DCHECK(kEmitCompilerReadBarrier);
   }

@@ -695,7 +684,6 @@ class ReadBarrierForRootSlowPathX86 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86"; }

  private:
-  HInstruction* const instruction_;
   const Location out_;
   const Location root_;

@@ -3453,9 +3441,8 @@ void InstructionCodeGeneratorX86::GenerateDivRemIntegral(HBinaryOperation* instr
       GenerateDivRemWithAnyConstant(instruction);
     }
   } else {
-    SlowPathCode* slow_path =
-        new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(out.AsRegister(),
-                                                               is_div);
+    SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86(
+        instruction, out.AsRegister(), is_div);
     codegen_->AddSlowPath(slow_path);

     Register second_reg = second.AsRegister();

diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc
index 8def1de14..bb24c6f59 100644
--- a/compiler/optimizing/code_generator_x86_64.cc
+++ b/compiler/optimizing/code_generator_x86_64.cc
@@ -56,7 +56,7 @@ static constexpr int kC2ConditionMask = 0x400;
 class NullCheckSlowPathX86_64 : public SlowPathCode {
  public:
-  explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : instruction_(instruction) {}
+  explicit NullCheckSlowPathX86_64(HNullCheck* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
@@ -77,13 +77,12 @@ class NullCheckSlowPathX86_64 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "NullCheckSlowPathX86_64"; }

  private:
-  HNullCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(NullCheckSlowPathX86_64);
 };

 class DivZeroCheckSlowPathX86_64 : public SlowPathCode {
  public:
-  explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : instruction_(instruction) {}
+  explicit DivZeroCheckSlowPathX86_64(HDivZeroCheck* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
@@ -104,14 +103,13 @@ class DivZeroCheckSlowPathX86_64 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "DivZeroCheckSlowPathX86_64"; }

  private:
-  HDivZeroCheck* const instruction_;
   DISALLOW_COPY_AND_ASSIGN(DivZeroCheckSlowPathX86_64);
 };

 class DivRemMinusOneSlowPathX86_64 : public SlowPathCode {
  public:
-  DivRemMinusOneSlowPathX86_64(Register reg, Primitive::Type type, bool is_div)
-      : cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}
+  DivRemMinusOneSlowPathX86_64(HInstruction* at, Register reg, Primitive::Type type, bool is_div)
+      : SlowPathCode(at), cpu_reg_(CpuRegister(reg)), type_(type), is_div_(is_div) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     __ Bind(GetEntryLabel());
@@ -145,7 +143,7 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
  public:
   SuspendCheckSlowPathX86_64(HSuspendCheck* instruction, HBasicBlock* successor)
-      : instruction_(instruction), successor_(successor) {}
+      : SlowPathCode(instruction), successor_(successor) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
@@ -176,7 +174,6 @@ class SuspendCheckSlowPathX86_64 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "SuspendCheckSlowPathX86_64"; }

  private:
-  HSuspendCheck* const instruction_;
   HBasicBlock* const successor_;
   Label return_label_;

@@ -186,7 +183,7 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
  public:
   explicit BoundsCheckSlowPathX86_64(HBoundsCheck* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -218,8 +215,6 @@ class BoundsCheckSlowPathX86_64 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "BoundsCheckSlowPathX86_64"; }

  private:
-  HBoundsCheck* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(BoundsCheckSlowPathX86_64);
 };

@@ -229,7 +224,7 @@ class LoadClassSlowPathX86_64 : public SlowPathCode {
                         HInstruction* at,
                         uint32_t dex_pc,
                         bool do_clinit)
-      : cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
+      : SlowPathCode(at), cls_(cls), at_(at), dex_pc_(dex_pc), do_clinit_(do_clinit) {
     DCHECK(at->IsLoadClass() || at->IsClinitCheck());
   }

@@ -286,7 +281,7 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
  public:
-  explicit LoadStringSlowPathX86_64(HLoadString* instruction) : instruction_(instruction) {}
+  explicit LoadStringSlowPathX86_64(HLoadString* instruction) : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -297,8 +292,8 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
     SaveLiveRegisters(codegen, locations);

     InvokeRuntimeCallingConvention calling_convention;
-    __ movl(CpuRegister(calling_convention.GetRegisterAt(0)),
-            Immediate(instruction_->GetStringIndex()));
+    const uint32_t string_index = instruction_->AsLoadString()->GetStringIndex();
+    __ movl(CpuRegister(calling_convention.GetRegisterAt(0)), Immediate(string_index));
     x86_64_codegen->InvokeRuntime(QUICK_ENTRY_POINT(pResolveString),
                                   instruction_,
                                   instruction_->GetDexPc(),
@@ -312,15 +307,13 @@ class LoadStringSlowPathX86_64 : public SlowPathCode {
   const char* GetDescription() const OVERRIDE { return "LoadStringSlowPathX86_64"; }

  private:
-  HLoadString* const instruction_;
-
   DISALLOW_COPY_AND_ASSIGN(LoadStringSlowPathX86_64);
 };

 class TypeCheckSlowPathX86_64 : public SlowPathCode {
  public:
   TypeCheckSlowPathX86_64(HInstruction* instruction, bool is_fatal)
-      : instruction_(instruction), is_fatal_(is_fatal) {}
+      : SlowPathCode(instruction), is_fatal_(is_fatal) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     LocationSummary* locations = instruction_->GetLocations();
@@ -379,7 +372,6 @@ class TypeCheckSlowPathX86_64 : public SlowPathCode {
   bool IsFatal() const OVERRIDE { return is_fatal_; }

  private:
-  HInstruction* const instruction_;
   const bool is_fatal_;

   DISALLOW_COPY_AND_ASSIGN(TypeCheckSlowPathX86_64);
@@ -388,7 +380,7 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
  public:
   explicit DeoptimizationSlowPathX86_64(HDeoptimize* instruction)
-      : instruction_(instruction) {}
+      : SlowPathCode(instruction) {}

   void EmitNativeCode(CodeGenerator* codegen) OVERRIDE {
     CodeGeneratorX86_64* x86_64_codegen = down_cast<CodeGeneratorX86_64*>(codegen);
@@ -404,13 +396,12 @@ class DeoptimizationSlowPathX86_64 : public SlowPathCode {
"DeoptimizationSlowPathX86_64"; } private: - HDeoptimize* const instruction_; DISALLOW_COPY_AND_ASSIGN(DeoptimizationSlowPathX86_64); }; class ArraySetSlowPathX86_64 : public SlowPathCode { public: - explicit ArraySetSlowPathX86_64(HInstruction* instruction) : instruction_(instruction) {} + explicit ArraySetSlowPathX86_64(HInstruction* instruction) : SlowPathCode(instruction) {} void EmitNativeCode(CodeGenerator* codegen) OVERRIDE { LocationSummary* locations = instruction_->GetLocations(); @@ -449,8 +440,6 @@ class ArraySetSlowPathX86_64 : public SlowPathCode { const char* GetDescription() const OVERRIDE { return "ArraySetSlowPathX86_64"; } private: - HInstruction* const instruction_; - DISALLOW_COPY_AND_ASSIGN(ArraySetSlowPathX86_64); }; @@ -458,7 +447,7 @@ class ArraySetSlowPathX86_64 : public SlowPathCode { class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode { public: ReadBarrierMarkSlowPathX86_64(HInstruction* instruction, Location out, Location obj) - : instruction_(instruction), out_(out), obj_(obj) { + : SlowPathCode(instruction), out_(out), obj_(obj) { DCHECK(kEmitCompilerReadBarrier); } @@ -497,7 +486,6 @@ class ReadBarrierMarkSlowPathX86_64 : public SlowPathCode { } private: - HInstruction* const instruction_; const Location out_; const Location obj_; @@ -513,7 +501,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { Location obj, uint32_t offset, Location index) - : instruction_(instruction), + : SlowPathCode(instruction), out_(out), ref_(ref), obj_(obj), @@ -667,7 +655,6 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { UNREACHABLE(); } - HInstruction* const instruction_; const Location out_; const Location ref_; const Location obj_; @@ -684,7 +671,7 @@ class ReadBarrierForHeapReferenceSlowPathX86_64 : public SlowPathCode { class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode { public: ReadBarrierForRootSlowPathX86_64(HInstruction* instruction, Location out, Location root) - : instruction_(instruction), out_(out), root_(root) { + : SlowPathCode(instruction), out_(out), root_(root) { DCHECK(kEmitCompilerReadBarrier); } @@ -716,7 +703,6 @@ class ReadBarrierForRootSlowPathX86_64 : public SlowPathCode { const char* GetDescription() const OVERRIDE { return "ReadBarrierForRootSlowPathX86_64"; } private: - HInstruction* const instruction_; const Location out_; const Location root_; @@ -3546,7 +3532,7 @@ void InstructionCodeGeneratorX86_64::GenerateDivRemIntegral(HBinaryOperation* in } else { SlowPathCode* slow_path = new (GetGraph()->GetArena()) DivRemMinusOneSlowPathX86_64( - out.AsRegister(), type, is_div); + instruction, out.AsRegister(), type, is_div); codegen_->AddSlowPath(slow_path); CpuRegister second_reg = second.AsRegister(); diff --git a/compiler/optimizing/intrinsics_arm64.cc b/compiler/optimizing/intrinsics_arm64.cc index 8741fd284..b5f15fe22 100644 --- a/compiler/optimizing/intrinsics_arm64.cc +++ b/compiler/optimizing/intrinsics_arm64.cc @@ -99,7 +99,8 @@ static void MoveArguments(HInvoke* invoke, CodeGeneratorARM64* codegen) { // restored! 
 class IntrinsicSlowPathARM64 : public SlowPathCodeARM64 {
  public:
-  explicit IntrinsicSlowPathARM64(HInvoke* invoke) : invoke_(invoke) { }
+  explicit IntrinsicSlowPathARM64(HInvoke* invoke)
+      : SlowPathCodeARM64(invoke), invoke_(invoke) { }

   void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
     CodeGeneratorARM64* codegen = down_cast<CodeGeneratorARM64*>(codegen_in);

diff --git a/compiler/optimizing/intrinsics_mips.cc b/compiler/optimizing/intrinsics_mips.cc
index c8629644b..ca9a9dd36 100644
--- a/compiler/optimizing/intrinsics_mips.cc
+++ b/compiler/optimizing/intrinsics_mips.cc
@@ -99,7 +99,7 @@ static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS* codegen) {
 //       restored!
 class IntrinsicSlowPathMIPS : public SlowPathCodeMIPS {
  public:
-  explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : invoke_(invoke) { }
+  explicit IntrinsicSlowPathMIPS(HInvoke* invoke) : SlowPathCodeMIPS(invoke), invoke_(invoke) { }

   void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
     CodeGeneratorMIPS* codegen = down_cast<CodeGeneratorMIPS*>(codegen_in);

diff --git a/compiler/optimizing/intrinsics_mips64.cc b/compiler/optimizing/intrinsics_mips64.cc
index cf3a3657d..457e247d6 100644
--- a/compiler/optimizing/intrinsics_mips64.cc
+++ b/compiler/optimizing/intrinsics_mips64.cc
@@ -87,7 +87,8 @@ static void MoveArguments(HInvoke* invoke, CodeGeneratorMIPS64* codegen) {
 //       restored!
 class IntrinsicSlowPathMIPS64 : public SlowPathCodeMIPS64 {
  public:
-  explicit IntrinsicSlowPathMIPS64(HInvoke* invoke) : invoke_(invoke) { }
+  explicit IntrinsicSlowPathMIPS64(HInvoke* invoke)
+      : SlowPathCodeMIPS64(invoke), invoke_(invoke) { }

   void EmitNativeCode(CodeGenerator* codegen_in) OVERRIDE {
     CodeGeneratorMIPS64* codegen = down_cast<CodeGeneratorMIPS64*>(codegen_in);

diff --git a/compiler/optimizing/intrinsics_utils.h b/compiler/optimizing/intrinsics_utils.h
index e70afd29f..c1f9ae642 100644
--- a/compiler/optimizing/intrinsics_utils.h
+++ b/compiler/optimizing/intrinsics_utils.h
@@ -39,7 +39,7 @@ namespace art {
 template <typename TDexCallingConvention>
 class IntrinsicSlowPath : public SlowPathCode {
  public:
-  explicit IntrinsicSlowPath(HInvoke* invoke) : invoke_(invoke) { }
+  explicit IntrinsicSlowPath(HInvoke* invoke) : SlowPathCode(invoke), invoke_(invoke) { }

   Location MoveArguments(CodeGenerator* codegen) {
     TDexCallingConvention calling_convention_visitor;
-- 
2.11.0