From 407a9d2847161b843966a443b71760b1280bd396 Mon Sep 17 00:00:00 2001
From: Serguei Katkov
Date: Sat, 5 Jul 2014 03:09:32 +0700
Subject: [PATCH] Clean-up call_x86.cc

Also adds some DCHECKs and fixes for the bugs found by them.

Change-Id: I455bbfe2c6018590cf491880cd9273edbe39c4c7
Signed-off-by: Serguei Katkov
---
 compiler/dex/quick/x86/call_x86.cc   | 32 +++++++++++---------------------
 compiler/dex/quick/x86/codegen_x86.h |  4 ++--
 compiler/dex/quick/x86/target_x86.cc | 10 ++++++----
 3 files changed, 19 insertions(+), 27 deletions(-)

diff --git a/compiler/dex/quick/x86/call_x86.cc b/compiler/dex/quick/x86/call_x86.cc
index 6ca220cb2..900051485 100644
--- a/compiler/dex/quick/x86/call_x86.cc
+++ b/compiler/dex/quick/x86/call_x86.cc
@@ -94,13 +94,10 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
     start_of_method_reg = rl_method.reg;
     store_method_addr_used_ = true;
   } else {
-    if (cu_->target64) {
-      start_of_method_reg = AllocTempWide();
-    } else {
-      start_of_method_reg = AllocTemp();
-    }
+    start_of_method_reg = AllocTempRef();
     NewLIR1(kX86StartOfMethod, start_of_method_reg.GetReg());
   }
+  DCHECK_EQ(start_of_method_reg.Is64Bit(), cu_->target64);
   int low_key = s4FromSwitchData(&table[2]);
   RegStorage keyReg;
   // Remove the bias, if necessary
@@ -111,7 +108,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
     OpRegRegImm(kOpSub, keyReg, rl_src.reg, low_key);
   }
   // Bounds check - if < 0 or >= size continue following switch
-  OpRegImm(kOpCmp, keyReg, size-1);
+  OpRegImm(kOpCmp, keyReg, size - 1);
   LIR* branch_over = OpCondBranch(kCondHi, NULL);

   // Load the displacement from the switch table
@@ -119,11 +116,7 @@ void X86Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset,
   NewLIR5(kX86PcRelLoadRA, disp_reg.GetReg(), start_of_method_reg.GetReg(), keyReg.GetReg(), 2,
           WrapPointer(tab_rec));
   // Add displacement to start of method
-  if (cu_->target64) {
-    NewLIR2(kX86Add64RR, start_of_method_reg.GetReg(), disp_reg.GetReg());
-  } else {
-    OpRegReg(kOpAdd, start_of_method_reg, disp_reg);
-  }
+  OpRegReg(kOpAdd, start_of_method_reg, cu_->target64 ? As64BitReg(disp_reg) : disp_reg);
   // ..and go!
   LIR* switch_branch = NewLIR1(kX86JmpR, start_of_method_reg.GetReg());
   tab_rec->anchor = switch_branch;
@@ -174,7 +167,6 @@ void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
     }
     store_method_addr_used_ = true;
   } else {
-    // TODO(64) force to be 64-bit
     NewLIR1(kX86StartOfMethod, method_start.GetReg());
   }
   NewLIR2(kX86PcRelAdr, payload.GetReg(), WrapPointer(tab_rec));
@@ -193,8 +185,8 @@ void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
       Thread::ExceptionOffset<8>().Int32Value() :
       Thread::ExceptionOffset<4>().Int32Value();
   RegLocation rl_result = EvalLoc(rl_dest, kRefReg, true);
-  NewLIR2(kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
-  NewLIR2(kX86Mov32TI, ex_offset, 0);
+  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, rl_result.reg.GetReg(), ex_offset);
+  NewLIR2(cu_->target64 ? kX86Mov64TI : kX86Mov32TI, ex_offset, 0);
   StoreValue(rl_dest, rl_result);
 }

@@ -202,17 +194,15 @@ void X86Mir2Lir::GenMoveException(RegLocation rl_dest) {
  * Mark garbage collection card. Skip if the value we're storing is null.
  */
 void X86Mir2Lir::MarkGCCard(RegStorage val_reg, RegStorage tgt_addr_reg) {
-  RegStorage reg_card_base = AllocTemp();
-  RegStorage reg_card_no = AllocTemp();
+  DCHECK_EQ(tgt_addr_reg.Is64Bit(), cu_->target64);
+  DCHECK_EQ(val_reg.Is64Bit(), cu_->target64);
+  RegStorage reg_card_base = AllocTempRef();
+  RegStorage reg_card_no = AllocTempRef();
   LIR* branch_over = OpCmpImmBranch(kCondEq, val_reg, 0, NULL);
   int ct_offset = cu_->target64 ?
       Thread::CardTableOffset<8>().Int32Value() :
       Thread::CardTableOffset<4>().Int32Value();
-  if (cu_->target64) {
-    NewLIR2(kX86Mov64RT, reg_card_base.GetReg(), ct_offset);
-  } else {
-    NewLIR2(kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
-  }
+  NewLIR2(cu_->target64 ? kX86Mov64RT : kX86Mov32RT, reg_card_base.GetReg(), ct_offset);
   OpRegRegImm(kOpLsr, reg_card_no, tgt_addr_reg, gc::accounting::CardTable::kCardShift);
   StoreBaseIndexed(reg_card_base, reg_card_no, reg_card_base, 0, kUnsignedByte);
   LIR* target = NewLIR0(kPseudoTargetLabel);
diff --git a/compiler/dex/quick/x86/codegen_x86.h b/compiler/dex/quick/x86/codegen_x86.h
index 55e5993dc..5f4f23ae5 100644
--- a/compiler/dex/quick/x86/codegen_x86.h
+++ b/compiler/dex/quick/x86/codegen_x86.h
@@ -28,7 +28,7 @@ class X86Mir2Lir : public Mir2Lir {
  protected:
   class InToRegStorageMapper {
    public:
-    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide) = 0;
+    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) = 0;
     virtual ~InToRegStorageMapper() {}
   };

@@ -36,7 +36,7 @@ class X86Mir2Lir : public Mir2Lir {
    public:
     explicit InToRegStorageX86_64Mapper(Mir2Lir* ml) : ml_(ml), cur_core_reg_(0), cur_fp_reg_(0) {}
     virtual ~InToRegStorageX86_64Mapper() {}
-    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide);
+    virtual RegStorage GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref);
    protected:
     Mir2Lir* ml_;
    private:
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 43882c2e0..2731343b2 100755
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -1867,7 +1867,7 @@ LIR *X86Mir2Lir::AddVectorLiteral(MIR *mir) {
 }

 // ------------ ABI support: mapping of args to physical registers -------------
-RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide) {
+RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_float, bool is_wide, bool is_ref) {
   const SpecialTargetRegister coreArgMappingToPhysicalReg[] = {kArg1, kArg2, kArg3, kArg4, kArg5};
   const int coreArgMappingToPhysicalRegSize = sizeof(coreArgMappingToPhysicalReg) / sizeof(SpecialTargetRegister);
   const SpecialTargetRegister fpArgMappingToPhysicalReg[] = {kFArg0, kFArg1, kFArg2, kFArg3,
@@ -1880,7 +1880,8 @@ RegStorage X86Mir2Lir::InToRegStorageX86_64Mapper::GetNextReg(bool is_double_or_
     }
   } else {
     if (cur_core_reg_ < coreArgMappingToPhysicalRegSize) {
-      return ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
+      return is_ref ? ml_->TargetRefReg(coreArgMappingToPhysicalReg[cur_core_reg_++]) :
+                      ml_->TargetReg(coreArgMappingToPhysicalReg[cur_core_reg_++], is_wide);
     }
   }
   return RegStorage::InvalidReg();
@@ -1897,11 +1898,12 @@ void X86Mir2Lir::InToRegStorageMapping::Initialize(RegLocation* arg_locs, int co
   max_mapped_in_ = -1;
   is_there_stack_mapped_ = false;
   for (int in_position = 0; in_position < count; in_position++) {
-    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp, arg_locs[in_position].wide);
+    RegStorage reg = mapper->GetNextReg(arg_locs[in_position].fp,
+                                        arg_locs[in_position].wide, arg_locs[in_position].ref);
     if (reg.Valid()) {
       mapping_[in_position] = reg;
       max_mapped_in_ = std::max(max_mapped_in_, in_position);
-      if (reg.Is64BitSolo()) {
+      if (arg_locs[in_position].wide) {
         // We covered 2 args, so skip the next one
         in_position++;
       }
-- 
2.11.0