From 6a58cb16d803c9a7b3a75ccac8be19dd9d4e520d Mon Sep 17 00:00:00 2001
From: Dmitry Petrochenko
Date: Wed, 2 Apr 2014 17:27:59 +0700
Subject: [PATCH] art: Handle the x86_64 architecture the same as x86

This patch forces the compiler front end and middle end (FE/ME) to
treat x86_64 exactly as x86. The x86_64-specific logic will be revised
later, once the assembly support is ready.

Change-Id: I4a92477a6eeaa9a11fd710d35c602d8d6f88cbb6
Signed-off-by: Dmitry Petrochenko
---
 compiler/compilers.cc                     |  6 +++++
 compiler/dex/frontend.cc                  |  1 +
 compiler/dex/mir_optimization.cc          |  2 +-
 compiler/dex/quick/codegen_util.cc        |  1 +
 compiler/dex/quick/gen_common.cc          | 23 +++++++++---------
 compiler/dex/quick/gen_invoke.cc          | 40 +++++++++++++++----------------
 compiler/dex/quick/gen_loadstore.cc       |  4 ++--
 compiler/dex/quick/local_optimizations.cc |  8 +++----
 compiler/dex/quick/mir_to_lir.cc          |  4 ++--
 compiler/dex/quick/x86/int_x86.cc         |  4 ++--
 compiler/dex/quick/x86/target_x86.cc      |  2 +-
 compiler/driver/compiler_driver.cc        |  5 ++++
 compiler/optimizing/code_generator.cc     |  3 +++
 13 files changed, 60 insertions(+), 43 deletions(-)

diff --git a/compiler/compilers.cc b/compiler/compilers.cc
index f58b38b57..1237e7019 100644
--- a/compiler/compilers.cc
+++ b/compiler/compilers.cc
@@ -107,6 +107,9 @@ Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_
     case kX86:
       mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
       break;
+    case kX86_64:
+      mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+      break;
     default:
       LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
   }
@@ -125,6 +128,9 @@ std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
     const CompilerDriver& driver) const {
   if (driver.GetInstructionSet() == kX86) {
     return X86CFIInitialization();
   }
+  if (driver.GetInstructionSet() == kX86_64) {
+    return X86CFIInitialization();
+  }
   return nullptr;
 }
diff --git a/compiler/dex/frontend.cc b/compiler/dex/frontend.cc
index cc616f6ca..e48e5bf12 100644
--- a/compiler/dex/frontend.cc
+++ b/compiler/dex/frontend.cc
@@ -160,6 +160,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,

   // TODO: x86_64 & arm64 are not yet implemented.
   DCHECK((cu.instruction_set == kThumb2) ||
          (cu.instruction_set == kX86) ||
+         (cu.instruction_set == kX86_64) ||
          (cu.instruction_set == kMips));

diff --git a/compiler/dex/mir_optimization.cc b/compiler/dex/mir_optimization.cc
index 45c8d875d..51419f458 100644
--- a/compiler/dex/mir_optimization.cc
+++ b/compiler/dex/mir_optimization.cc
@@ -408,7 +408,7 @@ bool MIRGraph::BasicBlockOpt(BasicBlock* bb) {
       // TODO: flesh out support for Mips.  NOTE: llvm's select op doesn't quite work here.
       // TUNING: expand to support IF_xx compare & branches
       if (!cu_->compiler->IsPortable() &&
-          (cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86) &&
+          (cu_->instruction_set == kThumb2 || cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) &&
           IsInstructionIfCcZ(mir->dalvikInsn.opcode)) {
         BasicBlock* ft = GetBasicBlock(bb->fall_through);
         DCHECK(ft != NULL);
diff --git a/compiler/dex/quick/codegen_util.cc b/compiler/dex/quick/codegen_util.cc
index 4c6c7a45b..6e6b8f0a3 100644
--- a/compiler/dex/quick/codegen_util.cc
+++ b/compiler/dex/quick/codegen_util.cc
@@ -480,6 +480,7 @@ void Mir2Lir::InstallSwitchTables() {
         bx_offset = tab_rec->anchor->offset + 4;
         break;
       case kX86:
+      case kX86_64:
         bx_offset = 0;
         break;
       case kMips:
diff --git a/compiler/dex/quick/gen_common.cc b/compiler/dex/quick/gen_common.cc
index 7af9d5775..c5c42e836 100644
--- a/compiler/dex/quick/gen_common.cc
+++ b/compiler/dex/quick/gen_common.cc
@@ -349,6 +349,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
       r_val = TargetReg(kLr);
       break;
     case kX86:
+    case kX86_64:
       FreeTemp(TargetReg(kRet0));
       r_val = AllocTemp();
       break;
@@ -372,7 +373,7 @@ void Mir2Lir::GenFilledNewArray(CallInfo* info) {
     StoreBaseIndexed(r_dst, r_idx, r_val, 2, kWord);
     FreeTemp(r_val);
     OpDecAndBranch(kCondGe, r_idx, target);
-    if (cu_->instruction_set == kX86) {
+    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
       // Restore the target pointer
       OpRegRegImm(kOpAdd, TargetReg(kRet0), r_dst,
                   -mirror::Array::DataOffset(component_size).Int32Value());
@@ -634,7 +635,7 @@ void Mir2Lir::HandleThrowLaunchPads() {
     ThreadOffset<4> func_offset(-1);
     int v1 = lab->operands[2];
     int v2 = lab->operands[3];
-    const bool target_x86 = cu_->instruction_set == kX86;
+    const bool target_x86 = cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64;
     switch (lab->operands[0]) {
       case kThrowNullPointer:
         func_offset = QUICK_ENTRYPOINT_OFFSET(4, pThrowNullPointer);
@@ -718,7 +719,7 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
   if (is_long_or_double) {
     DCHECK(rl_dest.wide);
     GenNullCheck(rl_obj.reg, opt_flags);
-    if (cu_->instruction_set == kX86) {
+    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
       rl_result = EvalLoc(rl_dest, reg_class, true);
       // FIXME?  duplicate null check?
       GenNullCheck(rl_obj.reg, opt_flags);
@@ -966,7 +967,7 @@ void Mir2Lir::GenConstString(uint32_t string_idx, RegLocation rl_dest) {
       // Add to list for future.
       AddSlowPath(new (arena_) SlowPath(this, fromfast, cont, r_method));
     } else {
-      DCHECK_EQ(cu_->instruction_set, kX86);
+      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
       LIR* branch = OpCmpImmBranch(kCondNe, TargetReg(kRet0), 0, NULL);
       LoadConstant(TargetReg(kArg1), string_idx);
       CallRuntimeHelperRegReg(QUICK_ENTRYPOINT_OFFSET(4, pResolveString), r_method, TargetReg(kArg1),
@@ -1050,7 +1051,7 @@ void Mir2Lir::GenThrow(RegLocation rl_src) {
 void Mir2Lir::GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx, RegLocation rl_dest,
                                  RegLocation rl_src) {
   // X86 has its own implementation.
-  DCHECK_NE(cu_->instruction_set, kX86);
+  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

   RegLocation object = LoadValue(rl_src, kCoreReg);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
@@ -1108,7 +1109,7 @@ void Mir2Lir::GenInstanceofCallingHelper(bool needs_access_check, bool type_know
                                          uint32_t type_idx, RegLocation rl_dest,
                                          RegLocation rl_src) {
   // X86 has its own implementation.
-  DCHECK_NE(cu_->instruction_set, kX86);
+  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);

   FlushAllRegs();
   // May generate a call - use explicit registers
@@ -1430,7 +1431,7 @@ void Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,

 void Mir2Lir::GenArithOpInt(Instruction::Code opcode, RegLocation rl_dest,
                             RegLocation rl_src1, RegLocation rl_src2) {
-  DCHECK_NE(cu_->instruction_set, kX86);
+  DCHECK(cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64);
   OpKind op = kOpBkpt;
   bool is_div_rem = false;
   bool check_zero = false;
@@ -1801,7 +1802,7 @@ void Mir2Lir::GenArithOpIntLit(Instruction::Code opcode, RegLocation rl_dest, Re
       rl_src = LoadValue(rl_src, kCoreReg);
       rl_result = GenDivRemLit(rl_dest, rl_src.reg, lit, is_div);
       done = true;
-    } else if (cu_->instruction_set == kX86) {
+    } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
       rl_result = GenDivRemLit(rl_dest, rl_src, lit, is_div);
       done = true;
     } else if (cu_->instruction_set == kThumb2) {
@@ -1915,7 +1916,7 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
       break;
     case Instruction::AND_LONG_2ADDR:
     case Instruction::AND_LONG:
-      if (cu_->instruction_set == kX86) {
+      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
        return GenAndLong(opcode, rl_dest, rl_src1, rl_src2);
       }
       first_op = kOpAnd;
@@ -1923,7 +1924,7 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
       break;
     case Instruction::OR_LONG:
     case Instruction::OR_LONG_2ADDR:
-      if (cu_->instruction_set == kX86) {
+      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
         GenOrLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
@@ -1932,7 +1933,7 @@ void Mir2Lir::GenArithOpLong(Instruction::Code opcode, RegLocation rl_dest,
       break;
     case Instruction::XOR_LONG:
     case Instruction::XOR_LONG_2ADDR:
-      if (cu_->instruction_set == kX86) {
+      if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
         GenXorLong(opcode, rl_dest, rl_src1, rl_src2);
         return;
       }
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 352130d1d..188b18cac 100644
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -67,7 +67,7 @@ void Mir2Lir::AddIntrinsicLaunchpad(CallInfo* info, LIR* branch, LIR* resume) {
  * load arguments between the two parts.
  */
 RegStorage Mir2Lir::CallHelperSetup(ThreadOffset<4> helper_offset) {
-  return (cu_->instruction_set == kX86) ? RegStorage::InvalidReg() : LoadHelper(helper_offset);
+  return (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) ? RegStorage::InvalidReg() : LoadHelper(helper_offset);
 }

 /* NOTE: if r_tgt is a temp, it will be freed following use */
@@ -75,7 +75,7 @@ LIR* Mir2Lir::CallHelper(RegStorage r_tgt, ThreadOffset<4> helper_offset, bool s
                          bool use_link) {
   LIR* call_inst;
   OpKind op = use_link ? kOpBlx : kOpBx;
-  if (cu_->instruction_set == kX86) {
+  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     call_inst = OpThreadMem(op, helper_offset);
   } else {
     call_inst = OpReg(op, r_tgt);
@@ -435,10 +435,10 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
   switch (state) {
   case 0:  // Get the current Method* [sets kArg0]
     if (direct_code != static_cast<uintptr_t>(-1)) {
-      if (cu->instruction_set != kX86) {
+      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
         cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
       }
-    } else if (cu->instruction_set != kX86) {
+    } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
       cg->LoadCodeAddress(target_method, type, kInvokeTgt);
     }
     if (direct_method != static_cast<uintptr_t>(-1)) {
@@ -464,7 +464,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
       if (direct_code != 0) {
         if (direct_code != static_cast<uintptr_t>(-1)) {
           cg->LoadConstant(cg->TargetReg(kInvokeTgt), direct_code);
-        } else if (cu->instruction_set != kX86) {
+        } else if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
           CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
           cg->LoadCodeAddress(target_method, type, kInvokeTgt);
         }
@@ -477,7 +477,7 @@ static int NextSDCallInsn(CompilationUnit* cu, CallInfo* info,
                       (target_method.dex_method_index * 4),
                       cg->TargetReg(kArg0));
       break;
     case 3:  // Grab the code from the method*
-      if (cu->instruction_set != kX86) {
+      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
         if (direct_code == 0) {
           cg->LoadWordDisp(cg->TargetReg(kArg0),
                            mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
@@ -532,7 +532,7 @@ static int NextVCallInsn(CompilationUnit* cu, CallInfo* info,
                        cg->TargetReg(kArg0));
       break;
     case 4:  // Get the compiled code address [uses kArg0, sets kInvokeTgt]
-      if (cu->instruction_set != kX86) {
+      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
         cg->LoadWordDisp(cg->TargetReg(kArg0),
                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                          cg->TargetReg(kInvokeTgt));
@@ -561,7 +561,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
     case 0:  // Set target method index in case of conflict [set kHiddenArg, kHiddenFpArg (x86)]
       CHECK_LT(target_method.dex_method_index, target_method.dex_file->NumMethodIds());
       cg->LoadConstant(cg->TargetReg(kHiddenArg), target_method.dex_method_index);
-      if (cu->instruction_set == kX86) {
+      if (cu->instruction_set == kX86 || cu->instruction_set == kX86_64) {
         cg->OpRegCopy(cg->TargetReg(kHiddenFpArg), cg->TargetReg(kHiddenArg));
       }
       break;
@@ -587,7 +587,7 @@ static int NextInterfaceCallInsn(CompilationUnit* cu, CallInfo* info, int state,
                        cg->TargetReg(kArg0));
       break;
     case 5:  // Get the compiled code address [use kArg0, set kInvokeTgt]
-      if (cu->instruction_set != kX86) {
+      if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
         cg->LoadWordDisp(cg->TargetReg(kArg0),
                          mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
                          cg->TargetReg(kInvokeTgt));
@@ -609,7 +609,7 @@ static int NextInvokeInsnSP(CompilationUnit* cu, CallInfo* info, ThreadOffset<4>
    * resolved at compile time, we bail to a runtime helper.
    */
   if (state == 0) {
-    if (cu->instruction_set != kX86) {
+    if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
       // Load trampoline target
       cg->LoadWordDisp(cg->TargetReg(kSelf), trampoline.Int32Value(), cg->TargetReg(kInvokeTgt));
     }
@@ -879,7 +879,7 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
       st->u.m.def_mask = ENCODE_ALL;
       call_state = next_call_insn(cu_, info, call_state, target_method, vtable_idx,
                                   direct_code, direct_method, type);
-  } else if (cu_->instruction_set == kX86) {
+  } else if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     int current_src_offset = start_offset;
     int current_dest_offset = outs_offset;

@@ -1054,7 +1054,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
   RegLocation rl_idx = info->args[1];
   rl_obj = LoadValue(rl_obj, kCoreReg);
   // X86 wants to avoid putting a constant index into a register.
-  if (!(cu_->instruction_set == kX86 && rl_idx.is_const)) {
+  if (!((cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) && rl_idx.is_const)) {
     rl_idx = LoadValue(rl_idx, kCoreReg);
   }
   RegStorage reg_max;
@@ -1063,7 +1063,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
   LIR* range_check_branch = nullptr;
   RegStorage reg_off;
   RegStorage reg_ptr;
-  if (cu_->instruction_set != kX86) {
+  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     reg_off = AllocTemp();
     reg_ptr = AllocTemp();
     if (range_check) {
@@ -1110,7 +1110,7 @@ bool Mir2Lir::GenInlinedCharAt(CallInfo* info) {
   }
   RegLocation rl_dest = InlineTarget(info);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (cu_->instruction_set != kX86) {
+  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     LoadBaseIndexed(reg_ptr, reg_off, rl_result.reg, 1, kUnsignedHalf);
   } else {
     LoadBaseIndexedDisp(reg_ptr, reg_off, 1, data_offset, rl_result.reg,
@@ -1148,7 +1148,7 @@ bool Mir2Lir::GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty) {
       OpRegReg(kOpNeg, t_reg, rl_result.reg);
       OpRegRegReg(kOpAdc, rl_result.reg, rl_result.reg, t_reg);
     } else {
-      DCHECK_EQ(cu_->instruction_set, kX86);
+      DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
       OpRegImm(kOpSub, rl_result.reg, 1);
       OpRegImm(kOpLsr, rl_result.reg, 31);
     }
@@ -1218,7 +1218,7 @@ bool Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
     RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);

     // If on x86 or if we would clobber a register needed later, just copy the source first.
-    if (cu_->instruction_set == kX86 || rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
+    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64 || rl_result.reg.GetLowReg() == rl_src.reg.GetHighReg()) {
       OpRegCopyWide(rl_result.reg, rl_src.reg);
       if (rl_result.reg.GetLowReg() != rl_src.reg.GetLowReg() &&
           rl_result.reg.GetLowReg() != rl_src.reg.GetHighReg() &&
@@ -1359,7 +1359,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
   RegLocation rl_cmp = info->args[1];
   LoadValueDirectFixed(rl_this, reg_this);
   LoadValueDirectFixed(rl_cmp, reg_cmp);
-  RegStorage r_tgt = (cu_->instruction_set != kX86) ?
+  RegStorage r_tgt = (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) ?
       LoadHelper(QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo)) : RegStorage::InvalidReg();
   GenExplicitNullCheck(reg_this, info->opt_flags);
   info->opt_flags |= MIR_IGNORE_NULL_CHECK;  // Record that we've null checked.
@@ -1367,7 +1367,7 @@ bool Mir2Lir::GenInlinedStringCompareTo(CallInfo* info) {
     LIR* cmp_null_check_branch = OpCmpImmBranch(kCondEq, reg_cmp, 0, nullptr);
     AddIntrinsicLaunchpad(info, cmp_null_check_branch);  // NOTE: not a safepoint
   }
-  if (cu_->instruction_set != kX86) {
+  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     OpReg(kOpBlx, r_tgt);
   } else {
     OpThreadMem(kOpBlx, QUICK_ENTRYPOINT_OFFSET(4, pStringCompareTo));
@@ -1385,7 +1385,7 @@ bool Mir2Lir::GenInlinedCurrentThread(CallInfo* info) {
   if (cu_->instruction_set == kThumb2 || cu_->instruction_set == kMips) {
     LoadWordDisp(TargetReg(kSelf), offset.Int32Value(), rl_result.reg);
   } else {
-    CHECK(cu_->instruction_set == kX86);
+    CHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
     reinterpret_cast<X86Mir2Lir*>(this)->OpRegThreadMem(kOpMov, rl_result.reg.GetReg(), offset);
   }
   StoreValue(rl_dest, rl_result);
@@ -1556,7 +1556,7 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
                                     method_info.DirectCode(), method_info.DirectMethod(), original_type);
   }
   LIR* call_inst;
-  if (cu_->instruction_set != kX86) {
+  if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
     call_inst = OpReg(kOpBlx, TargetReg(kInvokeTgt));
   } else {
     if (fast_path) {
diff --git a/compiler/dex/quick/gen_loadstore.cc b/compiler/dex/quick/gen_loadstore.cc
index 36d619913..897d86d09 100644
--- a/compiler/dex/quick/gen_loadstore.cc
+++ b/compiler/dex/quick/gen_loadstore.cc
@@ -262,7 +262,7 @@ void Mir2Lir::StoreValueWide(RegLocation rl_dest, RegLocation rl_src) {
       MarkPair(rl_dest.reg.GetLowReg(), rl_dest.reg.GetHighReg());
     } else {
       // This must be an x86 vector register value,
-      DCHECK(IsFpReg(rl_dest.reg) && (cu_->instruction_set == kX86));
+      DCHECK(IsFpReg(rl_dest.reg) && (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64));
     }
     MarkDirty(rl_dest);
   }
@@ -338,7 +338,7 @@ void Mir2Lir::StoreFinalValueWide(RegLocation rl_dest, RegLocation rl_src) {
       MarkPair(rl_dest.reg.GetLowReg(), rl_dest.reg.GetHighReg());
     } else {
       // This must be an x86 vector register value,
-      DCHECK(IsFpReg(rl_dest.reg) && (cu_->instruction_set == kX86));
+      DCHECK(IsFpReg(rl_dest.reg) && (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64));
     }
     MarkDirty(rl_dest);
   }
diff --git a/compiler/dex/quick/local_optimizations.cc b/compiler/dex/quick/local_optimizations.cc
index 8f64408b4..4bdc9fa4b 100644
--- a/compiler/dex/quick/local_optimizations.cc
+++ b/compiler/dex/quick/local_optimizations.cc
@@ -100,7 +100,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
     }

     int native_reg_id;
-    if (cu_->instruction_set == kX86) {
+    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
       // If x86, location differs depending on whether memory/reg operation.
       native_reg_id = (target_flags & IS_STORE) ? this_lir->operands[2] : this_lir->operands[0];
     } else {
@@ -121,7 +121,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
     uint64_t stop_def_reg_mask = this_lir->u.m.def_mask & ~ENCODE_MEM;
     uint64_t stop_use_reg_mask;
-    if (cu_->instruction_set == kX86) {
+    if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
       stop_use_reg_mask = (IS_BRANCH | this_lir->u.m.use_mask) & ~ENCODE_MEM;
     } else {
       /*
@@ -241,7 +241,7 @@ void Mir2Lir::ApplyLoadStoreElimination(LIR* head_lir, LIR* tail_lir) {
       }

       if (stop_here == true) {
-        if (cu_->instruction_set == kX86) {
+        if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
           // Prevent stores from being sunk between ops that generate ccodes and
           // ops that use them.
           uint64_t flags = GetTargetInstFlags(check_lir->opcode);
@@ -306,7 +306,7 @@ void Mir2Lir::ApplyLoadHoisting(LIR* head_lir, LIR* tail_lir) {

     uint64_t stop_use_all_mask = this_lir->u.m.use_mask;

-    if (cu_->instruction_set != kX86) {
+    if (cu_->instruction_set != kX86 && cu_->instruction_set != kX86_64) {
       /*
        * Branches for null/range checks are marked with the true resource
        * bits, and loads to Dalvik registers, constant pools, and non-alias
diff --git a/compiler/dex/quick/mir_to_lir.cc b/compiler/dex/quick/mir_to_lir.cc
index cd3dadbc7..73fdc8285 100644
--- a/compiler/dex/quick/mir_to_lir.cc
+++ b/compiler/dex/quick/mir_to_lir.cc
@@ -43,7 +43,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {
       RegStorage::InvalidReg();
   int offset = StackVisitor::GetOutVROffset(in_position);

-  if (cu_->instruction_set == kX86) {
+  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     /*
      * When doing a call for x86, it moves the stack pointer in order to push return.
      * Thus, we add another 4 bytes to figure out the out of caller (in of callee).
@@ -82,7 +82,7 @@ RegStorage Mir2Lir::LoadArg(int in_position, bool wide) {

 void Mir2Lir::LoadArgDirect(int in_position, RegLocation rl_dest) {
   int offset = StackVisitor::GetOutVROffset(in_position);
-  if (cu_->instruction_set == kX86) {
+  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
     /*
      * When doing a call for x86, it moves the stack pointer in order to push return.
      * Thus, we add another 4 bytes to figure out the out of caller (in of callee).
diff --git a/compiler/dex/quick/x86/int_x86.cc b/compiler/dex/quick/x86/int_x86.cc
index 4310d6e43..bbcedc321 100644
--- a/compiler/dex/quick/x86/int_x86.cc
+++ b/compiler/dex/quick/x86/int_x86.cc
@@ -663,7 +663,7 @@ RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
 }

 bool X86Mir2Lir::GenInlinedMinMaxInt(CallInfo* info, bool is_min) {
-  DCHECK_EQ(cu_->instruction_set, kX86);
+  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);

   // Get the two arguments to the invoke and place them in GP registers.
   RegLocation rl_src1 = info->args[0];
@@ -751,7 +751,7 @@ static bool IsInReg(X86Mir2Lir *pMir2Lir, const RegLocation &rl, RegStorage reg)
 }

 bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
-  DCHECK_EQ(cu_->instruction_set, kX86);
+  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
   // Unused - RegLocation rl_src_unsafe = info->args[0];
   RegLocation rl_src_obj = info->args[1];  // Object - known non-null
   RegLocation rl_src_offset = info->args[2];  // long low
diff --git a/compiler/dex/quick/x86/target_x86.cc b/compiler/dex/quick/x86/target_x86.cc
index 925e73683..dcc5d9b73 100644
--- a/compiler/dex/quick/x86/target_x86.cc
+++ b/compiler/dex/quick/x86/target_x86.cc
@@ -149,7 +149,7 @@ uint64_t X86Mir2Lir::GetPCUseDefEncoding() {
 }

 void X86Mir2Lir::SetupTargetResourceMasks(LIR* lir, uint64_t flags) {
-  DCHECK_EQ(cu_->instruction_set, kX86);
+  DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
   DCHECK(!lir->flags.use_def_invalid);

   // X86-specific resource map setup here.
diff --git a/compiler/driver/compiler_driver.cc b/compiler/driver/compiler_driver.cc
index a120d054c..4f3533a17 100644
--- a/compiler/driver/compiler_driver.cc
+++ b/compiler/driver/compiler_driver.cc
@@ -2015,6 +2015,11 @@ void CompilerDriver::InstructionSetToLLVMTarget(InstructionSet instruction_set,
       *target_attr = "";
       break;

+    case kX86_64:
+      *target_triple = "x86_64-pc-linux-gnu";
+      *target_attr = "";
+      break;
+
     case kMips:
       *target_triple = "mipsel-unknown-linux";
       *target_attr = "mips32r2";
diff --git a/compiler/optimizing/code_generator.cc b/compiler/optimizing/code_generator.cc
index b86665b9e..2207fd761 100644
--- a/compiler/optimizing/code_generator.cc
+++ b/compiler/optimizing/code_generator.cc
@@ -112,6 +112,9 @@ CodeGenerator* CodeGenerator::Create(ArenaAllocator* allocator,
     case kX86: {
       return new (allocator) x86::CodeGeneratorX86(graph);
     }
+    case kX86_64: {
+      return new (allocator) x86::CodeGeneratorX86(graph);
+    }
     default:
       return nullptr;
   }
--
2.11.0
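
Every hunk above applies the same rewrite: a cu_->instruction_set == kX86
check becomes kX86 || kX86_64 (or, negated, != kX86 && != kX86_64), so
x86_64 rides the existing x86 code paths. A minimal, self-contained sketch
of that rule as a single shared predicate follows. IsX86Family is a
hypothetical name chosen here for illustration, not a helper in the tree
this patch targets, and the enum is trimmed to the values the patch touches.

// Hypothetical consolidation of the repeated check -- a sketch, not ART code.
#include <cstdio>

enum InstructionSet { kNone, kArm, kThumb2, kX86, kX86_64, kMips };

// True for any instruction set that should take the 32-bit x86 code paths,
// which under this patch is x86 itself and, for now, x86_64.
static inline bool IsX86Family(InstructionSet isa) {
  return isa == kX86 || isa == kX86_64;
}

int main() {
  // Mirrors a call site such as CallHelperSetup(): x86 and x86_64 share one
  // branch, and every other instruction set takes the generic one.
  const InstructionSet isa = kX86_64;
  if (IsX86Family(isa)) {
    std::printf("x86 code path\n");
  } else {
    std::printf("generic code path\n");
  }
  return 0;
}

Once dedicated x86_64 assembly support lands, as the commit message
anticipates, only the call sites that actually diverge would need to stop
using such a predicate.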