From e832e64a7e82d7f72aedbd7d798fb929d458ee8f Mon Sep 17 00:00:00 2001
From: Mathieu Chartier
Date: Mon, 10 Nov 2014 11:08:06 -0800
Subject: [PATCH] Change 64 bit ArtMethod fields to be pointer sized

Changed the 64 bit entrypoint and gc map fields in ArtMethod to be
pointer sized. This saves a large amount of memory on 32 bit systems:
it reduces the ArtMethod size by 16 bytes on 32 bit.

Total number of ArtMethods on low memory mako: 169957
Image: 49203 methods -> 787248 bytes of image size reduction.
Zygote space: 1070 methods -> 17120 bytes of size reduction.
App methods: ~120k -> ~2 MB of savings.

Savings on low memory mako: 125K+ per app (fewer active apps -> more
image methods per app). Actual savings depend on how often the shared
methods end up on dirty pages versus staying shared.

TODO in another CL: delete the gc map field from ArtMethod, since we
should be able to get it from the oat method header.

Bug: 17643507
Change-Id: Ie9508f05907a9f693882d4d32a564460bf273ee8
---
 compiler/compilers.cc                          |   4 +-
 compiler/dex/quick/gen_invoke.cc               |  10 +-
 compiler/image_writer.cc                       |  82 ++++++--
 compiler/image_writer.h                        |   3 +
 compiler/jni/quick/jni_compiler.cc             |   4 +-
 compiler/optimizing/code_generator_arm.cc      |   4 +-
 compiler/optimizing/code_generator_x86.cc      |   3 +-
 compiler/optimizing/code_generator_x86_64.cc   |   3 +-
 patchoat/patchoat.cc                           |  35 ++--
 patchoat/patchoat.h                            |  16 +-
 runtime/arch/arm/portable_entrypoints_arm.S    |   2 +-
 runtime/arch/arm/quick_entrypoints_arm.S       |   2 +-
 runtime/arch/arm64/quick_entrypoints_arm64.S   |   2 +-
 runtime/arch/mips/portable_entrypoints_mips.S  |   2 +-
 runtime/arch/mips/quick_entrypoints_mips.S     |   2 +-
 runtime/arch/x86/portable_entrypoints_x86.S    |   2 +-
 runtime/arch/x86/quick_entrypoints_x86.S       |   2 +-
 runtime/arch/x86_64/quick_entrypoints_x86_64.S |   4 +-
 runtime/asm_support.h                          |  13 +-
 runtime/class_linker.cc                        |  44 +++--
 runtime/class_linker.h                         |   3 +
 runtime/class_linker_test.cc                   |   7 -
 .../quick/quick_trampoline_entrypoints.cc      |   2 +-
 runtime/image.cc                               |   2 +-
 runtime/instruction_set.h                      |   4 +
 runtime/interpreter/interpreter.cc             |  37 ++--
 runtime/mirror/art_method-inl.h                |  13 +-
 runtime/mirror/art_method.cc                   |   2 +-
 runtime/mirror/art_method.h                    | 220 +++++++++++++++------
 runtime/mirror/class.h                         |   7 +
 runtime/mirror/object-inl.h                    |   4 +-
 runtime/mirror/object.h                        |  45 +++--
 runtime/mirror/object_test.cc                  |  13 +-
 runtime/native_bridge_art_interface.cc         |   4 +-
 34 files changed, 408 insertions(+), 194 deletions(-)

diff --git a/compiler/compilers.cc b/compiler/compilers.cc
index 250924ad3..2481128e4 100644
--- a/compiler/compilers.cc
+++ b/compiler/compilers.cc
@@ -84,7 +84,9 @@ CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags,
 }
 
 uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
-  return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+  size_t pointer_size = InstructionSetPointerSize(GetCompilerDriver()->GetInstructionSet());
+  return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCodePtrSize(
+      pointer_size));
 }
 
 bool QuickCompiler::WriteElf(art::File* file,
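The arithmetic behind the commit-message numbers is easy to check. A
back-of-the-envelope sketch (C++; assumes the four pointer-sized fields this
patch introduces, each shrinking from 8 to 4 bytes on a 32 bit target):

    static_assert(4 * (8 - 4) == 16, "per-ArtMethod saving on 32 bit");
    static_assert(49203 * 16 == 787248, "image methods -> image size reduction");
    static_assert(1070 * 16 == 17120, "zygote methods -> zygote space reduction");
    // ~120000 app methods * 16 bytes ~= 1.9 MB, i.e. the quoted ~2 MB savings.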
diff --git a/compiler/dex/quick/gen_invoke.cc b/compiler/dex/quick/gen_invoke.cc
index 7958886ff..7d89e195f 100755
--- a/compiler/dex/quick/gen_invoke.cc
+++ b/compiler/dex/quick/gen_invoke.cc
@@ -477,9 +477,10 @@ static bool CommonCallCodeLoadCodePointerIntoInvokeTgt(const CallInfo* info,
                                                        const RegStorage* alt_from,
                                                        const CompilationUnit* cu, Mir2Lir* cg) {
   if (cu->instruction_set != kX86 && cu->instruction_set != kX86_64) {
+    int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+        InstructionSetPointerSize(cu->instruction_set)).Int32Value();
     // Get the compiled code address [use *alt_from or kArg0, set kInvokeTgt]
-    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from,
-                     mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value(),
+    cg->LoadWordDisp(alt_from == nullptr ? cg->TargetReg(kArg0, kRef) : *alt_from, offset,
                      cg->TargetPtrReg(kInvokeTgt));
     return true;
   }
@@ -1802,8 +1803,9 @@ void Mir2Lir::GenInvokeNoInline(CallInfo* info) {
       call_inst = reinterpret_cast<X86Mir2Lir*>(this)->CallWithLinkerFixup(target_method,
                                                                            info->type);
     } else {
-      call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef),
-                        mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+      int32_t offset = mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset(
+          InstructionSetPointerSize(cu_->instruction_set)).Int32Value();
+      call_inst = OpMem(kOpBlx, TargetReg(kArg0, kRef), offset);
     }
   } else {
     call_inst = GenInvokeNoInlineCall(this, info->type);
diff --git a/compiler/image_writer.cc b/compiler/image_writer.cc
index 871889f3f..1b66adb27 100644
--- a/compiler/image_writer.cc
+++ b/compiler/image_writer.cc
@@ -81,6 +81,7 @@ bool ImageWriter::Write(const std::string& image_filename,
 
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  target_ptr_size_ = InstructionSetPointerSize(compiler_driver_.GetInstructionSet());
   std::unique_ptr<File> oat_file(OS::OpenFileReadWrite(oat_filename.c_str()));
   if (oat_file.get() == NULL) {
     LOG(ERROR) << "Failed to open oat file " << oat_filename << " for " << oat_location;
@@ -212,7 +213,14 @@ void ImageWriter::SetImageOffset(mirror::Object* object, size_t offset) {
 
 void ImageWriter::AssignImageOffset(mirror::Object* object) {
   DCHECK(object != nullptr);
   SetImageOffset(object, image_end_);
-  image_end_ += RoundUp(object->SizeOf(), 8);  // 64-bit alignment
+  size_t object_size;
+  if (object->IsArtMethod()) {
+    // Methods are sized based on the target pointer size.
+    object_size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+  } else {
+    object_size = object->SizeOf();
+  }
+  image_end_ += RoundUp(object_size, 8);  // 64-bit alignment
   DCHECK_LT(image_end_, image_->Size());
 }
@@ -609,7 +617,14 @@ void ImageWriter::CopyAndFixupObjectsCallback(Object* obj, void* arg) {
   size_t offset = image_writer->GetImageOffset(obj);
   byte* dst = image_writer->image_->Begin() + offset;
   const byte* src = reinterpret_cast<const byte*>(obj);
-  size_t n = obj->SizeOf();
+  size_t n;
+  if (obj->IsArtMethod()) {
+    // Size without pointer fields since we don't want to overrun the buffer if the target
+    // ArtMethod is 32 bit but the source is 64 bit.
+    n = mirror::ArtMethod::SizeWithoutPointerFields();
+  } else {
+    n = obj->SizeOf();
+  }
   DCHECK_LT(offset + n, image_writer->image_->Size());
   memcpy(dst, src, n);
   Object* copy = reinterpret_cast<Object*>(dst);
@@ -688,6 +703,10 @@ void ImageWriter::FixupObject(Object* orig, Object* copy) {
   }
   if (orig->IsArtMethod()) {
     FixupMethod(orig->AsArtMethod(), down_cast<ArtMethod*>(copy));
+  } else if (orig->IsClass() && orig->AsClass()->IsArtMethodClass()) {
+    // Set the right size for the target.
+    size_t size = mirror::ArtMethod::InstanceSize(target_ptr_size_);
+    down_cast<mirror::Class*>(copy)->SetObjectSizeWithoutChecks(size);
   }
 }
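The two sizes used above relate as follows; a minimal sketch (not code from
this patch — the 40-byte fixed prefix and the four pointer fields are inferred
from the asm_support.h offsets further down):

    #include <cstddef>

    constexpr size_t kFixedPrefix = 40;  // SizeWithoutPointerFields(): header + non-pointer fields.
    constexpr size_t kNumPtrFields = 4;  // interpreter, jni, quick code, gc map.

    constexpr size_t InstanceSize(size_t pointer_size) {
      return kFixedPrefix + kNumPtrFields * pointer_size;
    }

    // CopyAndFixupObjectsCallback memcpy()s only the fixed prefix, and FixupMethod
    // then rewrites each pointer field at its target-size offset, so a 64-bit host
    // ArtMethod (72 bytes) packs into a 32-bit image slot (56 bytes) without overrun.
    static_assert(InstanceSize(4) == 56, "32-bit target ArtMethod");
    static_assert(InstanceSize(8) == 72, "64-bit host ArtMethod");
    static_assert(InstanceSize(8) - InstanceSize(4) == 16, "saving per method");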
@@ -746,42 +765,63 @@ const byte* ImageWriter::GetQuickEntryPoint(mirror::ArtMethod* method) {
 
 void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) {
   // OatWriter replaces the code_ with an offset value. Here we re-adjust to a pointer relative to
   // oat_begin_
+  // For 64 bit targets we need to repack the current runtime pointer sized fields to the right
+  // locations.
+  // Copy all of the fields from the runtime methods to the target methods first since we did a
+  // bytewise copy earlier.
+#if defined(ART_USE_PORTABLE_COMPILER)
+  copy->SetEntryPointFromPortableCompiledCodePtrSize(
+      orig->GetEntryPointFromPortableCompiledCode(), target_ptr_size_);
+#endif
+  copy->SetEntryPointFromInterpreterPtrSize(orig->GetEntryPointFromInterpreter(),
+                                            target_ptr_size_);
+  copy->SetEntryPointFromJniPtrSize(orig->GetEntryPointFromJni(), target_ptr_size_);
+  copy->SetEntryPointFromQuickCompiledCodePtrSize(
+      orig->GetEntryPointFromQuickCompiledCode(), target_ptr_size_);
+  copy->SetNativeGcMapPtrSize(orig->GetNativeGcMap(), target_ptr_size_);
   // The resolution method has a special trampoline to call.
   Runtime* runtime = Runtime::Current();
   if (UNLIKELY(orig == runtime->GetResolutionMethod())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
-    copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_resolution_trampoline_offset_));
+    copy->SetEntryPointFromPortableCompiledCodePtrSize(
+        GetOatAddress(portable_resolution_trampoline_offset_), target_ptr_size_);
 #endif
-    copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_resolution_trampoline_offset_));
+    copy->SetEntryPointFromQuickCompiledCodePtrSize(
+        GetOatAddress(quick_resolution_trampoline_offset_), target_ptr_size_);
   } else if (UNLIKELY(orig == runtime->GetImtConflictMethod() ||
                       orig == runtime->GetImtUnimplementedMethod())) {
 #if defined(ART_USE_PORTABLE_COMPILER)
-    copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_imt_conflict_trampoline_offset_));
+    copy->SetEntryPointFromPortableCompiledCodePtrSize(
+        GetOatAddress(portable_imt_conflict_trampoline_offset_), target_ptr_size_);
 #endif
-    copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_imt_conflict_trampoline_offset_));
+    copy->SetEntryPointFromQuickCompiledCodePtrSize(
+        GetOatAddress(quick_imt_conflict_trampoline_offset_), target_ptr_size_);
   } else {
     // We assume all methods have code. If they don't currently then we set them to use the
     // resolution trampoline. Abstract methods never have code and so we need to make sure their
     // use results in an AbstractMethodError. We use the interpreter to achieve this.
if (UNLIKELY(orig->IsAbstract())) { #if defined(ART_USE_PORTABLE_COMPILER) - copy->SetEntryPointFromPortableCompiledCode(GetOatAddress(portable_to_interpreter_bridge_offset_)); + copy->SetEntryPointFromPortableCompiledCode( + GetOatAddress(portable_to_interpreter_bridge_offset_), target_ptr_size_); #endif - copy->SetEntryPointFromQuickCompiledCode(GetOatAddress(quick_to_interpreter_bridge_offset_)); - copy->SetEntryPointFromInterpreter(reinterpret_cast - (const_cast(GetOatAddress(interpreter_to_interpreter_bridge_offset_)))); + copy->SetEntryPointFromQuickCompiledCodePtrSize( + GetOatAddress(quick_to_interpreter_bridge_offset_), target_ptr_size_); + copy->SetEntryPointFromInterpreterPtrSize( + reinterpret_cast(const_cast( + GetOatAddress(interpreter_to_interpreter_bridge_offset_))), target_ptr_size_); } else { bool quick_is_interpreted; const byte* quick_code = GetQuickCode(orig, &quick_is_interpreted); - copy->SetEntryPointFromQuickCompiledCode(quick_code); + copy->SetEntryPointFromQuickCompiledCodePtrSize(quick_code, target_ptr_size_); // Portable entrypoint: bool portable_is_interpreted = false; #if defined(ART_USE_PORTABLE_COMPILER) const byte* portable_code = GetOatAddress(orig->GetPortableOatCodeOffset()); - if (portable_code != nullptr && - (!orig->IsStatic() || orig->IsConstructor() || orig->GetDeclaringClass()->IsInitialized())) { + if (portable_code != nullptr && (!orig->IsStatic() || orig->IsConstructor() || + orig->GetDeclaringClass()->IsInitialized())) { // We have code for a non-static or initialized method, just use the code. } else if (portable_code == nullptr && orig->IsNative() && (!orig->IsStatic() || orig->GetDeclaringClass()->IsInitialized())) { @@ -798,18 +838,20 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) { // initialization. portable_code = GetOatAddress(portable_resolution_trampoline_offset_); } - copy->SetEntryPointFromPortableCompiledCode(portable_code); + copy->SetEntryPointFromPortableCompiledCodePtrSize( + portable_code, target_ptr_size_); #endif // JNI entrypoint: if (orig->IsNative()) { // The native method's pointer is set to a stub to lookup via dlsym. // Note this is not the code_ pointer, that is handled above. - copy->SetNativeMethod(GetOatAddress(jni_dlsym_lookup_offset_)); + copy->SetEntryPointFromJniPtrSize(GetOatAddress(jni_dlsym_lookup_offset_), + target_ptr_size_); } else { // Normal (non-abstract non-native) methods have various tables to relocate. uint32_t native_gc_map_offset = orig->GetOatNativeGcMapOffset(); - const byte* native_gc_map = GetOatAddress(native_gc_map_offset); - copy->SetNativeGcMap(reinterpret_cast(native_gc_map)); + const uint8_t* native_gc_map = GetOatAddress(native_gc_map_offset); + copy->SetNativeGcMapPtrSize(native_gc_map, target_ptr_size_); } // Interpreter entrypoint: @@ -817,9 +859,11 @@ void ImageWriter::FixupMethod(ArtMethod* orig, ArtMethod* copy) { uint32_t interpreter_code = (quick_is_interpreted && portable_is_interpreted) ? 
interpreter_to_interpreter_bridge_offset_ : interpreter_to_compiled_code_bridge_offset_; - copy->SetEntryPointFromInterpreter( + EntryPointFromInterpreter* interpreter_entrypoint = reinterpret_cast( - const_cast(GetOatAddress(interpreter_code)))); + const_cast(GetOatAddress(interpreter_code))); + copy->SetEntryPointFromInterpreterPtrSize( + interpreter_entrypoint, target_ptr_size_); } } } diff --git a/compiler/image_writer.h b/compiler/image_writer.h index 61365fe23..6a9df106f 100644 --- a/compiler/image_writer.h +++ b/compiler/image_writer.h @@ -204,6 +204,9 @@ class ImageWriter FINAL { uint32_t quick_to_interpreter_bridge_offset_; bool compile_pic_; + // Size of pointers on the target architecture. + size_t target_ptr_size_; + friend class FixupVisitor; friend class FixupClassVisitor; DISALLOW_COPY_AND_ASSIGN(ImageWriter); diff --git a/compiler/jni/quick/jni_compiler.cc b/compiler/jni/quick/jni_compiler.cc index c38cfaf74..35b7294f8 100644 --- a/compiler/jni/quick/jni_compiler.cc +++ b/compiler/jni/quick/jni_compiler.cc @@ -308,7 +308,9 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver, } // 9. Plant call to native code associated with method. - __ Call(main_jni_conv->MethodStackOffset(), mirror::ArtMethod::NativeMethodOffset(), + MemberOffset jni_entrypoint_offset = mirror::ArtMethod::EntryPointFromJniOffset( + InstructionSetPointerSize(instruction_set)); + __ Call(main_jni_conv->MethodStackOffset(), jni_entrypoint_offset, mr_conv->InterproceduralScratchRegister()); // 10. Fix differences in result widths. diff --git a/compiler/optimizing/code_generator_arm.cc b/compiler/optimizing/code_generator_arm.cc index 2c954a050..7822ee558 100644 --- a/compiler/optimizing/code_generator_arm.cc +++ b/compiler/optimizing/code_generator_arm.cc @@ -854,8 +854,8 @@ void InstructionCodeGeneratorARM::VisitInvokeStatic(HInvokeStatic* invoke) { // temp = temp[index_in_cache] __ ldr(temp, Address(temp, index_in_cache)); // LR = temp[offset_of_quick_compiled_code] - __ ldr(LR, Address(temp, - mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value())); + __ ldr(LR, Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + kArmPointerSize).Int32Value())); // LR() __ blx(LR); diff --git a/compiler/optimizing/code_generator_x86.cc b/compiler/optimizing/code_generator_x86.cc index f544d47c3..1b6fb6b65 100644 --- a/compiler/optimizing/code_generator_x86.cc +++ b/compiler/optimizing/code_generator_x86.cc @@ -796,7 +796,8 @@ void InstructionCodeGeneratorX86::VisitInvokeStatic(HInvokeStatic* invoke) { // temp = temp[index_in_cache] __ movl(temp, Address(temp, index_in_cache)); // (temp + offset_of_quick_compiled_code)() - __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value())); + __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + kX86PointerSize).Int32Value())); DCHECK(!codegen_->IsLeafMethod()); codegen_->RecordPcInfo(invoke->GetDexPc()); diff --git a/compiler/optimizing/code_generator_x86_64.cc b/compiler/optimizing/code_generator_x86_64.cc index e1807dc7f..1ee827123 100644 --- a/compiler/optimizing/code_generator_x86_64.cc +++ b/compiler/optimizing/code_generator_x86_64.cc @@ -738,7 +738,8 @@ void InstructionCodeGeneratorX86_64::VisitInvokeStatic(HInvokeStatic* invoke) { // temp = temp[index_in_cache] __ movl(temp, Address(temp, index_in_cache)); // (temp + offset_of_quick_compiled_code)() - __ call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset().SizeValue())); + __ 
call(Address(temp, mirror::ArtMethod::EntryPointFromQuickCompiledCodeOffset( + kX86_64PointerSize).SizeValue())); DCHECK(!codegen_->IsLeafMethod()); codegen_->RecordPcInfo(invoke->GetDexPc()); diff --git a/patchoat/patchoat.cc b/patchoat/patchoat.cc index b9637d0cb..9f55c94b0 100644 --- a/patchoat/patchoat.cc +++ b/patchoat/patchoat.cc @@ -176,7 +176,7 @@ bool PatchOat::Patch(const std::string& image_location, off_t delta, } gc::space::ImageSpace* ispc = Runtime::Current()->GetHeap()->GetImageSpace(); - PatchOat p(image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), + PatchOat p(isa, image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), delta, timings); t.NewTiming("Patching files"); if (!p.PatchImage()) { @@ -298,7 +298,7 @@ bool PatchOat::Patch(File* input_oat, const std::string& image_location, off_t d CHECK(is_oat_pic == NOT_PIC); } - PatchOat p(elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), + PatchOat p(isa, elf.release(), image.release(), ispc->GetLiveBitmap(), ispc->GetMemMap(), delta, timings); t.NewTiming("Patching files"); if (!skip_patching_oat && !p.PatchElf()) { @@ -523,41 +523,46 @@ void PatchOat::VisitObject(mirror::Object* object) { PatchOat::PatchVisitor visitor(this, copy); object->VisitReferences(visitor, visitor); if (object->IsArtMethod()) { - FixupMethod(static_cast(object), - static_cast(copy)); + FixupMethod(down_cast(object), down_cast(copy)); } } void PatchOat::FixupMethod(mirror::ArtMethod* object, mirror::ArtMethod* copy) { + const size_t pointer_size = InstructionSetPointerSize(isa_); // Just update the entry points if it looks like we should. // TODO: sanity check all the pointers' values #if defined(ART_USE_PORTABLE_COMPILER) uintptr_t portable = reinterpret_cast( - object->GetEntryPointFromPortableCompiledCode()); + object->GetEntryPointFromPortableCompiledCodePtrSize(pointer_size)); if (portable != 0) { - copy->SetEntryPointFromPortableCompiledCode(reinterpret_cast(portable + delta_)); + copy->SetEntryPointFromPortableCompiledCodePtrSize(reinterpret_cast(portable + delta_), + pointer_size); } #endif uintptr_t quick= reinterpret_cast( - object->GetEntryPointFromQuickCompiledCode()); + object->GetEntryPointFromQuickCompiledCodePtrSize(pointer_size)); if (quick != 0) { - copy->SetEntryPointFromQuickCompiledCode(reinterpret_cast(quick + delta_)); + copy->SetEntryPointFromQuickCompiledCodePtrSize(reinterpret_cast(quick + delta_), + pointer_size); } uintptr_t interpreter = reinterpret_cast( - object->GetEntryPointFromInterpreter()); + object->GetEntryPointFromInterpreterPtrSize(pointer_size)); if (interpreter != 0) { - copy->SetEntryPointFromInterpreter( - reinterpret_cast(interpreter + delta_)); + copy->SetEntryPointFromInterpreterPtrSize( + reinterpret_cast(interpreter + delta_), pointer_size); } - uintptr_t native_method = reinterpret_cast(object->GetNativeMethod()); + uintptr_t native_method = reinterpret_cast( + object->GetEntryPointFromJniPtrSize(pointer_size)); if (native_method != 0) { - copy->SetNativeMethod(reinterpret_cast(native_method + delta_)); + copy->SetEntryPointFromJniPtrSize(reinterpret_cast(native_method + delta_), + pointer_size); } - uintptr_t native_gc_map = reinterpret_cast(object->GetNativeGcMap()); + uintptr_t native_gc_map = reinterpret_cast( + object->GetNativeGcMapPtrSize(pointer_size)); if (native_gc_map != 0) { - copy->SetNativeGcMap(reinterpret_cast(native_gc_map + delta_)); + copy->SetNativeGcMapPtrSize(reinterpret_cast(native_gc_map + delta_), pointer_size); } } diff --git 
a/patchoat/patchoat.h b/patchoat/patchoat.h index 21041fbca..03d915abd 100644 --- a/patchoat/patchoat.h +++ b/patchoat/patchoat.h @@ -60,15 +60,16 @@ class PatchOat { private: // Takes ownership only of the ElfFile. All other pointers are only borrowed. PatchOat(ElfFile* oat_file, off_t delta, TimingLogger* timings) - : oat_file_(oat_file), delta_(delta), timings_(timings) {} - PatchOat(MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap, + : oat_file_(oat_file), delta_(delta), isa_(kNone), timings_(timings) {} + PatchOat(InstructionSet isa, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta, TimingLogger* timings) : image_(image), bitmap_(bitmap), heap_(heap), - delta_(delta), timings_(timings) {} - PatchOat(ElfFile* oat_file, MemMap* image, gc::accounting::ContinuousSpaceBitmap* bitmap, - MemMap* heap, off_t delta, TimingLogger* timings) + delta_(delta), isa_(isa), timings_(timings) {} + PatchOat(InstructionSet isa, ElfFile* oat_file, MemMap* image, + gc::accounting::ContinuousSpaceBitmap* bitmap, MemMap* heap, off_t delta, + TimingLogger* timings) : oat_file_(oat_file), image_(image), bitmap_(bitmap), heap_(heap), - delta_(delta), timings_(timings) {} + delta_(delta), isa_(isa), timings_(timings) {} ~PatchOat() {} // Was the .art image at image_path made with --compile-pic ? @@ -147,6 +148,9 @@ class PatchOat { const MemMap* heap_; // The amount we are changing the offset by. off_t delta_; + // Active instruction set, used to know the entrypoint size. + const InstructionSet isa_; + TimingLogger* timings_; DISALLOW_IMPLICIT_CONSTRUCTORS(PatchOat); diff --git a/runtime/arch/arm/portable_entrypoints_arm.S b/runtime/arch/arm/portable_entrypoints_arm.S index 3491c18c3..a714bca7a 100644 --- a/runtime/arch/arm/portable_entrypoints_arm.S +++ b/runtime/arch/arm/portable_entrypoints_arm.S @@ -53,7 +53,7 @@ ENTRY art_portable_invoke_stub mov ip, #0 @ set ip to 0 str ip, [sp] @ store NULL for method* at bottom of frame add sp, #16 @ first 4 args are not passed on stack for portable - ldr ip, [r0, #METHOD_PORTABLE_CODE_OFFSET] @ get pointer to the code + ldr ip, [r0, #METHOD_PORTABLE_CODE_OFFSET_32] @ get pointer to the code blx ip @ call the method mov sp, r11 @ restore the stack pointer ldr ip, [sp, #24] @ load the result pointer diff --git a/runtime/arch/arm/quick_entrypoints_arm.S b/runtime/arch/arm/quick_entrypoints_arm.S index 1b30c9cca..26e693749 100644 --- a/runtime/arch/arm/quick_entrypoints_arm.S +++ b/runtime/arch/arm/quick_entrypoints_arm.S @@ -322,7 +322,7 @@ ENTRY art_quick_invoke_stub ldr r3, [sp, #12] @ copy arg value for r3 mov ip, #0 @ set ip to 0 str ip, [sp] @ store NULL for method* at bottom of frame - ldr ip, [r0, #METHOD_QUICK_CODE_OFFSET] @ get pointer to the code + ldr ip, [r0, #METHOD_QUICK_CODE_OFFSET_32] @ get pointer to the code blx ip @ call the method mov sp, r11 @ restore the stack pointer ldr ip, [sp, #24] @ load the result pointer diff --git a/runtime/arch/arm64/quick_entrypoints_arm64.S b/runtime/arch/arm64/quick_entrypoints_arm64.S index 2a19e27b0..3c5db50f9 100644 --- a/runtime/arch/arm64/quick_entrypoints_arm64.S +++ b/runtime/arch/arm64/quick_entrypoints_arm64.S @@ -551,7 +551,7 @@ SAVE_SIZE_AND_METHOD=SAVE_SIZE+STACK_REFERENCE_SIZE .macro INVOKE_STUB_CALL_AND_RETURN // load method-> METHOD_QUICK_CODE_OFFSET - ldr x9, [x0 , #METHOD_QUICK_CODE_OFFSET] + ldr x9, [x0 , #METHOD_QUICK_CODE_OFFSET_64] // Branch to method. 
blr x9 diff --git a/runtime/arch/mips/portable_entrypoints_mips.S b/runtime/arch/mips/portable_entrypoints_mips.S index 7545ce0d6..1e9fe337c 100644 --- a/runtime/arch/mips/portable_entrypoints_mips.S +++ b/runtime/arch/mips/portable_entrypoints_mips.S @@ -100,7 +100,7 @@ ENTRY art_portable_invoke_stub lw $a1, 4($sp) # copy arg value for a1 lw $a2, 8($sp) # copy arg value for a2 lw $a3, 12($sp) # copy arg value for a3 - lw $t9, METHOD_PORTABLE_CODE_OFFSET($a0) # get pointer to the code + lw $t9, METHOD_PORTABLE_CODE_OFFSET_32($a0) # get pointer to the code jalr $t9 # call the method sw $zero, 0($sp) # store NULL for method* at bottom of frame move $sp, $fp # restore the stack diff --git a/runtime/arch/mips/quick_entrypoints_mips.S b/runtime/arch/mips/quick_entrypoints_mips.S index 878622225..08b74c6e9 100644 --- a/runtime/arch/mips/quick_entrypoints_mips.S +++ b/runtime/arch/mips/quick_entrypoints_mips.S @@ -507,7 +507,7 @@ ENTRY art_quick_invoke_stub lw $a1, 4($sp) # copy arg value for a1 lw $a2, 8($sp) # copy arg value for a2 lw $a3, 12($sp) # copy arg value for a3 - lw $t9, METHOD_QUICK_CODE_OFFSET($a0) # get pointer to the code + lw $t9, METHOD_QUICK_CODE_OFFSET_32($a0) # get pointer to the code jalr $t9 # call the method sw $zero, 0($sp) # store NULL for method* at bottom of frame move $sp, $fp # restore the stack diff --git a/runtime/arch/x86/portable_entrypoints_x86.S b/runtime/arch/x86/portable_entrypoints_x86.S index 9365795fd..f8e05dd0d 100644 --- a/runtime/arch/x86/portable_entrypoints_x86.S +++ b/runtime/arch/x86/portable_entrypoints_x86.S @@ -46,7 +46,7 @@ DEFINE_FUNCTION art_portable_invoke_stub addl LITERAL(12), %esp // pop arguments to memcpy mov 12(%ebp), %eax // move method pointer into eax mov %eax, (%esp) // push method pointer onto stack - call *METHOD_PORTABLE_CODE_OFFSET(%eax) // call the method + call *METHOD_PORTABLE_CODE_OFFSET_32(%eax) // call the method mov %ebp, %esp // restore stack pointer POP ebx // pop ebx POP ebp // pop ebp diff --git a/runtime/arch/x86/quick_entrypoints_x86.S b/runtime/arch/x86/quick_entrypoints_x86.S index 75c86465f..6a10755d7 100644 --- a/runtime/arch/x86/quick_entrypoints_x86.S +++ b/runtime/arch/x86/quick_entrypoints_x86.S @@ -292,7 +292,7 @@ DEFINE_FUNCTION art_quick_invoke_stub mov 4(%esp), %ecx // copy arg1 into ecx mov 8(%esp), %edx // copy arg2 into edx mov 12(%esp), %ebx // copy arg3 into ebx - call *METHOD_QUICK_CODE_OFFSET(%eax) // call the method + call *METHOD_QUICK_CODE_OFFSET_32(%eax) // call the method mov %ebp, %esp // restore stack pointer CFI_DEF_CFA_REGISTER(esp) POP ebx // pop ebx diff --git a/runtime/arch/x86_64/quick_entrypoints_x86_64.S b/runtime/arch/x86_64/quick_entrypoints_x86_64.S index 57980925c..0de8dfd85 100644 --- a/runtime/arch/x86_64/quick_entrypoints_x86_64.S +++ b/runtime/arch/x86_64/quick_entrypoints_x86_64.S @@ -481,7 +481,7 @@ DEFINE_FUNCTION art_quick_invoke_stub LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished .Lgpr_setup_finished: - call *METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method. + call *METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method. movq %rbp, %rsp // Restore stack pointer. CFI_DEF_CFA_REGISTER(rsp) POP r9 // Pop r9 - shorty*. @@ -564,7 +564,7 @@ DEFINE_FUNCTION art_quick_invoke_static_stub LOOP_OVER_SHORTY_LOADING_GPRS r8, r8d, .Lgpr_setup_finished2 LOOP_OVER_SHORTY_LOADING_GPRS r9, r9d, .Lgpr_setup_finished2 .Lgpr_setup_finished2: - call *METHOD_QUICK_CODE_OFFSET(%rdi) // Call the method. 
+ call *METHOD_QUICK_CODE_OFFSET_64(%rdi) // Call the method. movq %rbp, %rsp // Restore stack pointer. CFI_DEF_CFA_REGISTER(rsp) POP r9 // Pop r9 - shorty*. diff --git a/runtime/asm_support.h b/runtime/asm_support.h index b36b1b7b7..701f44f06 100644 --- a/runtime/asm_support.h +++ b/runtime/asm_support.h @@ -44,13 +44,12 @@ // Offsets within java.lang.Method. #define METHOD_DEX_CACHE_METHODS_OFFSET 12 -#if defined(ART_USE_PORTABLE_COMPILER) -#define METHOD_PORTABLE_CODE_OFFSET 40 -#define METHOD_QUICK_CODE_OFFSET 48 -#else -#define METHOD_PORTABLE_CODE_OFFSET 40 -#define METHOD_QUICK_CODE_OFFSET 40 -#endif // ART_USE_PORTABLE_COMPILER + +// Verified by object_test. +#define METHOD_QUICK_CODE_OFFSET_32 48 +#define METHOD_QUICK_CODE_OFFSET_64 56 +#define METHOD_PORTABLE_CODE_OFFSET_32 56 +#define METHOD_PORTABLE_CODE_OFFSET_64 72 #else diff --git a/runtime/class_linker.cc b/runtime/class_linker.cc index b80aa5aa1..15410939e 100644 --- a/runtime/class_linker.cc +++ b/runtime/class_linker.cc @@ -183,7 +183,8 @@ ClassLinker::ClassLinker(InternTable* intern_table) portable_imt_conflict_trampoline_(nullptr), quick_imt_conflict_trampoline_(nullptr), quick_generic_jni_trampoline_(nullptr), - quick_to_interpreter_bridge_trampoline_(nullptr) { + quick_to_interpreter_bridge_trampoline_(nullptr), + image_pointer_size_(sizeof(void*)) { CHECK_EQ(arraysize(class_roots_descriptors_), size_t(kClassRootsMax)); memset(find_array_class_cache_, 0, kFindArrayCacheSize * sizeof(mirror::Class*)); } @@ -318,10 +319,9 @@ void ClassLinker::InitWithoutImage(const std::vector& boot_class Handle java_lang_reflect_ArtMethod(hs.NewHandle( AllocClass(self, java_lang_Class.Get(), mirror::ArtMethod::ClassSize()))); CHECK(java_lang_reflect_ArtMethod.Get() != nullptr); - java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize()); + java_lang_reflect_ArtMethod->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*))); SetClassRoot(kJavaLangReflectArtMethod, java_lang_reflect_ArtMethod.Get()); java_lang_reflect_ArtMethod->SetStatus(mirror::Class::kStatusResolved, self); - mirror::ArtMethod::SetClass(java_lang_reflect_ArtMethod.Get()); // Set up array classes for string, field, method @@ -347,8 +347,7 @@ void ClassLinker::InitWithoutImage(const std::vector& boot_class // DexCache instances. Needs to be after String, Field, Method arrays since AllocDexCache uses // these roots. CHECK_NE(0U, boot_class_path.size()); - for (size_t i = 0; i != boot_class_path.size(); ++i) { - const DexFile* dex_file = boot_class_path[i]; + for (const DexFile* dex_file : boot_class_path) { CHECK(dex_file != nullptr); AppendToBootClassPath(*dex_file); } @@ -1631,6 +1630,20 @@ void ClassLinker::InitFromImage() { // Set classes on AbstractMethod early so that IsMethod tests can be performed during the live // bitmap walk. mirror::ArtMethod::SetClass(GetClassRoot(kJavaLangReflectArtMethod)); + size_t art_method_object_size = mirror::ArtMethod::GetJavaLangReflectArtMethod()->GetObjectSize(); + if (!Runtime::Current()->IsCompiler()) { + // Compiler supports having an image with a different pointer size than the runtime. This + // happens on the host for compile 32 bit tests since we use a 64 bit libart compiler. We may + // also use 32 bit dex2oat on a system with 64 bit apps. 
+ CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(sizeof(void*))) + << sizeof(void*); + } + if (art_method_object_size == mirror::ArtMethod::InstanceSize(4)) { + image_pointer_size_ = 4; + } else { + CHECK_EQ(art_method_object_size, mirror::ArtMethod::InstanceSize(8)); + image_pointer_size_ = 8; + } // Set entry point to interpreter if in InterpretOnly mode. if (Runtime::Current()->GetInstrumentation()->InterpretOnly()) { @@ -1644,7 +1657,7 @@ void ClassLinker::InitFromImage() { // reinit array_iftable_ from any array class instance, they should be == array_iftable_ = GcRoot(GetClassRoot(kObjectArrayClass)->GetIfTable()); - DCHECK(array_iftable_.Read() == GetClassRoot(kBooleanArrayClass)->GetIfTable()); + DCHECK_EQ(array_iftable_.Read(), GetClassRoot(kBooleanArrayClass)->GetIfTable()); // String class root was set above mirror::Reference::SetClass(GetClassRoot(kJavaLangRefReference)); mirror::ArtField::SetClass(GetClassRoot(kJavaLangReflectArtField)); @@ -5266,14 +5279,18 @@ bool ClassLinker::LinkFields(Handle klass, bool is_static, size_t } else { klass->SetNumReferenceInstanceFields(num_reference_fields); if (!klass->IsVariableSize()) { - std::string temp; - DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp); - size_t previous_size = klass->GetObjectSize(); - if (previous_size != 0) { - // Make sure that we didn't originally have an incorrect size. - CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp); + if (klass->DescriptorEquals("Ljava/lang/reflect/ArtMethod;")) { + klass->SetObjectSize(mirror::ArtMethod::InstanceSize(sizeof(void*))); + } else { + std::string temp; + DCHECK_GE(size, sizeof(mirror::Object)) << klass->GetDescriptor(&temp); + size_t previous_size = klass->GetObjectSize(); + if (previous_size != 0) { + // Make sure that we didn't originally have an incorrect size. + CHECK_EQ(previous_size, size) << klass->GetDescriptor(&temp); + } + klass->SetObjectSize(size); } - klass->SetObjectSize(size); } } @@ -5324,7 +5341,6 @@ bool ClassLinker::LinkFields(Handle klass, bool is_static, size_t } CHECK_EQ(current_ref_offset.Uint32Value(), end_ref_offset.Uint32Value()); } - return true; } diff --git a/runtime/class_linker.h b/runtime/class_linker.h index 7fc394a47..a5c1c2487 100644 --- a/runtime/class_linker.h +++ b/runtime/class_linker.h @@ -813,6 +813,9 @@ class ClassLinker { const void* quick_generic_jni_trampoline_; const void* quick_to_interpreter_bridge_trampoline_; + // Image pointer size. 
+ size_t image_pointer_size_; + friend class ImageWriter; // for GetClassRoots friend class ImageDumper; // for FindOpenedOatFileFromOatLocation friend class ElfPatcher; // for FindOpenedOatFileForDexFile & FindOpenedOatFileFromOatLocation diff --git a/runtime/class_linker_test.cc b/runtime/class_linker_test.cc index c170b8d11..384a2bf95 100644 --- a/runtime/class_linker_test.cc +++ b/runtime/class_linker_test.cc @@ -492,13 +492,6 @@ struct ArtMethodOffsets : public CheckOffsets { offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_cache_strings_), "dexCacheStrings")); offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_code_item_offset_), "dexCodeItemOffset")); offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, dex_method_index_), "dexMethodIndex")); - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_interpreter_), "entryPointFromInterpreter")); - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_jni_), "entryPointFromJni")); -#if defined(ART_USE_PORTABLE_COMPILER) - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_portable_compiled_code_), "entryPointFromPortableCompiledCode")); -#endif - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, entry_point_from_quick_compiled_code_), "entryPointFromQuickCompiledCode")); - offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, gc_map_), "gcMap")); offsets.push_back(CheckOffset(OFFSETOF_MEMBER(mirror::ArtMethod, method_index_), "methodIndex")); }; }; diff --git a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc index dfd2e11fc..d4f9b492e 100644 --- a/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc +++ b/runtime/entrypoints/quick/quick_trampoline_entrypoints.cc @@ -1637,7 +1637,7 @@ extern "C" TwoWordReturn artQuickGenericJniTrampoline(Thread* self, *(sp32 - 1) = cookie; // Retrieve the stored native code. - const void* nativeCode = called->GetNativeMethod(); + void* nativeCode = called->GetEntryPointFromJni(); // There are two cases for the content of nativeCode: // 1) Pointer to the native function. diff --git a/runtime/image.cc b/runtime/image.cc index 81044dffd..b4496e332 100644 --- a/runtime/image.cc +++ b/runtime/image.cc @@ -24,7 +24,7 @@ namespace art { const byte ImageHeader::kImageMagic[] = { 'a', 'r', 't', '\n' }; -const byte ImageHeader::kImageVersion[] = { '0', '1', '1', '\0' }; +const byte ImageHeader::kImageVersion[] = { '0', '1', '2', '\0' }; ImageHeader::ImageHeader(uint32_t image_begin, uint32_t image_size, diff --git a/runtime/instruction_set.h b/runtime/instruction_set.h index ae8eeac54..da7d153c0 100644 --- a/runtime/instruction_set.h +++ b/runtime/instruction_set.h @@ -125,6 +125,10 @@ static inline bool Is64BitInstructionSet(InstructionSet isa) { } } +static inline size_t InstructionSetPointerSize(InstructionSet isa) { + return Is64BitInstructionSet(isa) ? 
8U : 4U; +} + static inline size_t GetBytesPerGprSpillLocation(InstructionSet isa) { switch (isa) { case kArm: diff --git a/runtime/interpreter/interpreter.cc b/runtime/interpreter/interpreter.cc index 47a7f0d62..26c70999b 100644 --- a/runtime/interpreter/interpreter.cc +++ b/runtime/interpreter/interpreter.cc @@ -138,7 +138,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s if (method->IsStatic()) { if (shorty == "L") { typedef jobject (fntype)(JNIEnv*, jclass); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); jobject jresult; @@ -149,35 +149,35 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s result->SetL(soa.Decode(jresult)); } else if (shorty == "V") { typedef void (fntype)(JNIEnv*, jclass); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedThreadStateChange tsc(self, kNative); fn(soa.Env(), klass.get()); } else if (shorty == "Z") { typedef jboolean (fntype)(JNIEnv*, jclass); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedThreadStateChange tsc(self, kNative); result->SetZ(fn(soa.Env(), klass.get())); } else if (shorty == "BI") { typedef jbyte (fntype)(JNIEnv*, jclass, jint); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedThreadStateChange tsc(self, kNative); result->SetB(fn(soa.Env(), klass.get(), args[0])); } else if (shorty == "II") { typedef jint (fntype)(JNIEnv*, jclass, jint); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedThreadStateChange tsc(self, kNative); result->SetI(fn(soa.Env(), klass.get(), args[0])); } else if (shorty == "LL") { typedef jobject (fntype)(JNIEnv*, jclass, jobject); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedLocalRef arg0(soa.Env(), @@ -190,14 +190,15 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s result->SetL(soa.Decode(jresult)); } else if (shorty == "IIZ") { typedef jint (fntype)(JNIEnv*, jclass, jint, jboolean); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedThreadStateChange tsc(self, kNative); result->SetI(fn(soa.Env(), klass.get(), args[0], args[1])); } else if (shorty == "ILI") { typedef jint (fntype)(JNIEnv*, jclass, jobject, jint); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const 
fn = reinterpret_cast(const_cast( + method->GetEntryPointFromJni())); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedLocalRef arg0(soa.Env(), @@ -206,21 +207,21 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s result->SetI(fn(soa.Env(), klass.get(), arg0.get(), args[1])); } else if (shorty == "SIZ") { typedef jshort (fntype)(JNIEnv*, jclass, jint, jboolean); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(const_cast(method->GetEntryPointFromJni())); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedThreadStateChange tsc(self, kNative); result->SetS(fn(soa.Env(), klass.get(), args[0], args[1])); } else if (shorty == "VIZ") { typedef void (fntype)(JNIEnv*, jclass, jint, jboolean); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedThreadStateChange tsc(self, kNative); fn(soa.Env(), klass.get(), args[0], args[1]); } else if (shorty == "ZLL") { typedef jboolean (fntype)(JNIEnv*, jclass, jobject, jobject); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedLocalRef arg0(soa.Env(), @@ -231,7 +232,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s result->SetZ(fn(soa.Env(), klass.get(), arg0.get(), arg1.get())); } else if (shorty == "ZILL") { typedef jboolean (fntype)(JNIEnv*, jclass, jint, jobject, jobject); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedLocalRef arg1(soa.Env(), @@ -242,7 +243,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s result->SetZ(fn(soa.Env(), klass.get(), args[0], arg1.get(), arg2.get())); } else if (shorty == "VILII") { typedef void (fntype)(JNIEnv*, jclass, jint, jobject, jint, jint); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedLocalRef arg1(soa.Env(), @@ -251,7 +252,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s fn(soa.Env(), klass.get(), args[0], arg1.get(), args[2], args[3]); } else if (shorty == "VLILII") { typedef void (fntype)(JNIEnv*, jclass, jobject, jint, jobject, jint, jint); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef klass(soa.Env(), soa.AddLocalReference(method->GetDeclaringClass())); ScopedLocalRef arg0(soa.Env(), @@ -267,7 +268,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s } else { if (shorty == "L") { typedef jobject (fntype)(JNIEnv*, jobject); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef rcvr(soa.Env(), 
soa.AddLocalReference(receiver)); jobject jresult; @@ -278,14 +279,14 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s result->SetL(soa.Decode(jresult)); } else if (shorty == "V") { typedef void (fntype)(JNIEnv*, jobject); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef rcvr(soa.Env(), soa.AddLocalReference(receiver)); ScopedThreadStateChange tsc(self, kNative); fn(soa.Env(), rcvr.get()); } else if (shorty == "LL") { typedef jobject (fntype)(JNIEnv*, jobject, jobject); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef rcvr(soa.Env(), soa.AddLocalReference(receiver)); ScopedLocalRef arg0(soa.Env(), @@ -299,7 +300,7 @@ static void InterpreterJni(Thread* self, ArtMethod* method, const StringPiece& s ScopedThreadStateChange tsc(self, kNative); } else if (shorty == "III") { typedef jint (fntype)(JNIEnv*, jobject, jint, jint); - fntype* const fn = reinterpret_cast(const_cast(method->GetNativeMethod())); + fntype* const fn = reinterpret_cast(method->GetEntryPointFromJni()); ScopedLocalRef rcvr(soa.Env(), soa.AddLocalReference(receiver)); ScopedThreadStateChange tsc(self, kNative); diff --git a/runtime/mirror/art_method-inl.h b/runtime/mirror/art_method-inl.h index b02f456be..33dd19f74 100644 --- a/runtime/mirror/art_method-inl.h +++ b/runtime/mirror/art_method-inl.h @@ -352,12 +352,6 @@ inline uintptr_t ArtMethod::NativePcOffset(const uintptr_t pc, const void* quick return pc - reinterpret_cast(quick_entry_point); } -template -inline void ArtMethod::SetNativeMethod(const void* native_method) { - SetFieldPtr( - OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_), native_method); -} - inline QuickMethodFrameInfo ArtMethod::GetQuickFrameInfo() { if (UNLIKELY(IsPortableCompiled())) { // Portable compiled dex bytecode or jni stub. @@ -562,6 +556,13 @@ inline void ArtMethod::SetDexCacheResolvedTypes(ObjectArray* new_dex_cach new_dex_cache_classes); } +inline void ArtMethod::CheckObjectSizeEqualsMirrorSize() { + // Using the default, check the class object size to make sure it matches the size of the + // object. + DCHECK_EQ(GetClass()->GetObjectSize(), sizeof(*this)); +} + + } // namespace mirror } // namespace art diff --git a/runtime/mirror/art_method.cc b/runtime/mirror/art_method.cc index 5b833b955..5ed7f2ead 100644 --- a/runtime/mirror/art_method.cc +++ b/runtime/mirror/art_method.cc @@ -360,7 +360,7 @@ void ArtMethod::RegisterNative(Thread* self, const void* native_method, bool is_ if (is_fast) { SetAccessFlags(GetAccessFlags() | kAccFastNative); } - SetNativeMethod(native_method); + SetEntryPointFromJni(native_method); } void ArtMethod::UnregisterNative(Thread* self) { diff --git a/runtime/mirror/art_method.h b/runtime/mirror/art_method.h index 36bb9e02e..894c3ce49 100644 --- a/runtime/mirror/art_method.h +++ b/runtime/mirror/art_method.h @@ -47,11 +47,6 @@ class MANAGED ArtMethod FINAL : public Object { // Size of java.lang.reflect.ArtMethod.class. static uint32_t ClassSize(); - // Size of an instance of java.lang.reflect.ArtMethod not including its value array. 
- static constexpr uint32_t InstanceSize() { - return sizeof(ArtMethod); - } - static ArtMethod* FromReflectedMethod(const ScopedObjectAccessAlreadyRunnable& soa, jobject jlr_method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -248,51 +243,94 @@ class MANAGED ArtMethod FINAL : public Object { template EntryPointFromInterpreter* GetEntryPointFromInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtr( - OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_)); + CheckObjectSizeEqualsMirrorSize(); + return GetEntryPointFromInterpreterPtrSize(sizeof(void*)); } - template + EntryPointFromInterpreter* GetEntryPointFromInterpreterPtrSize(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldPtrWithSize( + EntryPointFromInterpreterOffset(pointer_size), pointer_size); + } + + template void SetEntryPointFromInterpreter(EntryPointFromInterpreter* entry_point_from_interpreter) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldPtr( - OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_interpreter_), - entry_point_from_interpreter); + CheckObjectSizeEqualsMirrorSize(); + SetEntryPointFromInterpreterPtrSize(entry_point_from_interpreter, sizeof(void*)); + } + template + void SetEntryPointFromInterpreterPtrSize(EntryPointFromInterpreter* entry_point_from_interpreter, + size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SetFieldPtrWithSize( + EntryPointFromInterpreterOffset(pointer_size), entry_point_from_interpreter, pointer_size); } #if defined(ART_USE_PORTABLE_COMPILER) - static MemberOffset EntryPointFromPortableCompiledCodeOffset() { - return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_portable_compiled_code_)); + ALWAYS_INLINE static MemberOffset EntryPointFromPortableCompiledCodeOffset(size_t pointer_size) { + return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER( + PtrSizedFields, entry_point_from_portable_compiled_code_) / sizeof(void*) * pointer_size); } - template - const void* GetEntryPointFromPortableCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtr( - EntryPointFromPortableCompiledCodeOffset()); + template + const void* GetEntryPointFromPortableCompiledCode() + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CheckObjectSizeEqualsMirrorSize(); + return GetEntryPointFromPortableCompiledCodePtrSize(sizeof(void*)); } - template + template + ALWAYS_INLINE const void* GetEntryPointFromPortableCompiledCodePtrSize(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldPtrWithSize( + EntryPointFromPortableCompiledCodeOffset(pointer_size), pointer_size); + } + + template void SetEntryPointFromPortableCompiledCode(const void* entry_point_from_portable_compiled_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldPtr( - EntryPointFromPortableCompiledCodeOffset(), entry_point_from_portable_compiled_code); + CheckObjectSizeEqualsMirrorSize(); + return SetEntryPointFromPortableCompiledCodePtrSize(entry_point_from_portable_compiled_code, + sizeof(void*)); } -#endif - static MemberOffset EntryPointFromQuickCompiledCodeOffset() { - return MemberOffset(OFFSETOF_MEMBER(ArtMethod, entry_point_from_quick_compiled_code_)); + template + void SetEntryPointFromPortableCompiledCodePtrSize( + const void* entry_point_from_portable_compiled_code, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SetFieldPtrWithSize( + EntryPointFromPortableCompiledCodeOffset(pointer_size), + entry_point_from_portable_compiled_code, 
pointer_size); } +#endif - template + template const void* GetEntryPointFromQuickCompiledCode() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtr(EntryPointFromQuickCompiledCodeOffset()); + CheckObjectSizeEqualsMirrorSize(); + return GetEntryPointFromQuickCompiledCodePtrSize(sizeof(void*)); + } + template + ALWAYS_INLINE const void* GetEntryPointFromQuickCompiledCodePtrSize(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldPtrWithSize( + EntryPointFromQuickCompiledCodeOffset(pointer_size), pointer_size); } - template + template void SetEntryPointFromQuickCompiledCode(const void* entry_point_from_quick_compiled_code) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldPtr( - EntryPointFromQuickCompiledCodeOffset(), entry_point_from_quick_compiled_code); + CheckObjectSizeEqualsMirrorSize(); + SetEntryPointFromQuickCompiledCodePtrSize(entry_point_from_quick_compiled_code, + sizeof(void*)); + } + template + ALWAYS_INLINE void SetEntryPointFromQuickCompiledCodePtrSize( + const void* entry_point_from_quick_compiled_code, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SetFieldPtrWithSize( + EntryPointFromQuickCompiledCodeOffset(pointer_size), entry_point_from_quick_compiled_code, + pointer_size); } uint32_t GetCodeSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); @@ -321,7 +359,7 @@ class MANAGED ArtMethod FINAL : public Object { uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static const void* EntryPointToCodePointer(const void* entry_point) ALWAYS_INLINE { + ALWAYS_INLINE static const void* EntryPointToCodePointer(const void* entry_point) { uintptr_t code = reinterpret_cast(entry_point); code &= ~0x1; // TODO: Make this Thumb2 specific. return reinterpret_cast(code); @@ -343,11 +381,23 @@ class MANAGED ArtMethod FINAL : public Object { SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); const uint8_t* GetNativeGcMap() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtr(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_)); + CheckObjectSizeEqualsMirrorSize(); + return GetNativeGcMapPtrSize(sizeof(void*)); } - template + ALWAYS_INLINE const uint8_t* GetNativeGcMapPtrSize(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldPtrWithSize(GcMapOffset(pointer_size), pointer_size); + } + template void SetNativeGcMap(const uint8_t* data) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - SetFieldPtr(OFFSET_OF_OBJECT_MEMBER(ArtMethod, gc_map_), data); + CheckObjectSizeEqualsMirrorSize(); + SetNativeGcMapPtrSize(data, sizeof(void*)); + } + template + ALWAYS_INLINE void SetNativeGcMapPtrSize(const uint8_t* data, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SetFieldPtrWithSize(GcMapOffset(pointer_size), data, + pointer_size); } // When building the oat need a convenient place to stuff the offset of the native GC map. 
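All of the MemberOffset helpers added in the next hunk share one pattern:
divide the host offset of a PtrSizedFields member by sizeof(void*) to get a
field index, then rescale by the target pointer size. A standalone sketch of
that arithmetic (illustrative; the 40-byte base is implied by the
METHOD_*_OFFSET_* constants in asm_support.h, not taken from the source):

    #include <cstddef>

    // base_offset is where the pointer-sized fields start (PtrSizedFields);
    // field_index is the member's declaration order: 0 interpreter, 1 jni,
    // 2 quick code, 3 gc map (4 portable, when built in).
    constexpr size_t PtrSizedFieldOffset(size_t base_offset, size_t field_index,
                                         size_t pointer_size) {
      return base_offset + field_index * pointer_size;
    }

    static_assert(PtrSizedFieldOffset(40, 2, 4) == 48,  // METHOD_QUICK_CODE_OFFSET_32
                  "32-bit quick entrypoint offset");
    static_assert(PtrSizedFieldOffset(40, 2, 8) == 56,  // METHOD_QUICK_CODE_OFFSET_64
                  "64-bit quick entrypoint offset");
    static_assert(PtrSizedFieldOffset(40, 4, 4) == 56,  // METHOD_PORTABLE_CODE_OFFSET_32
                  "32-bit portable entrypoint offset");
    static_assert(PtrSizedFieldOffset(40, 4, 8) == 72,  // METHOD_PORTABLE_CODE_OFFSET_64
                  "64-bit portable entrypoint offset");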
@@ -386,16 +436,46 @@ class MANAGED ArtMethod FINAL : public Object { void UnregisterNative(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); - static MemberOffset NativeMethodOffset() { - return OFFSET_OF_OBJECT_MEMBER(ArtMethod, entry_point_from_jni_); + static MemberOffset EntryPointFromInterpreterOffset(size_t pointer_size) { + return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER( + PtrSizedFields, entry_point_from_interpreter_) / sizeof(void*) * pointer_size); } - const void* GetNativeMethod() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { - return GetFieldPtr(NativeMethodOffset()); + static MemberOffset EntryPointFromJniOffset(size_t pointer_size) { + return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER( + PtrSizedFields, entry_point_from_jni_) / sizeof(void*) * pointer_size); } - template - void SetNativeMethod(const void*) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static MemberOffset EntryPointFromQuickCompiledCodeOffset(size_t pointer_size) { + return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER( + PtrSizedFields, entry_point_from_quick_compiled_code_) / sizeof(void*) * pointer_size); + } + + static MemberOffset GcMapOffset(size_t pointer_size) { + return MemberOffset(PtrSizedFieldsOffset() + OFFSETOF_MEMBER( + PtrSizedFields, gc_map_) / sizeof(void*) * pointer_size); + } + + void* GetEntryPointFromJni() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CheckObjectSizeEqualsMirrorSize(); + return GetEntryPointFromJniPtrSize(sizeof(void*)); + } + ALWAYS_INLINE void* GetEntryPointFromJniPtrSize(size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + return GetFieldPtrWithSize(EntryPointFromJniOffset(pointer_size), pointer_size); + } + + template + void SetEntryPointFromJni(const void* entrypoint) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + CheckObjectSizeEqualsMirrorSize(); + SetEntryPointFromJniPtrSize(entrypoint, sizeof(void*)); + } + template + ALWAYS_INLINE void SetEntryPointFromJniPtrSize(const void* entrypoint, size_t pointer_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + SetFieldPtrWithSize( + EntryPointFromJniOffset(pointer_size), entrypoint, pointer_size); + } static MemberOffset GetMethodIndexOffset() { return OFFSET_OF_OBJECT_MEMBER(ArtMethod, method_index_); @@ -484,6 +564,15 @@ class MANAGED ArtMethod FINAL : public Object { ALWAYS_INLINE ArtMethod* GetInterfaceMethodIfProxy() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static size_t SizeWithoutPointerFields() { + return sizeof(ArtMethod) - sizeof(PtrSizedFields); + } + + // Size of an instance of java.lang.reflect.ArtMethod not including its value array. + static size_t InstanceSize(size_t pointer_size) { + return SizeWithoutPointerFields() + (sizeof(PtrSizedFields) / sizeof(void*)) * pointer_size; + } + protected: // Field order required by test "ValidateFieldOrderOfJavaCppUnionClasses". // The class we are a part of. @@ -498,28 +587,6 @@ class MANAGED ArtMethod FINAL : public Object { // Short cuts to declaring_class_->dex_cache_ member for fast compiled code access. HeapReference> dex_cache_strings_; - // Method dispatch from the interpreter invokes this pointer which may cause a bridge into - // compiled code. - uint64_t entry_point_from_interpreter_; - - // Pointer to JNI function registered to this method, or a function to resolve the JNI function. - uint64_t entry_point_from_jni_; - - // Method dispatch from portable compiled code invokes this pointer which may cause bridging into - // quick compiled code or the interpreter. 
-#if defined(ART_USE_PORTABLE_COMPILER) - uint64_t entry_point_from_portable_compiled_code_; -#endif - - // Method dispatch from quick compiled code invokes this pointer which may cause bridging into - // portable compiled code or the interpreter. - uint64_t entry_point_from_quick_compiled_code_; - - // Pointer to a data structure created by the compiler and used by the garbage collector to - // determine which registers hold live references to objects within the heap. Keyed by native PC - // offsets for the quick compiler and dex PCs for the portable. - uint64_t gc_map_; - // Access flags; low 16 bits are defined by spec. uint32_t access_flags_; @@ -538,15 +605,48 @@ class MANAGED ArtMethod FINAL : public Object { // ifTable. uint32_t method_index_; + // Add alignment word here if necessary. + + // Must be the last fields in the method. + struct PACKED(4) PtrSizedFields { + // Method dispatch from the interpreter invokes this pointer which may cause a bridge into + // compiled code. + void* entry_point_from_interpreter_; + + // Pointer to JNI function registered to this method, or a function to resolve the JNI function. + void* entry_point_from_jni_; + + // Method dispatch from quick compiled code invokes this pointer which may cause bridging into + // portable compiled code or the interpreter. + void* entry_point_from_quick_compiled_code_; + + // Pointer to a data structure created by the compiler and used by the garbage collector to + // determine which registers hold live references to objects within the heap. Keyed by native PC + // offsets for the quick compiler and dex PCs for the portable. + void* gc_map_; + + // Method dispatch from portable compiled code invokes this pointer which may cause bridging + // into quick compiled code or the interpreter. Last to simplify entrypoint logic. +#if defined(ART_USE_PORTABLE_COMPILER) + void* entry_point_from_portable_compiled_code_; +#endif + } ptr_sized_fields_; + static GcRoot java_lang_reflect_ArtMethod_; private: + ALWAYS_INLINE void CheckObjectSizeEqualsMirrorSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + ALWAYS_INLINE ObjectArray* GetDexCacheResolvedMethods() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); ALWAYS_INLINE ObjectArray* GetDexCacheResolvedTypes() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_); + static size_t PtrSizedFieldsOffset() { + return OFFSETOF_MEMBER(ArtMethod, ptr_sized_fields_); + } + friend struct art::ArtMethodOffsets; // for verifying offset information DISALLOW_IMPLICIT_CONSTRUCTORS(ArtMethod); }; diff --git a/runtime/mirror/class.h b/runtime/mirror/class.h index d00c3ef6f..63aa67528 100644 --- a/runtime/mirror/class.h +++ b/runtime/mirror/class.h @@ -518,6 +518,13 @@ class MANAGED Class FINAL : public Object { return SetField32(OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size); } + void SetObjectSizeWithoutChecks(uint32_t new_object_size) + SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { + // Not called within a transaction. + return SetField32( + OFFSET_OF_OBJECT_MEMBER(Class, object_size_), new_object_size); + } + // Returns true if this class is in the same packages as that class. 
diff --git a/runtime/mirror/object-inl.h b/runtime/mirror/object-inl.h
index 97a8ee7aa..7a383e4c2 100644
--- a/runtime/mirror/object-inl.h
+++ b/runtime/mirror/object-inl.h
@@ -402,8 +402,7 @@ inline size_t Object::SizeOf() {
   }
   DCHECK_GE(result, sizeof(Object)) << " class=" << PrettyTypeOf(GetClass());
-  DCHECK(!(IsArtField())  || result == sizeof(ArtField));
-  DCHECK(!(IsArtMethod()) || result == sizeof(ArtMethod));
+  DCHECK(!(IsArtField()) || result == sizeof(ArtField));
   return result;
 }
 
@@ -817,7 +816,6 @@ inline void Object::VisitReferences(const Visitor& visitor,
       }
     }
   }
-
 
 }  // namespace mirror
 }  // namespace art
diff --git a/runtime/mirror/object.h b/runtime/mirror/object.h
index a6b622719..ae1aeb5ae 100644
--- a/runtime/mirror/object.h
+++ b/runtime/mirror/object.h
@@ -316,15 +316,26 @@ class MANAGED LOCKABLE Object {
       VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
   void SetFieldPtr(MemberOffset field_offset, T new_value)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
-    SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
-        field_offset, reinterpret_cast<int32_t>(new_value));
-#else
-    SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
-        field_offset, reinterpret_cast<int64_t>(new_value));
-#endif
+    SetFieldPtrWithSize<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+        field_offset, new_value, sizeof(void*));
   }
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, typename T>
+  ALWAYS_INLINE void SetFieldPtrWithSize(MemberOffset field_offset, T new_value,
+                                         size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+    if (pointer_size == 4) {
+      intptr_t ptr = reinterpret_cast<intptr_t>(new_value);
+      DCHECK_EQ(static_cast<int32_t>(ptr), ptr);  // Check that we don't lose any non-zero bits.
+      SetField32<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+          field_offset, static_cast<int32_t>(ptr));
+    } else {
+      SetField64<kTransactionActive, kCheckTransaction, kVerifyFlags>(
+          field_offset, static_cast<int64_t>(reinterpret_cast<intptr_t>(new_value)));
+    }
+  }
   // TODO fix thread safety analysis broken by the use of template. This should be
   // SHARED_LOCKS_REQUIRED(Locks::mutator_lock_).
   template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
   T GetFieldPtr(MemberOffset field_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-#ifndef __LP64__
-    return reinterpret_cast<T>(GetField32(field_offset));
-#else
-    return reinterpret_cast<T>(GetField64(field_offset));
-#endif
+    return GetFieldPtrWithSize<T, kVerifyFlags>(field_offset, sizeof(void*));
+  }
+
+  template<class T, VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE T GetFieldPtrWithSize(MemberOffset field_offset, size_t pointer_size)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    DCHECK(pointer_size == 4 || pointer_size == 8) << pointer_size;
+    if (pointer_size == 4) {
+      return reinterpret_cast<T>(GetField32(field_offset));
+    } else {
+      int64_t v = GetField64(field_offset);
+      // Check that we don't lose any non-zero bits.
+      DCHECK_EQ(reinterpret_cast<int64_t>(reinterpret_cast<T>(v)), v);
+      return reinterpret_cast<T>(v);
+    }
   }
 
   // TODO: Fixme when annotalysis works with visitors.
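The new SetFieldPtrWithSize/GetFieldPtrWithSize accessors pick the field width at runtime instead of baking it in with an #ifndef __LP64__, which is what lets a 64-bit host compiler write 32-bit images. A self-contained sketch of the pattern against raw memory (simplified names, not the ART accessors):

    #include <cassert>
    #include <cstddef>
    #include <cstdint>
    #include <cstring>

    // Writes a pointer into a field that is either 4 or 8 bytes wide.
    void StorePtr(uint8_t* field, void* value, size_t pointer_size) {
      assert(pointer_size == 4u || pointer_size == 8u);
      if (pointer_size == 4u) {
        intptr_t ptr = reinterpret_cast<intptr_t>(value);
        int32_t narrow = static_cast<int32_t>(ptr);
        assert(static_cast<intptr_t>(narrow) == ptr);  // No high bits lost.
        memcpy(field, &narrow, sizeof(narrow));
      } else {
        int64_t wide = static_cast<int64_t>(reinterpret_cast<intptr_t>(value));
        memcpy(field, &wide, sizeof(wide));
      }
    }

    // Reads back a pointer stored by StorePtr with the same pointer_size.
    void* LoadPtr(const uint8_t* field, size_t pointer_size) {
      assert(pointer_size == 4u || pointer_size == 8u);
      if (pointer_size == 4u) {
        int32_t narrow;
        memcpy(&narrow, field, sizeof(narrow));
        return reinterpret_cast<void*>(static_cast<intptr_t>(narrow));
      }
      int64_t wide;
      memcpy(&wide, field, sizeof(wide));
      return reinterpret_cast<void*>(static_cast<intptr_t>(wide));
    }

    int main() {
      alignas(8) uint8_t storage[8] = {};
      int x = 42;
      StorePtr(storage, &x, sizeof(void*));  // Host width, as SetFieldPtr does.
      assert(LoadPtr(storage, sizeof(void*)) == &x);
      return 0;
    }

The narrowing check mirrors the DCHECK in the patch: a 64-bit host may only store a pointer into a 4-byte slot if sign-extending the low 32 bits reproduces the original value.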
diff --git a/runtime/mirror/object_test.cc b/runtime/mirror/object_test.cc
index ede3b64e9..3e29e5f7c 100644
--- a/runtime/mirror/object_test.cc
+++ b/runtime/mirror/object_test.cc
@@ -89,11 +89,18 @@ TEST_F(ObjectTest, AsmConstants) {
   EXPECT_EQ(STRING_OFFSET_OFFSET, String::OffsetOffset().Int32Value());
   EXPECT_EQ(STRING_DATA_OFFSET, Array::DataOffset(sizeof(uint16_t)).Int32Value());
 
-  EXPECT_EQ(METHOD_DEX_CACHE_METHODS_OFFSET, ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
+  EXPECT_EQ(METHOD_DEX_CACHE_METHODS_OFFSET,
+            ArtMethod::DexCacheResolvedMethodsOffset().Int32Value());
 #if defined(ART_USE_PORTABLE_COMPILER)
-  EXPECT_EQ(METHOD_PORTABLE_CODE_OFFSET, ArtMethod::EntryPointFromPortableCompiledCodeOffset().Int32Value());
+  EXPECT_EQ(METHOD_PORTABLE_CODE_OFFSET_32,
+            ArtMethod::EntryPointFromPortableCompiledCodeOffset(4).Int32Value());
+  EXPECT_EQ(METHOD_PORTABLE_CODE_OFFSET_64,
+            ArtMethod::EntryPointFromPortableCompiledCodeOffset(8).Int32Value());
 #endif
-  EXPECT_EQ(METHOD_QUICK_CODE_OFFSET, ArtMethod::EntryPointFromQuickCompiledCodeOffset().Int32Value());
+  EXPECT_EQ(METHOD_QUICK_CODE_OFFSET_32,
+            ArtMethod::EntryPointFromQuickCompiledCodeOffset(4).Int32Value());
+  EXPECT_EQ(METHOD_QUICK_CODE_OFFSET_64,
+            ArtMethod::EntryPointFromQuickCompiledCodeOffset(8).Int32Value());
 }
 
 TEST_F(ObjectTest, IsInSamePackage) {
diff --git a/runtime/native_bridge_art_interface.cc b/runtime/native_bridge_art_interface.cc
index 9f77e55c4..809381beb 100644
--- a/runtime/native_bridge_art_interface.cc
+++ b/runtime/native_bridge_art_interface.cc
@@ -71,7 +71,7 @@ uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
     if (count < method_count) {
       methods[count].name = m->GetName();
       methods[count].signature = m->GetShorty();
-      methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+      methods[count].fnPtr = m->GetEntryPointFromJni();
       count++;
     } else {
       LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
@@ -84,7 +84,7 @@ uint32_t GetNativeMethods(JNIEnv* env, jclass clazz, JNINativeMethod* methods,
     if (count < method_count) {
       methods[count].name = m->GetName();
       methods[count].signature = m->GetShorty();
-      methods[count].fnPtr = const_cast<void*>(m->GetNativeMethod());
+      methods[count].fnPtr = m->GetEntryPointFromJni();
      count++;
     } else {
       LOG(WARNING) << "Output native method array too small. Skipping " << PrettyMethod(m);
-- 
2.11.0
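One consequence visible in the object_test.cc hunk: the assembler-visible constants now come in _32/_64 pairs, and both must agree on the same slot index inside the pointer-sized block. The invariant the test pins down is plain arithmetic; a compile-time restatement with hypothetical numbers (the real constants live in asm_support.h):

    #include <cstddef>

    // Hypothetical byte offset where the pointer-sized block begins, and the
    // slot index of the quick-code entrypoint within it; illustration only.
    constexpr size_t kPtrBlockStart = 36;
    constexpr size_t kQuickCodeSlot = 2;

    constexpr size_t QuickCodeOffset(size_t pointer_size) {
      return kPtrBlockStart + kQuickCodeSlot * pointer_size;
    }

    // A _64 constant exceeds its _32 twin by 4 bytes per preceding pointer slot.
    static_assert(QuickCodeOffset(8) - QuickCodeOffset(4) == kQuickCodeSlot * 4,
                  "32/64-bit offsets must agree on the slot index");

    int main() { return 0; }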