OSDN Git Service

am b148ad48: am f72fd02c: Merge "Quick compiler: disable GVN DO NOT MERGE" into lmp-dev
author: buzbee <buzbee@google.com>
Wed, 3 Sep 2014 14:56:40 +0000 (14:56 +0000)
committer: Android Git Automerger <android-git-automerger@android.com>
Wed, 3 Sep 2014 14:56:40 +0000 (14:56 +0000)
* commit 'b148ad486920d464c0713f34e77eb53778f5d36d':
  Quick compiler: disable GVN DO NOT MERGE

285 files changed:
build/Android.common_test.mk
build/Android.gtest.mk
build/Android.oat.mk
compiler/Android.mk
compiler/common_compiler_test.cc
compiler/common_compiler_test.h
compiler/compiled_method.cc
compiler/compiled_method.h
compiler/compiler.cc
compiler/compiler.h
compiler/compilers.cc [deleted file]
compiler/compilers.h [deleted file]
compiler/dex/bb_optimizations.cc
compiler/dex/bb_optimizations.h
compiler/dex/compiler_enums.h
compiler/dex/compiler_internals.h
compiler/dex/compiler_ir.cc [new file with mode: 0644]
compiler/dex/compiler_ir.h
compiler/dex/dex_to_dex_compiler.cc
compiler/dex/frontend.cc
compiler/dex/frontend.h
compiler/dex/global_value_numbering.cc
compiler/dex/global_value_numbering.h
compiler/dex/global_value_numbering_test.cc
compiler/dex/local_value_numbering.cc
compiler/dex/local_value_numbering.h
compiler/dex/mir_analysis.cc
compiler/dex/mir_dataflow.cc
compiler/dex/mir_graph.cc
compiler/dex/mir_graph.h
compiler/dex/mir_graph_test.cc
compiler/dex/mir_optimization.cc
compiler/dex/pass.h
compiler/dex/pass_driver.h
compiler/dex/pass_driver_me.h
compiler/dex/pass_driver_me_opts.cc
compiler/dex/pass_driver_me_post_opt.cc
compiler/dex/pass_me.h
compiler/dex/portable/mir_to_gbc.cc
compiler/dex/portable/mir_to_gbc.h
compiler/dex/post_opt_passes.cc
compiler/dex/post_opt_passes.h
compiler/dex/quick/arm/backend_arm.h [new file with mode: 0644]
compiler/dex/quick/arm/call_arm.cc
compiler/dex/quick/arm/codegen_arm.h
compiler/dex/quick/arm/target_arm.cc
compiler/dex/quick/arm64/arm64_lir.h
compiler/dex/quick/arm64/assemble_arm64.cc
compiler/dex/quick/arm64/backend_arm64.h [new file with mode: 0644]
compiler/dex/quick/arm64/call_arm64.cc
compiler/dex/quick/arm64/codegen_arm64.h
compiler/dex/quick/arm64/fp_arm64.cc
compiler/dex/quick/arm64/int_arm64.cc
compiler/dex/quick/arm64/target_arm64.cc
compiler/dex/quick/codegen_util.cc
compiler/dex/quick/dex_file_method_inliner.cc
compiler/dex/quick/dex_file_method_inliner.h
compiler/dex/quick/gen_common.cc
compiler/dex/quick/gen_invoke.cc
compiler/dex/quick/mips/backend_mips.h [new file with mode: 0644]
compiler/dex/quick/mips/call_mips.cc
compiler/dex/quick/mips/codegen_mips.h
compiler/dex/quick/mips/int_mips.cc
compiler/dex/quick/mips/target_mips.cc
compiler/dex/quick/mir_to_lir-inl.h
compiler/dex/quick/mir_to_lir.cc
compiler/dex/quick/mir_to_lir.h
compiler/dex/quick/quick_compiler.cc [new file with mode: 0644]
compiler/dex/quick/quick_compiler.h [new file with mode: 0644]
compiler/dex/quick/ralloc_util.cc
compiler/dex/quick/x86/assemble_x86.cc
compiler/dex/quick/x86/backend_x86.h [new file with mode: 0644]
compiler/dex/quick/x86/call_x86.cc
compiler/dex/quick/x86/codegen_x86.h
compiler/dex/quick/x86/fp_x86.cc
compiler/dex/quick/x86/int_x86.cc
compiler/dex/quick/x86/target_x86.cc
compiler/dex/quick/x86/utility_x86.cc
compiler/dex/quick/x86/x86_lir.h
compiler/dex/ssa_transformation.cc
compiler/dex/vreg_analysis.cc
compiler/driver/compiler_driver-inl.h
compiler/driver/compiler_driver.cc
compiler/driver/compiler_driver.h
compiler/elf_fixup.cc
compiler/elf_patcher.cc
compiler/elf_writer_quick.cc
compiler/elf_writer_quick.h
compiler/image_test.cc
compiler/image_writer.cc
compiler/jni/jni_compiler_test.cc
compiler/jni/quick/jni_compiler.cc
compiler/jni/quick/jni_compiler.h [new file with mode: 0644]
compiler/llvm/compiler_llvm.cc
compiler/llvm/compiler_llvm.h
compiler/llvm/llvm_compiler.cc [new file with mode: 0644]
compiler/llvm/llvm_compiler.h [new file with mode: 0644]
compiler/oat_test.cc
compiler/oat_writer.cc
compiler/oat_writer.h
compiler/optimizing/code_generator.cc
compiler/optimizing/code_generator.h
compiler/optimizing/code_generator_x86.cc
compiler/optimizing/code_generator_x86_64.cc
compiler/optimizing/nodes.h
compiler/optimizing/optimizing_compiler.cc
compiler/optimizing/optimizing_compiler.h [new file with mode: 0644]
compiler/optimizing/register_allocator.cc
compiler/optimizing/register_allocator_test.cc
compiler/optimizing/ssa_liveness_analysis.cc
compiler/optimizing/ssa_phi_elimination.cc
compiler/trampolines/trampoline_compiler.cc
compiler/utils/arena_bit_vector.cc
compiler/utils/arena_bit_vector.h
compiler/utils/arm64/assembler_arm64.cc
compiler/utils/arm64/assembler_arm64.h
compiler/utils/assembler.h
compiler/utils/dwarf_cfi.cc [new file with mode: 0644]
compiler/utils/dwarf_cfi.h [new file with mode: 0644]
compiler/utils/x86/assembler_x86.cc
compiler/utils/x86/assembler_x86.h
compiler/utils/x86/managed_register_x86.h
compiler/utils/x86_64/assembler_x86_64.cc
compiler/utils/x86_64/assembler_x86_64.h
compiler/utils/x86_64/managed_register_x86_64.h
dex2oat/Android.mk
dex2oat/dex2oat.cc
disassembler/Android.mk
disassembler/disassembler_x86.cc
oatdump/oatdump.cc
patchoat/Android.mk
patchoat/patchoat.cc
patchoat/patchoat.h
runtime/Android.mk
runtime/arch/arm/entrypoints_init_arm.cc
runtime/arch/arm/quick_entrypoints_arm.S
runtime/arch/arm64/entrypoints_init_arm64.cc
runtime/arch/arm64/quick_entrypoints_arm64.S
runtime/arch/mips/asm_support_mips.h
runtime/arch/mips/entrypoints_init_mips.cc
runtime/arch/mips/quick_entrypoints_mips.S
runtime/arch/stub_test.cc
runtime/arch/x86/asm_support_x86.S
runtime/arch/x86/entrypoints_init_x86.cc
runtime/arch/x86/fault_handler_x86.cc
runtime/arch/x86/jni_entrypoints_x86.S
runtime/arch/x86/portable_entrypoints_x86.S
runtime/arch/x86/quick_entrypoints_x86.S
runtime/arch/x86_64/asm_support_x86_64.S
runtime/arch/x86_64/entrypoints_init_x86_64.cc
runtime/arch/x86_64/quick_entrypoints_x86_64.S
runtime/base/allocator.cc
runtime/base/bit_vector-inl.h [new file with mode: 0644]
runtime/base/bit_vector.cc
runtime/base/bit_vector.h
runtime/base/bit_vector_test.cc
runtime/base/mutex.cc
runtime/base/mutex.h
runtime/check_jni.cc
runtime/check_jni.h [new file with mode: 0644]
runtime/class_linker.cc
runtime/class_linker.h
runtime/common_runtime_test.cc
runtime/common_runtime_test.h
runtime/common_throws.cc
runtime/debugger.cc
runtime/dex_file.cc
runtime/dex_file.h
runtime/dex_file_test.cc
runtime/dex_instruction_list.h
runtime/elf_file.cc
runtime/elf_file.h
runtime/entrypoints/entrypoint_utils-inl.h
runtime/entrypoints/entrypoint_utils.cc
runtime/entrypoints/quick/quick_alloc_entrypoints.cc
runtime/entrypoints/quick/quick_entrypoints_list.h
runtime/entrypoints/quick/quick_field_entrypoints.cc
runtime/entrypoints_order_test.cc
runtime/gc/accounting/card_table_test.cc
runtime/gc/accounting/mod_union_table.cc
runtime/gc/accounting/mod_union_table.h
runtime/gc/collector/garbage_collector.cc
runtime/gc/collector/garbage_collector.h
runtime/gc/heap.cc
runtime/gc/heap.h
runtime/gc/space/rosalloc_space.cc
runtime/gc/space/rosalloc_space.h
runtime/handle.h
runtime/indirect_reference_table-inl.h
runtime/indirect_reference_table.cc
runtime/indirect_reference_table.h
runtime/intern_table.cc
runtime/interpreter/interpreter_common.cc
runtime/interpreter/interpreter_goto_table_impl.cc
runtime/interpreter/interpreter_switch_impl.cc
runtime/java_vm_ext.cc [new file with mode: 0644]
runtime/java_vm_ext.h [new file with mode: 0644]
runtime/jni_env_ext-inl.h [moved from runtime/jni_internal-inl.h with 89% similarity]
runtime/jni_env_ext.cc [new file with mode: 0644]
runtime/jni_env_ext.h [new file with mode: 0644]
runtime/jni_internal.cc
runtime/jni_internal.h
runtime/jni_internal_test.cc
runtime/leb128.h
runtime/lock_word-inl.h
runtime/lock_word.h
runtime/method_helper.h
runtime/mirror/array-inl.h
runtime/mirror/art_field-inl.h
runtime/mirror/art_field.h
runtime/mirror/art_method-inl.h
runtime/mirror/art_method.cc
runtime/mirror/art_method.h
runtime/mirror/class-inl.h
runtime/mirror/class.cc
runtime/mirror/class.h
runtime/mirror/dex_cache-inl.h
runtime/mirror/object-inl.h
runtime/mirror/object.h
runtime/mirror/object_test.cc
runtime/mirror/reference-inl.h
runtime/mirror/string-inl.h
runtime/monitor.cc
runtime/monitor.h
runtime/native/dalvik_system_VMRuntime.cc
runtime/native/dalvik_system_ZygoteHooks.cc
runtime/native/java_lang_DexCache.cc
runtime/native/java_lang_Runtime.cc
runtime/native/org_apache_harmony_dalvik_ddmc_DdmServer.cc
runtime/oat.cc
runtime/oat_file.cc
runtime/oat_file.h
runtime/object_lock.cc
runtime/object_lock.h
runtime/parsed_options.cc
runtime/parsed_options.h
runtime/primitive.cc
runtime/primitive.h
runtime/quick/inline_method_analyser.h
runtime/quick_exception_handler.cc
runtime/reflection.cc
runtime/reflection.h
runtime/runtime.cc
runtime/runtime.h
runtime/scoped_thread_state_change.h
runtime/stack.h
runtime/thread-inl.h
runtime/thread.cc
runtime/thread.h
runtime/thread_linux.cc
runtime/thread_list.cc
runtime/transaction.cc
runtime/transaction.h
runtime/utils.cc
runtime/utils.h
runtime/verifier/method_verifier-inl.h
runtime/verifier/method_verifier.cc
runtime/verifier/method_verifier.h
runtime/verifier/reg_type.h
runtime/zip_archive.cc
sigchainlib/sigchain.cc
test/004-SignalTest/signaltest.cc
test/021-string2/src/junit/framework/Assert.java
test/021-string2/src/junit/framework/AssertionFailedError.java
test/021-string2/src/junit/framework/ComparisonCompactor.java [new file with mode: 0644]
test/021-string2/src/junit/framework/ComparisonFailure.java
test/046-reflect/expected.txt
test/046-reflect/src/Main.java
test/080-oom-throw/src/Main.java
test/082-inline-execute/src/junit/framework/Assert.java
test/082-inline-execute/src/junit/framework/AssertionFailedError.java
test/082-inline-execute/src/junit/framework/ComparisonCompactor.java [new file with mode: 0644]
test/082-inline-execute/src/junit/framework/ComparisonFailure.java
test/118-noimage-dex2oat/run
test/702-LargeBranchOffset/build [new file with mode: 0644]
test/702-LargeBranchOffset/expected.txt [new file with mode: 0644]
test/702-LargeBranchOffset/info.txt [new file with mode: 0644]
test/702-LargeBranchOffset/src/Main.java.in [new file with mode: 0644]
test/Android.run-test.mk
test/etc/host-run-test-jar
test/etc/push-and-run-prebuilt-test-jar
test/etc/push-and-run-test-jar
test/run-all-tests
test/run-test
tools/art [changed mode: 0755->0644]

index 7e38157..20c5a21 100644 (file)
@@ -26,18 +26,6 @@ ART_TARGET_CFLAGS += -DART_TARGET_NATIVETEST_DIR=${ART_TARGET_NATIVETEST_DIR}
 # List of known broken tests that we won't attempt to execute. The test name must be the full
 # rule name such as test-art-host-oat-optimizing-HelloWorld64.
 ART_TEST_KNOWN_BROKEN := \
-  test-art-host-run-test-gcstress-optimizing-no-prebuild-004-SignalTest32 \
-  test-art-host-run-test-gcstress-optimizing-prebuild-004-SignalTest32 \
-  test-art-host-run-test-gcstress-optimizing-norelocate-004-SignalTest32 \
-  test-art-host-run-test-gcstress-optimizing-relocate-004-SignalTest32 \
-  test-art-host-run-test-gcverify-optimizing-no-prebuild-004-SignalTest32 \
-  test-art-host-run-test-gcverify-optimizing-prebuild-004-SignalTest32 \
-  test-art-host-run-test-gcverify-optimizing-norelocate-004-SignalTest32 \
-  test-art-host-run-test-gcverify-optimizing-relocate-004-SignalTest32 \
-  test-art-host-run-test-optimizing-no-prebuild-004-SignalTest32 \
-  test-art-host-run-test-optimizing-prebuild-004-SignalTest32 \
-  test-art-host-run-test-optimizing-norelocate-004-SignalTest32 \
-  test-art-host-run-test-optimizing-relocate-004-SignalTest32 \
   test-art-target-run-test-gcstress-optimizing-prebuild-004-SignalTest32 \
   test-art-target-run-test-gcstress-optimizing-norelocate-004-SignalTest32 \
   test-art-target-run-test-gcstress-default-prebuild-004-SignalTest32 \
@@ -45,7 +33,13 @@ ART_TEST_KNOWN_BROKEN := \
   test-art-target-run-test-gcstress-optimizing-relocate-004-SignalTest32 \
   test-art-target-run-test-gcstress-default-relocate-004-SignalTest32 \
   test-art-target-run-test-gcstress-optimizing-no-prebuild-004-SignalTest32 \
-  test-art-target-run-test-gcstress-default-no-prebuild-004-SignalTest32
+  test-art-target-run-test-gcstress-default-no-prebuild-004-SignalTest32 \
+  test-art-host-run-test-gcstress-default-prebuild-114-ParallelGC32 \
+  test-art-host-run-test-gcstress-interpreter-prebuild-114-ParallelGC32 \
+  test-art-host-run-test-gcstress-optimizing-prebuild-114-ParallelGC32 \
+  test-art-host-run-test-gcstress-default-prebuild-114-ParallelGC64 \
+  test-art-host-run-test-gcstress-interpreter-prebuild-114-ParallelGC64 \
+  test-art-host-run-test-gcstress-optimizing-prebuild-114-ParallelGC64
 
 # List of known failing tests that when executed won't cause test execution to not finish.
 # The test name must be the full rule name such as test-art-host-oat-optimizing-HelloWorld64.
@@ -55,7 +49,13 @@ ART_TEST_KNOWN_FAILING :=
 ART_TEST_KEEP_GOING ?= true
 
 # Do you want all tests, even those that are time consuming?
-ART_TEST_FULL ?= true
+ART_TEST_FULL ?= false
+
+# Do you want default compiler tests run?
+ART_TEST_DEFAULT_COMPILER ?= true
+
+# Do you want interpreter tests run?
+ART_TEST_INTERPRETER ?= $(ART_TEST_FULL)
 
 # Do you want optimizing compiler tests run?
 ART_TEST_OPTIMIZING ?= $(ART_TEST_FULL)
@@ -69,17 +69,23 @@ ART_TEST_GC_VERIFY ?= $(ART_TEST_FULL)
 # Do you want tests with the GC stress mode enabled run?
 ART_TEST_GC_STRESS ?= $(ART_TEST_FULL)
 
-# Do you want run-tests with relocation enabled?
-ART_TEST_RUN_TEST_RELOCATE ?= $(ART_TEST_FULL)
+# Do you want tests with the JNI forcecopy mode enabled run?
+ART_TEST_JNI_FORCECOPY ?= $(ART_TEST_FULL)
 
-# Do you want run-tests with relocation disabled?
+# Do you want run-tests with relocation disabled run?
 ART_TEST_RUN_TEST_NO_RELOCATE ?= $(ART_TEST_FULL)
 
-# Do you want run-tests with prebuild disabled?
+# Do you want run-tests with no prebuilding enabled run?
 ART_TEST_RUN_TEST_NO_PREBUILD ?= $(ART_TEST_FULL)
 
-# Do you want run-tests with prebuild enabled?
-ART_TEST_RUN_TEST_PREBUILD ?= true
+# Do you want run-tests without a pregenerated core.art?
+ART_TEST_RUN_TEST_NO_IMAGE ?= $(ART_TEST_FULL)
+
+# Do you want run-tests with relocation enabled but patchoat failing?
+ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT ?= $(ART_TEST_FULL)
+
+# Do you want run-tests without a dex2oat?
+ART_TEST_RUN_TEST_NO_DEX2OAT ?= $(ART_TEST_FULL)
 
 # Do you want failed tests to have their artifacts cleaned up?
 ART_TEST_RUN_TEST_ALWAYS_CLEAN ?= true
index 9ee3b69..352938e 100644 (file)
@@ -48,7 +48,7 @@ $(foreach dir,$(GTEST_DEX_DIRECTORIES), $(eval $(call build-art-test-dex,art-gte
 # Dex file dependencies for each gtest.
 ART_GTEST_class_linker_test_DEX_DEPS := Interfaces MyClass Nested Statics StaticsFromCode
 ART_GTEST_compiler_driver_test_DEX_DEPS := AbstractMethod
-ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature
+ART_GTEST_dex_file_test_DEX_DEPS := GetMethodSignature Nested
 ART_GTEST_exception_test_DEX_DEPS := ExceptionHandle
 ART_GTEST_jni_compiler_test_DEX_DEPS := MyClassNatives
 ART_GTEST_jni_internal_test_DEX_DEPS := AllFields StaticLeafMethods
@@ -61,6 +61,9 @@ ART_GTEST_transaction_test_DEX_DEPS := Transaction
 # The elf writer test has dependencies on core.oat.
 ART_GTEST_elf_writer_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
 ART_GTEST_elf_writer_test_TARGET_DEPS := $(TARGET_CORE_OAT_OUT) $(2ND_TARGET_CORE_OAT_OUT)
+ART_GTEST_jni_internal_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
+ART_GTEST_proxy_test_TARGET_DEPS := $(TARGET_CORE_DEX_FILES)
+ART_GTEST_proxy_test_HOST_DEPS := $(HOST_CORE_OAT_OUT) $(2ND_HOST_CORE_OAT_OUT)
 
 # The path for which all the source files are relative, not actually the current directory.
 LOCAL_PATH := art
index 10936a4..1c462eb 100644 (file)
@@ -25,8 +25,10 @@ include art/build/Android.common_path.mk
 
 # Use dex2oat debug version for better error reporting
 # $(1): 2ND_ or undefined, 2ND_ for 32-bit host builds.
+# NB depending on HOST_CORE_DEX_LOCATIONS so we are sure to have the dex files in frameworks for
+# run-test --no-image
 define create-core-oat-host-rules
-$$($(1)HOST_CORE_IMG_OUT): $$(HOST_CORE_DEX_FILES) $$(DEX2OATD_DEPENDENCY)
+$$($(1)HOST_CORE_IMG_OUT): $$(HOST_CORE_DEX_LOCATIONS) $$(DEX2OATD_DEPENDENCY)
        @echo "host dex2oat: $$@ ($$?)"
        @mkdir -p $$(dir $$@)
        $$(hide) $$(DEX2OATD) --runtime-arg -Xms$(DEX2OAT_IMAGE_XMS) --runtime-arg -Xmx$(DEX2OAT_IMAGE_XMX) \
index 69f9387..6e48bdf 100644 (file)
@@ -48,6 +48,7 @@ LIBART_COMPILER_SRC_FILES := \
        dex/quick/mips/target_mips.cc \
        dex/quick/mips/utility_mips.cc \
        dex/quick/mir_to_lir.cc \
+       dex/quick/quick_compiler.cc \
        dex/quick/ralloc_util.cc \
        dex/quick/resource_mask.cc \
        dex/quick/x86/assemble_x86.cc \
@@ -62,6 +63,7 @@ LIBART_COMPILER_SRC_FILES := \
        dex/mir_method_info.cc \
        dex/mir_optimization.cc \
        dex/bb_optimizations.cc \
+       dex/compiler_ir.cc \
        dex/post_opt_passes.cc \
        dex/pass_driver_me_opts.cc \
        dex/pass_driver_me_post_opt.cc \
@@ -82,6 +84,7 @@ LIBART_COMPILER_SRC_FILES := \
        jni/quick/x86_64/calling_convention_x86_64.cc \
        jni/quick/calling_convention.cc \
        jni/quick/jni_compiler.cc \
+       llvm/llvm_compiler.cc \
        optimizing/builder.cc \
        optimizing/code_generator.cc \
        optimizing/code_generator_arm.cc \
@@ -107,6 +110,7 @@ LIBART_COMPILER_SRC_FILES := \
        utils/arm64/assembler_arm64.cc \
        utils/arm64/managed_register_arm64.cc \
        utils/assembler.cc \
+       utils/dwarf_cfi.cc \
        utils/mips/assembler_mips.cc \
        utils/mips/managed_register_mips.cc \
        utils/x86/assembler_x86.cc \
@@ -115,7 +119,6 @@ LIBART_COMPILER_SRC_FILES := \
        utils/x86_64/managed_register_x86_64.cc \
        utils/scoped_arena_allocator.cc \
        buffered_output_stream.cc \
-       compilers.cc \
        compiler.cc \
        elf_fixup.cc \
        elf_patcher.cc \
@@ -282,10 +285,10 @@ $$(ENUM_OPERATOR_OUT_GEN): $$(GENERATED_SRC_DIR)/%_operator_out.cc : $(LOCAL_PAT
 endef
 
 # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
   $(eval $(call build-libart-compiler,host,ndebug))
 endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
   $(eval $(call build-libart-compiler,host,debug))
 endif
 ifeq ($(ART_BUILD_TARGET_NDEBUG),true)
index 1823366..db9dcd4 100644 (file)
@@ -367,7 +367,7 @@ void CommonCompilerTest::CompileMethod(mirror::ArtMethod* method) {
   MakeExecutable(method);
 }
 
-void CommonCompilerTest::CompileDirectMethod(Handle<mirror::ClassLoader> class_loader,
+void CommonCompilerTest::CompileDirectMethod(ConstHandle<mirror::ClassLoader> class_loader,
                                              const char* class_name, const char* method_name,
                                              const char* signature) {
   std::string class_descriptor(DotToDescriptor(class_name));
@@ -380,9 +380,9 @@ void CommonCompilerTest::CompileDirectMethod(Handle<mirror::ClassLoader> class_l
   CompileMethod(method);
 }
 
-void CommonCompilerTest::CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
-                                              const char* method_name, const char* signature)
-SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+void CommonCompilerTest::CompileVirtualMethod(ConstHandle<mirror::ClassLoader> class_loader,
+                                              const char* class_name, const char* method_name,
+                                              const char* signature) {
   std::string class_descriptor(DotToDescriptor(class_name));
   Thread* self = Thread::Current();
   mirror::Class* klass = class_linker_->FindClass(self, class_descriptor.c_str(), class_loader);
index df06b71..4e74f0a 100644 (file)
@@ -63,11 +63,11 @@ class CommonCompilerTest : public CommonRuntimeTest {
 
   void CompileMethod(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CompileDirectMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
+  void CompileDirectMethod(ConstHandle<mirror::ClassLoader> class_loader, const char* class_name,
                            const char* method_name, const char* signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void CompileVirtualMethod(Handle<mirror::ClassLoader> class_loader, const char* class_name,
+  void CompileVirtualMethod(ConstHandle<mirror::ClassLoader> class_loader, const char* class_name,
                             const char* method_name, const char* signature)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
index f098a34..f9a78be 100644 (file)
@@ -148,16 +148,18 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
                                const size_t frame_size_in_bytes,
                                const uint32_t core_spill_mask,
                                const uint32_t fp_spill_mask,
+                               SrcMap* src_mapping_table,
                                const std::vector<uint8_t>& mapping_table,
                                const std::vector<uint8_t>& vmap_table,
                                const std::vector<uint8_t>& native_gc_map,
                                const std::vector<uint8_t>* cfi_info)
     : CompiledCode(driver, instruction_set, quick_code), frame_size_in_bytes_(frame_size_in_bytes),
       core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
-  mapping_table_(driver->DeduplicateMappingTable(mapping_table)),
-  vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
-  gc_map_(driver->DeduplicateGCMap(native_gc_map)),
-  cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
+      src_mapping_table_(driver->DeduplicateSrcMappingTable(src_mapping_table->Arrange())),
+      mapping_table_(driver->DeduplicateMappingTable(mapping_table)),
+      vmap_table_(driver->DeduplicateVMapTable(vmap_table)),
+      gc_map_(driver->DeduplicateGCMap(native_gc_map)),
+      cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
 }
 
 CompiledMethod::CompiledMethod(CompilerDriver* driver,
@@ -165,14 +167,16 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver,
                                const std::vector<uint8_t>& code,
                                const size_t frame_size_in_bytes,
                                const uint32_t core_spill_mask,
-                               const uint32_t fp_spill_mask)
+                               const uint32_t fp_spill_mask,
+                               const std::vector<uint8_t>* cfi_info)
     : CompiledCode(driver, instruction_set, code),
       frame_size_in_bytes_(frame_size_in_bytes),
       core_spill_mask_(core_spill_mask), fp_spill_mask_(fp_spill_mask),
+      src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
       mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
       vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
       gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())),
-      cfi_info_(nullptr) {
+      cfi_info_(driver->DeduplicateCFIInfo(cfi_info)) {
 }
 
 // Constructs a CompiledMethod for the Portable compiler.
@@ -181,19 +185,22 @@ CompiledMethod::CompiledMethod(CompilerDriver* driver, InstructionSet instructio
                                const std::string& symbol)
     : CompiledCode(driver, instruction_set, code, symbol),
       frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
-      fp_spill_mask_(0), gc_map_(driver->DeduplicateGCMap(gc_map)) {
-  mapping_table_ = driver->DeduplicateMappingTable(std::vector<uint8_t>());
-  vmap_table_ = driver->DeduplicateVMapTable(std::vector<uint8_t>());
+      fp_spill_mask_(0),
+      src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
+      mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
+      vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
+      gc_map_(driver->DeduplicateGCMap(gc_map)) {
 }
 
 CompiledMethod::CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set,
                                const std::string& code, const std::string& symbol)
     : CompiledCode(driver, instruction_set, code, symbol),
       frame_size_in_bytes_(kStackAlignment), core_spill_mask_(0),
-      fp_spill_mask_(0) {
-  mapping_table_ = driver->DeduplicateMappingTable(std::vector<uint8_t>());
-  vmap_table_ = driver->DeduplicateVMapTable(std::vector<uint8_t>());
-  gc_map_ = driver->DeduplicateGCMap(std::vector<uint8_t>());
+      fp_spill_mask_(0),
+      src_mapping_table_(driver->DeduplicateSrcMappingTable(SrcMap())),
+      mapping_table_(driver->DeduplicateMappingTable(std::vector<uint8_t>())),
+      vmap_table_(driver->DeduplicateVMapTable(std::vector<uint8_t>())),
+      gc_map_(driver->DeduplicateGCMap(std::vector<uint8_t>())) {
 }
 
 }  // namespace art
index b8cd851..36f4745 100644 (file)
@@ -100,7 +100,78 @@ class CompiledCode {
   std::vector<uint32_t> oatdata_offsets_to_compiled_code_offset_;
 };
 
-class CompiledMethod : public CompiledCode {
+class SrcMapElem {
+ public:
+  uint32_t from_;
+  int32_t to_;
+
+  explicit operator int64_t() const {
+    return (static_cast<int64_t>(to_) << 32) | from_;
+  }
+
+  bool operator<(const SrcMapElem& sme) const {
+    return int64_t(*this) < int64_t(sme);
+  }
+
+  bool operator==(const SrcMapElem& sme) const {
+    return int64_t(*this) == int64_t(sme);
+  }
+
+  explicit operator uint8_t() const {
+    return static_cast<uint8_t>(from_ + to_);
+  }
+};
+
+class SrcMap FINAL : public std::vector<SrcMapElem> {
+ public:
+  void SortByFrom() {
+    std::sort(begin(), end(), [] (const SrcMapElem& lhs, const SrcMapElem& rhs) -> bool {
+      return lhs.from_ < rhs.from_;
+    });
+  }
+
+  const_iterator FindByTo(int32_t to) const {
+    return std::lower_bound(begin(), end(), SrcMapElem({0, to}));
+  }
+
+  SrcMap& Arrange() {
+    if (!empty()) {
+      std::sort(begin(), end());
+      resize(std::unique(begin(), end()) - begin());
+      shrink_to_fit();
+    }
+    return *this;
+  }
+
+  void DeltaFormat(const SrcMapElem& start, uint32_t highest_pc) {
+    // Convert from abs values to deltas.
+    if (!empty()) {
+      SortByFrom();
+
+      // TODO: one PC can be mapped to several Java src lines.
+      // do we want such a one-to-many correspondence?
+
+      // get rid of the highest values
+      size_t i = size() - 1;
+      for (; i > 0 ; i--) {
+        if ((*this)[i].from_ >= highest_pc) {
+          break;
+        }
+      }
+      this->resize(i + 1);
+
+      for (size_t i = size(); --i >= 1; ) {
+        (*this)[i].from_ -= (*this)[i-1].from_;
+        (*this)[i].to_ -= (*this)[i-1].to_;
+      }
+      DCHECK((*this)[0].from_ >= start.from_);
+      (*this)[0].from_ -= start.from_;
+      (*this)[0].to_ -= start.to_;
+    }
+  }
+};
+
+class CompiledMethod FINAL : public CompiledCode {
  public:
   // Constructs a CompiledMethod for the non-LLVM compilers.
   CompiledMethod(CompilerDriver* driver,
@@ -109,6 +180,7 @@ class CompiledMethod : public CompiledCode {
                  const size_t frame_size_in_bytes,
                  const uint32_t core_spill_mask,
                  const uint32_t fp_spill_mask,
+                 SrcMap* src_mapping_table,
                  const std::vector<uint8_t>& mapping_table,
                  const std::vector<uint8_t>& vmap_table,
                  const std::vector<uint8_t>& native_gc_map,
@@ -120,7 +192,8 @@ class CompiledMethod : public CompiledCode {
                  const std::vector<uint8_t>& quick_code,
                  const size_t frame_size_in_bytes,
                  const uint32_t core_spill_mask,
-                 const uint32_t fp_spill_mask);
+                 const uint32_t fp_spill_mask,
+                 const std::vector<uint8_t>* cfi_info);
 
   // Constructs a CompiledMethod for the Portable compiler.
   CompiledMethod(CompilerDriver* driver, InstructionSet instruction_set, const std::string& code,
@@ -144,6 +217,11 @@ class CompiledMethod : public CompiledCode {
     return fp_spill_mask_;
   }
 
+  const SrcMap& GetSrcMappingTable() const {
+    DCHECK(src_mapping_table_ != nullptr);
+    return *src_mapping_table_;
+  }
+
   const std::vector<uint8_t>& GetMappingTable() const {
     DCHECK(mapping_table_ != nullptr);
     return *mapping_table_;
@@ -170,6 +248,8 @@ class CompiledMethod : public CompiledCode {
   const uint32_t core_spill_mask_;
   // For quick code, a bit mask describing spilled FPR callee-save registers.
   const uint32_t fp_spill_mask_;
+  // For quick code, a set of pairs (PC, Line) mapping from native PC offset to Java line
+  SrcMap* src_mapping_table_;
   // For quick code, a uleb128 encoded map from native PC offset to dex PC aswell as dex PC to
   // native PC offset. Size prefixed.
   std::vector<uint8_t>* mapping_table_;
index a832c31..fbfd8e6 100644 (file)
  */
 
 #include "compiler.h"
-#include "compilers.h"
-#include "driver/compiler_driver.h"
-#include "mirror/art_method-inl.h"
 
-#ifdef ART_USE_PORTABLE_COMPILER
-#include "dex/portable/mir_to_gbc.h"
-#include "elf_writer_mclinker.h"
-#endif
+#include "base/logging.h"
+#include "dex/quick/quick_compiler.h"
+#include "driver/compiler_driver.h"
+#include "llvm/llvm_compiler.h"
+#include "optimizing/optimizing_compiler.h"
 
 namespace art {
 
@@ -60,137 +58,21 @@ CompiledMethod* Compiler::TryCompileWithSeaIR(const art::DexFile::CodeItem* code
   return nullptr;
 }
 
-
-#ifdef ART_USE_PORTABLE_COMPILER
-
-extern "C" void ArtInitCompilerContext(art::CompilerDriver* driver);
-
-extern "C" void ArtUnInitCompilerContext(art::CompilerDriver* driver);
-
-extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver* driver,
-                                                 const art::DexFile::CodeItem* code_item,
-                                                 uint32_t access_flags,
-                                                 art::InvokeType invoke_type,
-                                                 uint16_t class_def_idx,
-                                                 uint32_t method_idx,
-                                                 jobject class_loader,
-                                                 const art::DexFile& dex_file);
-
-extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver* driver,
-                                                        uint32_t access_flags, uint32_t method_idx,
-                                                        const art::DexFile& dex_file);
-
-extern "C" void compilerLLVMSetBitcodeFileName(art::CompilerDriver* driver,
-                                               std::string const& filename);
-
-
-class LLVMCompiler FINAL : public Compiler {
- public:
-  explicit LLVMCompiler(CompilerDriver* driver) : Compiler(driver, 1000) {}
-
-  void Init() const OVERRIDE {
-    ArtInitCompilerContext(GetCompilerDriver());
-  }
-
-  void UnInit() const OVERRIDE {
-    ArtUnInitCompilerContext(GetCompilerDriver());
-  }
-
-  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
-                          uint32_t access_flags,
-                          InvokeType invoke_type,
-                          uint16_t class_def_idx,
-                          uint32_t method_idx,
-                          jobject class_loader,
-                          const DexFile& dex_file) const OVERRIDE {
-    CompiledMethod* method = TryCompileWithSeaIR(code_item,
-                                                 access_flags,
-                                                 invoke_type,
-                                                 class_def_idx,
-                                                 method_idx,
-                                                 class_loader,
-                                                 dex_file);
-    if (method != nullptr) {
-      return method;
-    }
-
-    return ArtCompileMethod(GetCompilerDriver(),
-                            code_item,
-                            access_flags,
-                            invoke_type,
-                            class_def_idx,
-                            method_idx,
-                            class_loader,
-                            dex_file);
-  }
-
-  CompiledMethod* JniCompile(uint32_t access_flags,
-                             uint32_t method_idx,
-                             const DexFile& dex_file) const OVERRIDE {
-    return ArtLLVMJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
-  }
-
-  uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const {
-    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode());
-  }
-
-  bool WriteElf(art::File* file,
-                OatWriter* oat_writer,
-                const std::vector<const art::DexFile*>& dex_files,
-                const std::string& android_root,
-                bool is_host, const CompilerDriver& driver) const
-      OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return art::ElfWriterMclinker::Create(
-        file, oat_writer, dex_files, android_root, is_host, driver);
-  }
-
-  Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
-    return PortableCodeGenerator(
-        cu, cu->mir_graph.get(), &cu->arena,
-        reinterpret_cast<art::llvm::LlvmCompilationUnit*>(compilation_unit));
-  }
-
-  void InitCompilationUnit(CompilationUnit& cu) const {
-      // Fused long branches not currently useful in bitcode.
-    cu.disable_opt |=
-        (1 << kBranchFusing) |
-        (1 << kSuppressExceptionEdges);
-  }
-
-  bool IsPortable() const OVERRIDE {
-    return true;
-  }
-
-  void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
-    typedef void (*SetBitcodeFileNameFn)(const CompilerDriver&, const std::string&);
-
-    SetBitcodeFileNameFn set_bitcode_file_name =
-      reinterpret_cast<SetBitcodeFileNameFn>(compilerLLVMSetBitcodeFileName);
-
-    set_bitcode_file_name(driver, filename);
-  }
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(LLVMCompiler);
-};
-#endif
-
 Compiler* Compiler::Create(CompilerDriver* driver, Compiler::Kind kind) {
   switch (kind) {
     case kQuick:
-      return new QuickCompiler(driver);
-      break;
+      return CreateQuickCompiler(driver);
+
     case kOptimizing:
-      return new OptimizingCompiler(driver);
-      break;
+      return CreateOptimizingCompiler(driver);
+
     case kPortable:
-#ifdef ART_USE_PORTABLE_COMPILER
-      return new LLVMCompiler(driver);
-#else
-      LOG(FATAL) << "Portable compiler not compiled";
-#endif
-      break;
+      {
+        Compiler* compiler = CreateLLVMCompiler(driver);
+        CHECK(compiler != nullptr) << "Portable compiler not compiled";
+        return compiler;
+      }
+
     default:
       LOG(FATAL) << "UNREACHABLE";
   }
index 4caebf3..05fa858 100644 (file)
@@ -26,13 +26,19 @@ class Backend;
 struct CompilationUnit;
 class CompilerDriver;
 class CompiledMethod;
-class MIRGraph;
 class OatWriter;
 
 namespace mirror {
   class ArtMethod;
 }
 
+// Base class for compiler-specific thread-local storage for compiler worker threads
+class CompilerTls {
+  public:
+    CompilerTls() {}
+    ~CompilerTls() {}
+};
+
 class Compiler {
  public:
   enum Kind {
@@ -47,6 +53,9 @@ class Compiler {
 
   virtual void UnInit() const = 0;
 
+  virtual bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu)
+      const = 0;
+
   virtual CompiledMethod* Compile(const DexFile::CodeItem* code_item,
                                   uint32_t access_flags,
                                   InvokeType invoke_type,
@@ -109,6 +118,10 @@ class Compiler {
     return nullptr;
   }
 
+  virtual CompilerTls* CreateNewCompilerTls() {
+    return nullptr;
+  }
+
  protected:
   explicit Compiler(CompilerDriver* driver, uint64_t warning) :
       driver_(driver), maximum_compilation_time_before_warning_(warning) {
diff --git a/compiler/compilers.cc b/compiler/compilers.cc
deleted file mode 100644 (file)
index 250924a..0000000
+++ /dev/null
@@ -1,157 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#include "compilers.h"
-
-#include "dex/mir_graph.h"
-#include "dex/quick/mir_to_lir.h"
-#include "elf_writer_quick.h"
-#include "mirror/art_method-inl.h"
-
-namespace art {
-
-extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver* driver);
-extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver* driver);
-extern "C" art::CompiledMethod* ArtQuickCompileMethod(art::CompilerDriver* driver,
-                                                      const art::DexFile::CodeItem* code_item,
-                                                      uint32_t access_flags,
-                                                      art::InvokeType invoke_type,
-                                                      uint16_t class_def_idx,
-                                                      uint32_t method_idx,
-                                                      jobject class_loader,
-                                                      const art::DexFile& dex_file);
-
-extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver* driver,
-                                                         uint32_t access_flags, uint32_t method_idx,
-                                                         const art::DexFile& dex_file);
-
-// Hack for CFI CIE initialization
-extern std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64);
-
-void QuickCompiler::Init() const {
-  ArtInitQuickCompilerContext(GetCompilerDriver());
-}
-
-void QuickCompiler::UnInit() const {
-  ArtUnInitQuickCompilerContext(GetCompilerDriver());
-}
-
-CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
-                                       uint32_t access_flags,
-                                       InvokeType invoke_type,
-                                       uint16_t class_def_idx,
-                                       uint32_t method_idx,
-                                       jobject class_loader,
-                                       const DexFile& dex_file) const {
-  CompiledMethod* method = TryCompileWithSeaIR(code_item,
-                                               access_flags,
-                                               invoke_type,
-                                               class_def_idx,
-                                               method_idx,
-                                               class_loader,
-                                               dex_file);
-  if (method != nullptr) {
-    return method;
-  }
-
-  return ArtQuickCompileMethod(GetCompilerDriver(),
-                               code_item,
-                               access_flags,
-                               invoke_type,
-                               class_def_idx,
-                               method_idx,
-                               class_loader,
-                               dex_file);
-}
-
-CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags,
-                                          uint32_t method_idx,
-                                          const DexFile& dex_file) const {
-  return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
-}
-
-uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
-  return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
-}
-
-bool QuickCompiler::WriteElf(art::File* file,
-                             OatWriter* oat_writer,
-                             const std::vector<const art::DexFile*>& dex_files,
-                             const std::string& android_root,
-                             bool is_host) const {
-  return art::ElfWriterQuick::Create(file, oat_writer, dex_files, android_root, is_host,
-                                     *GetCompilerDriver());
-}
-
-Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
-  Mir2Lir* mir_to_lir = nullptr;
-  switch (cu->instruction_set) {
-    case kThumb2:
-      mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
-      break;
-    case kArm64:
-      mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
-      break;
-    case kMips:
-      mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
-      break;
-    case kX86:
-      // Fall-through.
-    case kX86_64:
-      mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
-      break;
-    default:
-      LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
-  }
-
-  /* The number of compiler temporaries depends on backend so set it up now if possible */
-  if (mir_to_lir) {
-    size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
-    bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
-    CHECK(set_max);
-  }
-  return mir_to_lir;
-}
-
-std::vector<uint8_t>* QuickCompiler::GetCallFrameInformationInitialization(
-    const CompilerDriver& driver) const {
-  if (driver.GetInstructionSet() == kX86) {
-    return X86CFIInitialization(false);
-  }
-  if (driver.GetInstructionSet() == kX86_64) {
-    return X86CFIInitialization(true);
-  }
-  return nullptr;
-}
-
-CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
-                                            uint32_t access_flags,
-                                            InvokeType invoke_type,
-                                            uint16_t class_def_idx,
-                                            uint32_t method_idx,
-                                            jobject class_loader,
-                                            const DexFile& dex_file) const {
-  CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
-                                      method_idx, class_loader, dex_file);
-  if (method != nullptr) {
-    return method;
-  }
-
-  return QuickCompiler::Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
-                                class_loader, dex_file);
-}
-
-}  // namespace art
diff --git a/compiler/compilers.h b/compiler/compilers.h
deleted file mode 100644 (file)
index 2c231e1..0000000
+++ /dev/null
@@ -1,102 +0,0 @@
-/*
- * Copyright (C) 2014 The Android Open Source Project
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- *      http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-
-#ifndef ART_COMPILER_COMPILERS_H_
-#define ART_COMPILER_COMPILERS_H_
-
-#include "compiler.h"
-
-namespace art {
-
-class QuickCompiler : public Compiler {
- public:
-  explicit QuickCompiler(CompilerDriver* driver) : Compiler(driver, 100) {}
-
-  void Init() const OVERRIDE;
-
-  void UnInit() const OVERRIDE;
-
-  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
-                          uint32_t access_flags,
-                          InvokeType invoke_type,
-                          uint16_t class_def_idx,
-                          uint32_t method_idx,
-                          jobject class_loader,
-                          const DexFile& dex_file) const OVERRIDE;
-
-  CompiledMethod* JniCompile(uint32_t access_flags,
-                             uint32_t method_idx,
-                             const DexFile& dex_file) const OVERRIDE;
-
-  uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  bool WriteElf(art::File* file,
-                OatWriter* oat_writer,
-                const std::vector<const art::DexFile*>& dex_files,
-                const std::string& android_root,
-                bool is_host) const
-    OVERRIDE
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const OVERRIDE;
-
-  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE {}
-
-  /*
-   * @brief Generate and return Dwarf CFI initialization, if supported by the
-   * backend.
-   * @param driver CompilerDriver for this compile.
-   * @returns nullptr if not supported by backend or a vector of bytes for CFI DWARF
-   * information.
-   * @note This is used for backtrace information in generated code.
-   */
-  std::vector<uint8_t>* GetCallFrameInformationInitialization(const CompilerDriver& driver) const
-      OVERRIDE;
-
- private:
-  DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
-};
-
-class OptimizingCompiler FINAL : public QuickCompiler {
- public:
-  explicit OptimizingCompiler(CompilerDriver* driver);
-
-  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
-                          uint32_t access_flags,
-                          InvokeType invoke_type,
-                          uint16_t class_def_idx,
-                          uint32_t method_idx,
-                          jobject class_loader,
-                          const DexFile& dex_file) const OVERRIDE;
-
-  CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
-                             uint32_t access_flags,
-                             InvokeType invoke_type,
-                             uint16_t class_def_idx,
-                             uint32_t method_idx,
-                             jobject class_loader,
-                             const DexFile& dex_file) const;
-
- private:
-  std::unique_ptr<std::ostream> visualizer_output_;
-
-  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
-};
-
-}  // namespace art
-
-#endif  // ART_COMPILER_COMPILERS_H_
index 920cde2..6a610ab 100644 (file)
@@ -23,9 +23,9 @@ namespace art {
 /*
  * Code Layout pass implementation start.
  */
-bool CodeLayout::Worker(const PassDataHolder* data) const {
+bool CodeLayout::Worker(PassDataHolder* data) const {
   DCHECK(data != nullptr);
-  const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
   CompilationUnit* c_unit = pass_me_data_holder->c_unit;
   DCHECK(c_unit != nullptr);
   BasicBlock* bb = pass_me_data_holder->bb;
@@ -38,9 +38,9 @@ bool CodeLayout::Worker(const PassDataHolder* data) const {
 /*
  * BasicBlock Combine pass implementation start.
  */
-bool BBCombine::Worker(const PassDataHolder* data) const {
+bool BBCombine::Worker(PassDataHolder* data) const {
   DCHECK(data != nullptr);
-  const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
   CompilationUnit* c_unit = pass_me_data_holder->c_unit;
   DCHECK(c_unit != nullptr);
   BasicBlock* bb = pass_me_data_holder->bb;
index 7395324..2920190 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
 #define ART_COMPILER_DEX_BB_OPTIMIZATIONS_H_
 
+#include "base/casts.h"
 #include "compiler_internals.h"
 #include "pass_me.h"
 
@@ -95,9 +96,9 @@ class SpecialMethodInliner : public PassME {
     cUnit->mir_graph->InlineSpecialMethodsStart();
   }
 
-  bool Worker(const PassDataHolder* data) const {
+  bool Worker(PassDataHolder* data) const {
     DCHECK(data != nullptr);
-    const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
     CompilationUnit* cUnit = pass_me_data_holder->c_unit;
     DCHECK(cUnit != nullptr);
     BasicBlock* bb = pass_me_data_holder->bb;
@@ -131,7 +132,7 @@ class CodeLayout : public PassME {
     cUnit->mir_graph->VerifyDataflow();
   }
 
-  bool Worker(const PassDataHolder* data) const;
+  bool Worker(PassDataHolder* data) const;
 };
 
 /**
@@ -151,9 +152,9 @@ class NullCheckEliminationAndTypeInference : public PassME {
     cUnit->mir_graph->EliminateNullChecksAndInferTypesStart();
   }
 
-  bool Worker(const PassDataHolder* data) const {
+  bool Worker(PassDataHolder* data) const {
     DCHECK(data != nullptr);
-    const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
     CompilationUnit* cUnit = pass_me_data_holder->c_unit;
     DCHECK(cUnit != nullptr);
     BasicBlock* bb = pass_me_data_holder->bb;
@@ -182,9 +183,9 @@ class ClassInitCheckElimination : public PassME {
     return cUnit->mir_graph->EliminateClassInitChecksGate();
   }
 
-  bool Worker(const PassDataHolder* data) const {
+  bool Worker(PassDataHolder* data) const {
     DCHECK(data != nullptr);
-    const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
     CompilationUnit* cUnit = pass_me_data_holder->c_unit;
     DCHECK(cUnit != nullptr);
     BasicBlock* bb = pass_me_data_holder->bb;
@@ -217,9 +218,9 @@ class GlobalValueNumberingPass : public PassME {
     return cUnit->mir_graph->ApplyGlobalValueNumberingGate();
   }
 
-  bool Worker(const PassDataHolder* data) const OVERRIDE {
+  bool Worker(PassDataHolder* data) const {
     DCHECK(data != nullptr);
-    const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+    PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
     CompilationUnit* cUnit = pass_me_data_holder->c_unit;
     DCHECK(cUnit != nullptr);
     BasicBlock* bb = pass_me_data_holder->bb;
@@ -251,7 +252,7 @@ class BBCombine : public PassME {
     return ((cUnit->disable_opt & (1 << kSuppressExceptionEdges)) != 0);
   }
 
-  bool Worker(const PassDataHolder* data) const;
+  bool Worker(PassDataHolder* data) const;
 };
 
 /**
index dcc67c3..9c2a8ba 100644 (file)
@@ -107,14 +107,52 @@ enum LIRPseudoOpcode {
 enum ExtendedMIROpcode {
   kMirOpFirst = kNumPackedOpcodes,
   kMirOpPhi = kMirOpFirst,
+
+  // @brief Copy from one VR to another.
+  // @details
+  // vA: destination VR
+  // vB: source VR
   kMirOpCopy,
+
+  // @brief Used to do float comparison with less-than bias.
+  // @details Unlike cmpl-float, this does not store result of comparison in VR.
+  // vA: left-hand side VR for comparison.
+  // vB: right-hand side VR for comparison.
   kMirOpFusedCmplFloat,
+
+  // @brief Used to do float comparison with greater-than bias.
+  // @details Unlike cmpg-float, this does not store result of comparison in VR.
+  // vA: left-hand side VR for comparison.
+  // vB: right-hand side VR for comparison.
   kMirOpFusedCmpgFloat,
+
+  // @brief Used to do double comparison with less-than bias.
+  // @details Unlike cmpl-double, this does not store result of comparison in VR.
+  // vA: left-hand side wide VR for comparison.
+  // vB: right-hand side wide VR for comparison.
   kMirOpFusedCmplDouble,
+
+  // @brief Used to do double comparison with greater-than bias.
+  // @details Unlike cmpg-double, this does not store result of comparison in VR.
+  // vA: left-hand side wide VR for comparison.
+  // vB: right-hand side wide VR for comparison.
   kMirOpFusedCmpgDouble,
+
+  // @brief Used to do comparison of 64-bit long integers.
+  // @details Unlike cmp-long, this does not store result of comparison in VR.
+  // vA: left-hand side wide VR for comparison.
+  // vB: right-hand side wide VR for comparison.
   kMirOpFusedCmpLong,
+
+  // @brief This represents no-op.
   kMirOpNop,
+
+  // @brief Do a null check on the object register.
+  // @details The backends may implement this implicitly or explicitly. This MIR is guaranteed
+  // to have the correct offset as an exception thrower.
+  // vA: object register
   kMirOpNullCheck,
+
   kMirOpRangeCheck,
   kMirOpDivZeroCheck,
   kMirOpCheck,
@@ -228,6 +266,10 @@ enum ExtendedMIROpcode {
   // @note: All currently reserved vector registers are returned to the temporary pool.
   kMirOpReturnVectorRegisters,
 
+  // @brief Create a memory barrier.
+  // vA: a constant defined by enum MemBarrierKind.
+  kMirOpMemBarrier,
+
   kMirOpLast,
 };
 
@@ -243,6 +285,7 @@ enum MIROptimizationFlagPositions {
   kMIRIgnoreSuspendCheck,
   kMIRDup,
   kMIRMark,                           // Temporary node mark.
+  kMIRStoreNonTemporal,
   kMIRLastMIRFlag,
 };
 
@@ -447,12 +490,15 @@ std::ostream& operator<<(std::ostream& os, const DividePattern& pattern);
  * -# Use LoadAny barrier ~= (LoadLoad | LoadStore) ~= acquire barrierafter each volatile load.
  * -# Use StoreStore barrier after all stores but before return from any constructor whose
  *    class has final fields.
+ * -# Use NTStoreStore to order non-temporal stores with respect to all later
+ *    store-to-memory instructions.  Only generated together with non-temporal stores.
  */
 enum MemBarrierKind {
   kAnyStore,
   kLoadAny,
   kStoreStore,
-  kAnyAny
+  kAnyAny,
+  kNTStoreStore,
 };
 
 std::ostream& operator<<(std::ostream& os, const MemBarrierKind& kind);
@@ -528,6 +574,7 @@ enum FixupKind {
   kFixupLoad,        // Mostly for immediates.
   kFixupVLoad,       // FP load which *may* be pc-relative.
   kFixupCBxZ,        // Cbz, Cbnz.
+  kFixupTBxZ,        // Tbz, Tbnz.
   kFixupPushPop,     // Not really pc relative, but changes size based on args.
   kFixupCondBranch,  // Conditional branch
   kFixupT1Branch,    // Thumb1 Unconditional branch
index 9dd0272..2019f0b 100644 (file)
 #include <stdio.h>
 
 #include "base/logging.h"
-#include "class_linker.h"
-#include "driver/compiler_driver.h"
-#include "quick/mir_to_lir.h"
 #include "mir_graph.h"
 #include "compiler_ir.h"
-#include "frontend.h"
-#include "monitor.h"
-#include "thread.h"
+#include "frontend.h"  // Debug flags.
 #include "utils.h"
 
 #endif  // ART_COMPILER_DEX_COMPILER_INTERNALS_H_
diff --git a/compiler/dex/compiler_ir.cc b/compiler/dex/compiler_ir.cc
new file mode 100644 (file)
index 0000000..ce48eb2
--- /dev/null
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "compiler_ir.h"
+
+#include "backend.h"
+#include "frontend.h"
+#include "mir_graph.h"
+
+namespace art {
+
+CompilationUnit::CompilationUnit(ArenaPool* pool)
+  : compiler_driver(nullptr),
+    class_linker(nullptr),
+    dex_file(nullptr),
+    class_loader(nullptr),
+    class_def_idx(0),
+    method_idx(0),
+    code_item(nullptr),
+    access_flags(0),
+    invoke_type(kDirect),
+    shorty(nullptr),
+    disable_opt(0),
+    enable_debug(0),
+    verbose(false),
+    compiler(nullptr),
+    instruction_set(kNone),
+    target64(false),
+    compiler_flip_match(false),
+    arena(pool),
+    arena_stack(pool),
+    mir_graph(nullptr),
+    cg(nullptr),
+    timings("QuickCompiler", true, false),
+    print_pass(false) {
+}
+
+CompilationUnit::~CompilationUnit() {
+  overridden_pass_options.clear();
+}
+
+void CompilationUnit::StartTimingSplit(const char* label) {
+  if (compiler_driver->GetDumpPasses()) {
+    timings.StartTiming(label);
+  }
+}
+
+void CompilationUnit::NewTimingSplit(const char* label) {
+  if (compiler_driver->GetDumpPasses()) {
+    timings.EndTiming();
+    timings.StartTiming(label);
+  }
+}
+
+void CompilationUnit::EndTiming() {
+  if (compiler_driver->GetDumpPasses()) {
+    timings.EndTiming();
+    if (enable_debug & (1 << kDebugTimings)) {
+      LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
+      LOG(INFO) << Dumpable<TimingLogger>(timings);
+    }
+  }
+}
+
+}  // namespace art
index 66fb608..c4e43fd 100644 (file)
 #ifndef ART_COMPILER_DEX_COMPILER_IR_H_
 #define ART_COMPILER_DEX_COMPILER_IR_H_
 
+#include <string>
 #include <vector>
 
 #include "compiler_enums.h"
-#include "dex/quick/mir_to_lir.h"
-#include "dex_instruction.h"
 #include "driver/compiler_driver.h"
-#include "driver/dex_compilation_unit.h"
-#include "safe_map.h"
 #include "utils/scoped_arena_allocator.h"
 #include "base/timing_logger.h"
 #include "utils/arena_allocator.h"
 
 namespace art {
 
-struct ArenaMemBlock;
 class Backend;
-struct Memstats;
+class ClassLinker;
 class MIRGraph;
-class Mir2Lir;
+
+/*
+ * TODO: refactoring pass to move these (and other) typedefs towards usage style of runtime to
+ * add type safety (see runtime/offsets.h).
+ */
+typedef uint32_t DexOffset;          // Dex offset in code units.
+typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
+typedef uint32_t CodeOffset;         // Native code offset in bytes.
 
 struct CompilationUnit {
   explicit CompilationUnit(ArenaPool* pool);
@@ -69,12 +72,6 @@ struct CompilationUnit {
   InstructionSetFeatures GetInstructionSetFeatures() {
     return compiler_driver->GetInstructionSetFeatures();
   }
-  // TODO: much of this info available elsewhere.  Go to the original source?
-  uint16_t num_dalvik_registers;        // method->registers_size.
-  const uint16_t* insns;
-  uint16_t num_ins;
-  uint16_t num_outs;
-  uint16_t num_regs;            // Unlike num_dalvik_registers, does not include ins.
 
   // If non-empty, apply optimizer/debug flags only to matching methods.
   std::string compiler_method_match;
@@ -89,6 +86,15 @@ struct CompilationUnit {
   std::unique_ptr<Backend> cg;           // Target-specific codegen.
   TimingLogger timings;
   bool print_pass;                 // Do we want to print a pass or not?
+
+  /**
+   * @brief Holds pass options for current pass being applied to compilation unit.
+   * @details This is updated for every pass to contain the overridden pass options
+   * that were specified by user. The pass itself will check this to see if the
+   * default settings have been changed. The key is simply the option string without
+   * the pass name.
+   */
+  SafeMap<const std::string, int> overridden_pass_options;
 };
 
 }  // namespace art
index b9f9437..f9a05c2 100644 (file)
@@ -121,13 +121,23 @@ void DexCompiler::Compile() {
         break;
 
       case Instruction::IPUT:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true);
+        break;
+
       case Instruction::IPUT_BOOLEAN:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BOOLEAN_QUICK, true);
+        break;
+
       case Instruction::IPUT_BYTE:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_BYTE_QUICK, true);
+        break;
+
       case Instruction::IPUT_CHAR:
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_CHAR_QUICK, true);
+        break;
+
       case Instruction::IPUT_SHORT:
-        // These opcodes have the same implementation in interpreter so group
-        // them under IPUT_QUICK.
-        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_QUICK, true);
+        CompileInstanceFieldAccess(inst, dex_pc, Instruction::IPUT_SHORT_QUICK, true);
         break;
 
       case Instruction::IPUT_WIDE:
index 2ba320f..6f6ae49 100644 (file)
  * limitations under the License.
  */
 
+#include "frontend.h"
+
 #include <cstdint>
 
+#include "backend.h"
 #include "compiler.h"
 #include "compiler_internals.h"
 #include "driver/compiler_driver.h"
 #include "driver/compiler_options.h"
-#include "dataflow_iterator-inl.h"
-#include "leb128.h"
 #include "mirror/object.h"
 #include "pass_driver_me_opts.h"
 #include "runtime.h"
 
 namespace art {
 
-extern "C" void ArtInitQuickCompilerContext(art::CompilerDriver* driver) {
-  CHECK(driver->GetCompilerContext() == nullptr);
-}
-
-extern "C" void ArtUnInitQuickCompilerContext(art::CompilerDriver* driver) {
-  CHECK(driver->GetCompilerContext() == nullptr);
-}
-
 /* Default optimizer/debug setting for the compiler. */
 static uint32_t kCompilerOptimizerDisableFlags = 0 |  // Disable specific optimizations
   (1 << kLoadStoreElimination) |
@@ -81,542 +74,8 @@ static uint32_t kCompilerDebugFlags = 0 |     // Enable debug/testing modes
   // (1 << kDebugCodegenDump) |
   0;
 
-COMPILE_ASSERT(0U == static_cast<size_t>(kNone), kNone_not_0);
-COMPILE_ASSERT(1U == static_cast<size_t>(kArm), kArm_not_1);
-COMPILE_ASSERT(2U == static_cast<size_t>(kArm64), kArm64_not_2);
-COMPILE_ASSERT(3U == static_cast<size_t>(kThumb2), kThumb2_not_3);
-COMPILE_ASSERT(4U == static_cast<size_t>(kX86), kX86_not_4);
-COMPILE_ASSERT(5U == static_cast<size_t>(kX86_64), kX86_64_not_5);
-COMPILE_ASSERT(6U == static_cast<size_t>(kMips), kMips_not_6);
-COMPILE_ASSERT(7U == static_cast<size_t>(kMips64), kMips64_not_7);
-
-// Additional disabled optimizations (over generally disabled) per instruction set.
-static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
-    // 0 = kNone.
-    ~0U,
-    // 1 = kArm, unused (will use kThumb2).
-    ~0U,
-    // 2 = kArm64.
-    0,
-    // 3 = kThumb2.
-    0,
-    // 4 = kX86.
-    (1 << kLoadStoreElimination) |
-    0,
-    // 5 = kX86_64.
-    (1 << kLoadStoreElimination) |
-    0,
-    // 6 = kMips.
-    (1 << kLoadStoreElimination) |
-    (1 << kLoadHoisting) |
-    (1 << kSuppressLoads) |
-    (1 << kNullCheckElimination) |
-    (1 << kPromoteRegs) |
-    (1 << kTrackLiveTemps) |
-    (1 << kSafeOptimizations) |
-    (1 << kBBOpt) |
-    (1 << kMatch) |
-    (1 << kPromoteCompilerTemps) |
-    0,
-    // 7 = kMips64.
-    ~0U
-};
-COMPILE_ASSERT(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t), kDisabledOpts_unexp);
-
-// Supported shorty types per instruction set. nullptr means that all are available.
-// Z : boolean
-// B : byte
-// S : short
-// C : char
-// I : int
-// J : long
-// F : float
-// D : double
-// L : reference(object, array)
-// V : void
-static const char* kSupportedTypes[] = {
-    // 0 = kNone.
-    "",
-    // 1 = kArm, unused (will use kThumb2).
-    "",
-    // 2 = kArm64.
-    nullptr,
-    // 3 = kThumb2.
-    nullptr,
-    // 4 = kX86.
-    nullptr,
-    // 5 = kX86_64.
-    nullptr,
-    // 6 = kMips.
-    nullptr,
-    // 7 = kMips64.
-    ""
-};
-COMPILE_ASSERT(sizeof(kSupportedTypes) == 8 * sizeof(char*), kSupportedTypes_unexp);
-
-static int kAllOpcodes[] = {
-    Instruction::NOP,
-    Instruction::MOVE,
-    Instruction::MOVE_FROM16,
-    Instruction::MOVE_16,
-    Instruction::MOVE_WIDE,
-    Instruction::MOVE_WIDE_FROM16,
-    Instruction::MOVE_WIDE_16,
-    Instruction::MOVE_OBJECT,
-    Instruction::MOVE_OBJECT_FROM16,
-    Instruction::MOVE_OBJECT_16,
-    Instruction::MOVE_RESULT,
-    Instruction::MOVE_RESULT_WIDE,
-    Instruction::MOVE_RESULT_OBJECT,
-    Instruction::MOVE_EXCEPTION,
-    Instruction::RETURN_VOID,
-    Instruction::RETURN,
-    Instruction::RETURN_WIDE,
-    Instruction::RETURN_OBJECT,
-    Instruction::CONST_4,
-    Instruction::CONST_16,
-    Instruction::CONST,
-    Instruction::CONST_HIGH16,
-    Instruction::CONST_WIDE_16,
-    Instruction::CONST_WIDE_32,
-    Instruction::CONST_WIDE,
-    Instruction::CONST_WIDE_HIGH16,
-    Instruction::CONST_STRING,
-    Instruction::CONST_STRING_JUMBO,
-    Instruction::CONST_CLASS,
-    Instruction::MONITOR_ENTER,
-    Instruction::MONITOR_EXIT,
-    Instruction::CHECK_CAST,
-    Instruction::INSTANCE_OF,
-    Instruction::ARRAY_LENGTH,
-    Instruction::NEW_INSTANCE,
-    Instruction::NEW_ARRAY,
-    Instruction::FILLED_NEW_ARRAY,
-    Instruction::FILLED_NEW_ARRAY_RANGE,
-    Instruction::FILL_ARRAY_DATA,
-    Instruction::THROW,
-    Instruction::GOTO,
-    Instruction::GOTO_16,
-    Instruction::GOTO_32,
-    Instruction::PACKED_SWITCH,
-    Instruction::SPARSE_SWITCH,
-    Instruction::CMPL_FLOAT,
-    Instruction::CMPG_FLOAT,
-    Instruction::CMPL_DOUBLE,
-    Instruction::CMPG_DOUBLE,
-    Instruction::CMP_LONG,
-    Instruction::IF_EQ,
-    Instruction::IF_NE,
-    Instruction::IF_LT,
-    Instruction::IF_GE,
-    Instruction::IF_GT,
-    Instruction::IF_LE,
-    Instruction::IF_EQZ,
-    Instruction::IF_NEZ,
-    Instruction::IF_LTZ,
-    Instruction::IF_GEZ,
-    Instruction::IF_GTZ,
-    Instruction::IF_LEZ,
-    Instruction::UNUSED_3E,
-    Instruction::UNUSED_3F,
-    Instruction::UNUSED_40,
-    Instruction::UNUSED_41,
-    Instruction::UNUSED_42,
-    Instruction::UNUSED_43,
-    Instruction::AGET,
-    Instruction::AGET_WIDE,
-    Instruction::AGET_OBJECT,
-    Instruction::AGET_BOOLEAN,
-    Instruction::AGET_BYTE,
-    Instruction::AGET_CHAR,
-    Instruction::AGET_SHORT,
-    Instruction::APUT,
-    Instruction::APUT_WIDE,
-    Instruction::APUT_OBJECT,
-    Instruction::APUT_BOOLEAN,
-    Instruction::APUT_BYTE,
-    Instruction::APUT_CHAR,
-    Instruction::APUT_SHORT,
-    Instruction::IGET,
-    Instruction::IGET_WIDE,
-    Instruction::IGET_OBJECT,
-    Instruction::IGET_BOOLEAN,
-    Instruction::IGET_BYTE,
-    Instruction::IGET_CHAR,
-    Instruction::IGET_SHORT,
-    Instruction::IPUT,
-    Instruction::IPUT_WIDE,
-    Instruction::IPUT_OBJECT,
-    Instruction::IPUT_BOOLEAN,
-    Instruction::IPUT_BYTE,
-    Instruction::IPUT_CHAR,
-    Instruction::IPUT_SHORT,
-    Instruction::SGET,
-    Instruction::SGET_WIDE,
-    Instruction::SGET_OBJECT,
-    Instruction::SGET_BOOLEAN,
-    Instruction::SGET_BYTE,
-    Instruction::SGET_CHAR,
-    Instruction::SGET_SHORT,
-    Instruction::SPUT,
-    Instruction::SPUT_WIDE,
-    Instruction::SPUT_OBJECT,
-    Instruction::SPUT_BOOLEAN,
-    Instruction::SPUT_BYTE,
-    Instruction::SPUT_CHAR,
-    Instruction::SPUT_SHORT,
-    Instruction::INVOKE_VIRTUAL,
-    Instruction::INVOKE_SUPER,
-    Instruction::INVOKE_DIRECT,
-    Instruction::INVOKE_STATIC,
-    Instruction::INVOKE_INTERFACE,
-    Instruction::RETURN_VOID_BARRIER,
-    Instruction::INVOKE_VIRTUAL_RANGE,
-    Instruction::INVOKE_SUPER_RANGE,
-    Instruction::INVOKE_DIRECT_RANGE,
-    Instruction::INVOKE_STATIC_RANGE,
-    Instruction::INVOKE_INTERFACE_RANGE,
-    Instruction::UNUSED_79,
-    Instruction::UNUSED_7A,
-    Instruction::NEG_INT,
-    Instruction::NOT_INT,
-    Instruction::NEG_LONG,
-    Instruction::NOT_LONG,
-    Instruction::NEG_FLOAT,
-    Instruction::NEG_DOUBLE,
-    Instruction::INT_TO_LONG,
-    Instruction::INT_TO_FLOAT,
-    Instruction::INT_TO_DOUBLE,
-    Instruction::LONG_TO_INT,
-    Instruction::LONG_TO_FLOAT,
-    Instruction::LONG_TO_DOUBLE,
-    Instruction::FLOAT_TO_INT,
-    Instruction::FLOAT_TO_LONG,
-    Instruction::FLOAT_TO_DOUBLE,
-    Instruction::DOUBLE_TO_INT,
-    Instruction::DOUBLE_TO_LONG,
-    Instruction::DOUBLE_TO_FLOAT,
-    Instruction::INT_TO_BYTE,
-    Instruction::INT_TO_CHAR,
-    Instruction::INT_TO_SHORT,
-    Instruction::ADD_INT,
-    Instruction::SUB_INT,
-    Instruction::MUL_INT,
-    Instruction::DIV_INT,
-    Instruction::REM_INT,
-    Instruction::AND_INT,
-    Instruction::OR_INT,
-    Instruction::XOR_INT,
-    Instruction::SHL_INT,
-    Instruction::SHR_INT,
-    Instruction::USHR_INT,
-    Instruction::ADD_LONG,
-    Instruction::SUB_LONG,
-    Instruction::MUL_LONG,
-    Instruction::DIV_LONG,
-    Instruction::REM_LONG,
-    Instruction::AND_LONG,
-    Instruction::OR_LONG,
-    Instruction::XOR_LONG,
-    Instruction::SHL_LONG,
-    Instruction::SHR_LONG,
-    Instruction::USHR_LONG,
-    Instruction::ADD_FLOAT,
-    Instruction::SUB_FLOAT,
-    Instruction::MUL_FLOAT,
-    Instruction::DIV_FLOAT,
-    Instruction::REM_FLOAT,
-    Instruction::ADD_DOUBLE,
-    Instruction::SUB_DOUBLE,
-    Instruction::MUL_DOUBLE,
-    Instruction::DIV_DOUBLE,
-    Instruction::REM_DOUBLE,
-    Instruction::ADD_INT_2ADDR,
-    Instruction::SUB_INT_2ADDR,
-    Instruction::MUL_INT_2ADDR,
-    Instruction::DIV_INT_2ADDR,
-    Instruction::REM_INT_2ADDR,
-    Instruction::AND_INT_2ADDR,
-    Instruction::OR_INT_2ADDR,
-    Instruction::XOR_INT_2ADDR,
-    Instruction::SHL_INT_2ADDR,
-    Instruction::SHR_INT_2ADDR,
-    Instruction::USHR_INT_2ADDR,
-    Instruction::ADD_LONG_2ADDR,
-    Instruction::SUB_LONG_2ADDR,
-    Instruction::MUL_LONG_2ADDR,
-    Instruction::DIV_LONG_2ADDR,
-    Instruction::REM_LONG_2ADDR,
-    Instruction::AND_LONG_2ADDR,
-    Instruction::OR_LONG_2ADDR,
-    Instruction::XOR_LONG_2ADDR,
-    Instruction::SHL_LONG_2ADDR,
-    Instruction::SHR_LONG_2ADDR,
-    Instruction::USHR_LONG_2ADDR,
-    Instruction::ADD_FLOAT_2ADDR,
-    Instruction::SUB_FLOAT_2ADDR,
-    Instruction::MUL_FLOAT_2ADDR,
-    Instruction::DIV_FLOAT_2ADDR,
-    Instruction::REM_FLOAT_2ADDR,
-    Instruction::ADD_DOUBLE_2ADDR,
-    Instruction::SUB_DOUBLE_2ADDR,
-    Instruction::MUL_DOUBLE_2ADDR,
-    Instruction::DIV_DOUBLE_2ADDR,
-    Instruction::REM_DOUBLE_2ADDR,
-    Instruction::ADD_INT_LIT16,
-    Instruction::RSUB_INT,
-    Instruction::MUL_INT_LIT16,
-    Instruction::DIV_INT_LIT16,
-    Instruction::REM_INT_LIT16,
-    Instruction::AND_INT_LIT16,
-    Instruction::OR_INT_LIT16,
-    Instruction::XOR_INT_LIT16,
-    Instruction::ADD_INT_LIT8,
-    Instruction::RSUB_INT_LIT8,
-    Instruction::MUL_INT_LIT8,
-    Instruction::DIV_INT_LIT8,
-    Instruction::REM_INT_LIT8,
-    Instruction::AND_INT_LIT8,
-    Instruction::OR_INT_LIT8,
-    Instruction::XOR_INT_LIT8,
-    Instruction::SHL_INT_LIT8,
-    Instruction::SHR_INT_LIT8,
-    Instruction::USHR_INT_LIT8,
-    Instruction::IGET_QUICK,
-    Instruction::IGET_WIDE_QUICK,
-    Instruction::IGET_OBJECT_QUICK,
-    Instruction::IPUT_QUICK,
-    Instruction::IPUT_WIDE_QUICK,
-    Instruction::IPUT_OBJECT_QUICK,
-    Instruction::INVOKE_VIRTUAL_QUICK,
-    Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
-    Instruction::UNUSED_EB,
-    Instruction::UNUSED_EC,
-    Instruction::UNUSED_ED,
-    Instruction::UNUSED_EE,
-    Instruction::UNUSED_EF,
-    Instruction::UNUSED_F0,
-    Instruction::UNUSED_F1,
-    Instruction::UNUSED_F2,
-    Instruction::UNUSED_F3,
-    Instruction::UNUSED_F4,
-    Instruction::UNUSED_F5,
-    Instruction::UNUSED_F6,
-    Instruction::UNUSED_F7,
-    Instruction::UNUSED_F8,
-    Instruction::UNUSED_F9,
-    Instruction::UNUSED_FA,
-    Instruction::UNUSED_FB,
-    Instruction::UNUSED_FC,
-    Instruction::UNUSED_FD,
-    Instruction::UNUSED_FE,
-    Instruction::UNUSED_FF,
-    // ----- ExtendedMIROpcode -----
-    kMirOpPhi,
-    kMirOpCopy,
-    kMirOpFusedCmplFloat,
-    kMirOpFusedCmpgFloat,
-    kMirOpFusedCmplDouble,
-    kMirOpFusedCmpgDouble,
-    kMirOpFusedCmpLong,
-    kMirOpNop,
-    kMirOpNullCheck,
-    kMirOpRangeCheck,
-    kMirOpDivZeroCheck,
-    kMirOpCheck,
-    kMirOpCheckPart2,
-    kMirOpSelect,
-};
-
-// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
-// recorded below.
-static const int* kUnsupportedOpcodes[] = {
-    // 0 = kNone.
-    kAllOpcodes,
-    // 1 = kArm, unused (will use kThumb2).
-    kAllOpcodes,
-    // 2 = kArm64.
-    nullptr,
-    // 3 = kThumb2.
-    nullptr,
-    // 4 = kX86.
-    nullptr,
-    // 5 = kX86_64.
-    nullptr,
-    // 6 = kMips.
-    nullptr,
-    // 7 = kMips64.
-    kAllOpcodes
-};
-COMPILE_ASSERT(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), kUnsupportedOpcodes_unexp);
-
-// Size of the arrays stored above.
-static const size_t kUnsupportedOpcodesSize[] = {
-    // 0 = kNone.
-    arraysize(kAllOpcodes),
-    // 1 = kArm, unused (will use kThumb2).
-    arraysize(kAllOpcodes),
-    // 2 = kArm64.
-    0,
-    // 3 = kThumb2.
-    0,
-    // 4 = kX86.
-    0,
-    // 5 = kX86_64.
-    0,
-    // 6 = kMips.
-    0,
-    // 7 = kMips64.
-    arraysize(kAllOpcodes),
-};
-COMPILE_ASSERT(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
-               kUnsupportedOpcodesSize_unexp);
-
-// The maximum amount of Dalvik register in a method for which we will start compiling. Tries to
-// avoid an abort when we need to manage more SSA registers than we can.
-static constexpr size_t kMaxAllowedDalvikRegisters = INT16_MAX / 2;
-
-CompilationUnit::CompilationUnit(ArenaPool* pool)
-  : compiler_driver(nullptr),
-    class_linker(nullptr),
-    dex_file(nullptr),
-    class_loader(nullptr),
-    class_def_idx(0),
-    method_idx(0),
-    code_item(nullptr),
-    access_flags(0),
-    invoke_type(kDirect),
-    shorty(nullptr),
-    disable_opt(0),
-    enable_debug(0),
-    verbose(false),
-    compiler(nullptr),
-    instruction_set(kNone),
-    target64(false),
-    num_dalvik_registers(0),
-    insns(nullptr),
-    num_ins(0),
-    num_outs(0),
-    num_regs(0),
-    compiler_flip_match(false),
-    arena(pool),
-    arena_stack(pool),
-    mir_graph(nullptr),
-    cg(nullptr),
-    timings("QuickCompiler", true, false),
-    print_pass(false) {
-}
-
-CompilationUnit::~CompilationUnit() {
-}
-
-void CompilationUnit::StartTimingSplit(const char* label) {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.StartTiming(label);
-  }
-}
-
-void CompilationUnit::NewTimingSplit(const char* label) {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.EndTiming();
-    timings.StartTiming(label);
-  }
-}
-
-void CompilationUnit::EndTiming() {
-  if (compiler_driver->GetDumpPasses()) {
-    timings.EndTiming();
-    if (enable_debug & (1 << kDebugTimings)) {
-      LOG(INFO) << "TIMINGS " << PrettyMethod(method_idx, *dex_file);
-      LOG(INFO) << Dumpable<TimingLogger>(timings);
-    }
-  }
-}
-
-static bool CanCompileShorty(const char* shorty, InstructionSet instruction_set) {
-  const char* supported_types = kSupportedTypes[instruction_set];
-  if (supported_types == nullptr) {
-    // Everything available.
-    return true;
-  }
-
-  uint32_t shorty_size = strlen(shorty);
-  CHECK_GE(shorty_size, 1u);
-
-  for (uint32_t i = 0; i < shorty_size; i++) {
-    if (strchr(supported_types, shorty[i]) == nullptr) {
-      return false;
-    }
-  }
-  return true;
-};
-
-// Skip the method that we do not support currently.
-static bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
-                             CompilationUnit& cu) {
-  // This is a limitation in mir_graph. See MirGraph::SetNumSSARegs.
-  if (cu.num_dalvik_registers > kMaxAllowedDalvikRegisters) {
-    VLOG(compiler) << "Too many dalvik registers : " << cu.num_dalvik_registers;
-    return false;
-  }
-
-  // Check whether we do have limitations at all.
-  if (kSupportedTypes[cu.instruction_set] == nullptr &&
-      kUnsupportedOpcodesSize[cu.instruction_set] == 0U) {
-    return true;
-  }
-
-  // Check if we can compile the prototype.
-  const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
-  if (!CanCompileShorty(shorty, cu.instruction_set)) {
-    VLOG(compiler) << "Unsupported shorty : " << shorty;
-    return false;
-  }
-
-  const int *unsupport_list = kUnsupportedOpcodes[cu.instruction_set];
-  int unsupport_list_size = kUnsupportedOpcodesSize[cu.instruction_set];
-
-  for (unsigned int idx = 0; idx < cu.mir_graph->GetNumBlocks(); idx++) {
-    BasicBlock* bb = cu.mir_graph->GetBasicBlock(idx);
-    if (bb == NULL) continue;
-    if (bb->block_type == kDead) continue;
-    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
-      int opcode = mir->dalvikInsn.opcode;
-      // Check if we support the byte code.
-      if (std::find(unsupport_list, unsupport_list + unsupport_list_size,
-                    opcode) != unsupport_list + unsupport_list_size) {
-        if (!MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
-          VLOG(compiler) << "Unsupported dalvik byte code : "
-              << mir->dalvikInsn.opcode;
-        } else {
-          VLOG(compiler) << "Unsupported extended MIR opcode : "
-              << MIRGraph::extended_mir_op_names_[opcode - kMirOpFirst];
-        }
-        return false;
-      }
-      // Check if it invokes a prototype that we cannot support.
-      if (Instruction::INVOKE_VIRTUAL == opcode ||
-          Instruction::INVOKE_SUPER == opcode ||
-          Instruction::INVOKE_DIRECT == opcode ||
-          Instruction::INVOKE_STATIC == opcode ||
-          Instruction::INVOKE_INTERFACE == opcode) {
-        uint32_t invoke_method_idx = mir->dalvikInsn.vB;
-        const char* invoke_method_shorty = dex_file.GetMethodShorty(
-            dex_file.GetMethodId(invoke_method_idx));
-        if (!CanCompileShorty(invoke_method_shorty, cu.instruction_set)) {
-          VLOG(compiler) << "Unsupported to invoke '"
-              << PrettyMethod(invoke_method_idx, dex_file)
-              << "' with shorty : " << invoke_method_shorty;
-          return false;
-        }
-      }
-    }
-  }
-  return true;
-}
-
 static CompiledMethod* CompileMethod(CompilerDriver& driver,
-                                     Compiler* compiler,
+                                     const Compiler* compiler,
                                      const DexFile::CodeItem* code_item,
                                      uint32_t access_flags, InvokeType invoke_type,
                                      uint16_t class_def_idx, uint32_t method_idx,
@@ -651,8 +110,6 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
         (cu.instruction_set == kX86_64) ||
         (cu.instruction_set == kMips));
 
-  /* Adjust this value accordingly once inlining is performed */
-  cu.num_dalvik_registers = code_item->registers_size_;
   // TODO: set this from command line
   cu.compiler_flip_match = false;
   bool use_match = !cu.compiler_method_match.empty();
@@ -687,9 +144,6 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
 
   compiler->InitCompilationUnit(cu);
 
-  // Disable optimizations according to instruction set.
-  cu.disable_opt |= kDisabledOptimizationsPerISA[cu.instruction_set];
-
   cu.StartTimingSplit("BuildMIRGraph");
   cu.mir_graph.reset(new MIRGraph(&cu, &cu.arena));
 
@@ -709,7 +163,7 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
   cu.mir_graph->InlineMethod(code_item, access_flags, invoke_type, class_def_idx, method_idx,
                               class_loader, dex_file);
 
-  if (!CanCompileMethod(method_idx, dex_file, cu)) {
+  if (!compiler->CanCompileMethod(method_idx, dex_file, &cu)) {
     VLOG(compiler)  << cu.instruction_set << ": Cannot compile method : "
         << PrettyMethod(method_idx, dex_file);
     return nullptr;
@@ -791,8 +245,8 @@ static CompiledMethod* CompileMethod(CompilerDriver& driver,
   return result;
 }
 
-CompiledMethod* CompileOneMethod(CompilerDriver& driver,
-                                 Compiler* compiler,
+CompiledMethod* CompileOneMethod(CompilerDriver* driver,
+                                 const Compiler* compiler,
                                  const DexFile::CodeItem* code_item,
                                  uint32_t access_flags,
                                  InvokeType invoke_type,
@@ -801,22 +255,8 @@ CompiledMethod* CompileOneMethod(CompilerDriver& driver,
                                  jobject class_loader,
                                  const DexFile& dex_file,
                                  void* compilation_unit) {
-  return CompileMethod(driver, compiler, code_item, access_flags, invoke_type, class_def_idx,
+  return CompileMethod(*driver, compiler, code_item, access_flags, invoke_type, class_def_idx,
                        method_idx, class_loader, dex_file, compilation_unit);
 }
 
 }  // namespace art
-
-extern "C" art::CompiledMethod*
-    ArtQuickCompileMethod(art::CompilerDriver& driver,
-                          const art::DexFile::CodeItem* code_item,
-                          uint32_t access_flags, art::InvokeType invoke_type,
-                          uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
-                          const art::DexFile& dex_file) {
-  // TODO: check method fingerprint here to determine appropriate backend type.  Until then, use
-  // build default.
-  art::Compiler* compiler = driver.GetCompiler();
-  return art::CompileOneMethod(driver, compiler, code_item, access_flags, invoke_type,
-                               class_def_idx, method_idx, class_loader, dex_file,
-                               NULL /* use thread llvm_info */);
-}
index f4cbdfb..51b6d68 100644 (file)
 #include "dex_file.h"
 #include "invoke_type.h"
 
-namespace llvm {
-  class Module;
-  class LLVMContext;
-}
-
 namespace art {
-namespace llvm {
-  class IntrinsicHelper;
-  class IRBuilder;
-}
+
+class CompiledMethod;
+class Compiler;
+class CompilerDriver;
 
 /*
  * Assembly is an iterative process, and usually terminates within
@@ -81,48 +76,17 @@ enum debugControlVector {
   kDebugCodegenDump
 };
 
-class LLVMInfo {
-  public:
-    LLVMInfo();
-    ~LLVMInfo();
-
-    ::llvm::LLVMContext* GetLLVMContext() {
-      return llvm_context_.get();
-    }
-
-    ::llvm::Module* GetLLVMModule() {
-      return llvm_module_;
-    }
-
-    art::llvm::IntrinsicHelper* GetIntrinsicHelper() {
-      return intrinsic_helper_.get();
-    }
-
-    art::llvm::IRBuilder* GetIRBuilder() {
-      return ir_builder_.get();
-    }
-
-  private:
-    std::unique_ptr< ::llvm::LLVMContext> llvm_context_;
-    ::llvm::Module* llvm_module_;  // Managed by context_.
-    std::unique_ptr<art::llvm::IntrinsicHelper> intrinsic_helper_;
-    std::unique_ptr<art::llvm::IRBuilder> ir_builder_;
-};
-
-class CompiledMethod;
-class CompilerDriver;
+CompiledMethod* CompileOneMethod(CompilerDriver* driver,
+                                 const Compiler* compiler,
+                                 const DexFile::CodeItem* code_item,
+                                 uint32_t access_flags,
+                                 InvokeType invoke_type,
+                                 uint16_t class_def_idx,
+                                 uint32_t method_idx,
+                                 jobject class_loader,
+                                 const DexFile& dex_file,
+                                 void* compilation_unit);
 
 }  // namespace art
 
-extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver& driver,
-                                                 const art::DexFile::CodeItem* code_item,
-                                                 uint32_t access_flags,
-                                                 art::InvokeType invoke_type,
-                                                 uint16_t class_def_idx,
-                                                 uint32_t method_idx,
-                                                 jobject class_loader,
-                                                 const art::DexFile& dex_file);
-
-
-
 #endif  // ART_COMPILER_DEX_FRONTEND_H_
index d7ef6f0..4d885fd 100644 (file)
@@ -56,8 +56,11 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
     return nullptr;
   }
   if (UNLIKELY(bbs_processed_ == max_bbs_to_process_)) {
-    last_value_ = kNoValue;  // Make bad.
-    return nullptr;
+    // If we're still trying to converge, stop now. Otherwise, proceed to apply optimizations.
+    if (!modifications_allowed_) {
+      last_value_ = kNoValue;  // Make bad.
+      return nullptr;
+    }
   }
   if (allocator == nullptr) {
     allocator = allocator_;
@@ -67,7 +70,7 @@ LocalValueNumbering* GlobalValueNumbering::PrepareBasicBlock(BasicBlock* bb,
   if (bb->block_type == kEntryBlock) {
     if ((cu_->access_flags & kAccStatic) == 0) {
       // If non-static method, mark "this" as non-null
-      int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
+      int this_reg = cu_->mir_graph->GetFirstInVR();
       uint16_t value_name = work_lvn_->GetSRegValueName(this_reg);
       work_lvn_->SetValueNameNullChecked(value_name);
     }
index c06ff6f..1a38692 100644 (file)
@@ -214,7 +214,7 @@ class GlobalValueNumbering {
   static constexpr uint32_t kMaxBbsToProcessMultiplyFactor = 20u;
 
   uint32_t bbs_processed_;
-  uint32_t max_bbs_to_process_;
+  uint32_t max_bbs_to_process_;  // Doesn't apply after the main GVN has converged.
 
   // We have 32-bit last_value_ so that we can detect when we run out of value names, see Good().
   // We usually don't check Good() until the end of LVN unless we're about to modify code.
index e8501cd..1c0de37 100644 (file)
@@ -266,6 +266,10 @@ class GlobalValueNumberingTest : public testing::Test {
       mir->optimization_flags = 0u;
     }
     mirs_[count - 1u].next = nullptr;
+    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(
+        cu_.arena.Alloc(sizeof(DexFile::CodeItem), kArenaAllocMisc));
+    code_item->insns_size_in_code_units_ = 2u * count;
+    cu_.mir_graph->current_code_item_ = cu_.code_item = code_item;
   }
 
   template <size_t count>
index 5997568..4279955 100644 (file)
@@ -464,7 +464,7 @@ void LocalValueNumbering::PruneNonAliasingRefsForCatch() {
     const MIR* mir = fall_through_bb->first_mir_insn;
     DCHECK(mir != nullptr);
     // Only INVOKEs can leak and clobber non-aliasing references if they throw.
-    if ((Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke) != 0) {
+    if ((mir->dalvikInsn.FlagsOf() & Instruction::kInvoke) != 0) {
       for (uint16_t i = 0u; i != mir->ssa_rep->num_uses; ++i) {
         uint16_t value_name = lvn->GetOperandValue(mir->ssa_rep->uses[i]);
         non_aliasing_refs_.erase(value_name);
@@ -656,13 +656,37 @@ void LocalValueNumbering::MergeEscapedArrayClobberSets(
   }
 }
 
-void LocalValueNumbering::MergeNullChecked(const ValueNameSet::value_type& entry,
-                                           ValueNameSet::iterator hint) {
-  // Merge null_checked_ for this ref.
-  merge_names_.clear();
-  merge_names_.resize(gvn_->merge_lvns_.size(), entry);
-  if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
-    null_checked_.insert(hint, entry);
+void LocalValueNumbering::MergeNullChecked() {
+  DCHECK_GE(gvn_->merge_lvns_.size(), 2u);
+
+  // Find the LVN with the least entries in the set.
+  const LocalValueNumbering* least_entries_lvn = gvn_->merge_lvns_[0];
+  for (const LocalValueNumbering* lvn : gvn_->merge_lvns_) {
+    if (lvn->null_checked_.size() < least_entries_lvn->null_checked_.size()) {
+      least_entries_lvn = lvn;
+    }
+  }
+
+  // For each null-checked value name check if it's null-checked in all the LVNs.
+  for (const auto& value_name : least_entries_lvn->null_checked_) {
+    // Merge null_checked_ for this ref.
+    merge_names_.clear();
+    merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
+    if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+      null_checked_.insert(null_checked_.end(), value_name);
+    }
+  }
+
+  // Now check if the least_entries_lvn has a null-check as the last insn.
+  const BasicBlock* least_entries_bb = gvn_->GetBasicBlock(least_entries_lvn->Id());
+  if (gvn_->HasNullCheckLastInsn(least_entries_bb, id_)) {
+    int s_reg = least_entries_bb->last_mir_insn->ssa_rep->uses[0];
+    uint32_t value_name = least_entries_lvn->GetSRegValueName(s_reg);
+    merge_names_.clear();
+    merge_names_.resize(gvn_->merge_lvns_.size(), value_name);
+    if (gvn_->NullCheckedInAllPredecessors(merge_names_)) {
+      null_checked_.insert(value_name);
+    }
   }
 }
 
@@ -896,8 +920,7 @@ void LocalValueNumbering::Merge(MergeType merge_type) {
   IntersectSets<RangeCheckSet, &LocalValueNumbering::range_checked_>();
 
   // Merge null_checked_. We may later insert more, such as merged object field values.
-  MergeSets<ValueNameSet, &LocalValueNumbering::null_checked_,
-            &LocalValueNumbering::MergeNullChecked>();
+  MergeNullChecked();
 
   if (merge_type == kCatchMerge) {
     // Memory is clobbered. New memory version already created, don't merge aliasing locations.
index 855d66d..f6a454b 100644 (file)
@@ -343,11 +343,11 @@ class LocalValueNumbering {
                                      EscapedIFieldClobberSet::iterator hint);
   void MergeEscapedArrayClobberSets(const EscapedArrayClobberSet::value_type& entry,
                                     EscapedArrayClobberSet::iterator hint);
-  void MergeNullChecked(const ValueNameSet::value_type& entry, ValueNameSet::iterator hint);
   void MergeSFieldValues(const SFieldToValueMap::value_type& entry,
                          SFieldToValueMap::iterator hint);
   void MergeNonAliasingIFieldValues(const IFieldLocToValueMap::value_type& entry,
                                     IFieldLocToValueMap::iterator hint);
+  void MergeNullChecked();
 
   template <typename Map, Map LocalValueNumbering::*map_ptr, typename Versions>
   void MergeAliasingValues(const typename Map::value_type& entry, typename Map::iterator hint);
index 3de4483..a8af92c 100644 (file)
@@ -841,6 +841,57 @@ const uint32_t MIRGraph::analysis_attributes_[kMirOpLast] = {
 
   // 113 MIR_SELECT
   AN_NONE,
+
+  // 114 MirOpConstVector
+  AN_NONE,
+
+  // 115 MirOpMoveVector
+  AN_NONE,
+
+  // 116 MirOpPackedMultiply
+  AN_NONE,
+
+  // 117 MirOpPackedAddition
+  AN_NONE,
+
+  // 118 MirOpPackedSubtract
+  AN_NONE,
+
+  // 119 MirOpPackedShiftLeft
+  AN_NONE,
+
+  // 120 MirOpPackedSignedShiftRight
+  AN_NONE,
+
+  // 121 MirOpPackedUnsignedShiftRight
+  AN_NONE,
+
+  // 122 MirOpPackedAnd
+  AN_NONE,
+
+  // 123 MirOpPackedOr
+  AN_NONE,
+
+  // 124 MirOpPackedXor
+  AN_NONE,
+
+  // 125 MirOpPackedAddReduce
+  AN_NONE,
+
+  // 126 MirOpPackedReduce
+  AN_NONE,
+
+  // 127 MirOpPackedSet
+  AN_NONE,
+
+  // 128 MirOpReserveVectorRegisters
+  AN_NONE,
+
+  // 129 MirOpReturnVectorRegisters
+  AN_NONE,
+
+  // 130 MirOpMemBarrier
+  AN_NONE,
 };
 
 struct MethodStats {
index b82c5c7..4c906b0 100644 (file)
@@ -824,7 +824,7 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
   DF_NOP,
 
   // 108 MIR_NULL_CHECK
-  0,
+  DF_UA | DF_REF_A | DF_NULL_CHK_0 | DF_LVN,
 
   // 109 MIR_RANGE_CHECK
   0,
@@ -888,6 +888,9 @@ const uint64_t MIRGraph::oat_data_flow_attributes_[kMirOpLast] = {
 
   // 129 MirOpReturnVectorRegisters
   0,
+
+  // 130 MirOpMemBarrier
+  0,
 };
 
 /* Return the base virtual register for a SSA name */
@@ -930,11 +933,11 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
   if (bb->data_flow_info == NULL) return false;
 
   use_v = bb->data_flow_info->use_v =
-      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapUse);
+      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapUse);
   def_v = bb->data_flow_info->def_v =
-      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapDef);
+      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapDef);
   live_in_v = bb->data_flow_info->live_in_v =
-      new (arena_) ArenaBitVector(arena_, cu_->num_dalvik_registers, false, kBitMapLiveIn);
+      new (arena_) ArenaBitVector(arena_, GetNumOfCodeAndTempVRs(), false, kBitMapLiveIn);
 
   for (mir = bb->first_mir_insn; mir != NULL; mir = mir->next) {
     uint64_t df_attributes = GetDataFlowAttributes(mir);
@@ -984,8 +987,7 @@ bool MIRGraph::FindLocalLiveIn(BasicBlock* bb) {
 }
 
 int MIRGraph::AddNewSReg(int v_reg) {
-  // Compiler temps always have a subscript of 0
-  int subscript = (v_reg < 0) ? 0 : ++ssa_last_defs_[v_reg];
+  int subscript = ++ssa_last_defs_[v_reg];
   uint32_t ssa_reg = GetNumSSARegs();
   SetNumSSARegs(ssa_reg + 1);
   ssa_base_vregs_->Insert(v_reg);
@@ -1002,13 +1004,13 @@ int MIRGraph::AddNewSReg(int v_reg) {
 
 /* Find out the latest SSA register for a given Dalvik register */
 void MIRGraph::HandleSSAUse(int* uses, int dalvik_reg, int reg_index) {
-  DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers));
+  DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
   uses[reg_index] = vreg_to_ssa_map_[dalvik_reg];
 }
 
 /* Setup a new SSA register for a given Dalvik register */
 void MIRGraph::HandleSSADef(int* defs, int dalvik_reg, int reg_index) {
-  DCHECK((dalvik_reg >= 0) && (dalvik_reg < cu_->num_dalvik_registers));
+  DCHECK((dalvik_reg >= 0) && (dalvik_reg < static_cast<int>(GetNumOfCodeAndTempVRs())));
   int ssa_reg = AddNewSReg(dalvik_reg);
   vreg_to_ssa_map_[dalvik_reg] = ssa_reg;
   defs[reg_index] = ssa_reg;
@@ -1085,7 +1087,7 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
 
       // If not a pseudo-op, note non-leaf or can throw
     if (!MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
-      int flags = Instruction::FlagsOf(mir->dalvikInsn.opcode);
+      int flags = mir->dalvikInsn.FlagsOf();
 
       if ((flags & Instruction::kInvoke) != 0 && (mir->optimization_flags & MIR_INLINED) == 0) {
         attributes_ &= ~METHOD_IS_LEAF;
@@ -1188,34 +1190,34 @@ bool MIRGraph::DoSSAConversion(BasicBlock* bb) {
    * predecessor blocks.
    */
   bb->data_flow_info->vreg_to_ssa_map_exit =
-      static_cast<int*>(arena_->Alloc(sizeof(int) * cu_->num_dalvik_registers,
+      static_cast<int*>(arena_->Alloc(sizeof(int) * GetNumOfCodeAndTempVRs(),
                                       kArenaAllocDFInfo));
 
   memcpy(bb->data_flow_info->vreg_to_ssa_map_exit, vreg_to_ssa_map_,
-         sizeof(int) * cu_->num_dalvik_registers);
+         sizeof(int) * GetNumOfCodeAndTempVRs());
   return true;
 }
 
 /* Setup the basic data structures for SSA conversion */
 void MIRGraph::CompilerInitializeSSAConversion() {
-  size_t num_dalvik_reg = cu_->num_dalvik_registers;
+  size_t num_reg = GetNumOfCodeAndTempVRs();
 
-  ssa_base_vregs_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
+  ssa_base_vregs_ = new (arena_) GrowableArray<int>(arena_, num_reg + GetDefCount() + 128,
                                                     kGrowableArraySSAtoDalvikMap);
-  ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_dalvik_reg + GetDefCount() + 128,
+  ssa_subscripts_ = new (arena_) GrowableArray<int>(arena_, num_reg + GetDefCount() + 128,
                                                     kGrowableArraySSAtoDalvikMap);
   /*
    * Initial number of SSA registers is equal to the number of Dalvik
    * registers.
    */
-  SetNumSSARegs(num_dalvik_reg);
+  SetNumSSARegs(num_reg);
 
   /*
-   * Initialize the SSA2Dalvik map list. For the first num_dalvik_reg elements,
+   * Initialize the SSA2Dalvik map list. For the first num_reg elements,
    * the subscript is 0 so we use the ENCODE_REG_SUB macro to encode the value
    * into "(0 << 16) | i"
    */
-  for (unsigned int i = 0; i < num_dalvik_reg; i++) {
+  for (unsigned int i = 0; i < num_reg; i++) {
     ssa_base_vregs_->Insert(i);
     ssa_subscripts_->Insert(0);
   }
@@ -1225,20 +1227,22 @@ void MIRGraph::CompilerInitializeSSAConversion() {
    * Dalvik register, and the SSA names for those are the same.
    */
   vreg_to_ssa_map_ =
-      static_cast<int*>(arena_->Alloc(sizeof(int) * num_dalvik_reg,
+      static_cast<int*>(arena_->Alloc(sizeof(int) * num_reg,
                                       kArenaAllocDFInfo));
  /* Keep track of the highest def for each dalvik reg */
   ssa_last_defs_ =
-      static_cast<int*>(arena_->Alloc(sizeof(int) * num_dalvik_reg,
+      static_cast<int*>(arena_->Alloc(sizeof(int) * num_reg,
                                       kArenaAllocDFInfo));
 
-  for (unsigned int i = 0; i < num_dalvik_reg; i++) {
+  for (unsigned int i = 0; i < num_reg; i++) {
     vreg_to_ssa_map_[i] = i;
     ssa_last_defs_[i] = 0;
   }
 
   // Create a compiler temporary for Method*. This is done after SSA initialization.
-  GetNewCompilerTemp(kCompilerTempSpecialMethodPtr, false);
+  CompilerTemp* method_temp = GetNewCompilerTemp(kCompilerTempSpecialMethodPtr, false);
+  // The MIR graph keeps track of the sreg for method pointer specially, so record that now.
+  method_sreg_ = method_temp->s_reg_low;
 
   /*
    * Allocate the BasicBlockDataFlow structure for the entry and code blocks
index 6aee563..e77be5d 100644 (file)
@@ -19,6 +19,7 @@
 #include <inttypes.h>
 #include <queue>
 
+#include "base/bit_vector-inl.h"
 #include "base/stl_util.h"
 #include "compiler_internals.h"
 #include "dex_file-inl.h"
@@ -28,6 +29,7 @@
 #include "dex/quick/dex_file_method_inliner.h"
 #include "leb128.h"
 #include "pass_driver_me_post_opt.h"
+#include "stack.h"
 #include "utils/scoped_arena_containers.h"
 
 namespace art {
@@ -65,6 +67,7 @@ const char* MIRGraph::extended_mir_op_names_[kMirOpLast - kMirOpFirst] = {
   "PackedSet",
   "ReserveVectorRegisters",
   "ReturnVectorRegisters",
+  "MemBarrier",
 };
 
 MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
@@ -116,9 +119,10 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
       arena_(arena),
       backward_branches_(0),
       forward_branches_(0),
-      compiler_temps_(arena, 6, kGrowableArrayMisc),
       num_non_special_compiler_temps_(0),
-      max_available_non_special_compiler_temps_(0),
+      max_available_special_compiler_temps_(1),  // We only need the method ptr as a special temp for now.
+      requested_backend_temp_(false),
+      compiler_temps_committed_(false),
       punt_to_interpreter_(false),
       merged_df_flags_(0u),
       ifield_lowering_infos_(arena, 0u),
@@ -126,8 +130,20 @@ MIRGraph::MIRGraph(CompilationUnit* cu, ArenaAllocator* arena)
       method_lowering_infos_(arena, 0u),
       gen_suspend_test_list_(arena, 0u) {
   try_block_addr_ = new (arena_) ArenaBitVector(arena_, 0, true /* expandable */);
-  max_available_special_compiler_temps_ = std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg))
-      - std::abs(static_cast<int>(kVRegTempBaseReg));
+
+
+  if (cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64) {
+    // X86 requires a temp to keep track of the method address.
+    // TODO For x86_64, addressing can be done with RIP. When that is implemented,
+    // this needs to be updated to reserve 0 temps for BE.
+    max_available_non_special_compiler_temps_ = cu_->target64 ? 2 : 1;
+    reserved_temps_for_backend_ = max_available_non_special_compiler_temps_;
+  } else {
+    // Other architectures do not have a known lower bound for non-special temps.
+    // We allow the update of the max to happen at BE initialization stage and simply set 0 for now.
+    max_available_non_special_compiler_temps_ = 0;
+    reserved_temps_for_backend_ = 0;
+  }
 }
 
 MIRGraph::~MIRGraph() {
@@ -172,10 +188,18 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
   bottom_block->first_mir_insn = insn;
   bottom_block->last_mir_insn = orig_block->last_mir_insn;
 
-  /* If this block was terminated by a return, the flag needs to go with the bottom block */
+  /* If this block was terminated by a return, conditional branch or throw,
+   * the flag needs to go with the bottom block
+   */
   bottom_block->terminated_by_return = orig_block->terminated_by_return;
   orig_block->terminated_by_return = false;
 
+  bottom_block->conditional_branch = orig_block->conditional_branch;
+  orig_block->conditional_branch = false;
+
+  bottom_block->explicit_throw = orig_block->explicit_throw;
+  orig_block->explicit_throw = false;
+
   /* Handle the taken path */
   bottom_block->taken = orig_block->taken;
   if (bottom_block->taken != NullBasicBlockId) {
@@ -265,7 +289,7 @@ BasicBlock* MIRGraph::SplitBlock(DexOffset code_offset,
  */
 BasicBlock* MIRGraph::FindBlock(DexOffset code_offset, bool split, bool create,
                                 BasicBlock** immed_pred_block_p) {
-  if (code_offset >= cu_->code_item->insns_size_in_code_units_) {
+  if (code_offset >= current_code_item_->insns_size_in_code_units_) {
     return NULL;
   }
 
@@ -339,10 +363,10 @@ bool MIRGraph::IsBadMonitorExitCatch(NarrowDexOffset monitor_exit_offset,
   // (We don't want to ignore all monitor-exit catches since one could enclose a synchronized
   // block in a try-block and catch the NPE, Error or Throwable and we should let it through;
   // even though a throwing monitor-exit certainly indicates a bytecode error.)
-  const Instruction* monitor_exit = Instruction::At(cu_->code_item->insns_ + monitor_exit_offset);
+  const Instruction* monitor_exit = Instruction::At(current_code_item_->insns_ + monitor_exit_offset);
   DCHECK(monitor_exit->Opcode() == Instruction::MONITOR_EXIT);
   int monitor_reg = monitor_exit->VRegA_11x();
-  const Instruction* check_insn = Instruction::At(cu_->code_item->insns_ + catch_offset);
+  const Instruction* check_insn = Instruction::At(current_code_item_->insns_ + catch_offset);
   DCHECK(check_insn->Opcode() == Instruction::MOVE_EXCEPTION);
   if (check_insn->VRegA_11x() == monitor_reg) {
     // Unexpected move-exception to the same register. Probably not the pattern we're looking for.
@@ -691,11 +715,6 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
     cu_->access_flags = access_flags;
     cu_->invoke_type = invoke_type;
     cu_->shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
-    cu_->num_ins = current_code_item_->ins_size_;
-    cu_->num_regs = current_code_item_->registers_size_ - cu_->num_ins;
-    cu_->num_outs = current_code_item_->outs_size_;
-    cu_->num_dalvik_registers = current_code_item_->registers_size_;
-    cu_->insns = current_code_item_->insns_;
     cu_->code_item = current_code_item_;
   } else {
     UNIMPLEMENTED(FATAL) << "Nested inlining not implemented.";
@@ -730,7 +749,7 @@ void MIRGraph::InlineMethod(const DexFile::CodeItem* code_item, uint32_t access_
       opcode_count_[static_cast<int>(opcode)]++;
     }
 
-    int flags = Instruction::FlagsOf(insn->dalvikInsn.opcode);
+    int flags = insn->dalvikInsn.FlagsOf();
     int verify_flags = Instruction::VerifyFlagsOf(insn->dalvikInsn.opcode);
 
     uint64_t df_flags = GetDataFlowAttributes(insn);
@@ -911,27 +930,7 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
                 bb->first_mir_insn ? " | " : " ");
         for (mir = bb->first_mir_insn; mir; mir = mir->next) {
             int opcode = mir->dalvikInsn.opcode;
-            if (opcode > kMirOpSelect && opcode < kMirOpLast) {
-              if (opcode == kMirOpConstVector) {
-                fprintf(file, "    {%04x %s %d %d %d %d %d %d\\l}%s\\\n", mir->offset,
-                        extended_mir_op_names_[kMirOpConstVector - kMirOpFirst],
-                        mir->dalvikInsn.vA,
-                        mir->dalvikInsn.vB,
-                        mir->dalvikInsn.arg[0],
-                        mir->dalvikInsn.arg[1],
-                        mir->dalvikInsn.arg[2],
-                        mir->dalvikInsn.arg[3],
-                        mir->next ? " | " : " ");
-              } else {
-                fprintf(file, "    {%04x %s %d %d %d\\l}%s\\\n", mir->offset,
-                        extended_mir_op_names_[opcode - kMirOpFirst],
-                        mir->dalvikInsn.vA,
-                        mir->dalvikInsn.vB,
-                        mir->dalvikInsn.vC,
-                        mir->next ? " | " : " ");
-              }
-            } else {
-              fprintf(file, "    {%04x %s %s %s %s\\l}%s\\\n", mir->offset,
+            fprintf(file, "    {%04x %s %s %s %s %s %s %s\\l}%s\\\n", mir->offset,
                       mir->ssa_rep ? GetDalvikDisassembly(mir) :
                       !MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
                         Instruction::Name(mir->dalvikInsn.opcode) :
@@ -939,8 +938,10 @@ void MIRGraph::DumpCFG(const char* dir_prefix, bool all_blocks, const char *suff
                       (mir->optimization_flags & MIR_IGNORE_RANGE_CHECK) != 0 ? " no_rangecheck" : " ",
                       (mir->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0 ? " no_nullcheck" : " ",
                       (mir->optimization_flags & MIR_IGNORE_SUSPEND_CHECK) != 0 ? " no_suspendcheck" : " ",
+                      (mir->optimization_flags & MIR_STORE_NON_TEMPORAL) != 0 ? " non_temporal" : " ",
+                      (mir->optimization_flags & MIR_CALLEE) != 0 ? " inlined" : " ",
+                      (mir->optimization_flags & MIR_IGNORE_CLINIT_CHECK) != 0 ? " no_clinit" : " ",
                       mir->next ? " | " : " ");
-            }
         }
         fprintf(file, "  }\"];\n\n");
     } else if (bb->block_type == kExceptionHandling) {
@@ -1210,6 +1211,186 @@ MIR* BasicBlock::GetNextUnconditionalMir(MIRGraph* mir_graph, MIR* current) {
   return next_mir;
 }
 
+static void FillTypeSizeString(uint32_t type_size, std::string* decoded_mir) {
+  DCHECK(decoded_mir != nullptr);
+  OpSize type = static_cast<OpSize>(type_size >> 16);
+  uint16_t vect_size = (type_size & 0xFFFF);
+
+  // Now print the type and vector size.
+  std::stringstream ss;
+  ss << " (type:";
+  ss << type;
+  ss << " vectsize:";
+  ss << vect_size;
+  ss << ")";
+
+  decoded_mir->append(ss.str());
+}
+
+void MIRGraph::DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir) {
+  DCHECK(decoded_mir != nullptr);
+  int opcode = mir->dalvikInsn.opcode;
+  SSARepresentation* ssa_rep = mir->ssa_rep;
+  int defs = (ssa_rep != nullptr) ? ssa_rep->num_defs : 0;
+  int uses = (ssa_rep != nullptr) ? ssa_rep->num_uses : 0;
+
+  decoded_mir->append(extended_mir_op_names_[opcode - kMirOpFirst]);
+
+  switch (opcode) {
+    case kMirOpPhi: {
+      if (defs > 0 && uses > 0) {
+        BasicBlockId* incoming = mir->meta.phi_incoming;
+        decoded_mir->append(StringPrintf(" %s = (%s",
+                           GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
+                           GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
+        decoded_mir->append(StringPrintf(":%d", incoming[0]));
+        for (int i = 1; i < uses; i++) {
+          decoded_mir->append(StringPrintf(", %s:%d", GetSSANameWithConst(ssa_rep->uses[i], true).c_str(), incoming[i]));
+        }
+        decoded_mir->append(")");
+      }
+      break;
+    }
+    case kMirOpCopy:
+      if (ssa_rep != nullptr) {
+        decoded_mir->append(" ");
+        decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
+        if (defs > 1) {
+          decoded_mir->append(", ");
+          decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
+        }
+        decoded_mir->append(" = ");
+        decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[0], false));
+        if (uses > 1) {
+          decoded_mir->append(", ");
+          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
+        }
+      } else {
+        decoded_mir->append(StringPrintf(" v%d = v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      }
+      break;
+    case kMirOpFusedCmplFloat:
+    case kMirOpFusedCmpgFloat:
+    case kMirOpFusedCmplDouble:
+    case kMirOpFusedCmpgDouble:
+    case kMirOpFusedCmpLong:
+      if (ssa_rep != nullptr) {
+        decoded_mir->append(" ");
+        decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[0], false));
+        for (int i = 1; i < uses; i++) {
+          decoded_mir->append(", ");
+          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[i], false));
+        }
+      } else {
+        decoded_mir->append(StringPrintf(" v%d, v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      }
+      break;
+    case kMirOpMoveVector:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedAddition:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d + vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedMultiply:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d * vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedSubtract:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d - vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedAnd:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d & vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedOr:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d \\| vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedXor:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d ^ vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedShiftLeft:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d \\<\\< %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedUnsignedShiftRight:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d \\>\\>\\> %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedSignedShiftRight:
+      decoded_mir->append(StringPrintf(" vect%d = vect%d \\>\\> %d", mir->dalvikInsn.vA, mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpConstVector:
+      decoded_mir->append(StringPrintf(" vect%d = %x, %x, %x, %x", mir->dalvikInsn.vA, mir->dalvikInsn.arg[0],
+                                      mir->dalvikInsn.arg[1], mir->dalvikInsn.arg[2], mir->dalvikInsn.arg[3]));
+      break;
+    case kMirOpPackedSet:
+      if (ssa_rep != nullptr) {
+        decoded_mir->append(StringPrintf(" vect%d = %s", mir->dalvikInsn.vA,
+              GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
+        if (uses > 1) {
+          decoded_mir->append(", ");
+          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
+        }
+      } else {
+        decoded_mir->append(StringPrintf(" vect%d = v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      }
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedAddReduce:
+      if (ssa_rep != nullptr) {
+        decoded_mir->append(" ");
+        decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
+        if (defs > 1) {
+          decoded_mir->append(", ");
+          decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
+        }
+        decoded_mir->append(StringPrintf(" = vect%d + %s", mir->dalvikInsn.vB,
+            GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
+        if (uses > 1) {
+          decoded_mir->append(", ");
+          decoded_mir->append(GetSSANameWithConst(ssa_rep->uses[1], false));
+        }
+      } else {
+        decoded_mir->append(StringPrintf("v%d = vect%d + v%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB, mir->dalvikInsn.vA));
+      }
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpPackedReduce:
+      if (ssa_rep != nullptr) {
+        decoded_mir->append(" ");
+        decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[0], false));
+        if (defs > 1) {
+          decoded_mir->append(", ");
+          decoded_mir->append(GetSSANameWithConst(ssa_rep->defs[1], false));
+        }
+        decoded_mir->append(StringPrintf(" = vect%d", mir->dalvikInsn.vB));
+      } else {
+        decoded_mir->append(StringPrintf(" v%d = vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      }
+      FillTypeSizeString(mir->dalvikInsn.vC, decoded_mir);
+      break;
+    case kMirOpReserveVectorRegisters:
+    case kMirOpReturnVectorRegisters:
+      decoded_mir->append(StringPrintf(" vect%d - vect%d", mir->dalvikInsn.vA, mir->dalvikInsn.vB));
+      break;
+    case kMirOpMemBarrier: {
+      decoded_mir->append(" type:");
+      std::stringstream ss;
+      ss << static_cast<MemBarrierKind>(mir->dalvikInsn.vA);
+      decoded_mir->append(ss.str());
+      break;
+    }
+    default:
+      break;
+  }
+}
+
 char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
   MIR::DecodedInstruction insn = mir->dalvikInsn;
   std::string str;
@@ -1219,18 +1400,14 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
   bool nop = false;
   SSARepresentation* ssa_rep = mir->ssa_rep;
   Instruction::Format dalvik_format = Instruction::k10x;  // Default to no-operand format.
-  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
-  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
 
-  // Handle special cases.
+  // Handle special cases that recover the original dalvik instruction.
   if ((opcode == kMirOpCheck) || (opcode == kMirOpCheckPart2)) {
     str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
     str.append(": ");
     // Recover the original Dex instruction.
     insn = mir->meta.throw_insn->dalvikInsn;
     ssa_rep = mir->meta.throw_insn->ssa_rep;
-    defs = ssa_rep->num_defs;
-    uses = ssa_rep->num_uses;
     opcode = insn.opcode;
   } else if (opcode == kMirOpNop) {
     str.append("[");
@@ -1239,100 +1416,96 @@ char* MIRGraph::GetDalvikDisassembly(const MIR* mir) {
     opcode = insn.opcode;
     nop = true;
   }
+  int defs = (ssa_rep != NULL) ? ssa_rep->num_defs : 0;
+  int uses = (ssa_rep != NULL) ? ssa_rep->num_uses : 0;
 
   if (MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
-    str.append(extended_mir_op_names_[opcode - kMirOpFirst]);
+    // Note that this does not check the MIR's opcode in all cases. When it has
+    // recovered the original dalvik instruction, it uses that opcode instead of the extended one.
+    DisassembleExtendedInstr(mir, &str);
   } else {
     dalvik_format = Instruction::FormatOf(insn.opcode);
-    flags = Instruction::FlagsOf(insn.opcode);
+    flags = insn.FlagsOf();
     str.append(Instruction::Name(insn.opcode));
-  }
 
-  if (opcode == kMirOpPhi) {
-    BasicBlockId* incoming = mir->meta.phi_incoming;
-    str.append(StringPrintf(" %s = (%s",
-               GetSSANameWithConst(ssa_rep->defs[0], true).c_str(),
-               GetSSANameWithConst(ssa_rep->uses[0], true).c_str()));
-    str.append(StringPrintf(":%d", incoming[0]));
-    int i;
-    for (i = 1; i < uses; i++) {
-      str.append(StringPrintf(", %s:%d",
-                              GetSSANameWithConst(ssa_rep->uses[i], true).c_str(),
-                              incoming[i]));
-    }
-    str.append(")");
-  } else if ((flags & Instruction::kBranch) != 0) {
-    // For branches, decode the instructions to print out the branch targets.
-    int offset = 0;
-    switch (dalvik_format) {
-      case Instruction::k21t:
-        str.append(StringPrintf(" %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str()));
-        offset = insn.vB;
-        break;
-      case Instruction::k22t:
-        str.append(StringPrintf(" %s, %s,", GetSSANameWithConst(ssa_rep->uses[0], false).c_str(),
-                   GetSSANameWithConst(ssa_rep->uses[1], false).c_str()));
-        offset = insn.vC;
-        break;
-      case Instruction::k10t:
-      case Instruction::k20t:
-      case Instruction::k30t:
-        offset = insn.vA;
-        break;
-      default:
-        LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
-    }
-    str.append(StringPrintf(" 0x%x (%c%x)", mir->offset + offset,
-                            offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
-  } else {
     // For invokes-style formats, treat wide regs as a pair of singles.
     bool show_singles = ((dalvik_format == Instruction::k35c) ||
                          (dalvik_format == Instruction::k3rc));
     if (defs != 0) {
-      str.append(StringPrintf(" %s", GetSSANameWithConst(ssa_rep->defs[0], false).c_str()));
+      str.append(" ");
+      str.append(GetSSANameWithConst(ssa_rep->defs[0], false));
+      if (defs > 1) {
+        str.append(", ");
+        str.append(GetSSANameWithConst(ssa_rep->defs[1], false));
+      }
       if (uses != 0) {
         str.append(", ");
       }
     }
     for (int i = 0; i < uses; i++) {
-      str.append(
-          StringPrintf(" %s", GetSSANameWithConst(ssa_rep->uses[i], show_singles).c_str()));
+      str.append(" ");
+      str.append(GetSSANameWithConst(ssa_rep->uses[i], show_singles));
       if (!show_singles && (reg_location_ != NULL) && reg_location_[i].wide) {
         // For the listing, skip the high sreg.
         i++;
       }
-      if (i != (uses -1)) {
+      if (i != (uses - 1)) {
         str.append(",");
       }
     }
+
     switch (dalvik_format) {
       case Instruction::k11n:  // Add one immediate from vB.
       case Instruction::k21s:
       case Instruction::k31i:
       case Instruction::k21h:
-        str.append(StringPrintf(", #%d", insn.vB));
+        str.append(StringPrintf(", #0x%x", insn.vB));
         break;
       case Instruction::k51l:  // Add one wide immediate.
         str.append(StringPrintf(", #%" PRId64, insn.vB_wide));
         break;
       case Instruction::k21c:  // One register, one string/type/method index.
       case Instruction::k31c:
-        str.append(StringPrintf(", index #%d", insn.vB));
+        str.append(StringPrintf(", index #0x%x", insn.vB));
         break;
       case Instruction::k22c:  // Two registers, one string/type/method index.
-        str.append(StringPrintf(", index #%d", insn.vC));
+        str.append(StringPrintf(", index #0x%x", insn.vC));
         break;
       case Instruction::k22s:  // Add one immediate from vC.
       case Instruction::k22b:
-        str.append(StringPrintf(", #%d", insn.vC));
+        str.append(StringPrintf(", #0x%x", insn.vC));
         break;
-      default: {
+      default:
         // Nothing left to print.
+        break;
+    }
+
+    if ((flags & Instruction::kBranch) != 0) {
+      // For branches, decode the instructions to print out the branch targets.
+      int offset = 0;
+      switch (dalvik_format) {
+        case Instruction::k21t:
+          offset = insn.vB;
+          break;
+        case Instruction::k22t:
+          offset = insn.vC;
+          break;
+        case Instruction::k10t:
+        case Instruction::k20t:
+        case Instruction::k30t:
+          offset = insn.vA;
+          break;
+        default:
+          LOG(FATAL) << "Unexpected branch format " << dalvik_format << " from " << insn.opcode;
+          break;
       }
+      str.append(StringPrintf(", 0x%x (%c%x)", mir->offset + offset,
+                              offset > 0 ? '+' : '-', offset > 0 ? offset : -offset));
+    }
+
+    if (nop) {
+      str.append("]--optimized away");
     }
-  }
-  if (nop) {
-    str.append("]--optimized away");
   }
   int length = str.length() + 1;
   ret = static_cast<char*>(arena_->Alloc(length, kArenaAllocDFInfo));
@@ -1355,7 +1528,12 @@ std::string MIRGraph::GetSSAName(int ssa_reg) {
   // TODO: This value is needed for LLVM and debugging. Currently, we compute this and then copy to
   //       the arena. We should be smarter and just place straight into the arena, or compute the
   //       value more lazily.
-  return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+  int vreg = SRegToVReg(ssa_reg);
+  if (vreg >= static_cast<int>(GetFirstTempVR())) {
+    return StringPrintf("t%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+  } else {
+    return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+  }
 }
 
 // Similar to GetSSAName, but if ssa name represents an immediate show that as well.
@@ -1373,7 +1551,12 @@ std::string MIRGraph::GetSSANameWithConst(int ssa_reg, bool singles_only) {
                           ConstantValue(reg_location_[ssa_reg]));
     }
   } else {
-    return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+    int vreg = SRegToVReg(ssa_reg);
+    if (vreg >= static_cast<int>(GetFirstTempVR())) {
+      return StringPrintf("t%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+    } else {
+      return StringPrintf("v%d_%d", SRegToVReg(ssa_reg), GetSSASubscript(ssa_reg));
+    }
   }
 }
 
@@ -1417,7 +1600,7 @@ void MIRGraph::DumpMIRGraph() {
   };
 
   LOG(INFO) << "Compiling " << PrettyMethod(cu_->method_idx, *cu_->dex_file);
-  LOG(INFO) << cu_->insns << " insns";
+  LOG(INFO) << GetInsns(0) << " insns";
   LOG(INFO) << GetNumBlocks() << " blocks in total";
   GrowableArray<BasicBlock*>::Iterator iterator(&block_list_);
 
@@ -1504,6 +1687,9 @@ void MIRGraph::InitializeMethodUses() {
   int num_ssa_regs = GetNumSSARegs();
   use_counts_.Resize(num_ssa_regs + 32);
   raw_use_counts_.Resize(num_ssa_regs + 32);
+  // Resize does not actually reset the number of elements used, so reset before initialization.
+  use_counts_.Reset();
+  raw_use_counts_.Reset();
   // Initialize list.
   for (int i = 0; i < num_ssa_regs; i++) {
     use_counts_.Insert(0);
@@ -1514,7 +1700,7 @@ void MIRGraph::InitializeMethodUses() {
 void MIRGraph::SSATransformationStart() {
   DCHECK(temp_scoped_alloc_.get() == nullptr);
   temp_scoped_alloc_.reset(ScopedArenaAllocator::Create(&cu_->arena_stack));
-  temp_bit_vector_size_ = cu_->num_dalvik_registers;
+  temp_bit_vector_size_ = GetNumOfCodeAndTempVRs();
   temp_bit_vector_ = new (temp_scoped_alloc_.get()) ArenaBitVector(
       temp_scoped_alloc_.get(), temp_bit_vector_size_, false, kBitMapRegisterV);
 
@@ -1934,6 +2120,10 @@ uint32_t SSARepresentation::GetStartUseIndex(Instruction::Code opcode) {
     case Instruction::IPUT_SHORT:
     case Instruction::IPUT_QUICK:
     case Instruction::IPUT_OBJECT_QUICK:
+    case Instruction::IPUT_BOOLEAN_QUICK:
+    case Instruction::IPUT_BYTE_QUICK:
+    case Instruction::IPUT_CHAR_QUICK:
+    case Instruction::IPUT_SHORT_QUICK:
     case Instruction::APUT:
     case Instruction::APUT_OBJECT:
     case Instruction::APUT_BOOLEAN:
@@ -2060,6 +2250,11 @@ void BasicBlock::Hide(CompilationUnit* c_unit) {
     // Replace child with null child.
     childPtr->predecessors->Delete(id);
   }
+
+  // Remove link to children.
+  taken = NullBasicBlockId;
+  fall_through = NullBasicBlockId;
+  successor_block_list_type = kNotUsed;
 }
 
 bool BasicBlock::IsSSALiveOut(const CompilationUnit* c_unit, int ssa_reg) {
@@ -2177,4 +2372,80 @@ void MIRGraph::InitializeBasicBlockData() {
   num_blocks_ = block_list_.Size();
 }
 
+int MIR::DecodedInstruction::FlagsOf() const {
+  // Calculate new index.
+  int idx = static_cast<int>(opcode) - kNumPackedOpcodes;
+
+  // Check if it is an extended or not.
+  if (idx < 0) {
+    return Instruction::FlagsOf(opcode);
+  }
+
+  // For extended, we use a switch.
+  switch (static_cast<int>(opcode)) {
+    case kMirOpPhi:
+      return Instruction::kContinue;
+    case kMirOpCopy:
+      return Instruction::kContinue;
+    case kMirOpFusedCmplFloat:
+      return Instruction::kContinue | Instruction::kBranch;
+    case kMirOpFusedCmpgFloat:
+      return Instruction::kContinue | Instruction::kBranch;
+    case kMirOpFusedCmplDouble:
+      return Instruction::kContinue | Instruction::kBranch;
+    case kMirOpFusedCmpgDouble:
+      return Instruction::kContinue | Instruction::kBranch;
+    case kMirOpFusedCmpLong:
+      return Instruction::kContinue | Instruction::kBranch;
+    case kMirOpNop:
+      return Instruction::kContinue;
+    case kMirOpNullCheck:
+      return Instruction::kContinue | Instruction::kThrow;
+    case kMirOpRangeCheck:
+      return Instruction::kContinue | Instruction::kThrow;
+    case kMirOpDivZeroCheck:
+      return Instruction::kContinue | Instruction::kThrow;
+    case kMirOpCheck:
+      return 0;
+    case kMirOpCheckPart2:
+      return 0;
+    case kMirOpSelect:
+      return Instruction::kContinue;
+    case kMirOpConstVector:
+      return Instruction::kContinue;
+    case kMirOpMoveVector:
+      return Instruction::kContinue;
+    case kMirOpPackedMultiply:
+      return Instruction::kContinue;
+    case kMirOpPackedAddition:
+      return Instruction::kContinue;
+    case kMirOpPackedSubtract:
+      return Instruction::kContinue;
+    case kMirOpPackedShiftLeft:
+      return Instruction::kContinue;
+    case kMirOpPackedSignedShiftRight:
+      return Instruction::kContinue;
+    case kMirOpPackedUnsignedShiftRight:
+      return Instruction::kContinue;
+    case kMirOpPackedAnd:
+      return Instruction::kContinue;
+    case kMirOpPackedOr:
+      return Instruction::kContinue;
+    case kMirOpPackedXor:
+      return Instruction::kContinue;
+    case kMirOpPackedAddReduce:
+      return Instruction::kContinue;
+    case kMirOpPackedReduce:
+      return Instruction::kContinue;
+    case kMirOpPackedSet:
+      return Instruction::kContinue;
+    case kMirOpReserveVectorRegisters:
+      return Instruction::kContinue;
+    case kMirOpReturnVectorRegisters:
+      return Instruction::kContinue;
+    default:
+      LOG(WARNING) << "ExtendedFlagsOf: Unhandled case: " << static_cast<int> (opcode);
+      return 0;
+  }
+}
 }  // namespace art
index 5817f92..078970d 100644 (file)
 
 #include <stdint.h>
 
+#include "compiler_ir.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
-#include "compiler_ir.h"
+#include "driver/dex_compilation_unit.h"
 #include "invoke_type.h"
 #include "mir_field_info.h"
 #include "mir_method_info.h"
@@ -194,6 +195,7 @@ enum OatMethodAttributes {
 #define MIR_CALLEE                      (1 << kMIRCallee)
 #define MIR_IGNORE_SUSPEND_CHECK        (1 << kMIRIgnoreSuspendCheck)
 #define MIR_DUP                         (1 << kMIRDup)
+#define MIR_STORE_NON_TEMPORAL          (1 << kMIRStoreNonTemporal)
 
 #define BLOCK_NAME_LEN 80
 
@@ -215,6 +217,7 @@ struct CompilerTemp {
 enum CompilerTempType {
   kCompilerTempVR,                // A virtual register temporary.
   kCompilerTempSpecialMethodPtr,  // Temporary that keeps track of current method pointer.
+  kCompilerTempBackend,           // Temporary that is used by backend.
 };
 
 // When debug option enabled, records effectiveness of null and range check elimination.
@@ -297,37 +300,37 @@ struct MIR {
     }
 
     bool IsInvoke() const {
-      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kInvoke) == Instruction::kInvoke);
+      return ((FlagsOf() & Instruction::kInvoke) == Instruction::kInvoke);
     }
 
     bool IsStore() const {
-      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kStore) == Instruction::kStore);
+      return ((FlagsOf() & Instruction::kStore) == Instruction::kStore);
     }
 
     bool IsLoad() const {
-      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kLoad) == Instruction::kLoad);
+      return ((FlagsOf() & Instruction::kLoad) == Instruction::kLoad);
     }
 
     bool IsConditionalBranch() const {
-      return !IsPseudoMirOp(opcode) && (Instruction::FlagsOf(opcode) == (Instruction::kContinue | Instruction::kBranch));
+      return (FlagsOf() == (Instruction::kContinue | Instruction::kBranch));
     }
 
     /**
      * @brief Is the register C component of the decoded instruction a constant?
      */
     bool IsCFieldOrConstant() const {
-      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kRegCFieldOrConstant) == Instruction::kRegCFieldOrConstant);
+      return ((FlagsOf() & Instruction::kRegCFieldOrConstant) == Instruction::kRegCFieldOrConstant);
     }
 
     /**
      * @brief Is the register C component of the decoded instruction a constant?
      */
     bool IsBFieldOrConstant() const {
-      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kRegBFieldOrConstant) == Instruction::kRegBFieldOrConstant);
+      return ((FlagsOf() & Instruction::kRegBFieldOrConstant) == Instruction::kRegBFieldOrConstant);
     }
 
     bool IsCast() const {
-      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kCast) == Instruction::kCast);
+      return ((FlagsOf() & Instruction::kCast) == Instruction::kCast);
     }
 
     /**
@@ -337,12 +340,14 @@ struct MIR {
      *            when crossing such an instruction.
      */
     bool Clobbers() const {
-      return !IsPseudoMirOp(opcode) && ((Instruction::FlagsOf(opcode) & Instruction::kClobber) == Instruction::kClobber);
+      return ((FlagsOf() & Instruction::kClobber) == Instruction::kClobber);
     }
 
     bool IsLinear() const {
-      return !IsPseudoMirOp(opcode) && (Instruction::FlagsOf(opcode) & (Instruction::kAdd | Instruction::kSubtract)) != 0;
+      return (FlagsOf() & (Instruction::kAdd | Instruction::kSubtract)) != 0;
     }
+
+    int FlagsOf() const;
   } dalvikInsn;
 
   NarrowDexOffset offset;         // Offset of the instruction in code units.
@@ -569,10 +574,26 @@ class MIRGraph {
     return current_code_item_->insns_;
   }
 
+  /**
+   * @brief Used to obtain the raw dex bytecode instruction pointer.
+   * @param m_unit_index The method index in MIRGraph (caused by having multiple methods).
+   * This is guaranteed to contain index 0 which is the base method being compiled.
+   * @return Returns the raw instruction pointer.
+   */
   const uint16_t* GetInsns(int m_unit_index) const {
     return m_units_[m_unit_index]->GetCodeItem()->insns_;
   }
 
+  /**
+   * @brief Used to obtain the raw data table.
+   * @param mir sparse switch, packed switch, or fill-array-data
+   * @param table_offset The table offset from start of method.
+   * @return Returns the raw table pointer.
+   */
+  const uint16_t* GetTable(MIR* mir, uint32_t table_offset) const {
+    return GetInsns(mir->m_unit_index) + mir->offset + table_offset;
+  }
+
   unsigned int GetNumBlocks() const {
     return num_blocks_;
   }
@@ -724,6 +745,19 @@ class MIRGraph {
     return constant_values_[s_reg];
   }
 
+  /**
+   * @brief Used to obtain 64-bit value of a pair of ssa registers.
+   * @param s_reg_low The ssa register representing the low bits.
+   * @param s_reg_high The ssa register representing the high bits.
+   * @return Returns the 64-bit constant value.
+   */
+  int64_t ConstantValueWide(int32_t s_reg_low, int32_t s_reg_high) const {
+    DCHECK(IsConst(s_reg_low));
+    DCHECK(IsConst(s_reg_high));
+    return (static_cast<int64_t>(constant_values_[s_reg_high]) << 32) |
+        Low32Bits(static_cast<int64_t>(constant_values_[s_reg_low]));
+  }
+
   int64_t ConstantValueWide(RegLocation loc) const {
     DCHECK(IsConst(loc));
     DCHECK(!loc.high_word);  // Do not allow asking for the high partner.
@@ -732,6 +766,20 @@ class MIRGraph {
         Low32Bits(static_cast<int64_t>(constant_values_[loc.orig_sreg]));
   }
 
+  /**
+   * @brief Used to mark ssa register as being constant.
+   * @param ssa_reg The ssa register.
+   * @param value The constant value of ssa register.
+   */
+  void SetConstant(int32_t ssa_reg, int32_t value);
+
+  /**
+   * @brief Used to mark ssa register and its wide counter-part as being constant.
+   * @param ssa_reg The ssa register.
+   * @param value The 64-bit constant value of ssa register and its pair.
+   */
+  void SetConstantWide(int32_t ssa_reg, int64_t value);
+
   bool IsConstantNullRef(RegLocation loc) const {
     return loc.ref && loc.is_const && (ConstantValue(loc) == 0);
   }
@@ -754,12 +802,12 @@ class MIRGraph {
     return num_reachable_blocks_;
   }
 
-  int GetUseCount(int vreg) const {
-    return use_counts_.Get(vreg);
+  int GetUseCount(int sreg) const {
+    return use_counts_.Get(sreg);
   }
 
-  int GetRawUseCount(int vreg) const {
-    return raw_use_counts_.Get(vreg);
+  int GetRawUseCount(int sreg) const {
+    return raw_use_counts_.Get(sreg);
   }
 
   int GetSSASubscript(int ssa_reg) const {
@@ -815,9 +863,26 @@ class MIRGraph {
    * @return Returns the number of compiler temporaries.
    */
   size_t GetNumUsedCompilerTemps() const {
-    size_t total_num_temps = compiler_temps_.Size();
-    DCHECK_LE(num_non_special_compiler_temps_, total_num_temps);
-    return total_num_temps;
+    // Assume that the special temps will always be used.
+    return GetNumNonSpecialCompilerTemps() + max_available_special_compiler_temps_;
+  }
+
+  /**
+   * @brief Used to obtain number of bytes needed for special temps.
+   * @details This space is always needed because temps have special location on stack.
+   * @return Returns number of bytes for the special temps.
+   */
+  size_t GetNumBytesForSpecialTemps() const;
+
+  /**
+   * @brief Used by backend as a hint for maximum number of bytes for non-special temps.
+   * @details Returns 4 bytes for each temp because that is the maximum amount needed
+   * for storing each temp. The BE could be smarter though and allocate a smaller
+   * spill region.
+   * @return Returns the maximum number of bytes needed for non-special temps.
+   */
+  size_t GetMaximumBytesForNonSpecialTemps() const {
+    return GetNumNonSpecialCompilerTemps() * sizeof(uint32_t);
   }
 
   /**
@@ -835,7 +900,9 @@ class MIRGraph {
    * @return Returns true if the max was set and false if failed to set.
    */
   bool SetMaxAvailableNonSpecialCompilerTemps(size_t new_max) {
-    if (new_max < GetNumNonSpecialCompilerTemps()) {
+    // Make sure that enough temps still exist for backend and also that the
+    // new max can still keep around all of the already requested temps.
+    if (new_max < (GetNumNonSpecialCompilerTemps() + reserved_temps_for_backend_)) {
       return false;
     } else {
       max_available_non_special_compiler_temps_ = new_max;
@@ -844,21 +911,12 @@ class MIRGraph {
   }
 
   /**
-   * @brief Provides the number of non-special compiler temps available.
+   * @brief Provides the number of non-special compiler temps available for use by ME.
    * @details Even if this returns zero, special compiler temps are guaranteed to be available.
+   * Additionally, this makes sure to not use any temps reserved for BE only.
    * @return Returns the number of available temps.
    */
-  size_t GetNumAvailableNonSpecialCompilerTemps();
-
-  /**
-   * @brief Used to obtain an existing compiler temporary.
-   * @param index The index of the temporary which must be strictly less than the
-   * number of temporaries.
-   * @return Returns the temporary that was asked for.
-   */
-  CompilerTemp* GetCompilerTemp(size_t index) const {
-    return compiler_temps_.Get(index);
-  }
+  size_t GetNumAvailableVRTemps();
 
   /**
    * @brief Used to obtain the maximum number of compiler temporaries that can be requested.
@@ -869,7 +927,22 @@ class MIRGraph {
   }
 
   /**
+   * @brief Used to signal that the compiler temps have been committed.
+   * @details This should be used once the number of temps can no longer change,
+   * such as after frame size is committed and cannot be changed.
+   */
+  void CommitCompilerTemps() {
+    compiler_temps_committed_ = true;
+  }
+
+  /**
    * @brief Used to obtain a new unique compiler temporary.
+   * @details Two things are done for convenience when allocating a new compiler
+   * temporary. The ssa register is automatically requested and the information
+   * about reg location is filled. This helps when the temp is requested post
+   * ssa initialization, such as when temps are requested by the backend.
+   * @warning If the temp requested will be used for ME and will have multiple versions,
+   * the sreg provided by the temp will be invalidated on next ssa recalculation.
    * @param ct_type Type of compiler temporary requested.
    * @param wide Whether we should allocate a wide temporary.
    * @return Returns the newly created compiler temporary.
@@ -911,8 +984,49 @@ class MIRGraph {
   }
 
   // Is this vreg in the in set?
-  bool IsInVReg(int vreg) {
-    return (vreg >= cu_->num_regs);
+  bool IsInVReg(uint32_t vreg) {
+    return (vreg >= GetFirstInVR()) && (vreg < GetFirstTempVR());
+  }
+
+  uint32_t GetNumOfCodeVRs() const {
+    return current_code_item_->registers_size_;
+  }
+
+  uint32_t GetNumOfCodeAndTempVRs() const {
+    // Include all of the possible temps so that no structures overflow when initialized.
+    return GetNumOfCodeVRs() + GetMaxPossibleCompilerTemps();
+  }
+
+  uint32_t GetNumOfLocalCodeVRs() const {
+    // This also refers to the first "in" VR.
+    return GetNumOfCodeVRs() - current_code_item_->ins_size_;
+  }
+
+  uint32_t GetNumOfInVRs() const {
+    return current_code_item_->ins_size_;
+  }
+
+  uint32_t GetNumOfOutVRs() const {
+    return current_code_item_->outs_size_;
+  }
+
+  uint32_t GetFirstInVR() const {
+    return GetNumOfLocalCodeVRs();
+  }
+
+  uint32_t GetFirstTempVR() const {
+    // Temp VRs immediately follow code VRs.
+    return GetNumOfCodeVRs();
+  }
+
+  uint32_t GetFirstSpecialTempVR() const {
+    // Special temps appear first in the ordering before non special temps.
+    return GetFirstTempVR();
+  }
+
+  uint32_t GetFirstNonSpecialTempVR() const {
+    // We always leave space for all the special temps before the non-special ones.
+    return GetFirstSpecialTempVR() + max_available_special_compiler_temps_;
   }
 
   void DumpCheckStats();
@@ -965,6 +1079,7 @@ class MIRGraph {
     punt_to_interpreter_ = val;
   }
 
+  void DisassembleExtendedInstr(const MIR* mir, std::string* decoded_mir);
   char* GetDalvikDisassembly(const MIR* mir);
   void ReplaceSpecialChars(std::string& str);
   std::string GetSSAName(int ssa_reg);
@@ -1116,8 +1231,6 @@ class MIRGraph {
   void MarkPreOrder(BasicBlock* bb);
   void RecordDFSOrders(BasicBlock* bb);
   void ComputeDomPostOrderTraversal(BasicBlock* bb);
-  void SetConstant(int32_t ssa_reg, int value);
-  void SetConstantWide(int ssa_reg, int64_t value);
   int GetSSAUseCount(int s_reg);
   bool BasicBlockOpt(BasicBlock* bb);
   bool BuildExtendedBBList(struct BasicBlock* bb);
@@ -1160,7 +1273,7 @@ class MIRGraph {
   // Stack of the loop head indexes and recalculation flags for RepeatingTopologicalSortIterator.
   GrowableArray<std::pair<uint16_t, bool>>* topological_order_loop_head_stack_;
   int* i_dom_list_;
-  ArenaBitVector** def_block_matrix_;    // num_dalvik_register x num_blocks.
+  ArenaBitVector** def_block_matrix_;    // original num registers x num_blocks.
   std::unique_ptr<ScopedArenaAllocator> temp_scoped_alloc_;
   uint16_t* temp_insn_data_;
   uint32_t temp_bit_vector_size_;
@@ -1189,11 +1302,13 @@ class MIRGraph {
   ArenaAllocator* arena_;
   int backward_branches_;
   int forward_branches_;
-  GrowableArray<CompilerTemp*> compiler_temps_;
-  size_t num_non_special_compiler_temps_;
-  size_t max_available_non_special_compiler_temps_;
-  size_t max_available_special_compiler_temps_;
-  bool punt_to_interpreter_;                    // Difficult or not worthwhile - just interpret.
+  size_t num_non_special_compiler_temps_;  // Keeps track of allocated non-special compiler temps. These are VRs that are in compiler temp region on stack.
+  size_t max_available_non_special_compiler_temps_;  // Keeps track of maximum available non-special temps.
+  size_t max_available_special_compiler_temps_;      // Keeps track of maximum available special temps.
+  bool requested_backend_temp_;            // Keeps track whether BE temps have been requested.
+  size_t reserved_temps_for_backend_;      // Keeps track of the remaining temps that are reserved for BE.
+  bool compiler_temps_committed_;          // Keeps track whether number of temps has been frozen (for example post frame size calculation).
+  bool punt_to_interpreter_;               // Difficult or not worthwhile - just interpret.
   uint64_t merged_df_flags_;
   GrowableArray<MirIFieldLoweringInfo> ifield_lowering_infos_;
   GrowableArray<MirSFieldLoweringInfo> sfield_lowering_infos_;
index 932f453..bdc05a9 100644 (file)
@@ -103,6 +103,10 @@ class TopologicalSortOrderTest : public testing::Test {
     ASSERT_EQ(kEntryBlock, cu_.mir_graph->entry_block_->block_type);
     cu_.mir_graph->exit_block_ = cu_.mir_graph->block_list_.Get(2);
     ASSERT_EQ(kExitBlock, cu_.mir_graph->exit_block_->block_type);
+
+    DexFile::CodeItem* code_item = static_cast<DexFile::CodeItem*>(cu_.arena.Alloc(sizeof(DexFile::CodeItem),
+                                                                                   kArenaAllocMisc));
+    cu_.mir_graph->current_code_item_ = cu_.code_item = code_item;
   }
 
   template <size_t count>
index 23ceb56..5d7cbed 100644 (file)
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "base/bit_vector-inl.h"
 #include "compiler_internals.h"
 #include "global_value_numbering.h"
 #include "local_value_numbering.h"
@@ -21,6 +22,7 @@
 #include "dex/global_value_numbering.h"
 #include "dex/quick/dex_file_method_inliner.h"
 #include "dex/quick/dex_file_to_method_inliner_map.h"
+#include "stack.h"
 #include "utils/scoped_arena_containers.h"
 
 namespace art {
@@ -30,12 +32,12 @@ static unsigned int Predecessors(BasicBlock* bb) {
 }
 
 /* Setup a constant value for opcodes that have the DF_SETS_CONST attribute */
-void MIRGraph::SetConstant(int32_t ssa_reg, int value) {
+void MIRGraph::SetConstant(int32_t ssa_reg, int32_t value) {
   is_constant_v_->SetBit(ssa_reg);
   constant_values_[ssa_reg] = value;
 }
 
-void MIRGraph::SetConstantWide(int ssa_reg, int64_t value) {
+void MIRGraph::SetConstantWide(int32_t ssa_reg, int64_t value) {
   is_constant_v_->SetBit(ssa_reg);
   is_constant_v_->SetBit(ssa_reg + 1);
   constant_values_[ssa_reg] = Low32Bits(value);
@@ -231,27 +233,59 @@ int MIRGraph::GetSSAUseCount(int s_reg) {
   return raw_use_counts_.Get(s_reg);
 }
 
-size_t MIRGraph::GetNumAvailableNonSpecialCompilerTemps() {
-  if (num_non_special_compiler_temps_ >= max_available_non_special_compiler_temps_) {
+size_t MIRGraph::GetNumBytesForSpecialTemps() const {
+  // This logic is written with assumption that Method* is only special temp.
+  DCHECK_EQ(max_available_special_compiler_temps_, 1u);
+  return sizeof(StackReference<mirror::ArtMethod>);
+}
+
+size_t MIRGraph::GetNumAvailableVRTemps() {
+  // First take into account all temps reserved for backend.
+  if (max_available_non_special_compiler_temps_ < reserved_temps_for_backend_) {
+    return 0;
+  }
+
+  // Calculate remaining ME temps available.
+  size_t remaining_me_temps = max_available_non_special_compiler_temps_ - reserved_temps_for_backend_;
+
+  if (num_non_special_compiler_temps_ >= remaining_me_temps) {
     return 0;
   } else {
-    return max_available_non_special_compiler_temps_ - num_non_special_compiler_temps_;
+    return remaining_me_temps - num_non_special_compiler_temps_;
   }
 }
 
-
 // FIXME - will probably need to revisit all uses of this, as type not defined.
 static const RegLocation temp_loc = {kLocCompilerTemp,
                                      0, 1 /*defined*/, 0, 0, 0, 0, 0, 1 /*home*/,
                                      RegStorage(), INVALID_SREG, INVALID_SREG};
 
 CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide) {
-  // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
-  if (ct_type == kCompilerTempVR) {
-    size_t available_temps = GetNumAvailableNonSpecialCompilerTemps();
-    if (available_temps <= 0 || (available_temps <= 1 && wide)) {
-      return 0;
+  // Once the compiler temps have been committed, new ones cannot be requested anymore.
+  DCHECK_EQ(compiler_temps_committed_, false);
+  // Make sure that reserved for BE set is sane.
+  DCHECK_LE(reserved_temps_for_backend_, max_available_non_special_compiler_temps_);
+
+  bool verbose = cu_->verbose;
+  const char* ct_type_str = nullptr;
+
+  if (verbose) {
+    switch (ct_type) {
+      case kCompilerTempBackend:
+        ct_type_str = "backend";
+        break;
+      case kCompilerTempSpecialMethodPtr:
+        ct_type_str = "method*";
+        break;
+      case kCompilerTempVR:
+        ct_type_str = "VR";
+        break;
+      default:
+        ct_type_str = "unknown";
+        break;
     }
+    LOG(INFO) << "CompilerTemps: A compiler temp of type " << ct_type_str << " that is "
+        << (wide ? "wide is being requested." : "not wide is being requested.");
   }
 
   CompilerTemp *compiler_temp = static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp),
@@ -260,51 +294,100 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
   // Create the type of temp requested. Special temps need special handling because
   // they have a specific virtual register assignment.
   if (ct_type == kCompilerTempSpecialMethodPtr) {
+    // This has a special location on stack which is 32-bit or 64-bit depending
+    // on mode. However, we don't want to overlap with non-special section
+    // and thus even for 64-bit, we allow only a non-wide temp to be requested.
     DCHECK_EQ(wide, false);
-    compiler_temp->v_reg = static_cast<int>(kVRegMethodPtrBaseReg);
-    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
 
-    // The MIR graph keeps track of the sreg for method pointer specially, so record that now.
-    method_sreg_ = compiler_temp->s_reg_low;
+    // The vreg is always the first special temp for method ptr.
+    compiler_temp->v_reg = GetFirstSpecialTempVR();
+
+  } else if (ct_type == kCompilerTempBackend) {
+    requested_backend_temp_ = true;
+
+    // Make sure that we are not exceeding temps reserved for BE.
+    // Since VR temps cannot be requested once the BE temps are requested, we
+    // allow reservation of VR temps as well for BE. We
+    size_t available_temps = reserved_temps_for_backend_ + GetNumAvailableVRTemps();
+    if (available_temps <= 0 || (available_temps <= 1 && wide)) {
+      if (verbose) {
+        LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+      }
+      return nullptr;
+    }
+
+    // Update the remaining reserved temps since we have now used them.
+    // Note that the code below is actually subtracting to remove them from reserve
+    // once they have been claimed. It is careful to not go below zero.
+    if (reserved_temps_for_backend_ >= 1) {
+      reserved_temps_for_backend_--;
+    }
+    if (wide && reserved_temps_for_backend_ >= 1) {
+      reserved_temps_for_backend_--;
+    }
+
+    // The new non-special compiler temp must receive a unique v_reg.
+    compiler_temp->v_reg = GetFirstNonSpecialTempVR() + num_non_special_compiler_temps_;
+    num_non_special_compiler_temps_++;
+  } else if (ct_type == kCompilerTempVR) {
+    // Once we start giving out BE temps, we don't allow anymore ME temps to be requested.
+    // This is done in order to prevent problems with ssa since these structures are allocated
+    // and managed by the ME.
+    DCHECK_EQ(requested_backend_temp_, false);
+
+    // There is a limit to the number of non-special temps so check to make sure it wasn't exceeded.
+    size_t available_temps = GetNumAvailableVRTemps();
+    if (available_temps <= 0 || (available_temps <= 1 && wide)) {
+      if (verbose) {
+        LOG(INFO) << "CompilerTemps: Not enough temp(s) of type " << ct_type_str << " are available.";
+      }
+      return nullptr;
+    }
+
+    // The new non-special compiler temp must receive a unique v_reg.
+    compiler_temp->v_reg = GetFirstNonSpecialTempVR() + num_non_special_compiler_temps_;
+    num_non_special_compiler_temps_++;
   } else {
-    DCHECK_EQ(ct_type, kCompilerTempVR);
+    UNIMPLEMENTED(FATAL) << "No handling for compiler temp type " << ct_type_str << ".";
+  }
+
+  // We allocate an sreg as well to make developer life easier.
+  // However, if this is requested from an ME pass that will recalculate ssa afterwards,
+  // this sreg is no longer valid. The caller should be aware of this.
+  compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
+
+  if (verbose) {
+    LOG(INFO) << "CompilerTemps: New temp of type " << ct_type_str << " with v" << compiler_temp->v_reg
+        << " and s" << compiler_temp->s_reg_low << " has been created.";
+  }
 
-    // The new non-special compiler temp must receive a unique v_reg with a negative value.
-    compiler_temp->v_reg = static_cast<int>(kVRegNonSpecialTempBaseReg) -
-        num_non_special_compiler_temps_;
-    compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
+  if (wide) {
+    // Only non-special temps are handled as wide for now.
+    // Note that the number of non special temps is incremented below.
+    DCHECK(ct_type == kCompilerTempBackend || ct_type == kCompilerTempVR);
+
+    // Ensure that the two registers are consecutive.
+    int ssa_reg_low = compiler_temp->s_reg_low;
+    int ssa_reg_high = AddNewSReg(compiler_temp->v_reg + 1);
     num_non_special_compiler_temps_++;
 
-    if (wide) {
-      // Create a new CompilerTemp for the high part.
-      CompilerTemp *compiler_temp_high =
-          static_cast<CompilerTemp *>(arena_->Alloc(sizeof(CompilerTemp), kArenaAllocRegAlloc));
-      compiler_temp_high->v_reg = compiler_temp->v_reg;
-      compiler_temp_high->s_reg_low = compiler_temp->s_reg_low;
-      compiler_temps_.Insert(compiler_temp_high);
-
-      // Ensure that the two registers are consecutive. Since the virtual registers used for temps
-      // grow in a negative fashion, we need the smaller to refer to the low part. Thus, we
-      // redefine the v_reg and s_reg_low.
-      compiler_temp->v_reg--;
-      int ssa_reg_high = compiler_temp->s_reg_low;
-      compiler_temp->s_reg_low = AddNewSReg(compiler_temp->v_reg);
-      int ssa_reg_low = compiler_temp->s_reg_low;
-
-      // If needed initialize the register location for the high part.
-      // The low part is handled later in this method on a common path.
-      if (reg_location_ != nullptr) {
-        reg_location_[ssa_reg_high] = temp_loc;
-        reg_location_[ssa_reg_high].high_word = 1;
-        reg_location_[ssa_reg_high].s_reg_low = ssa_reg_low;
-        reg_location_[ssa_reg_high].wide = true;
-      }
+    if (verbose) {
+      LOG(INFO) << "CompilerTemps: The wide part of temp of type " << ct_type_str << " is v"
+          << compiler_temp->v_reg + 1 << " and s" << ssa_reg_high << ".";
+    }
 
-      num_non_special_compiler_temps_++;
+    if (reg_location_ != nullptr) {
+      reg_location_[ssa_reg_high] = temp_loc;
+      reg_location_[ssa_reg_high].high_word = true;
+      reg_location_[ssa_reg_high].s_reg_low = ssa_reg_low;
+      reg_location_[ssa_reg_high].wide = true;
     }
   }
 
-  // Have we already allocated the register locations?
+  // If the register locations have already been allocated, add the information
+  // about the temp. We will not overflow because they have been initialized
+  // to support the maximum number of temps. For ME temps that have multiple
+  // ssa versions, the structures below will be expanded on the post pass cleanup.
   if (reg_location_ != nullptr) {
     int ssa_reg_low = compiler_temp->s_reg_low;
     reg_location_[ssa_reg_low] = temp_loc;
@@ -312,7 +395,6 @@ CompilerTemp* MIRGraph::GetNewCompilerTemp(CompilerTempType ct_type, bool wide)
     reg_location_[ssa_reg_low].wide = wide;
   }
 
-  compiler_temps_.Insert(compiler_temp);
   return compiler_temp;
 }
 
@@ -692,10 +774,13 @@ void MIRGraph::CombineBlocks(struct BasicBlock* bb) {
     // Include the rest of the instructions
     bb->last_mir_insn = bb_next->last_mir_insn;
     /*
-     * If lower-half of pair of blocks to combine contained a return, move the flag
-     * to the newly combined block.
+     * If lower-half of pair of blocks to combine contained
+     * a return or a conditional branch or an explicit throw,
+     * move the flag to the newly combined block.
      */
     bb->terminated_by_return = bb_next->terminated_by_return;
+    bb->conditional_branch = bb_next->conditional_branch;
+    bb->explicit_throw = bb_next->explicit_throw;
 
     /*
      * NOTE: we aren't updating all dataflow info here.  Should either make sure this pass
@@ -744,13 +829,13 @@ bool MIRGraph::EliminateNullChecksAndInferTypes(BasicBlock* bb) {
     if (bb->block_type == kEntryBlock) {
       ssa_regs_to_check->ClearAllBits();
       // Assume all ins are objects.
-      for (uint16_t in_reg = cu_->num_dalvik_registers - cu_->num_ins;
-           in_reg < cu_->num_dalvik_registers; in_reg++) {
+      for (uint16_t in_reg = GetFirstInVR();
+           in_reg < GetNumOfCodeVRs(); in_reg++) {
         ssa_regs_to_check->SetBit(in_reg);
       }
       if ((cu_->access_flags & kAccStatic) == 0) {
         // If non-static method, mark "this" as non-null
-        int this_reg = cu_->num_dalvik_registers - cu_->num_ins;
+        int this_reg = GetFirstInVR();
         ssa_regs_to_check->ClearBit(this_reg);
       }
     } else if (bb->predecessors->Size() == 1) {
@@ -1250,7 +1335,7 @@ void MIRGraph::InlineSpecialMethods(BasicBlock* bb) {
     if (MIR::DecodedInstruction::IsPseudoMirOp(mir->dalvikInsn.opcode)) {
       continue;
     }
-    if (!(Instruction::FlagsOf(mir->dalvikInsn.opcode) & Instruction::kInvoke)) {
+    if (!(mir->dalvikInsn.FlagsOf() & Instruction::kInvoke)) {
       continue;
     }
     const MirMethodLoweringInfo& method_info = GetMethodLoweringInfo(mir);
index dbb5366..c377426 100644 (file)
@@ -81,7 +81,7 @@ class Pass {
    * @param data the object containing data necessary for the pass.
    * @return whether or not there is a change when walking the BasicBlock
    */
-  virtual bool Worker(const PassDataHolder* data) const {
+  virtual bool Worker(PassDataHolder* data) const {
     // Unused parameter.
     UNUSED(data);
 
index bd8f53c..8a3eae1 100644 (file)
@@ -161,6 +161,17 @@ class PassDriver {
     print_pass_list_ = list;
   }
 
+  /**
+   * @brief Used to set a string that contains the overridden pass options.
+   * @details An overridden pass option means that the pass uses this option
+   * instead of using its default option.
+   * @param s The string passed by user with overridden options. The string is in format
+   * Pass1Name:Pass1Option:Pass1Setting,Pass2Name:Pass2Option::Pass2Setting
+   */
+  static void SetOverriddenPassOptions(const std::string& s) {
+    overridden_pass_options_list_ = s;
+  }
+
   void SetDefaultPasses() {
     pass_list_ = PassDriver<PassDriverType>::g_default_pass_list;
   }
@@ -206,6 +217,9 @@ class PassDriver {
 
   /** @brief What are the passes we want to be dumping the CFG? */
   static std::string dump_pass_list_;
+
+  /** @brief String of all options that should be overridden for selected passes */
+  static std::string overridden_pass_options_list_;
 };
 
 }  // namespace art
index 133593c..537ceb6 100644 (file)
@@ -17,6 +17,8 @@
 #ifndef ART_COMPILER_DEX_PASS_DRIVER_ME_H_
 #define ART_COMPILER_DEX_PASS_DRIVER_ME_H_
 
+#include <cstdlib>
+#include <cstring>
 #include "bb_optimizations.h"
 #include "dataflow_iterator.h"
 #include "dataflow_iterator-inl.h"
@@ -94,19 +96,27 @@ class PassDriverME: public PassDriver<PassDriverType> {
       c_unit->NewTimingSplit(pass->GetName());
     }
 
-    // Check the pass gate first.
-    bool should_apply_pass = pass->Gate(&pass_me_data_holder_);
-    if (should_apply_pass) {
-      bool old_print_pass = c_unit->print_pass;
-
-      c_unit->print_pass = PassDriver<PassDriverType>::default_print_passes_;
-
-      const char* print_pass_list = PassDriver<PassDriverType>::print_pass_list_.c_str();
+    // First, work on determining pass verbosity.
+    bool old_print_pass = c_unit->print_pass;
+    c_unit->print_pass = PassDriver<PassDriverType>::default_print_passes_;
+    const char* print_pass_list = PassDriver<PassDriverType>::print_pass_list_.c_str();
+    if (print_pass_list != nullptr && strstr(print_pass_list, pass->GetName()) != nullptr) {
+      c_unit->print_pass = true;
+    }
 
-      if (print_pass_list != nullptr && strstr(print_pass_list, pass->GetName()) != nullptr) {
-        c_unit->print_pass = true;
+    // Next, check if there are any overridden settings for the pass that change default configuration.
+    c_unit->overridden_pass_options.clear();
+    FillOverriddenPassSettings(pass->GetName(), c_unit->overridden_pass_options);
+    if (c_unit->print_pass) {
+      for (auto setting_it : c_unit->overridden_pass_options) {
+        LOG(INFO) << "Overridden option \"" << setting_it.first << ":"
+          << setting_it.second << "\" for pass \"" << pass->GetName() << "\"";
       }
+    }
 
+    // Check the pass gate first.
+    bool should_apply_pass = pass->Gate(&pass_me_data_holder_);
+    if (should_apply_pass) {
       // Applying the pass: first start, doWork, and end calls.
       this->ApplyPass(&pass_me_data_holder_, pass);
 
@@ -137,10 +147,11 @@ class PassDriverME: public PassDriver<PassDriverType> {
           }
         }
       }
-
-      c_unit->print_pass = old_print_pass;
     }
 
+    // Before wrapping up with this pass, restore old pass verbosity flag.
+    c_unit->print_pass = old_print_pass;
+
     // If the pass gate passed, we can declare success.
     return should_apply_pass;
   }
@@ -149,6 +160,18 @@ class PassDriverME: public PassDriver<PassDriverType> {
     return dump_cfg_folder_;
   }
 
+  static void PrintPassOptions() {
+    for (auto pass : PassDriver<PassDriverType>::g_default_pass_list) {
+      const PassME* me_pass = down_cast<const PassME*>(pass);
+      if (me_pass->HasOptions()) {
+        LOG(INFO) << "Pass options for \"" << me_pass->GetName() << "\" are:";
+        SafeMap<const std::string, int> overridden_settings;
+        FillOverriddenPassSettings(me_pass->GetName(), overridden_settings);
+        me_pass->PrintPassOptions(overridden_settings);
+      }
+    }
+  }
+
  protected:
   /** @brief The data holder that contains data needed for the PassDriverME. */
   PassMEDataHolder pass_me_data_holder_;
@@ -175,6 +198,97 @@ class PassDriverME: public PassDriver<PassDriverType> {
       Iterator iterator(c_unit->mir_graph.get());
       DoWalkBasicBlocks(data, pass, &iterator);
     }
+
+  /**
+   * @brief Fills the settings_to_fill by finding all of the applicable options in the overridden_pass_options_list_.
+   * @param pass_name The pass name for which to fill settings.
+   * @param settings_to_fill Fills the options to contain the mapping of name of option to the new configuration.
+   */
+  static void FillOverriddenPassSettings(const char* pass_name, SafeMap<const std::string, int>& settings_to_fill) {
+    const std::string& settings = PassDriver<PassDriverType>::overridden_pass_options_list_;
+    const size_t settings_len = settings.size();
+
+    // Before anything, check if we care about anything right now.
+    if (settings_len == 0) {
+      return;
+    }
+
+    const size_t pass_name_len = strlen(pass_name);
+    const size_t min_setting_size = 4;  // 2 delimiters, 1 setting name, 1 setting
+    size_t search_pos = 0;
+
+    // If there is no room for pass options, exit early.
+    if (settings_len < pass_name_len + min_setting_size) {
+      return;
+    }
+
+    do {
+      search_pos = settings.find(pass_name, search_pos);
+
+      // Check if we found this pass name in rest of string.
+      if (search_pos == std::string::npos) {
+        // No more settings for this pass.
+        break;
+      }
+
+      // The string contains the pass name. Now check that there is
+      // room for the settings: at least one char for setting name,
+      // two chars for two delimiter, and at least one char for setting.
+      if (search_pos + pass_name_len + min_setting_size >= settings_len) {
+        // No more settings for this pass.
+        break;
+      }
+
+      // Update the current search position to not include the pass name.
+      search_pos += pass_name_len;
+
+      // The format must be "PassName:SettingName:#" where # is the setting.
+      // Thus look for the first ":" which must exist.
+      if (settings[search_pos] != ':') {
+        // Missing delimiter right after pass name.
+        continue;
+      } else {
+        search_pos += 1;
+      }
+
+      // Now look for the actual setting by finding the next ":" delimiter.
+      const size_t setting_name_pos = search_pos;
+      size_t setting_pos = settings.find(':', setting_name_pos);
+
+      if (setting_pos == std::string::npos) {
+        // Missing a delimiter that would capture where setting starts.
+        continue;
+      } else if (setting_pos == setting_name_pos) {
+        // Missing setting thus did not move from setting name
+        continue;
+      } else {
+        // Skip the delimiter.
+        setting_pos += 1;
+      }
+
+      // Look for the terminating delimiter which must be a comma.
+      size_t next_configuration_separator = settings.find(',', setting_pos);
+      if (next_configuration_separator == std::string::npos) {
+        next_configuration_separator = settings_len;
+      }
+
+      // Prevent end of string errors.
+      if (next_configuration_separator == setting_pos) {
+          continue;
+      }
+
+      // Get the actual setting itself. Strtol is being used to convert because it is
+      // exception safe. If the input is not sane, it will set a setting of 0.
+      std::string setting_string = settings.substr(setting_pos, next_configuration_separator - setting_pos);
+      int setting = std::strtol(setting_string.c_str(), 0, 0);
+
+      std::string setting_name = settings.substr(setting_name_pos, setting_pos - setting_name_pos - 1);
+
+      settings_to_fill.Put(setting_name, setting);
+
+      search_pos = next_configuration_separator;
+    } while (true);
+  }
 };
 }  // namespace art
 #endif  // ART_COMPILER_DEX_PASS_DRIVER_ME_H_
index c72a4a6..ed2c2d2 100644 (file)
@@ -69,20 +69,32 @@ std::string PassDriver<PassDriverMEOpts>::print_pass_list_ = std::string();
 template<>
 bool PassDriver<PassDriverMEOpts>::default_print_passes_ = false;
 
-void PassDriverMEOpts::ApplyPass(PassDataHolder* data, const Pass* pass) {
-  // First call the base class' version.
-  PassDriver::ApplyPass(data, pass);
+// By default, there are no overridden pass settings.
+template<>
+std::string PassDriver<PassDriverMEOpts>::overridden_pass_options_list_ = std::string();
 
+void PassDriverMEOpts::ApplyPass(PassDataHolder* data, const Pass* pass) {
   const PassME* pass_me = down_cast<const PassME*> (pass);
   DCHECK(pass_me != nullptr);
 
   PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
 
+  // Set to dirty.
+  pass_me_data_holder->dirty = true;
+
+  // First call the base class' version.
+  PassDriver::ApplyPass(data, pass);
+
   // Now we care about flags.
   if ((pass_me->GetFlag(kOptimizationBasicBlockChange) == true) ||
       (pass_me->GetFlag(kOptimizationDefUsesChange) == true)) {
     CompilationUnit* c_unit = pass_me_data_holder->c_unit;
     c_unit->mir_graph.get()->CalculateBasicBlockInformation();
+
+    // Is it dirty at least?
+    if (pass_me_data_holder->dirty == true) {
+      c_unit->mir_graph.get()->CalculateBasicBlockInformation();
+    }
   }
 }
 
index 14108af..4acab6c 100644 (file)
@@ -73,4 +73,8 @@ std::string PassDriver<PassDriverMEPostOpt>::print_pass_list_ = std::string();
 template<>
 bool PassDriver<PassDriverMEPostOpt>::default_print_passes_ = false;
 
+// By default, there are no overridden pass settings.
+template<>
+std::string PassDriver<PassDriverMEPostOpt>::overridden_pass_options_list_ = std::string();
+
 }  // namespace art
index c7276eb..8242cb8 100644 (file)
@@ -42,7 +42,8 @@ class PassMEDataHolder: public PassDataHolder {
   public:
     CompilationUnit* c_unit;
     BasicBlock* bb;
-    void* data;
+    void* data;               /**< @brief Any data the pass wants to use */
+    bool dirty;               /**< @brief Has the pass rendered the CFG dirty, requiring post-opt? */
 };
 
 enum DataFlowAnalysisMode {
@@ -80,12 +81,53 @@ class PassME: public Pass {
   }
 
   ~PassME() {
+    default_options_.clear();
   }
 
   virtual DataFlowAnalysisMode GetTraversal() const {
     return traversal_type_;
   }
 
+  /**
+   * @return Returns whether the pass has any configurable options.
+   */
+  bool HasOptions() const {
+    return default_options_.size() != 0;
+  }
+
+  /**
+   * @brief Prints the pass options along with default settings if there are any.
+   * @details The printing is done using LOG(INFO).
+   */
+  void PrintPassDefaultOptions() const {
+    for (auto option_it = default_options_.begin(); option_it != default_options_.end(); option_it++) {
+      LOG(INFO) << "\t" << option_it->first << ":" << std::dec << option_it->second;
+    }
+  }
+
+  /**
+   * @brief Prints the pass options along with either default or overridden setting.
+   * @param overridden_options The overridden settings for this pass.
+   */
+  void PrintPassOptions(SafeMap<const std::string, int>& overridden_options) const {
+    // We walk through the default options only to get the pass names. We use GetPassOption to
+    // also consider the overridden ones.
+    for (auto option_it = default_options_.begin(); option_it != default_options_.end(); option_it++) {
+      LOG(INFO) << "\t" << option_it->first << ":" << std::dec << GetPassOption(option_it->first, overridden_options);
+    }
+  }
+
+  /**
+   * @brief Used to obtain the option for a pass.
+   * @details Will return the overridden option if it exists or default one.
+   * @param option_name The name of option whose setting to look for.
+   * @param c_unit The compilation unit currently being handled.
+   * @return Returns the setting for the pass option.
+   */
+  int GetPassOption(const char* option_name, CompilationUnit* c_unit) const {
+    return GetPassOption(option_name, c_unit->overridden_pass_options);
+  }
+
   const char* GetDumpCFGFolder() const {
     return dump_cfg_folder_;
   }
@@ -95,6 +137,25 @@ class PassME: public Pass {
   }
 
  protected:
+  int GetPassOption(const char* option_name, const SafeMap<const std::string, int>& overridden_options) const {
+    // First check if there are any overridden settings.
+    auto overridden_it = overridden_options.find(std::string(option_name));
+    if (overridden_it != overridden_options.end()) {
+      return overridden_it->second;
+    }
+
+    // Next check the default options.
+    auto default_it = default_options_.find(option_name);
+
+    if (default_it == default_options_.end()) {
+      // An invalid option is being requested.
+      DCHECK(false);
+      return 0;
+    }
+
+    return default_it->second;
+  }
+
   /** @brief Type of traversal: determines the order to execute the pass on the BasicBlocks. */
   const DataFlowAnalysisMode traversal_type_;
 
@@ -103,6 +164,13 @@ class PassME: public Pass {
 
   /** @brief CFG Dump Folder: what sub-folder to use for dumping the CFGs post pass. */
   const char* const dump_cfg_folder_;
+
+  /**
+   * @brief Contains a map of options with the default settings.
+   * @details The constructor of the specific pass instance should fill this
+   * with default options.
+   */
+  SafeMap<const char*, int> default_options_;
 };
 }  // namespace art
 #endif  // ART_COMPILER_DEX_PASS_ME_H_
index fd67608..b0b0606 100644 (file)
@@ -140,11 +140,11 @@ void MirConverter::InitIR() {
   return GetLLVMBlock(bb->id);
 }
 
-void MirConverter::ConvertPackedSwitch(BasicBlock* bb,
+void MirConverter::ConvertPackedSwitch(BasicBlock* bb, MIR* mir,
                                 int32_t table_offset, RegLocation rl_src) {
   const Instruction::PackedSwitchPayload* payload =
       reinterpret_cast<const Instruction::PackedSwitchPayload*>(
-      cu_->insns + current_dalvik_offset_ + table_offset);
+      mir_graph_->GetTable(mir, table_offset));
 
   ::llvm::Value* value = GetLLVMValue(rl_src.orig_sreg);
 
@@ -164,11 +164,11 @@ void MirConverter::ConvertPackedSwitch(BasicBlock* bb,
   bb->fall_through = NullBasicBlockId;
 }
 
-void MirConverter::ConvertSparseSwitch(BasicBlock* bb,
+void MirConverter::ConvertSparseSwitch(BasicBlock* bb, MIR* mir,
                                 int32_t table_offset, RegLocation rl_src) {
   const Instruction::SparseSwitchPayload* payload =
       reinterpret_cast<const Instruction::SparseSwitchPayload*>(
-      cu_->insns + current_dalvik_offset_ + table_offset);
+      mir_graph_->GetTable(mir, table_offset));
 
   const int32_t* keys = payload->GetKeys();
   const int32_t* targets = payload->GetTargets();
@@ -1536,9 +1536,9 @@ void MirConverter::SetMethodInfo() {
   ::llvm::Function* intr = intrinsic_helper_->GetIntrinsicFunction(id);
   ::llvm::Instruction* inst = irb_->CreateCall(intr);
   ::llvm::SmallVector< ::llvm::Value*, 2> reg_info;
-  reg_info.push_back(irb_->getInt32(cu_->num_ins));
-  reg_info.push_back(irb_->getInt32(cu_->num_regs));
-  reg_info.push_back(irb_->getInt32(cu_->num_outs));
+  reg_info.push_back(irb_->getInt32(mir_graph_->GetNumOfInVRs()));
+  reg_info.push_back(irb_->getInt32(mir_graph_->GetNumOfLocalCodeVRs()));
+  reg_info.push_back(irb_->getInt32(mir_graph_->GetNumOfOutVRs()));
   reg_info.push_back(irb_->getInt32(mir_graph_->GetNumUsedCompilerTemps()));
   reg_info.push_back(irb_->getInt32(mir_graph_->GetNumSSARegs()));
   ::llvm::MDNode* reg_info_node = ::llvm::MDNode::get(*context_, reg_info);
@@ -1669,12 +1669,12 @@ bool MirConverter::BlockBitcodeConversion(BasicBlock* bb) {
       art::llvm::IntrinsicHelper::IntrinsicId id =
               art::llvm::IntrinsicHelper::AllocaShadowFrame;
       ::llvm::Function* func = intrinsic_helper_->GetIntrinsicFunction(id);
-      ::llvm::Value* entries = irb_->getInt32(cu_->num_dalvik_registers);
+      ::llvm::Value* entries = irb_->getInt32(mir_graph_->GetNumOfCodeVRs());
       irb_->CreateCall(func, entries);
     }
 
     {  // Store arguments to vregs.
-      uint16_t arg_reg = cu_->num_regs;
+      uint16_t arg_reg = mir_graph_->GetFirstInVR();
 
       ::llvm::Function::arg_iterator arg_iter(func_->arg_begin());
 
@@ -1843,7 +1843,7 @@ bool MirConverter::CreateFunction() {
   arg_iter->setName("method");
   ++arg_iter;
 
-  int start_sreg = cu_->num_regs;
+  int start_sreg = mir_graph_->GetFirstInVR();
 
   for (unsigned i = 0; arg_iter != arg_end; ++i, ++arg_iter) {
     arg_iter->setName(StringPrintf("v%i_0", start_sreg));
@@ -1909,8 +1909,8 @@ void MirConverter::MethodMIR2Bitcode() {
     RegLocation rl_temp = mir_graph_->reg_location_[i];
     if ((mir_graph_->SRegToVReg(i) < 0) || rl_temp.high_word) {
       llvm_values_.Insert(0);
-    } else if ((i < cu_->num_regs) ||
-               (i >= (cu_->num_regs + cu_->num_ins))) {
+    } else if ((i < mir_graph_->GetFirstInVR()) ||
+               (i >= (mir_graph_->GetFirstTempVR()))) {
       ::llvm::Constant* imm_value = mir_graph_->reg_location_[i].wide ?
          irb_->getJLong(0) : irb_->getJInt(0);
       val = EmitConst(imm_value, mir_graph_->reg_location_[i]);
index e97634c..e6dee5d 100644 (file)
 #include "llvm/llvm_compilation_unit.h"
 #include "safe_map.h"
 
+namespace llvm {
+  class Module;
+  class LLVMContext;
+}
+
 namespace art {
 
+namespace llvm {
+  class IntrinsicHelper;
+  class IRBuilder;
+}
+
+class LLVMInfo {
+  public:
+    LLVMInfo();
+    ~LLVMInfo();
+
+    ::llvm::LLVMContext* GetLLVMContext() {
+      return llvm_context_.get();
+    }
+
+    ::llvm::Module* GetLLVMModule() {
+      return llvm_module_;
+    }
+
+    art::llvm::IntrinsicHelper* GetIntrinsicHelper() {
+      return intrinsic_helper_.get();
+    }
+
+    art::llvm::IRBuilder* GetIRBuilder() {
+      return ir_builder_.get();
+    }
+
+  private:
+    std::unique_ptr< ::llvm::LLVMContext> llvm_context_;
+    ::llvm::Module* llvm_module_;  // Managed by context_.
+    std::unique_ptr<art::llvm::IntrinsicHelper> intrinsic_helper_;
+    std::unique_ptr<art::llvm::IRBuilder> ir_builder_;
+};
+
 struct BasicBlock;
 struct CallInfo;
 struct CompilationUnit;
@@ -91,9 +129,9 @@ class MirConverter : public Backend {
     ::llvm::Type* LlvmTypeFromLocRec(RegLocation loc);
     void InitIR();
     ::llvm::BasicBlock* FindCaseTarget(uint32_t vaddr);
-    void ConvertPackedSwitch(BasicBlock* bb, int32_t table_offset,
+    void ConvertPackedSwitch(BasicBlock* bb, MIR* mir, int32_t table_offset,
                              RegLocation rl_src);
-    void ConvertSparseSwitch(BasicBlock* bb, int32_t table_offset,
+    void ConvertSparseSwitch(BasicBlock* bb, MIR* mir, int32_t table_offset,
                              RegLocation rl_src);
     void ConvertSget(int32_t field_index,
                      art::llvm::IntrinsicHelper::IntrinsicId id, RegLocation rl_dest);
index 1371652..b3d5c8a 100644 (file)
@@ -36,9 +36,9 @@ bool MethodUseCount::Gate(const PassDataHolder* data) const {
   return res;
 }
 
-bool MethodUseCount::Worker(const PassDataHolder* data) const {
+bool MethodUseCount::Worker(PassDataHolder* data) const {
   DCHECK(data != nullptr);
-  const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
   CompilationUnit* c_unit = pass_me_data_holder->c_unit;
   DCHECK(c_unit != nullptr);
   BasicBlock* bb = pass_me_data_holder->bb;
@@ -49,9 +49,9 @@ bool MethodUseCount::Worker(const PassDataHolder* data) const {
 }
 
 
-bool ClearPhiInstructions::Worker(const PassDataHolder* data) const {
+bool ClearPhiInstructions::Worker(PassDataHolder* data) const {
   DCHECK(data != nullptr);
-  const PassMEDataHolder* pass_me_data_holder = down_cast<const PassMEDataHolder*>(data);
+  PassMEDataHolder* pass_me_data_holder = down_cast<PassMEDataHolder*>(data);
   CompilationUnit* c_unit = pass_me_data_holder->c_unit;
   DCHECK(c_unit != nullptr);
   BasicBlock* bb = pass_me_data_holder->bb;
index a1b0df4..e7805ba 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef ART_COMPILER_DEX_POST_OPT_PASSES_H_
 #define ART_COMPILER_DEX_POST_OPT_PASSES_H_
 
+#include "dex/quick/mir_to_lir.h"
 #include "compiler_internals.h"
 #include "pass_me.h"
 
@@ -52,7 +53,7 @@ class MethodUseCount : public PassME {
   MethodUseCount() : PassME("UseCount") {
   }
 
-  bool Worker(const PassDataHolder* data) const;
+  bool Worker(PassDataHolder* data) const;
 
   bool Gate(const PassDataHolder* data) const;
 };
@@ -66,7 +67,7 @@ class ClearPhiInstructions : public PassME {
   ClearPhiInstructions() : PassME("ClearPhiInstructions") {
   }
 
-  bool Worker(const PassDataHolder* data) const;
+  bool Worker(PassDataHolder* data) const;
 };
 
 /**
@@ -222,11 +223,11 @@ class PhiNodeOperands : public PassME {
   PhiNodeOperands() : PassME("PhiNodeOperands", kPreOrderDFSTraversal) {
   }
 
-  bool Worker(const PassDataHolder* data) const {
+  bool Worker(PassDataHolder* data) const {
     DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
     DCHECK(c_unit != nullptr);
-    BasicBlock* bb = down_cast<const PassMEDataHolder*>(data)->bb;
+    BasicBlock* bb = down_cast<PassMEDataHolder*>(data)->bb;
     DCHECK(bb != nullptr);
     c_unit->mir_graph->InsertPhiNodeOperands(bb);
     // No need of repeating, so just return false.
@@ -260,11 +261,11 @@ class ConstantPropagation : public PassME {
   ConstantPropagation() : PassME("ConstantPropagation") {
   }
 
-  bool Worker(const PassDataHolder* data) const {
+  bool Worker(PassDataHolder* data) const {
     DCHECK(data != nullptr);
-    CompilationUnit* c_unit = down_cast<const PassMEDataHolder*>(data)->c_unit;
+    CompilationUnit* c_unit = down_cast<PassMEDataHolder*>(data)->c_unit;
     DCHECK(c_unit != nullptr);
-    BasicBlock* bb = down_cast<const PassMEDataHolder*>(data)->bb;
+    BasicBlock* bb = down_cast<PassMEDataHolder*>(data)->bb;
     DCHECK(bb != nullptr);
     c_unit->mir_graph->DoConstantPropagation(bb);
     // No need of repeating, so just return false.
diff --git a/compiler/dex/quick/arm/backend_arm.h b/compiler/dex/quick/arm/backend_arm.h
new file mode 100644 (file)
index 0000000..42a9bca
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_ARM_BACKEND_ARM_H_
+#define ART_COMPILER_DEX_QUICK_ARM_BACKEND_ARM_H_
+
+namespace art {
+
+struct CompilationUnit;
+class Mir2Lir;
+class MIRGraph;
+class ArenaAllocator;
+
+Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_ARM_BACKEND_ARM_H_
index 4ba3c4b..fc98d31 100644 (file)
@@ -44,7 +44,7 @@ namespace art {
  *   cbnz  r_idx, lp
  */
 void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
@@ -92,7 +92,7 @@ void ArmMir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocati
 
 
 void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
@@ -147,8 +147,8 @@ void ArmMir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocati
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void ArmMir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+void ArmMir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
       static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
index cd6c9cc..84881e0 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "arm_lir.h"
 #include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir.h"
 
 namespace art {
 
@@ -116,7 +117,7 @@ class ArmMir2Lir FINAL : public Mir2Lir {
     void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
     void GenExitSequence();
     void GenSpecialExitSequence();
-    void GenFillArrayData(DexOffset table_offset, RegLocation rl_src);
+    void GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src);
     void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
     void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
     void GenSelect(BasicBlock* bb, MIR* mir);
index 0509ad3..0be478d 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <string>
 
+#include "backend_arm.h"
 #include "dex/compiler_internals.h"
 #include "dex/quick/mir_to_lir-inl.h"
 
index a449cbd..d001dd6 100644 (file)
@@ -116,6 +116,7 @@ enum Arm64ResourceEncodingPos {
 #define IS_SIGNED_IMM7(value) IS_SIGNED_IMM(7, value)
 #define IS_SIGNED_IMM9(value) IS_SIGNED_IMM(9, value)
 #define IS_SIGNED_IMM12(value) IS_SIGNED_IMM(12, value)
+#define IS_SIGNED_IMM14(value) IS_SIGNED_IMM(14, value)
 #define IS_SIGNED_IMM19(value) IS_SIGNED_IMM(19, value)
 #define IS_SIGNED_IMM21(value) IS_SIGNED_IMM(21, value)
 
@@ -355,7 +356,10 @@ enum ArmOpcode {
   kA64Sub4rrro,      // sub [s1001011000] rm[20-16] imm_6[15-10] rn[9-5] rd[4-0].
   kA64Sub4RRre,      // sub [s1001011001] rm[20-16] option[15-13] imm_3[12-10] rn[9-5] rd[4-0].
   kA64Subs3rRd,      // subs[s111000100] imm_12[21-10] rn[9-5] rd[4-0].
+  kA64Tst2rl,        // tst alias of "ands rzr, rn, #imm".
   kA64Tst3rro,       // tst alias of "ands rzr, arg1, arg2, arg3".
+  kA64Tbnz3rht,      // tbnz imm_6_b5[31] [0110111] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
+  kA64Tbz3rht,       // tbz imm_6_b5[31] [0110110] imm_6_b40[23-19] imm_14[18-5] rt[4-0].
   kA64Ubfm4rrdd,     // ubfm[s10100110] N[22] imm_r[21-16] imm_s[15-10] rn[9-5] rd[4-0].
   kA64Last,
   kA64NotWide = 0,   // Flag used to select the first instruction variant.
@@ -400,23 +404,24 @@ enum ArmOpDmbOptions {
 enum ArmEncodingKind {
   // All the formats below are encoded in the same way (as a kFmtBitBlt).
   // These are grouped together, for fast handling (e.g. "if (LIKELY(fmt <= kFmtBitBlt)) ...").
-  kFmtRegW = 0,  // Word register (w) or wzr.
-  kFmtRegX,      // Extended word register (x) or xzr.
-  kFmtRegR,      // Register with same width as the instruction or zr.
-  kFmtRegWOrSp,  // Word register (w) or wsp.
-  kFmtRegXOrSp,  // Extended word register (x) or sp.
-  kFmtRegROrSp,  // Register with same width as the instruction or sp.
-  kFmtRegS,      // Single FP reg.
-  kFmtRegD,      // Double FP reg.
-  kFmtRegF,      // Single/double FP reg depending on the instruction width.
-  kFmtBitBlt,    // Bit string using end/start.
+  kFmtRegW = 0,   // Word register (w) or wzr.
+  kFmtRegX,       // Extended word register (x) or xzr.
+  kFmtRegR,       // Register with same width as the instruction or zr.
+  kFmtRegWOrSp,   // Word register (w) or wsp.
+  kFmtRegXOrSp,   // Extended word register (x) or sp.
+  kFmtRegROrSp,   // Register with same width as the instruction or sp.
+  kFmtRegS,       // Single FP reg.
+  kFmtRegD,       // Double FP reg.
+  kFmtRegF,       // Single/double FP reg depending on the instruction width.
+  kFmtBitBlt,     // Bit string using end/start.
 
   // Less likely formats.
-  kFmtUnused,    // Unused field and marks end of formats.
-  kFmtImm21,     // Sign-extended immediate using [23..5,30..29].
-  kFmtShift,     // Register shift, 9-bit at [23..21, 15..10]..
-  kFmtExtend,    // Register extend, 9-bit at [23..21, 15..10].
-  kFmtSkip,      // Unused field, but continue to next.
+  kFmtUnused,     // Unused field and marks end of formats.
+  kFmtImm6Shift,  // Shift immediate, 6-bit at [31, 23..19].
+  kFmtImm21,      // Sign-extended immediate using [23..5,30..29].
+  kFmtShift,      // Register shift, 9-bit at [23..21, 15..10].
+  kFmtExtend,     // Register extend, 9-bit at [23..21, 15..10].
+  kFmtSkip,       // Unused field, but continue to next.
 };
 
 // Struct used to define the snippet positions for each A64 opcode.
index 15c89f2..5115246 100644 (file)
@@ -89,6 +89,7 @@ namespace art {
  *     M -> 16-bit shift expression ("" or ", lsl #16" or ", lsl #32"...)
  *     B -> dmb option string (sy, st, ish, ishst, nsh, hshst)
  *     H -> operand shift
+ *     h -> 6-bit shift immediate
  *     T -> register shift (either ", lsl #0" or ", lsl #12")
  *     e -> register extend (e.g. uxtb #1)
  *     o -> register shift (e.g. lsl #1) for Word registers
@@ -614,10 +615,24 @@ const ArmEncodingMap Arm64Mir2Lir::EncodingMap[kA64Last] = {
                  kFmtRegR, 4, 0, kFmtRegROrSp, 9, 5, kFmtBitBlt, 21, 10,
                  kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_DEF0_USE1 | SETS_CCODES,
                  "subs", "!0r, !1R, #!2d", kFixupNone),
-    ENCODING_MAP(WIDE(kA64Tst3rro), SF_VARIANTS(0x6a000000),
+    ENCODING_MAP(WIDE(kA64Tst2rl), SF_VARIANTS(0x7200001f),
+                 kFmtRegR, 9, 5, kFmtBitBlt, 22, 10, kFmtUnused, -1, -1,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE0 | SETS_CCODES,
+                 "tst", "!0r, !1l", kFixupNone),
+    ENCODING_MAP(WIDE(kA64Tst3rro), SF_VARIANTS(0x6a00001f),
                  kFmtRegR, 9, 5, kFmtRegR, 20, 16, kFmtShift, -1, -1,
-                 kFmtUnused, -1, -1, IS_QUAD_OP | REG_USE01 | SETS_CCODES,
+                 kFmtUnused, -1, -1, IS_TERTIARY_OP | REG_USE01 | SETS_CCODES,
                  "tst", "!0r, !1r!2o", kFixupNone),
+    // NOTE: Tbz/Tbnz does not require SETS_CCODES, but it may be replaced by some other LIRs
+    // which require SETS_CCODES in the fix-up stage.
+    ENCODING_MAP(WIDE(kA64Tbnz3rht), CUSTOM_VARIANTS(0x37000000, 0x37000000),
+                 kFmtRegR, 4, 0, kFmtImm6Shift, -1, -1, kFmtBitBlt, 18, 5, kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP | SETS_CCODES,
+                 "tbnz", "!0r, #!1h, !2t", kFixupTBxZ),
+    ENCODING_MAP(WIDE(kA64Tbz3rht), CUSTOM_VARIANTS(0x36000000, 0x36000000),
+                 kFmtRegR, 4, 0, kFmtImm6Shift, -1, -1, kFmtBitBlt, 18, 5, kFmtUnused, -1, -1,
+                 IS_TERTIARY_OP | REG_USE0 | IS_BRANCH | NEEDS_FIXUP | SETS_CCODES,
+                 "tbz", "!0r, #!1h, !2t", kFixupTBxZ),
     ENCODING_MAP(WIDE(kA64Ubfm4rrdd), SF_N_VARIANTS(0x53000000),
                  kFmtRegR, 4, 0, kFmtRegR, 9, 5, kFmtBitBlt, 21, 16,
                  kFmtBitBlt, 15, 10, IS_QUAD_OP | REG_DEF0_USE1,
@@ -787,6 +802,11 @@ uint8_t* Arm64Mir2Lir::EncodeLIRs(uint8_t* write_pos, LIR* lir) {
               value |= ((operand & 0x1ffffc) >> 2) << 5;
               bits |= value;
               break;
+            case kFmtImm6Shift:
+              value = (operand & 0x1f) << 19;
+              value |= ((operand & 0x20) >> 5) << 31;
+              bits |= value;
+              break;
             default:
               LOG(FATAL) << "Bad fmt for arg. " << i << " in " << encoder->name
                          << " (" << kind << ")";
@@ -827,11 +847,6 @@ void Arm64Mir2Lir::AssembleLIR() {
    */
   int generation = 0;
   while (true) {
-    // TODO(Arm64): check whether passes and offset adjustments are really necessary.
-    //   Currently they aren't, as - in the fixups below - LIR are never inserted.
-    //   Things can be different if jump ranges above 1 MB need to be supported.
-    //   If they are not, then we can get rid of the assembler retry logic.
-
     offset_adjustment = 0;
     AssemblerStatus res = kSuccess;  // Assume success
     generation ^= 1;
@@ -839,13 +854,9 @@ void Arm64Mir2Lir::AssembleLIR() {
     lir = first_fixup_;
     prev_lir = NULL;
     while (lir != NULL) {
-      /*
-       * NOTE: the lir being considered here will be encoded following the switch (so long as
-       * we're not in a retry situation).  However, any new non-pc_rel instructions inserted
-       * due to retry must be explicitly encoded at the time of insertion.  Note that
-       * inserted instructions don't need use/def flags, but do need size and pc-rel status
-       * properly updated.
-       */
+      // NOTE: Any new non-pc_rel instructions inserted due to retry must be explicitly encoded at
+      // the time of insertion.  Note that inserted instructions don't need use/def flags, but do
+      // need size and pc-rel status properly updated.
       lir->offset += offset_adjustment;
       // During pass, allows us to tell whether a node has been updated with offset_adjustment yet.
       lir->flags.generation = generation;
@@ -861,7 +872,8 @@ void Arm64Mir2Lir::AssembleLIR() {
           CodeOffset target = target_lir->offset +
               ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
           int32_t delta = target - pc;
-          if (!((delta & 0x3) == 0 && IS_SIGNED_IMM19(delta >> 2))) {
+          DCHECK_EQ(delta & 0x3, 0);
+          if (!IS_SIGNED_IMM19(delta >> 2)) {
             LOG(FATAL) << "Invalid jump range in kFixupT1Branch";
           }
           lir->operands[0] = delta >> 2;
@@ -876,12 +888,75 @@ void Arm64Mir2Lir::AssembleLIR() {
           CodeOffset target = target_lir->offset +
             ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
           int32_t delta = target - pc;
-          if (!((delta & 0x3) == 0 && IS_SIGNED_IMM19(delta >> 2))) {
+          DCHECK_EQ(delta & 0x3, 0);
+          if (!IS_SIGNED_IMM19(delta >> 2)) {
             LOG(FATAL) << "Invalid jump range in kFixupLoad";
           }
           lir->operands[1] = delta >> 2;
           break;
         }
+        case kFixupTBxZ: {
+          int16_t opcode = lir->opcode;
+          RegStorage reg(lir->operands[0] | RegStorage::kValid);
+          int32_t imm = lir->operands[1];
+          DCHECK_EQ(IS_WIDE(opcode), reg.Is64Bit());
+          DCHECK_LT(imm, 64);
+          if (imm >= 32) {
+            DCHECK(IS_WIDE(opcode));
+          } else if (kIsDebugBuild && IS_WIDE(opcode)) {
+            // "tbz/tbnz x0, #imm(<32)" is the same as "tbz/tbnz w0, #imm(<32)", but GCC/oatdump
+            // will disassemble it as "tbz/tbnz w0, #imm(<32)". So unwide the LIR to make the
+            // compiler log behave the same as those disassemblers in debug builds.
+            // This will also affect the tst instruction if it needs to be replaced, but there is
+            // no performance difference between "tst Xt" and "tst Wt".
+            lir->opcode = UNWIDE(opcode);
+            lir->operands[0] = As32BitReg(reg).GetReg();
+          }
+
+          // Fix-up branch offset.
+          LIR *target_lir = lir->target;
+          DCHECK(target_lir);
+          CodeOffset pc = lir->offset;
+          CodeOffset target = target_lir->offset +
+              ((target_lir->flags.generation == lir->flags.generation) ? 0 : offset_adjustment);
+          int32_t delta = target - pc;
+          DCHECK_EQ(delta & 0x3, 0);
+          // Check if branch offset can be encoded in tbz/tbnz.
+          if (!IS_SIGNED_IMM14(delta >> 2)) {
+            DexOffset dalvik_offset = lir->dalvik_offset;
+            int16_t opcode = lir->opcode;
+            LIR* target = lir->target;
+            // "tbz/tbnz Rt, #imm, label" -> "tst Rt, #(1<<imm)".
+            offset_adjustment -= lir->flags.size;
+            int32_t imm = EncodeLogicalImmediate(IS_WIDE(opcode), 1 << lir->operands[1]);
+            DCHECK_NE(imm, -1);
+            lir->opcode = IS_WIDE(opcode) ? WIDE(kA64Tst2rl) : kA64Tst2rl;
+            lir->operands[1] = imm;
+            lir->target = nullptr;
+            lir->flags.fixup = EncodingMap[kA64Tst2rl].fixup;
+            lir->flags.size = EncodingMap[kA64Tst2rl].size;
+            offset_adjustment += lir->flags.size;
+            // Insert "beq/bne label".
+            opcode = UNWIDE(opcode);
+            DCHECK(opcode == kA64Tbz3rht || opcode == kA64Tbnz3rht);
+            LIR* new_lir = RawLIR(dalvik_offset, kA64B2ct,
+                opcode == kA64Tbz3rht ? kArmCondEq : kArmCondNe, 0, 0, 0, 0, target);
+            InsertLIRAfter(lir, new_lir);
+            new_lir->offset = lir->offset + lir->flags.size;
+            new_lir->flags.generation = generation;
+            new_lir->flags.fixup = EncodingMap[kA64B2ct].fixup;
+            new_lir->flags.size = EncodingMap[kA64B2ct].size;
+            offset_adjustment += new_lir->flags.size;
+            // lir is no longer pc-relative; unlink it and link in new_lir instead.
+            ReplaceFixup(prev_lir, lir, new_lir);
+            prev_lir = new_lir;  // Continue with the new instruction.
+            lir = new_lir->u.a.pcrel_next;
+            res = kRetryAll;
+            continue;
+          }
+          lir->operands[2] = delta >> 2;
+          break;
+        }
         case kFixupAdr: {
           LIR* target_lir = lir->target;
           int32_t delta;
@@ -910,6 +985,7 @@ void Arm64Mir2Lir::AssembleLIR() {
     }
 
     if (res == kSuccess) {
+      DCHECK_EQ(offset_adjustment, 0);
       break;
     } else {
       assembler_retries++;
diff --git a/compiler/dex/quick/arm64/backend_arm64.h b/compiler/dex/quick/arm64/backend_arm64.h
new file mode 100644 (file)
index 0000000..53650c4
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_ARM64_BACKEND_ARM64_H_
+#define ART_COMPILER_DEX_QUICK_ARM64_BACKEND_ARM64_H_
+
+namespace art {
+
+struct CompilationUnit;
+class Mir2Lir;
+class MIRGraph;
+class ArenaAllocator;
+
+Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                            ArenaAllocator* const arena);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_ARM64_BACKEND_ARM64_H_
index eddc3a3..b9c0990 100644 (file)
@@ -44,7 +44,7 @@ namespace art {
  * quit:
  */
 void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
@@ -96,7 +96,7 @@ void Arm64Mir2Lir::GenLargeSparseSwitch(MIR* mir, uint32_t table_offset, RegLoca
 
 
 void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
@@ -156,8 +156,8 @@ void Arm64Mir2Lir::GenLargePackedSwitch(MIR* mir, uint32_t table_offset, RegLoca
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void Arm64Mir2Lir::GenFillArrayData(uint32_t table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+void Arm64Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   // Add the table to the list - we'll process it later
   FillArrayData *tab_rec =
       static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
index 3e1c18b..1c40292 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "arm64_lir.h"
 #include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir.h"
 
 #include <map>
 
@@ -167,6 +168,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
   bool GenInlinedRound(CallInfo* info, bool is_double) OVERRIDE;
   bool GenInlinedPeek(CallInfo* info, OpSize size) OVERRIDE;
   bool GenInlinedPoke(CallInfo* info, OpSize size) OVERRIDE;
+  bool GenInlinedAbsInt(CallInfo* info) OVERRIDE;
   bool GenInlinedAbsLong(CallInfo* info) OVERRIDE;
   bool GenInlinedArrayCopyCharArray(CallInfo* info) OVERRIDE;
   void GenIntToLong(RegLocation rl_dest, RegLocation rl_src) OVERRIDE;
@@ -181,7 +183,7 @@ class Arm64Mir2Lir FINAL : public Mir2Lir {
   void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
   void GenExitSequence() OVERRIDE;
   void GenSpecialExitSequence() OVERRIDE;
-  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
   void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
   void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
   void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
index d0b2636..5d63dd0 100644 (file)
@@ -353,7 +353,8 @@ bool Arm64Mir2Lir::GenInlinedAbsFloat(CallInfo* info) {
   if (reg_class == kFPReg) {
     NewLIR2(kA64Fabs2ff, rl_result.reg.GetReg(), rl_src.reg.GetReg());
   } else {
-    NewLIR4(kA64Ubfm4rrdd, rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 30);
+    // Clear the sign bit in an integer register.
+    OpRegRegImm(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffff);
   }
   StoreValue(rl_dest, rl_result);
   return true;
@@ -371,7 +372,8 @@ bool Arm64Mir2Lir::GenInlinedAbsDouble(CallInfo* info) {
   if (reg_class == kFPReg) {
     NewLIR2(FWIDE(kA64Fabs2ff), rl_result.reg.GetReg(), rl_src.reg.GetReg());
   } else {
-    NewLIR4(WIDE(kA64Ubfm4rrdd), rl_result.reg.GetReg(), rl_src.reg.GetReg(), 0, 62);
+    // Clear the sign bit in an integer register.
+    OpRegRegImm64(kOpAnd, rl_result.reg, rl_src.reg, 0x7fffffffffffffff);
   }
   StoreValueWide(rl_dest, rl_result);
   return true;
index d00c57d..1777e98 100644 (file)
@@ -271,8 +271,12 @@ LIR* Arm64Mir2Lir::OpCmpImmBranch(ConditionCode cond, RegStorage reg, int check_
       ArmOpcode opcode = kA64Cbz2rt;
       ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
       branch = NewLIR2(opcode | wide, reg.GetReg(), 0);
+    } else if (arm_cond == kArmCondLt || arm_cond == kArmCondGe) {
+      ArmOpcode opcode = (arm_cond == kArmCondLt) ? kA64Tbnz3rht : kA64Tbz3rht;
+      ArmOpcode wide = reg.Is64Bit() ? WIDE(0) : UNWIDE(0);
+      int value = reg.Is64Bit() ? 63 : 31;
+      branch = NewLIR3(opcode | wide, reg.GetReg(), value, 0);
     }
-    // TODO: Use tbz/tbnz for < 0 or >= 0.
   }
 
   if (branch == nullptr) {
@@ -642,16 +646,32 @@ RegLocation Arm64Mir2Lir::GenDivRem(RegLocation rl_dest, RegStorage r_src1, RegS
   return rl_result;
 }
 
+bool Arm64Mir2Lir::GenInlinedAbsInt(CallInfo* info) {
+  RegLocation rl_src = info->args[0];
+  rl_src = LoadValue(rl_src, kCoreReg);
+  RegLocation rl_dest = InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+  // Compare the source value with zero. Write the negated value to the result if
+  // negative, otherwise write the original value.
+  OpRegImm(kOpCmp, rl_src.reg, 0);
+  NewLIR4(kA64Csneg4rrrc, rl_result.reg.GetReg(), rl_src.reg.GetReg(), rl_src.reg.GetReg(),
+          kArmCondPl);
+  StoreValue(rl_dest, rl_result);
+  return true;
+}
+
 bool Arm64Mir2Lir::GenInlinedAbsLong(CallInfo* info) {
   RegLocation rl_src = info->args[0];
   rl_src = LoadValueWide(rl_src, kCoreReg);
   RegLocation rl_dest = InlineTargetWide(info);
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegStorage sign_reg = AllocTempWide();
-  // abs(x) = y<=x>>63, (x+y)^y.
-  OpRegRegImm(kOpAsr, sign_reg, rl_src.reg, 63);
-  OpRegRegReg(kOpAdd, rl_result.reg, rl_src.reg, sign_reg);
-  OpRegReg(kOpXor, rl_result.reg, sign_reg);
+
+  // Compare the source value with zero. Write the negated value to the result if
+  // negative, otherwise write the original value.
+  OpRegImm(kOpCmp, rl_src.reg, 0);
+  NewLIR4(WIDE(kA64Csneg4rrrc), rl_result.reg.GetReg(), rl_src.reg.GetReg(),
+          rl_src.reg.GetReg(), kArmCondPl);
   StoreValueWide(rl_dest, rl_result);
   return true;
 }
@@ -856,16 +876,14 @@ bool Arm64Mir2Lir::GenInlinedArrayCopyCharArray(CallInfo* info) {
   OpRegRegImm(kOpLsl, rs_length, rs_length, 1);
 
   // Copy one element.
-  OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 2);
-  LIR* jmp_to_copy_two = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+  LIR* jmp_to_copy_two = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 1, 0);
   OpRegImm(kOpSub, rs_length, 2);
   LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, kSignedHalf);
   StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, kSignedHalf);
 
   // Copy two elements.
   LIR *copy_two = NewLIR0(kPseudoTargetLabel);
-  OpRegRegImm(kOpAnd, rs_tmp, As32BitReg(rs_length), 4);
-  LIR* jmp_to_copy_four = OpCmpImmBranch(kCondEq, rs_tmp, 0, nullptr);
+  LIR* jmp_to_copy_four = NewLIR3(WIDE(kA64Tbz3rht), rs_length.GetReg(), 2, 0);
   OpRegImm(kOpSub, rs_length, 4);
   LoadBaseIndexed(rs_src, rs_length, rs_tmp, 0, k32);
   StoreBaseIndexed(rs_dst, rs_length, rs_tmp, 0, k32);
@@ -1686,13 +1704,13 @@ void Arm64Mir2Lir::UnspillRegs(RegStorage base, uint32_t core_reg_mask, uint32_t
 }
 
 bool Arm64Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
-  ArmOpcode wide = (size == k64) ? WIDE(0) : UNWIDE(0);
+  ArmOpcode wide = IsWide(size) ? WIDE(0) : UNWIDE(0);
   RegLocation rl_src_i = info->args[0];
-  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
+  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
+  RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
   NewLIR2(kA64Rbit2rr | wide, rl_result.reg.GetReg(), rl_i.reg.GetReg());
-  (size == k64) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
+  IsWide(size) ? StoreValueWide(rl_dest, rl_result) : StoreValue(rl_dest, rl_result);
   return true;
 }
 
index 9b4546a..d7d5651 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <string>
 
+#include "backend_arm64.h"
 #include "dex/compiler_internals.h"
 #include "dex/quick/mir_to_lir-inl.h"
 #include "dex/reg_storage_eq.h"
@@ -248,19 +249,22 @@ static void DecodeRegExtendOrShift(int operand, char *buf, size_t buf_size) {
   }
 }
 
-#define BIT_MASK(w) ((UINT64_C(1) << (w)) - UINT64_C(1))
+static uint64_t bit_mask(unsigned width) {
+  DCHECK_LE(width, 64U);
+  return (width == 64) ? static_cast<uint64_t>(-1) : ((UINT64_C(1) << (width)) - UINT64_C(1));
+}
 
 static uint64_t RotateRight(uint64_t value, unsigned rotate, unsigned width) {
   DCHECK_LE(width, 64U);
   rotate &= 63;
-  value = value & BIT_MASK(width);
-  return ((value & BIT_MASK(rotate)) << (width - rotate)) | (value >> rotate);
+  value = value & bit_mask(width);
+  return ((value & bit_mask(rotate)) << (width - rotate)) | (value >> rotate);
 }
 
 static uint64_t RepeatBitsAcrossReg(bool is_wide, uint64_t value, unsigned width) {
   unsigned i;
   unsigned reg_size = (is_wide) ? 64 : 32;
-  uint64_t result = value & BIT_MASK(width);
+  uint64_t result = value & bit_mask(width);
   for (i = width; i < reg_size; i *= 2) {
     result |= (result << i);
   }
@@ -299,7 +303,7 @@ uint64_t Arm64Mir2Lir::DecodeLogicalImmediate(bool is_wide, int value) {
 
   if (n == 1) {
     DCHECK_NE(imm_s, 0x3fU);
-    uint64_t bits = BIT_MASK(imm_s + 1);
+    uint64_t bits = bit_mask(imm_s + 1);
     return RotateRight(bits, imm_r, 64);
   } else {
     DCHECK_NE((imm_s >> 1), 0x1fU);
@@ -307,7 +311,7 @@ uint64_t Arm64Mir2Lir::DecodeLogicalImmediate(bool is_wide, int value) {
       if ((imm_s & width) == 0) {
         unsigned mask = (unsigned)(width - 1);
         DCHECK_NE((imm_s & mask), mask);
-        uint64_t bits = BIT_MASK((imm_s & mask) + 1);
+        uint64_t bits = bit_mask((imm_s & mask) + 1);
         return RepeatBitsAcrossReg(is_wide, RotateRight(bits, imm_r & mask, width), width);
       }
     }
@@ -504,6 +508,9 @@ std::string Arm64Mir2Lir::BuildInsnString(const char* fmt, LIR* lir, unsigned ch
              else
                strcpy(tbuf, ", DecodeError3");
              break;
+           case 'h':
+             snprintf(tbuf, arraysize(tbuf), "%d", operand);
+             break;
            default:
              strcpy(tbuf, "DecodeError1");
              break;
@@ -889,11 +896,11 @@ static RegStorage GetArgPhysicalReg(RegLocation* loc, int* num_gpr_used, int* nu
 
 RegStorage Arm64Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
   if (!in_to_reg_storage_mapping_.IsInitialized()) {
-    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
+    int start_vreg = mir_graph_->GetFirstInVR();
     RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
 
     InToRegStorageArm64Mapper mapper;
-    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
+    in_to_reg_storage_mapping_.Initialize(arg_locs, mir_graph_->GetNumOfInVRs(), &mapper);
   }
   return in_to_reg_storage_mapping_.Get(arg_num);
 }
@@ -927,14 +934,14 @@ void Arm64Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
     StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
   }
 
-  if (cu_->num_ins == 0) {
+  if (mir_graph_->GetNumOfInVRs() == 0) {
     return;
   }
 
   // Handle dalvik registers.
   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
-  for (int i = 0; i < cu_->num_ins; i++) {
+  int start_vreg = mir_graph_->GetFirstInVR();
+  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i++) {
     RegLocation* t_loc = &ArgLocs[i];
     OpSize op_size;
     RegStorage reg = GetArgPhysicalReg(t_loc, &num_gpr_used, &num_fpr_used, &op_size);
@@ -1077,9 +1084,6 @@ int Arm64Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
       }
     }
 
-    // Logic below assumes that Method pointer is at offset zero from SP.
-    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
-
     // The rest can be copied together
     int start_offset = SRegOffset(info->args[last_mapped_in + 1].s_reg_low);
     int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + 1,
index ee1c467..6d8f288 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include "dex/compiler_internals.h"
+#include "driver/compiler_options.h"
 #include "dex_file-inl.h"
 #include "gc_map.h"
 #include "gc_map_builder.h"
@@ -274,8 +275,8 @@ void Mir2Lir::DumpLIRInsn(LIR* lir, unsigned char* base_addr) {
 }
 
 void Mir2Lir::DumpPromotionMap() {
-  int num_regs = cu_->num_dalvik_registers + mir_graph_->GetNumUsedCompilerTemps();
-  for (int i = 0; i < num_regs; i++) {
+  uint32_t num_regs = mir_graph_->GetNumOfCodeAndTempVRs();
+  for (uint32_t i = 0; i < num_regs; i++) {
     PromotionMap v_reg_map = promotion_map_[i];
     std::string buf;
     if (v_reg_map.fp_location == kLocPhysReg) {
@@ -283,12 +284,13 @@ void Mir2Lir::DumpPromotionMap() {
     }
 
     std::string buf3;
-    if (i < cu_->num_dalvik_registers) {
+    if (i < mir_graph_->GetNumOfCodeVRs()) {
       StringAppendF(&buf3, "%02d", i);
-    } else if (i == mir_graph_->GetMethodSReg()) {
+    } else if (i == mir_graph_->GetNumOfCodeVRs()) {
       buf3 = "Method*";
     } else {
-      StringAppendF(&buf3, "ct%d", i - cu_->num_dalvik_registers);
+      uint32_t diff = i - mir_graph_->GetNumOfCodeVRs();
+      StringAppendF(&buf3, "ct%d", diff);
     }
 
     LOG(INFO) << StringPrintf("V[%s] -> %s%d%s", buf3.c_str(),
@@ -319,9 +321,9 @@ void Mir2Lir::CodegenDump() {
   LIR* lir_insn;
   int insns_size = cu_->code_item->insns_size_in_code_units_;
 
-  LOG(INFO) << "Regs (excluding ins) : " << cu_->num_regs;
-  LOG(INFO) << "Ins          : " << cu_->num_ins;
-  LOG(INFO) << "Outs         : " << cu_->num_outs;
+  LOG(INFO) << "Regs (excluding ins) : " << mir_graph_->GetNumOfLocalCodeVRs();
+  LOG(INFO) << "Ins          : " << mir_graph_->GetNumOfInVRs();
+  LOG(INFO) << "Outs         : " << mir_graph_->GetNumOfOutVRs();
   LOG(INFO) << "CoreSpills       : " << num_core_spills_;
   LOG(INFO) << "FPSpills       : " << num_fp_spills_;
   LOG(INFO) << "CompilerTemps    : " << mir_graph_->GetNumUsedCompilerTemps();
@@ -401,6 +403,18 @@ LIR* Mir2Lir::ScanLiteralPoolMethod(LIR* data_target, const MethodReference& met
   return nullptr;
 }
 
+/* Search the existing constants in the literal pool for an exact class match */
+LIR* Mir2Lir::ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx) {
+  while (data_target) {
+    if (static_cast<uint32_t>(data_target->operands[0]) == type_idx &&
+        UnwrapPointer(data_target->operands[1]) == &dex_file) {
+      return data_target;
+    }
+    data_target = data_target->next;
+  }
+  return nullptr;
+}
+
 /*
  * The following are building blocks to insert constants into the pool or
  * instruction streams.
@@ -499,12 +513,15 @@ void Mir2Lir::InstallLiteralPools() {
   data_lir = class_literal_list_;
   while (data_lir != NULL) {
     uint32_t target_method_idx = data_lir->operands[0];
+    const DexFile* class_dex_file =
+      reinterpret_cast<const DexFile*>(UnwrapPointer(data_lir->operands[1]));
     cu_->compiler_driver->AddClassPatch(cu_->dex_file,
                                         cu_->class_def_idx,
                                         cu_->method_idx,
                                         target_method_idx,
+                                        class_dex_file,
                                         code_buffer_.size());
-    const DexFile::TypeId& target_method_id = cu_->dex_file->GetTypeId(target_method_idx);
+    const DexFile::TypeId& target_method_id = class_dex_file->GetTypeId(target_method_idx);
     // unique value based on target to ensure code deduplication works
     PushPointer(code_buffer_, &target_method_id, cu_->target64);
     data_lir = NEXT_LIR(data_lir);
@@ -640,15 +657,19 @@ bool Mir2Lir::VerifyCatchEntries() {
 
 
 void Mir2Lir::CreateMappingTables() {
+  bool generate_src_map = cu_->compiler_driver->GetCompilerOptions().GetIncludeDebugSymbols();
+
   uint32_t pc2dex_data_size = 0u;
   uint32_t pc2dex_entries = 0u;
   uint32_t pc2dex_offset = 0u;
   uint32_t pc2dex_dalvik_offset = 0u;
+  uint32_t pc2dex_src_entries = 0u;
   uint32_t dex2pc_data_size = 0u;
   uint32_t dex2pc_entries = 0u;
   uint32_t dex2pc_offset = 0u;
   uint32_t dex2pc_dalvik_offset = 0u;
   for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+    pc2dex_src_entries++;
     if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
       pc2dex_entries += 1;
       DCHECK(pc2dex_offset <= tgt_lir->offset);
@@ -669,6 +690,10 @@ void Mir2Lir::CreateMappingTables() {
     }
   }
 
+  if (generate_src_map) {
+    src_mapping_table_.reserve(pc2dex_src_entries);
+  }
+
   uint32_t total_entries = pc2dex_entries + dex2pc_entries;
   uint32_t hdr_data_size = UnsignedLeb128Size(total_entries) + UnsignedLeb128Size(pc2dex_entries);
   uint32_t data_size = hdr_data_size + pc2dex_data_size + dex2pc_data_size;
@@ -684,6 +709,10 @@ void Mir2Lir::CreateMappingTables() {
   dex2pc_offset = 0u;
   dex2pc_dalvik_offset = 0u;
   for (LIR* tgt_lir = first_lir_insn_; tgt_lir != NULL; tgt_lir = NEXT_LIR(tgt_lir)) {
+    if (generate_src_map && !tgt_lir->flags.is_nop) {
+      src_mapping_table_.push_back(SrcMapElem({tgt_lir->offset,
+              static_cast<int32_t>(tgt_lir->dalvik_offset)}));
+    }
     if (!tgt_lir->flags.is_nop && (tgt_lir->opcode == kPseudoSafepointPC)) {
       DCHECK(pc2dex_offset <= tgt_lir->offset);
       write_pos = EncodeUnsignedLeb128(write_pos, tgt_lir->offset - pc2dex_offset);
@@ -1077,10 +1106,10 @@ CompiledMethod* Mir2Lir::GetCompiledMethod() {
     vmap_encoder.PushBackUnsigned(0u);  // Size is 0.
   }
 
-  std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnCallFrameInformation());
+  std::unique_ptr<std::vector<uint8_t>> cfi_info(ReturnFrameDescriptionEntry());
   CompiledMethod* result =
       new CompiledMethod(cu_->compiler_driver, cu_->instruction_set, code_buffer_, frame_size_,
-                         core_spill_mask_, fp_spill_mask_, encoded_mapping_table_,
+                         core_spill_mask_, fp_spill_mask_, &src_mapping_table_, encoded_mapping_table_,
                          vmap_encoder.GetData(), native_gc_map_, cfi_info.get());
   return result;
 }
@@ -1096,7 +1125,8 @@ size_t Mir2Lir::GetNumBytesForCompilerTempSpillRegion() {
   // By default assume that the Mir2Lir will need one slot for each temporary.
   // If the backend can better determine temps that have non-overlapping ranges and
   // temps that do not need spilled, it can actually provide a small region.
-  return (mir_graph_->GetNumUsedCompilerTemps() * sizeof(uint32_t));
+  mir_graph_->CommitCompilerTemps();
+  return mir_graph_->GetNumBytesForSpecialTemps() + mir_graph_->GetMaximumBytesForNonSpecialTemps();
 }
 
 int Mir2Lir::ComputeFrameSize() {
@@ -1104,7 +1134,8 @@ int Mir2Lir::ComputeFrameSize() {
   uint32_t size = num_core_spills_ * GetBytesPerGprSpillLocation(cu_->instruction_set)
                   + num_fp_spills_ * GetBytesPerFprSpillLocation(cu_->instruction_set)
                   + sizeof(uint32_t)  // Filler.
-                  + (cu_->num_regs + cu_->num_outs) * sizeof(uint32_t)
+                  + mir_graph_->GetNumOfLocalCodeVRs()  * sizeof(uint32_t)
+                  + mir_graph_->GetNumOfOutVRs() * sizeof(uint32_t)
                   + GetNumBytesForCompilerTempSpillRegion();
   /* Align and set */
   return RoundUp(size, kStackAlignment);
@@ -1229,18 +1260,20 @@ void Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeType
   DCHECK_NE(cu_->instruction_set, kMips) << reinterpret_cast<void*>(data_target);
 }
 
-void Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
+void Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
+                            SpecialTargetRegister symbolic_reg) {
   // Use the literal pool and a PC-relative load from a data word.
-  LIR* data_target = ScanLiteralPool(class_literal_list_, type_idx, 0);
+  LIR* data_target = ScanLiteralPoolClass(class_literal_list_, dex_file, type_idx);
   if (data_target == nullptr) {
     data_target = AddWordData(&class_literal_list_, type_idx);
+    data_target->operands[1] = WrapPointer(const_cast<DexFile*>(&dex_file));
   }
   // Loads a Class pointer, which is a reference as it lives in the heap.
   LIR* load_pc_rel = OpPcRelLoad(TargetReg(symbolic_reg, kRef), data_target);
   AppendLIR(load_pc_rel);
 }
 
-std::vector<uint8_t>* Mir2Lir::ReturnCallFrameInformation() {
+std::vector<uint8_t>* Mir2Lir::ReturnFrameDescriptionEntry() {
   // Default case is to do nothing.
   return nullptr;
 }
index 7abf3e7..ffcce7d 100644 (file)
 #include "thread.h"
 #include "thread-inl.h"
 #include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir.h"
 #include "dex_instruction.h"
 #include "dex_instruction-inl.h"
+#include "driver/dex_compilation_unit.h"
 #include "verifier/method_verifier.h"
 #include "verifier/method_verifier-inl.h"
 
@@ -53,7 +55,7 @@ static constexpr bool kIntrinsicIsStatic[] = {
     true,   // kIntrinsicRint
     true,   // kIntrinsicRoundFloat
     true,   // kIntrinsicRoundDouble
-    false,  // kIntrinsicGet
+    false,  // kIntrinsicReferenceGet
     false,  // kIntrinsicCharAt
     false,  // kIntrinsicCompareTo
     false,  // kIntrinsicIsEmptyOrLength
@@ -85,7 +87,7 @@ COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicFloor], Floor_must_be_static);
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRint], Rint_must_be_static);
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundFloat], RoundFloat_must_be_static);
 COMPILE_ASSERT(kIntrinsicIsStatic[kIntrinsicRoundDouble], RoundDouble_must_be_static);
-COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicGet], Get_must_not_be_static);
+COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicReferenceGet], Get_must_not_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCharAt], CharAt_must_not_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicCompareTo], CompareTo_must_not_be_static);
 COMPILE_ASSERT(!kIntrinsicIsStatic[kIntrinsicIsEmptyOrLength], IsEmptyOrLength_must_not_be_static);
@@ -169,7 +171,7 @@ const char* const DexFileMethodInliner::kNameCacheNames[] = {
     "floor",                 // kNameCacheFloor
     "rint",                  // kNameCacheRint
     "round",                 // kNameCacheRound
-    "get",                   // kNameCacheGet
+    "get",                   // kNameCacheReferenceGet
     "charAt",                // kNameCacheCharAt
     "compareTo",             // kNameCacheCompareTo
     "isEmpty",               // kNameCacheIsEmpty
@@ -339,7 +341,7 @@ const DexFileMethodInliner::IntrinsicDef DexFileMethodInliner::kIntrinsicMethods
     INTRINSIC(JavaLangMath,       Round, D_J, kIntrinsicRoundDouble, 0),
     INTRINSIC(JavaLangStrictMath, Round, D_J, kIntrinsicRoundDouble, 0),
 
-    INTRINSIC(JavaLangRefReference, Get, _Object, kIntrinsicGet, 0),
+    INTRINSIC(JavaLangRefReference, ReferenceGet, _Object, kIntrinsicReferenceGet, 0),
 
     INTRINSIC(JavaLangString, CharAt, I_C, kIntrinsicCharAt, 0),
     INTRINSIC(JavaLangString, CompareTo, String_I, kIntrinsicCompareTo, 0),
@@ -471,8 +473,8 @@ bool DexFileMethodInliner::GenIntrinsic(Mir2Lir* backend, CallInfo* info) {
       return backend->GenInlinedRound(info, false /* is_double */);
     case kIntrinsicRoundDouble:
       return backend->GenInlinedRound(info, true /* is_double */);
-    case kIntrinsicGet:
-      return backend->GenInlinedGet(info);
+    case kIntrinsicReferenceGet:
+      return backend->GenInlinedReferenceGet(info);
     case kIntrinsicCharAt:
       return backend->GenInlinedCharAt(info);
     case kIntrinsicCompareTo:
@@ -561,9 +563,25 @@ bool DexFileMethodInliner::GenInline(MIRGraph* mir_graph, BasicBlock* bb, MIR* i
       break;
     default:
       LOG(FATAL) << "Unexpected inline op: " << method.opcode;
+      break;
   }
   if (result) {
     invoke->optimization_flags |= MIR_INLINED;
+    // If the invoke has not been eliminated yet, check now whether we should do it.
+    // This is done so that dataflow analysis does not get tripped up seeing nop invoke.
+    if (static_cast<int>(invoke->dalvikInsn.opcode) != kMirOpNop) {
+      bool is_static = invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC ||
+          invoke->dalvikInsn.opcode == Instruction::INVOKE_STATIC_RANGE;
+      if (is_static || (invoke->optimization_flags & MIR_IGNORE_NULL_CHECK) != 0) {
+        // No null object register involved here so we can eliminate the invoke.
+        invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
+      } else {
+        // Invoke was kept around because null check needed to be done.
+        invoke->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNullCheck);
+        // For invokes, the object register is in vC. For null check mir, it is in vA.
+        invoke->dalvikInsn.vA = invoke->dalvikInsn.vC;
+      }
+    }
     if (move_result != nullptr) {
       move_result->optimization_flags |= MIR_INLINED;
       move_result->dalvikInsn.opcode = static_cast<Instruction::Code>(kMirOpNop);
index 1bd3c48..b875e2b 100644 (file)
@@ -145,7 +145,7 @@ class DexFileMethodInliner {
       kNameCacheFloor,
       kNameCacheRint,
       kNameCacheRound,
-      kNameCacheGet,
+      kNameCacheReferenceGet,
       kNameCacheCharAt,
       kNameCacheCompareTo,
       kNameCacheIsEmpty,
index f6c77fc..fbe710b 100644 (file)
@@ -361,7 +361,7 @@ void Mir2Lir::GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                                    &direct_type_ptr, &is_finalizable)) {
       // The fast path.
       if (!use_direct_type_ptr) {
-        LoadClassType(type_idx, kArg0);
+        LoadClassType(*dex_file, type_idx, kArg0);
         CallRuntimeHelperRegMethodRegLocation(kQuickAllocArrayResolved, TargetReg(kArg0, kNotWide),
                                               rl_src, true);
       } else {
@@ -524,11 +524,9 @@ class StaticFieldSlowPath : public Mir2Lir::LIRSlowPath {
   const RegStorage r_base_;
 };
 
-void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
-                      bool is_object) {
+void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, OpSize size) {
   const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedStaticField(field_info.FastPut(), field_info.IsReferrersClass());
-  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
   if (!SLOW_FIELD_PATH && field_info.FastPut()) {
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     RegStorage r_base;
@@ -587,37 +585,59 @@ void Mir2Lir::GenSput(MIR* mir, RegLocation rl_src, bool is_long_or_double,
       FreeTemp(r_method);
     }
     // rBase now holds static storage base
-    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
-    if (is_long_or_double) {
+    RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
+    if (IsWide(size)) {
       rl_src = LoadValueWide(rl_src, reg_class);
     } else {
       rl_src = LoadValue(rl_src, reg_class);
     }
-    if (is_object) {
+    if (IsRef(size)) {
       StoreRefDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg,
                    field_info.IsVolatile() ? kVolatile : kNotVolatile);
     } else {
-      StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, store_size,
+      StoreBaseDisp(r_base, field_info.FieldOffset().Int32Value(), rl_src.reg, size,
                     field_info.IsVolatile() ? kVolatile : kNotVolatile);
     }
-    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
+    if (IsRef(size) && !mir_graph_->IsConstantNullRef(rl_src)) {
       MarkGCCard(rl_src.reg, r_base);
     }
     FreeTemp(r_base);
   } else {
     FlushAllRegs();  // Everything to home locations
-    QuickEntrypointEnum target =
-        is_long_or_double ? kQuickSet64Static
-            : (is_object ? kQuickSetObjStatic : kQuickSet32Static);
+    QuickEntrypointEnum target;
+    switch (size) {
+      case kReference:
+        target = kQuickSetObjStatic;
+        break;
+      case k64:
+      case kDouble:
+        target = kQuickSet64Static;
+        break;
+      case k32:
+      case kSingle:
+        target = kQuickSet32Static;
+        break;
+      case kSignedHalf:
+      case kUnsignedHalf:
+        target = kQuickSet16Static;
+        break;
+      case kSignedByte:
+      case kUnsignedByte:
+        target = kQuickSet8Static;
+        break;
+      case kWord:  // Intentional fallthrough.
+      default:
+        LOG(FATAL) << "Can't determine entrypoint for: " << size;
+        target = kQuickSet32Static;
+    }
     CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_src, true);
   }
 }
 
-void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
-                      bool is_long_or_double, bool is_object) {
+void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type) {
   const MirSFieldLoweringInfo& field_info = mir_graph_->GetSFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedStaticField(field_info.FastGet(), field_info.IsReferrersClass());
-  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
+
   if (!SLOW_FIELD_PATH && field_info.FastGet()) {
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     RegStorage r_base;
@@ -668,33 +688,62 @@ void Mir2Lir::GenSget(MIR* mir, RegLocation rl_dest,
       FreeTemp(r_method);
     }
     // r_base now holds static storage base
-    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
+    RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
     RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
 
     int field_offset = field_info.FieldOffset().Int32Value();
-    if (is_object) {
+    if (IsRef(size)) {
+      // TODO: DCHECK?
       LoadRefDisp(r_base, field_offset, rl_result.reg, field_info.IsVolatile() ? kVolatile :
           kNotVolatile);
     } else {
-      LoadBaseDisp(r_base, field_offset, rl_result.reg, load_size, field_info.IsVolatile() ?
+      LoadBaseDisp(r_base, field_offset, rl_result.reg, size, field_info.IsVolatile() ?
           kVolatile : kNotVolatile);
     }
     FreeTemp(r_base);
 
-    if (is_long_or_double) {
+    if (IsWide(size)) {
       StoreValueWide(rl_dest, rl_result);
     } else {
       StoreValue(rl_dest, rl_result);
     }
   } else {
+    DCHECK(SizeMatchesTypeForEntrypoint(size, type));
     FlushAllRegs();  // Everything to home locations
-    QuickEntrypointEnum target =
-        is_long_or_double ? kQuickGet64Static
-            : (is_object ? kQuickGetObjStatic : kQuickGet32Static);
+    QuickEntrypointEnum target;
+    switch (type) {
+      case Primitive::kPrimNot:
+        target = kQuickGetObjStatic;
+        break;
+      case Primitive::kPrimLong:
+      case Primitive::kPrimDouble:
+        target = kQuickGet64Static;
+        break;
+      case Primitive::kPrimInt:
+      case Primitive::kPrimFloat:
+        target = kQuickGet32Static;
+        break;
+      case Primitive::kPrimShort:
+        target = kQuickGetShortStatic;
+        break;
+      case Primitive::kPrimChar:
+        target = kQuickGetCharStatic;
+        break;
+      case Primitive::kPrimByte:
+        target = kQuickGetByteStatic;
+        break;
+      case Primitive::kPrimBoolean:
+        target = kQuickGetBooleanStatic;
+        break;
+      case Primitive::kPrimVoid:  // Intentional fallthrough.
+      default:
+        LOG(FATAL) << "Can't determine entrypoint for: " << type;
+        target = kQuickGet32Static;
+    }
     CallRuntimeHelperImm(target, field_info.FieldIndex(), true);
 
     // FIXME: pGetXXStatic always return an int or int64 regardless of rl_dest.fp.
-    if (is_long_or_double) {
+    if (IsWide(size)) {
       RegLocation rl_result = GetReturnWide(kCoreReg);
       StoreValueWide(rl_dest, rl_result);
     } else {
@@ -715,14 +764,12 @@ void Mir2Lir::HandleSlowPaths() {
   slow_paths_.Reset();
 }
 
-void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
-                      RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double,
-                      bool is_object) {
+void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
+                      RegLocation rl_dest, RegLocation rl_obj) {
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastGet());
-  OpSize load_size = LoadStoreOpSize(is_long_or_double, is_object);
   if (!SLOW_FIELD_PATH && field_info.FastGet()) {
-    RegisterClass reg_class = RegClassForFieldLoadStore(load_size, field_info.IsVolatile());
+    RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
     // A load of the class will lead to an iget with offset 0.
     DCHECK_GE(field_info.FieldOffset().Int32Value(), 0);
     rl_obj = LoadValue(rl_obj, kRefReg);
@@ -730,29 +777,57 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
     RegLocation rl_result = EvalLoc(rl_dest, reg_class, true);
     int field_offset = field_info.FieldOffset().Int32Value();
     LIR* load_lir;
-    if (is_object) {
+    if (IsRef(size)) {
       load_lir = LoadRefDisp(rl_obj.reg, field_offset, rl_result.reg, field_info.IsVolatile() ?
           kVolatile : kNotVolatile);
     } else {
-      load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, load_size,
+      load_lir = LoadBaseDisp(rl_obj.reg, field_offset, rl_result.reg, size,
                               field_info.IsVolatile() ? kVolatile : kNotVolatile);
     }
     MarkPossibleNullPointerExceptionAfter(opt_flags, load_lir);
-    if (is_long_or_double) {
+    if (IsWide(size)) {
       StoreValueWide(rl_dest, rl_result);
     } else {
       StoreValue(rl_dest, rl_result);
     }
   } else {
-    QuickEntrypointEnum target =
-        is_long_or_double ? kQuickGet64Instance
-            : (is_object ? kQuickGetObjInstance : kQuickGet32Instance);
+    DCHECK(SizeMatchesTypeForEntrypoint(size, type));
+    QuickEntrypointEnum target;
+    switch (type) {
+      case Primitive::kPrimNot:
+        target = kQuickGetObjInstance;
+        break;
+      case Primitive::kPrimLong:
+      case Primitive::kPrimDouble:
+        target = kQuickGet64Instance;
+        break;
+      case Primitive::kPrimFloat:
+      case Primitive::kPrimInt:
+        target = kQuickGet32Instance;
+        break;
+      case Primitive::kPrimShort:
+        target = kQuickGetShortInstance;
+        break;
+      case Primitive::kPrimChar:
+        target = kQuickGetCharInstance;
+        break;
+      case Primitive::kPrimByte:
+        target = kQuickGetByteInstance;
+        break;
+      case Primitive::kPrimBoolean:
+        target = kQuickGetBooleanInstance;
+        break;
+      case Primitive::kPrimVoid:  // Intentional fallthrough.
+      default:
+        LOG(FATAL) << "Can't determine entrypoint for: " << type;
+        target = kQuickGet32Instance;
+    }
     // Second argument of pGetXXInstance is always a reference.
     DCHECK_EQ(static_cast<unsigned int>(rl_obj.wide), 0U);
     CallRuntimeHelperImmRegLocation(target, field_info.FieldIndex(), rl_obj, true);
 
     // FIXME: pGetXXInstance always return an int or int64 regardless of rl_dest.fp.
-    if (is_long_or_double) {
+    if (IsWide(size)) {
       RegLocation rl_result = GetReturnWide(kCoreReg);
       StoreValueWide(rl_dest, rl_result);
     } else {
@@ -763,18 +838,16 @@ void Mir2Lir::GenIGet(MIR* mir, int opt_flags, OpSize size,
 }
 
 void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
-                      RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double,
-                      bool is_object) {
+                      RegLocation rl_src, RegLocation rl_obj) {
   const MirIFieldLoweringInfo& field_info = mir_graph_->GetIFieldLoweringInfo(mir);
   cu_->compiler_driver->ProcessedInstanceField(field_info.FastPut());
-  OpSize store_size = LoadStoreOpSize(is_long_or_double, is_object);
   if (!SLOW_FIELD_PATH && field_info.FastPut()) {
-    RegisterClass reg_class = RegClassForFieldLoadStore(store_size, field_info.IsVolatile());
+    RegisterClass reg_class = RegClassForFieldLoadStore(size, field_info.IsVolatile());
     // Dex code never writes to the class field.
     DCHECK_GE(static_cast<uint32_t>(field_info.FieldOffset().Int32Value()),
               sizeof(mirror::HeapReference<mirror::Class>));
     rl_obj = LoadValue(rl_obj, kRefReg);
-    if (is_long_or_double) {
+    if (IsWide(size)) {
       rl_src = LoadValueWide(rl_src, reg_class);
     } else {
       rl_src = LoadValue(rl_src, reg_class);
@@ -782,21 +855,44 @@ void Mir2Lir::GenIPut(MIR* mir, int opt_flags, OpSize size,
     GenNullCheck(rl_obj.reg, opt_flags);
     int field_offset = field_info.FieldOffset().Int32Value();
     LIR* store;
-    if (is_object) {
+    if (IsRef(size)) {
       store = StoreRefDisp(rl_obj.reg, field_offset, rl_src.reg, field_info.IsVolatile() ?
           kVolatile : kNotVolatile);
     } else {
-      store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, store_size,
+      store = StoreBaseDisp(rl_obj.reg, field_offset, rl_src.reg, size,
                             field_info.IsVolatile() ? kVolatile : kNotVolatile);
     }
     MarkPossibleNullPointerExceptionAfter(opt_flags, store);
-    if (is_object && !mir_graph_->IsConstantNullRef(rl_src)) {
+    if (IsRef(size) && !mir_graph_->IsConstantNullRef(rl_src)) {
       MarkGCCard(rl_src.reg, rl_obj.reg);
     }
   } else {
-    QuickEntrypointEnum target =
-        is_long_or_double ? kQuickSet64Instance
-            : (is_object ? kQuickSetObjInstance : kQuickSet32Instance);
+    QuickEntrypointEnum target;
+    switch (size) {
+      case kReference:
+        target = kQuickSetObjInstance;
+        break;
+      case k64:
+      case kDouble:
+        target = kQuickSet64Instance;
+        break;
+      case k32:
+      case kSingle:
+        target = kQuickSet32Instance;
+        break;
+      case kSignedHalf:
+      case kUnsignedHalf:
+        target = kQuickSet16Instance;
+        break;
+      case kSignedByte:
+      case kUnsignedByte:
+        target = kQuickSet8Instance;
+        break;
+      case kWord:  // Intentional fallthrough.
+      default:
+        LOG(FATAL) << "Can't determine entrypoint for: " << size;
+        target = kQuickSet32Instance;
+    }
     CallRuntimeHelperImmRegLocationRegLocation(target, field_info.FieldIndex(), rl_obj, rl_src,
                                                true);
   }
@@ -961,7 +1057,7 @@ void Mir2Lir::GenNewInstance(uint32_t type_idx, RegLocation rl_dest) {
                                    !is_finalizable) {
       // The fast path.
       if (!use_direct_type_ptr) {
-        LoadClassType(type_idx, kArg0);
+        LoadClassType(*dex_file, type_idx, kArg0);
         if (!is_type_initialized) {
           CallRuntimeHelperRegMethod(kQuickAllocObjectResolved, TargetReg(kArg0, kRef), true);
         } else {
@@ -2009,7 +2105,7 @@ void Mir2Lir::GenConstWide(RegLocation rl_dest, int64_t value) {
 }
 
 void Mir2Lir::GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   const uint16_t entries = table[1];
   // Chained cmp-and-branch.
   const int32_t* as_int32 = reinterpret_cast<const int32_t*>(&table[2]);
@@ -2052,7 +2148,7 @@ void Mir2Lir::GenSmallPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation
 }
 
 void Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
@@ -2067,7 +2163,7 @@ void Mir2Lir::GenPackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_s
 }
 
 void Mir2Lir::GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   const uint16_t entries = table[1];
   // Chained cmp-and-branch.
   const int32_t* keys = reinterpret_cast<const int32_t*>(&table[2]);
@@ -2082,7 +2178,7 @@ void Mir2Lir::GenSmallSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation
 }
 
 void Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
@@ -2096,4 +2192,28 @@ void Mir2Lir::GenSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_s
   }
 }
 
+bool Mir2Lir::SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type) {
+  switch (size) {
+    case kReference:
+      return type == Primitive::kPrimNot;
+    case k64:
+    case kDouble:
+      return type == Primitive::kPrimLong || type == Primitive::kPrimDouble;
+    case k32:
+    case kSingle:
+      return type == Primitive::kPrimInt || type == Primitive::kPrimFloat;
+    case kSignedHalf:
+      return type == Primitive::kPrimShort;
+    case kUnsignedHalf:
+      return type == Primitive::kPrimChar;
+    case kSignedByte:
+      return type == Primitive::kPrimByte;
+    case kUnsignedByte:
+      return type == Primitive::kPrimBoolean;
+    case kWord:  // Intentional fallthrough.
+    default:
+      return false;  // There are no sane types with this op size.
+  }
+}
+
 }  // namespace art
index e70b0c5..e1d3241 100755 (executable)
 #include "mirror/class-inl.h"
 #include "mirror/dex_cache.h"
 #include "mirror/object_array-inl.h"
-#include "mirror/reference-inl.h"
 #include "mirror/string.h"
 #include "mir_to_lir-inl.h"
-#include "scoped_thread_state_change.h"
 #include "x86/codegen_x86.h"
 
 namespace art {
@@ -383,11 +381,11 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
     StoreRefDisp(TargetPtrReg(kSp), 0, rl_src.reg, kNotVolatile);
   }
 
-  if (cu_->num_ins == 0) {
+  if (mir_graph_->GetNumOfInVRs() == 0) {
     return;
   }
 
-  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
+  int start_vreg = mir_graph_->GetFirstInVR();
   /*
    * Copy incoming arguments to their proper home locations.
    * NOTE: an older version of dx had an issue in which
@@ -401,7 +399,7 @@ void Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
    * half to memory as well.
    */
   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  for (int i = 0; i < cu_->num_ins; i++) {
+  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i++) {
     PromotionMap* v_map = &promotion_map_[start_vreg + i];
     RegStorage reg = GetArgMappingToPhysicalReg(i);
 
@@ -935,9 +933,6 @@ int Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
     }
   }
 
-  // Logic below assumes that Method pointer is at offset zero from SP.
-  DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
-
   // The first 3 arguments are passed via registers.
   // TODO: For 64-bit, instead of hardcoding 4 for Method* size, we should either
   // get size of uintptr_t or size of object reference according to model being used.
@@ -1129,74 +1124,43 @@ RegLocation Mir2Lir::InlineTargetWide(CallInfo* info) {
   return res;
 }
 
-bool Mir2Lir::GenInlinedGet(CallInfo* info) {
+bool Mir2Lir::GenInlinedReferenceGet(CallInfo* info) {
   if (cu_->instruction_set == kMips) {
     // TODO - add Mips implementation
     return false;
   }
 
-  // the refrence class is stored in the image dex file which might not be the same as the cu's
-  // dex file. Query the reference class for the image dex file then reset to starting dex file
-  // in after loading class type.
-  uint16_t type_idx = 0;
-  const DexFile* ref_dex_file = nullptr;
-  {
-    ScopedObjectAccess soa(Thread::Current());
-    type_idx = mirror::Reference::GetJavaLangRefReference()->GetDexTypeIndex();
-    ref_dex_file = mirror::Reference::GetJavaLangRefReference()->GetDexCache()->GetDexFile();
-  }
-  CHECK(LIKELY(ref_dex_file != nullptr));
-
-  // address is either static within the image file, or needs to be patched up after compilation.
-  bool unused_type_initialized;
   bool use_direct_type_ptr;
   uintptr_t direct_type_ptr;
-  bool is_finalizable;
-  const DexFile* old_dex = cu_->dex_file;
-  cu_->dex_file = ref_dex_file;
+  ClassReference ref;
+  if (!cu_->compiler_driver->CanEmbedReferenceTypeInCode(&ref,
+        &use_direct_type_ptr, &direct_type_ptr)) {
+    return false;
+  }
+
   RegStorage reg_class = TargetReg(kArg1, kRef);
   Clobber(reg_class);
   LockTemp(reg_class);
-  if (!cu_->compiler_driver->CanEmbedTypeInCode(*ref_dex_file, type_idx, &unused_type_initialized,
-                                                &use_direct_type_ptr, &direct_type_ptr,
-                                                &is_finalizable) || is_finalizable) {
-    cu_->dex_file = old_dex;
-    // address is not known and post-compile patch is not possible, cannot insert intrinsic.
-    return false;
-  }
   if (use_direct_type_ptr) {
     LoadConstant(reg_class, direct_type_ptr);
-  } else if (cu_->dex_file == old_dex) {
-    // TODO: Bug 16656190 If cu_->dex_file != old_dex the patching could retrieve the wrong class
-    // since the load class is indexed only by the type_idx. We should include which dex file a
-    // class is from in the LoadClassType LIR.
-    LoadClassType(type_idx, kArg1);
   } else {
-    cu_->dex_file = old_dex;
-    return false;
+    uint16_t type_idx = ref.first->GetClassDef(ref.second).class_idx_;
+    LoadClassType(*ref.first, type_idx, kArg1);
   }
-  cu_->dex_file = old_dex;
 
-  // get the offset for flags in reference class.
-  uint32_t slow_path_flag_offset = 0;
-  uint32_t disable_flag_offset = 0;
-  {
-    ScopedObjectAccess soa(Thread::Current());
-    mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
-    slow_path_flag_offset = reference_class->GetSlowPathFlagOffset().Uint32Value();
-    disable_flag_offset = reference_class->GetDisableIntrinsicFlagOffset().Uint32Value();
-  }
+  uint32_t slow_path_flag_offset = cu_->compiler_driver->GetReferenceSlowFlagOffset();
+  uint32_t disable_flag_offset = cu_->compiler_driver->GetReferenceDisableFlagOffset();
   CHECK(slow_path_flag_offset && disable_flag_offset &&
         (slow_path_flag_offset != disable_flag_offset));
 
   // intrinsic logic start.
   RegLocation rl_obj = info->args[0];
-  rl_obj = LoadValue(rl_obj);
+  rl_obj = LoadValue(rl_obj, kRefReg);
 
   RegStorage reg_slow_path = AllocTemp();
   RegStorage reg_disabled = AllocTemp();
-  Load32Disp(reg_class, slow_path_flag_offset, reg_slow_path);
-  Load32Disp(reg_class, disable_flag_offset, reg_disabled);
+  Load8Disp(reg_class, slow_path_flag_offset, reg_slow_path);
+  Load8Disp(reg_class, disable_flag_offset, reg_disabled);
   FreeTemp(reg_class);
   LIR* or_inst = OpRegRegReg(kOpOr, reg_slow_path, reg_slow_path, reg_disabled);
   FreeTemp(reg_disabled);
@@ -1330,10 +1294,10 @@ bool Mir2Lir::GenInlinedReverseBytes(CallInfo* info, OpSize size) {
     return false;
   }
   RegLocation rl_src_i = info->args[0];
-  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
-  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
+  RegLocation rl_i = IsWide(size) ? LoadValueWide(rl_src_i, kCoreReg) : LoadValue(rl_src_i, kCoreReg);
+  RegLocation rl_dest = IsWide(size) ? InlineTargetWide(info) : InlineTarget(info);  // result reg
   RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
-  if (size == k64) {
+  if (IsWide(size)) {
     if (cu_->instruction_set == kArm64 || cu_->instruction_set == kX86_64) {
       OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
       StoreValueWide(rl_dest, rl_result);
diff --git a/compiler/dex/quick/mips/backend_mips.h b/compiler/dex/quick/mips/backend_mips.h
new file mode 100644 (file)
index 0000000..f65e984
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_MIPS_BACKEND_MIPS_H_
+#define ART_COMPILER_DEX_QUICK_MIPS_BACKEND_MIPS_H_
+
+namespace art {
+
+struct CompilationUnit;
+class Mir2Lir;
+class MIRGraph;
+class ArenaAllocator;
+
+Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                           ArenaAllocator* const arena);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_MIPS_BACKEND_MIPS_H_
index e8cb356..f3edd7e 100644 (file)
@@ -62,7 +62,7 @@ bool MipsMir2Lir::GenSpecialCase(BasicBlock* bb, MIR* mir,
  *
  */
 void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
@@ -139,7 +139,7 @@ void MipsMir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLoca
  * done:
  */
 void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
@@ -220,8 +220,8 @@ void MipsMir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLoca
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void MipsMir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+void MipsMir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   // Add the table to the list - we'll process it later
   FillArrayData* tab_rec =
       reinterpret_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData),
index 43cbde7..be94019 100644 (file)
@@ -18,6 +18,7 @@
 #define ART_COMPILER_DEX_QUICK_MIPS_CODEGEN_MIPS_H_
 
 #include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir.h"
 #include "mips_lir.h"
 
 namespace art {
@@ -115,7 +116,7 @@ class MipsMir2Lir FINAL : public Mir2Lir {
     void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method);
     void GenExitSequence();
     void GenSpecialExitSequence();
-    void GenFillArrayData(uint32_t table_offset, RegLocation rl_src);
+    void GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src);
     void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double);
     void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir);
     void GenSelect(BasicBlock* bb, MIR* mir);
index ea56989..95c1262 100644 (file)
@@ -220,9 +220,9 @@ void MipsMir2Lir::GenSelectConst32(RegStorage left_op, RegStorage right_op, Cond
                                    int dest_reg_class) {
   // Implement as a branch-over.
   // TODO: Conditional move?
-  LoadConstant(rs_dest, false_val);  // Favors false.
-  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
   LoadConstant(rs_dest, true_val);
+  LIR* ne_branchover = OpCmpBranch(code, left_op, right_op, NULL);
+  LoadConstant(rs_dest, false_val);
   LIR* target_label = NewLIR0(kPseudoTargetLabel);
   ne_branchover->target = target_label;
 }
index bc91fbc..9c4426f 100644 (file)
@@ -20,6 +20,7 @@
 
 #include <string>
 
+#include "backend_mips.h"
 #include "dex/compiler_internals.h"
 #include "dex/quick/mir_to_lir-inl.h"
 #include "mips_lir.h"
index ff5a46f..2e4e292 100644 (file)
@@ -249,7 +249,7 @@ inline void Mir2Lir::SetupResourceMasks(LIR* lir) {
   }
 
   // Handle target-specific actions
-  SetupTargetResourceMasks(lir, flags, &def_mask, &use_mask);
+  SetupTargetResourceMasks(lir, flags, &use_mask, &def_mask);
 
   lir->u.m.use_mask = mask_cache_.GetMask(use_mask);
   lir->u.m.def_mask = mask_cache_.GetMask(def_mask);
index e519011..96f00e7 100644 (file)
@@ -18,6 +18,7 @@
 #include "dex/dataflow_iterator-inl.h"
 #include "dex/quick/dex_file_method_inliner.h"
 #include "mir_to_lir-inl.h"
+#include "primitive.h"
 #include "thread-inl.h"
 
 namespace art {
@@ -223,9 +224,27 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
     return false;
   }
 
-  bool wide = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE));
-  bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
-  OpSize size = LoadStoreOpSize(wide, ref);
+  OpSize size = k32;
+  switch (data.op_variant) {
+    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT):
+      size = kReference;
+      break;
+    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_WIDE):
+      size = k64;
+      break;
+    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_SHORT):
+      size = kSignedHalf;
+      break;
+    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_CHAR):
+      size = kUnsignedHalf;
+      break;
+    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BYTE):
+      size = kSignedByte;
+      break;
+    case InlineMethodAnalyser::IGetVariant(Instruction::IGET_BOOLEAN):
+      size = kUnsignedByte;
+      break;
+  }
 
   // Point of no return - no aborts after this
   GenPrintLabel(mir);
@@ -233,20 +252,20 @@ bool Mir2Lir::GenSpecialIGet(MIR* mir, const InlineMethod& special) {
   RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
   RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
   RegisterClass ret_reg_class = ShortyToRegClass(cu_->shorty[0]);
-  RegLocation rl_dest = wide ? GetReturnWide(ret_reg_class) : GetReturn(ret_reg_class);
+  RegLocation rl_dest = IsWide(size) ? GetReturnWide(ret_reg_class) : GetReturn(ret_reg_class);
   RegStorage r_result = rl_dest.reg;
   if (!RegClassMatches(reg_class, r_result)) {
-    r_result = wide ? AllocTypedTempWide(rl_dest.fp, reg_class)
-                    : AllocTypedTemp(rl_dest.fp, reg_class);
+    r_result = IsWide(size) ? AllocTypedTempWide(rl_dest.fp, reg_class)
+                            : AllocTypedTemp(rl_dest.fp, reg_class);
   }
-  if (ref) {
+  if (IsRef(size)) {
     LoadRefDisp(reg_obj, data.field_offset, r_result, data.is_volatile ? kVolatile : kNotVolatile);
   } else {
     LoadBaseDisp(reg_obj, data.field_offset, r_result, size, data.is_volatile ? kVolatile :
         kNotVolatile);
   }
   if (r_result.NotExactlyEquals(rl_dest.reg)) {
-    if (wide) {
+    if (IsWide(size)) {
       OpRegCopyWide(rl_dest.reg, r_result);
     } else {
       OpRegCopy(rl_dest.reg, r_result);
@@ -267,24 +286,42 @@ bool Mir2Lir::GenSpecialIPut(MIR* mir, const InlineMethod& special) {
     return false;
   }
 
-  bool wide = (data.op_variant == InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE));
-  bool ref = (data.op_variant == InlineMethodAnalyser::IGetVariant(Instruction::IGET_OBJECT));
-  OpSize size = LoadStoreOpSize(wide, ref);
+  OpSize size = k32;
+  switch (data.op_variant) {
+    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_OBJECT):
+      size = kReference;
+      break;
+    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_WIDE):
+      size = k64;
+      break;
+    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_SHORT):
+      size = kSignedHalf;
+      break;
+    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_CHAR):
+      size = kUnsignedHalf;
+      break;
+    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BYTE):
+      size = kSignedByte;
+      break;
+    case InlineMethodAnalyser::IPutVariant(Instruction::IPUT_BOOLEAN):
+      size = kUnsignedByte;
+      break;
+  }
 
   // Point of no return - no aborts after this
   GenPrintLabel(mir);
   LockArg(data.object_arg);
-  LockArg(data.src_arg, wide);
+  LockArg(data.src_arg, IsWide(size));
   RegStorage reg_obj = LoadArg(data.object_arg, kRefReg);
   RegisterClass reg_class = RegClassForFieldLoadStore(size, data.is_volatile);
-  RegStorage reg_src = LoadArg(data.src_arg, reg_class, wide);
-  if (ref) {
+  RegStorage reg_src = LoadArg(data.src_arg, reg_class, IsWide(size));
+  if (IsRef(size)) {
     StoreRefDisp(reg_obj, data.field_offset, reg_src, data.is_volatile ? kVolatile : kNotVolatile);
   } else {
     StoreBaseDisp(reg_obj, data.field_offset, reg_src, size, data.is_volatile ? kVolatile :
         kNotVolatile);
   }
-  if (ref) {
+  if (IsRef(size)) {
     MarkGCCard(reg_src, reg_obj);
   }
   return true;
@@ -562,7 +599,7 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       break;
 
     case Instruction::FILL_ARRAY_DATA:
-      GenFillArrayData(vB, rl_src[0]);
+      GenFillArrayData(mir, vB, rl_src[0]);
       break;
 
     case Instruction::FILLED_NEW_ARRAY:
@@ -720,84 +757,112 @@ void Mir2Lir::CompileDalvikInstruction(MIR* mir, BasicBlock* bb, LIR* label_list
       break;
 
     case Instruction::IGET_OBJECT:
-      GenIGet(mir, opt_flags, kReference, rl_dest, rl_src[0], false, true);
+      GenIGet(mir, opt_flags, kReference, Primitive::kPrimNot, rl_dest, rl_src[0]);
       break;
 
     case Instruction::IGET_WIDE:
-      GenIGet(mir, opt_flags, k64, rl_dest, rl_src[0], true, false);
+      // kPrimLong and kPrimDouble share the same entrypoints.
+      GenIGet(mir, opt_flags, k64, Primitive::kPrimLong, rl_dest, rl_src[0]);
       break;
 
     case Instruction::IGET:
-      GenIGet(mir, opt_flags, k32, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, k32, Primitive::kPrimInt, rl_dest, rl_src[0]);
       break;
 
     case Instruction::IGET_CHAR:
-      GenIGet(mir, opt_flags, kUnsignedHalf, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, kUnsignedHalf, Primitive::kPrimChar, rl_dest, rl_src[0]);
       break;
 
     case Instruction::IGET_SHORT:
-      GenIGet(mir, opt_flags, kSignedHalf, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, kSignedHalf, Primitive::kPrimShort, rl_dest, rl_src[0]);
       break;
 
     case Instruction::IGET_BOOLEAN:
+      GenIGet(mir, opt_flags, kUnsignedByte, Primitive::kPrimBoolean, rl_dest, rl_src[0]);
+      break;
+
     case Instruction::IGET_BYTE:
-      GenIGet(mir, opt_flags, kUnsignedByte, rl_dest, rl_src[0], false, false);
+      GenIGet(mir, opt_flags, kSignedByte, Primitive::kPrimByte, rl_dest, rl_src[0]);
       break;
 
     case Instruction::IPUT_WIDE:
-      GenIPut(mir, opt_flags, k64, rl_src[0], rl_src[1], true, false);
+      GenIPut(mir, opt_flags, k64, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::IPUT_OBJECT:
-      GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1], false, true);
+      GenIPut(mir, opt_flags, kReference, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::IPUT:
-      GenIPut(mir, opt_flags, k32, rl_src[0], rl_src[1], false, false);
+      GenIPut(mir, opt_flags, k32, rl_src[0], rl_src[1]);
       break;
 
-    case Instruction::IPUT_BOOLEAN:
     case Instruction::IPUT_BYTE:
-      GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1], false, false);
+    case Instruction::IPUT_BOOLEAN:
+      GenIPut(mir, opt_flags, kUnsignedByte, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::IPUT_CHAR:
-      GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1], false, false);
+      GenIPut(mir, opt_flags, kUnsignedHalf, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::IPUT_SHORT:
-      GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1], false, false);
+      GenIPut(mir, opt_flags, kSignedHalf, rl_src[0], rl_src[1]);
       break;
 
     case Instruction::SGET_OBJECT:
-      GenSget(mir, rl_dest, false, true);
+      GenSget(mir, rl_dest, kReference, Primitive::kPrimNot);
       break;
+
     case Instruction::SGET:
-    case Instruction::SGET_BOOLEAN:
-    case Instruction::SGET_BYTE:
+      GenSget(mir, rl_dest, k32, Primitive::kPrimInt);
+      break;
+
     case Instruction::SGET_CHAR:
+      GenSget(mir, rl_dest, kUnsignedHalf, Primitive::kPrimChar);
+      break;
+
     case Instruction::SGET_SHORT:
-      GenSget(mir, rl_dest, false, false);
+      GenSget(mir, rl_dest, kSignedHalf, Primitive::kPrimShort);
+      break;
+
+    case Instruction::SGET_BOOLEAN:
+      GenSget(mir, rl_dest, kUnsignedByte, Primitive::kPrimBoolean);
+      break;
+
+    case Instruction::SGET_BYTE:
+      GenSget(mir, rl_dest, kSignedByte, Primitive::kPrimByte);
       break;
 
     case Instruction::SGET_WIDE:
-      GenSget(mir, rl_dest, true, false);
+      // kPrimLong and kPrimDouble share the same entrypoints.
+      GenSget(mir, rl_dest, k64, Primitive::kPrimLong);
       break;
 
     case Instruction::SPUT_OBJECT:
-      GenSput(mir, rl_src[0], false, true);
+      GenSput(mir, rl_src[0], kReference);
       break;
 
     case Instruction::SPUT:
-    case Instruction::SPUT_BOOLEAN:
+      GenSput(mir, rl_src[0], k32);
+      break;
+
     case Instruction::SPUT_BYTE:
+    case Instruction::SPUT_BOOLEAN:
+      GenSput(mir, rl_src[0], kUnsignedByte);
+      break;
+
     case Instruction::SPUT_CHAR:
+      GenSput(mir, rl_src[0], kUnsignedHalf);
+      break;
+
     case Instruction::SPUT_SHORT:
-      GenSput(mir, rl_src[0], false, false);
+      GenSput(mir, rl_src[0], kSignedHalf);
       break;
 
+
     case Instruction::SPUT_WIDE:
-      GenSput(mir, rl_src[0], true, false);
+      GenSput(mir, rl_src[0], k64);
       break;
 
     case Instruction::INVOKE_STATIC_RANGE:
@@ -1077,9 +1142,17 @@ void Mir2Lir::HandleExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
     case kMirOpSelect:
       GenSelect(bb, mir);
       break;
+    case kMirOpNullCheck: {
+      RegLocation rl_obj = mir_graph_->GetSrc(mir, 0);
+      rl_obj = LoadValue(rl_obj, kRefReg);
+      // An explicit check is done because it is not expected that, when this is used,
+      // it will actually trip the implicit checks (since an invalid access
+      // is needed on the null object).
+      GenExplicitNullCheck(rl_obj.reg, mir->optimization_flags);
+      break;
+    }
     case kMirOpPhi:
     case kMirOpNop:
-    case kMirOpNullCheck:
     case kMirOpRangeCheck:
     case kMirOpDivZeroCheck:
     case kMirOpCheck:
@@ -1127,9 +1200,8 @@ bool Mir2Lir::MethodBlockCodeGen(BasicBlock* bb) {
 
   if (bb->block_type == kEntryBlock) {
     ResetRegPool();
-    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
-    GenEntrySequence(&mir_graph_->reg_location_[start_vreg],
-                         mir_graph_->reg_location_[mir_graph_->GetMethodSReg()]);
+    int start_vreg = mir_graph_->GetFirstInVR();
+    GenEntrySequence(&mir_graph_->reg_location_[start_vreg], mir_graph_->GetMethodLoc());
   } else if (bb->block_type == kExitBlock) {
     ResetRegPool();
     GenExitSequence();
index 0e6f36b..d6fc2e9 100644 (file)
 
 namespace art {
 
-/*
- * TODO: refactoring pass to move these (and other) typdefs towards usage style of runtime to
- * add type safety (see runtime/offsets.h).
- */
-typedef uint32_t DexOffset;          // Dex offset in code units.
-typedef uint16_t NarrowDexOffset;    // For use in structs, Dex offsets range from 0 .. 0xffff.
-typedef uint32_t CodeOffset;         // Native code offset in bytes.
-
 // Set to 1 to measure cost of suspend check.
 #define NO_SUSPEND 0
 
@@ -187,16 +179,6 @@ struct LIR {
   int32_t operands[5];           // [0..4] = [dest, src1, src2, extra, extra2].
 };
 
-// Target-specific initialization.
-Mir2Lir* ArmCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                          ArenaAllocator* const arena);
-Mir2Lir* Arm64CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                            ArenaAllocator* const arena);
-Mir2Lir* MipsCodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                          ArenaAllocator* const arena);
-Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
-                          ArenaAllocator* const arena);
-
 // Utility macros to traverse the LIR list.
 #define NEXT_LIR(lir) (lir->next)
 #define PREV_LIR(lir) (lir->prev)
@@ -685,6 +667,7 @@ class Mir2Lir : public Backend {
     LIR* ScanLiteralPool(LIR* data_target, int value, unsigned int delta);
     LIR* ScanLiteralPoolWide(LIR* data_target, int val_lo, int val_hi);
     LIR* ScanLiteralPoolMethod(LIR* data_target, const MethodReference& method);
+    LIR* ScanLiteralPoolClass(LIR* data_target, const DexFile& dex_file, uint32_t type_idx);
     LIR* AddWordData(LIR* *constant_list_p, int value);
     LIR* AddWideData(LIR* *constant_list_p, int val_lo, int val_hi);
     void ProcessSwitchTables();
@@ -848,14 +831,14 @@ class Mir2Lir : public Backend {
     void GenNewArray(uint32_t type_idx, RegLocation rl_dest,
                      RegLocation rl_src);
     void GenFilledNewArray(CallInfo* info);
-    void GenSput(MIR* mir, RegLocation rl_src,
-                 bool is_long_or_double, bool is_object);
-    void GenSget(MIR* mir, RegLocation rl_dest,
-                 bool is_long_or_double, bool is_object);
-    void GenIGet(MIR* mir, int opt_flags, OpSize size,
-                 RegLocation rl_dest, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+    void GenSput(MIR* mir, RegLocation rl_src, OpSize size);
+    // Get entrypoints are specific to types; size alone is not sufficient to safely infer
+    // the entrypoint.
+    void GenSget(MIR* mir, RegLocation rl_dest, OpSize size, Primitive::Type type);
+    void GenIGet(MIR* mir, int opt_flags, OpSize size, Primitive::Type type,
+                 RegLocation rl_dest, RegLocation rl_obj);
     void GenIPut(MIR* mir, int opt_flags, OpSize size,
-                 RegLocation rl_src, RegLocation rl_obj, bool is_long_or_double, bool is_object);
+                 RegLocation rl_src, RegLocation rl_obj);
     void GenArrayObjPut(int opt_flags, RegLocation rl_array, RegLocation rl_index,
                         RegLocation rl_src);
 
@@ -958,12 +941,12 @@ class Mir2Lir : public Backend {
      */
     RegLocation InlineTargetWide(CallInfo* info);
 
-    bool GenInlinedGet(CallInfo* info);
+    bool GenInlinedReferenceGet(CallInfo* info);
     virtual bool GenInlinedCharAt(CallInfo* info);
     bool GenInlinedStringIsEmptyOrLength(CallInfo* info, bool is_empty);
     virtual bool GenInlinedReverseBits(CallInfo* info, OpSize size);
     bool GenInlinedReverseBytes(CallInfo* info, OpSize size);
-    bool GenInlinedAbsInt(CallInfo* info);
+    virtual bool GenInlinedAbsInt(CallInfo* info);
     virtual bool GenInlinedAbsLong(CallInfo* info);
     virtual bool GenInlinedAbsFloat(CallInfo* info) = 0;
     virtual bool GenInlinedAbsDouble(CallInfo* info) = 0;
@@ -995,6 +978,10 @@ class Mir2Lir : public Backend {
     virtual LIR* LoadWordDisp(RegStorage r_base, int displacement, RegStorage r_dest) {
       return LoadBaseDisp(r_base, displacement, r_dest, kWord, kNotVolatile);
     }
+    // Load 8 bits, regardless of target.
+    virtual LIR* Load8Disp(RegStorage r_base, int displacement, RegStorage r_dest) {
+      return LoadBaseDisp(r_base, displacement, r_dest, kSignedByte, kNotVolatile);
+    }
     // Load 32 bits, regardless of target.
     virtual LIR* Load32Disp(RegStorage r_base, int displacement, RegStorage r_dest)  {
       return LoadBaseDisp(r_base, displacement, r_dest, k32, kNotVolatile);
@@ -1113,11 +1100,13 @@ class Mir2Lir : public Backend {
 
     /*
      * @brief Load the Class* of a Dex Class type into the register.
+     * @param dex_file DexFile that contains the class type.
      * @param type How the method will be invoked.
      * @param register that will contain the code address.
      * @note register will be passed to TargetReg to get physical register.
      */
-    virtual void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg);
+    virtual void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
+                               SpecialTargetRegister symbolic_reg);
 
     // Routines that work for the generic case, but may be overriden by target.
     /*
@@ -1164,6 +1153,14 @@ class Mir2Lir : public Backend {
              (info1->StorageMask() & info2->StorageMask()) != 0);
     }
 
+    static constexpr bool IsWide(OpSize size) {
+      return size == k64 || size == kDouble;
+    }
+
+    static constexpr bool IsRef(OpSize size) {
+      return size == kReference;
+    }
+
     /**
      * @brief Portable way of getting special registers from the backend.
      * @param reg Enumeration describing the purpose of the register.
@@ -1316,7 +1313,7 @@ class Mir2Lir : public Backend {
 
     virtual void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) = 0;
     virtual void GenExitSequence() = 0;
-    virtual void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) = 0;
+    virtual void GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) = 0;
     virtual void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) = 0;
     virtual void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) = 0;
 
@@ -1498,10 +1495,6 @@ class Mir2Lir : public Backend {
      */
     virtual RegLocation ForceTempWide(RegLocation loc);
 
-    static constexpr OpSize LoadStoreOpSize(bool wide, bool ref) {
-      return wide ? k64 : ref ? kReference : k32;
-    }
-
     virtual void GenInstanceofFinal(bool use_declaring_class, uint32_t type_idx,
                                     RegLocation rl_dest, RegLocation rl_src);
 
@@ -1525,10 +1518,10 @@ class Mir2Lir : public Backend {
                                     uint32_t type_idx, RegLocation rl_dest,
                                     RegLocation rl_src);
     /*
-     * @brief Generate the debug_frame FDE information if possible.
-     * @returns pointer to vector containg CFE information, or NULL.
+     * @brief Generate the eh_frame FDE information if possible.
+     * @returns pointer to vector containing FDE information, or NULL.
      */
-    virtual std::vector<uint8_t>* ReturnCallFrameInformation();
+    virtual std::vector<uint8_t>* ReturnFrameDescriptionEntry();
 
     /**
      * @brief Used to insert marker that can be used to associate MIR with LIR.
@@ -1712,6 +1705,8 @@ class Mir2Lir : public Backend {
      */
     int live_sreg_;
     CodeBuffer code_buffer_;
+    // The source mapping table data (pc -> dex). More entries than in encoded_mapping_table_
+    SrcMap src_mapping_table_;
     // The encoding mapping table data (dex -> pc offset and pc offset -> dex) with a size prefix.
     std::vector<uint8_t> encoded_mapping_table_;
     ArenaVector<uint32_t> core_vmap_table_;
@@ -1737,6 +1732,9 @@ class Mir2Lir : public Backend {
     // (i.e. 8 bytes on 32-bit arch, 16 bytes on 64-bit arch) and we use ResourceMaskCache
     // to deduplicate the masks.
     ResourceMaskCache mask_cache_;
+
+  private:
+    static bool SizeMatchesTypeForEntrypoint(OpSize size, Primitive::Type type);
 };  // Class Mir2Lir
 
 }  // namespace art
diff --git a/compiler/dex/quick/quick_compiler.cc b/compiler/dex/quick/quick_compiler.cc
new file mode 100644 (file)
index 0000000..2c5f79c
--- /dev/null
@@ -0,0 +1,650 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "quick_compiler.h"
+
+#include <cstdint>
+
+#include "compiler.h"
+#include "dex/frontend.h"
+#include "dex/mir_graph.h"
+#include "dex/quick/mir_to_lir.h"
+#include "driver/compiler_driver.h"
+#include "elf_writer_quick.h"
+#include "jni/quick/jni_compiler.h"
+#include "mirror/art_method-inl.h"
+#include "base/logging.h"
+
+// Specific compiler backends.
+#include "dex/quick/arm/backend_arm.h"
+#include "dex/quick/arm64/backend_arm64.h"
+#include "dex/quick/mips/backend_mips.h"
+#include "dex/quick/x86/backend_x86.h"
+
+namespace art {
+
+class QuickCompiler : public Compiler {
+ public:
+  explicit QuickCompiler(CompilerDriver* driver) : Compiler(driver, 100) {}
+
+  void Init() const OVERRIDE;
+
+  void UnInit() const OVERRIDE;
+
+  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
+      OVERRIDE;
+
+  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+                          uint32_t access_flags,
+                          InvokeType invoke_type,
+                          uint16_t class_def_idx,
+                          uint32_t method_idx,
+                          jobject class_loader,
+                          const DexFile& dex_file) const OVERRIDE;
+
+  CompiledMethod* JniCompile(uint32_t access_flags,
+                             uint32_t method_idx,
+                             const DexFile& dex_file) const OVERRIDE;
+
+  uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool WriteElf(art::File* file,
+                OatWriter* oat_writer,
+                const std::vector<const art::DexFile*>& dex_files,
+                const std::string& android_root,
+                bool is_host) const
+    OVERRIDE
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const OVERRIDE;
+
+  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(QuickCompiler);
+};
+
+COMPILE_ASSERT(0U == static_cast<size_t>(kNone), kNone_not_0);
+COMPILE_ASSERT(1U == static_cast<size_t>(kArm), kArm_not_1);
+COMPILE_ASSERT(2U == static_cast<size_t>(kArm64), kArm64_not_2);
+COMPILE_ASSERT(3U == static_cast<size_t>(kThumb2), kThumb2_not_3);
+COMPILE_ASSERT(4U == static_cast<size_t>(kX86), kX86_not_4);
+COMPILE_ASSERT(5U == static_cast<size_t>(kX86_64), kX86_64_not_5);
+COMPILE_ASSERT(6U == static_cast<size_t>(kMips), kMips_not_6);
+COMPILE_ASSERT(7U == static_cast<size_t>(kMips64), kMips64_not_7);
+
+// Additional disabled optimizations (over generally disabled) per instruction set.
+static constexpr uint32_t kDisabledOptimizationsPerISA[] = {
+    // 0 = kNone.
+    ~0U,
+    // 1 = kArm, unused (will use kThumb2).
+    ~0U,
+    // 2 = kArm64.
+    0,
+    // 3 = kThumb2.
+    0,
+    // 4 = kX86.
+    (1 << kLoadStoreElimination) |
+    0,
+    // 5 = kX86_64.
+    (1 << kLoadStoreElimination) |
+    0,
+    // 6 = kMips.
+    (1 << kLoadStoreElimination) |
+    (1 << kLoadHoisting) |
+    (1 << kSuppressLoads) |
+    (1 << kNullCheckElimination) |
+    (1 << kPromoteRegs) |
+    (1 << kTrackLiveTemps) |
+    (1 << kSafeOptimizations) |
+    (1 << kBBOpt) |
+    (1 << kMatch) |
+    (1 << kPromoteCompilerTemps) |
+    0,
+    // 7 = kMips64.
+    ~0U
+};
+COMPILE_ASSERT(sizeof(kDisabledOptimizationsPerISA) == 8 * sizeof(uint32_t), kDisabledOpts_unexp);
+
+// Supported shorty types per instruction set. nullptr means that all are available.
+// Z : boolean
+// B : byte
+// S : short
+// C : char
+// I : int
+// J : long
+// F : float
+// D : double
+// L : reference(object, array)
+// V : void
+static const char* kSupportedTypes[] = {
+    // 0 = kNone.
+    "",
+    // 1 = kArm, unused (will use kThumb2).
+    "",
+    // 2 = kArm64.
+    nullptr,
+    // 3 = kThumb2.
+    nullptr,
+    // 4 = kX86.
+    nullptr,
+    // 5 = kX86_64.
+    nullptr,
+    // 6 = kMips.
+    nullptr,
+    // 7 = kMips64.
+    ""
+};
+COMPILE_ASSERT(sizeof(kSupportedTypes) == 8 * sizeof(char*), kSupportedTypes_unexp);
+
+static int kAllOpcodes[] = {
+    Instruction::NOP,
+    Instruction::MOVE,
+    Instruction::MOVE_FROM16,
+    Instruction::MOVE_16,
+    Instruction::MOVE_WIDE,
+    Instruction::MOVE_WIDE_FROM16,
+    Instruction::MOVE_WIDE_16,
+    Instruction::MOVE_OBJECT,
+    Instruction::MOVE_OBJECT_FROM16,
+    Instruction::MOVE_OBJECT_16,
+    Instruction::MOVE_RESULT,
+    Instruction::MOVE_RESULT_WIDE,
+    Instruction::MOVE_RESULT_OBJECT,
+    Instruction::MOVE_EXCEPTION,
+    Instruction::RETURN_VOID,
+    Instruction::RETURN,
+    Instruction::RETURN_WIDE,
+    Instruction::RETURN_OBJECT,
+    Instruction::CONST_4,
+    Instruction::CONST_16,
+    Instruction::CONST,
+    Instruction::CONST_HIGH16,
+    Instruction::CONST_WIDE_16,
+    Instruction::CONST_WIDE_32,
+    Instruction::CONST_WIDE,
+    Instruction::CONST_WIDE_HIGH16,
+    Instruction::CONST_STRING,
+    Instruction::CONST_STRING_JUMBO,
+    Instruction::CONST_CLASS,
+    Instruction::MONITOR_ENTER,
+    Instruction::MONITOR_EXIT,
+    Instruction::CHECK_CAST,
+    Instruction::INSTANCE_OF,
+    Instruction::ARRAY_LENGTH,
+    Instruction::NEW_INSTANCE,
+    Instruction::NEW_ARRAY,
+    Instruction::FILLED_NEW_ARRAY,
+    Instruction::FILLED_NEW_ARRAY_RANGE,
+    Instruction::FILL_ARRAY_DATA,
+    Instruction::THROW,
+    Instruction::GOTO,
+    Instruction::GOTO_16,
+    Instruction::GOTO_32,
+    Instruction::PACKED_SWITCH,
+    Instruction::SPARSE_SWITCH,
+    Instruction::CMPL_FLOAT,
+    Instruction::CMPG_FLOAT,
+    Instruction::CMPL_DOUBLE,
+    Instruction::CMPG_DOUBLE,
+    Instruction::CMP_LONG,
+    Instruction::IF_EQ,
+    Instruction::IF_NE,
+    Instruction::IF_LT,
+    Instruction::IF_GE,
+    Instruction::IF_GT,
+    Instruction::IF_LE,
+    Instruction::IF_EQZ,
+    Instruction::IF_NEZ,
+    Instruction::IF_LTZ,
+    Instruction::IF_GEZ,
+    Instruction::IF_GTZ,
+    Instruction::IF_LEZ,
+    Instruction::UNUSED_3E,
+    Instruction::UNUSED_3F,
+    Instruction::UNUSED_40,
+    Instruction::UNUSED_41,
+    Instruction::UNUSED_42,
+    Instruction::UNUSED_43,
+    Instruction::AGET,
+    Instruction::AGET_WIDE,
+    Instruction::AGET_OBJECT,
+    Instruction::AGET_BOOLEAN,
+    Instruction::AGET_BYTE,
+    Instruction::AGET_CHAR,
+    Instruction::AGET_SHORT,
+    Instruction::APUT,
+    Instruction::APUT_WIDE,
+    Instruction::APUT_OBJECT,
+    Instruction::APUT_BOOLEAN,
+    Instruction::APUT_BYTE,
+    Instruction::APUT_CHAR,
+    Instruction::APUT_SHORT,
+    Instruction::IGET,
+    Instruction::IGET_WIDE,
+    Instruction::IGET_OBJECT,
+    Instruction::IGET_BOOLEAN,
+    Instruction::IGET_BYTE,
+    Instruction::IGET_CHAR,
+    Instruction::IGET_SHORT,
+    Instruction::IPUT,
+    Instruction::IPUT_WIDE,
+    Instruction::IPUT_OBJECT,
+    Instruction::IPUT_BOOLEAN,
+    Instruction::IPUT_BYTE,
+    Instruction::IPUT_CHAR,
+    Instruction::IPUT_SHORT,
+    Instruction::SGET,
+    Instruction::SGET_WIDE,
+    Instruction::SGET_OBJECT,
+    Instruction::SGET_BOOLEAN,
+    Instruction::SGET_BYTE,
+    Instruction::SGET_CHAR,
+    Instruction::SGET_SHORT,
+    Instruction::SPUT,
+    Instruction::SPUT_WIDE,
+    Instruction::SPUT_OBJECT,
+    Instruction::SPUT_BOOLEAN,
+    Instruction::SPUT_BYTE,
+    Instruction::SPUT_CHAR,
+    Instruction::SPUT_SHORT,
+    Instruction::INVOKE_VIRTUAL,
+    Instruction::INVOKE_SUPER,
+    Instruction::INVOKE_DIRECT,
+    Instruction::INVOKE_STATIC,
+    Instruction::INVOKE_INTERFACE,
+    Instruction::RETURN_VOID_BARRIER,
+    Instruction::INVOKE_VIRTUAL_RANGE,
+    Instruction::INVOKE_SUPER_RANGE,
+    Instruction::INVOKE_DIRECT_RANGE,
+    Instruction::INVOKE_STATIC_RANGE,
+    Instruction::INVOKE_INTERFACE_RANGE,
+    Instruction::UNUSED_79,
+    Instruction::UNUSED_7A,
+    Instruction::NEG_INT,
+    Instruction::NOT_INT,
+    Instruction::NEG_LONG,
+    Instruction::NOT_LONG,
+    Instruction::NEG_FLOAT,
+    Instruction::NEG_DOUBLE,
+    Instruction::INT_TO_LONG,
+    Instruction::INT_TO_FLOAT,
+    Instruction::INT_TO_DOUBLE,
+    Instruction::LONG_TO_INT,
+    Instruction::LONG_TO_FLOAT,
+    Instruction::LONG_TO_DOUBLE,
+    Instruction::FLOAT_TO_INT,
+    Instruction::FLOAT_TO_LONG,
+    Instruction::FLOAT_TO_DOUBLE,
+    Instruction::DOUBLE_TO_INT,
+    Instruction::DOUBLE_TO_LONG,
+    Instruction::DOUBLE_TO_FLOAT,
+    Instruction::INT_TO_BYTE,
+    Instruction::INT_TO_CHAR,
+    Instruction::INT_TO_SHORT,
+    Instruction::ADD_INT,
+    Instruction::SUB_INT,
+    Instruction::MUL_INT,
+    Instruction::DIV_INT,
+    Instruction::REM_INT,
+    Instruction::AND_INT,
+    Instruction::OR_INT,
+    Instruction::XOR_INT,
+    Instruction::SHL_INT,
+    Instruction::SHR_INT,
+    Instruction::USHR_INT,
+    Instruction::ADD_LONG,
+    Instruction::SUB_LONG,
+    Instruction::MUL_LONG,
+    Instruction::DIV_LONG,
+    Instruction::REM_LONG,
+    Instruction::AND_LONG,
+    Instruction::OR_LONG,
+    Instruction::XOR_LONG,
+    Instruction::SHL_LONG,
+    Instruction::SHR_LONG,
+    Instruction::USHR_LONG,
+    Instruction::ADD_FLOAT,
+    Instruction::SUB_FLOAT,
+    Instruction::MUL_FLOAT,
+    Instruction::DIV_FLOAT,
+    Instruction::REM_FLOAT,
+    Instruction::ADD_DOUBLE,
+    Instruction::SUB_DOUBLE,
+    Instruction::MUL_DOUBLE,
+    Instruction::DIV_DOUBLE,
+    Instruction::REM_DOUBLE,
+    Instruction::ADD_INT_2ADDR,
+    Instruction::SUB_INT_2ADDR,
+    Instruction::MUL_INT_2ADDR,
+    Instruction::DIV_INT_2ADDR,
+    Instruction::REM_INT_2ADDR,
+    Instruction::AND_INT_2ADDR,
+    Instruction::OR_INT_2ADDR,
+    Instruction::XOR_INT_2ADDR,
+    Instruction::SHL_INT_2ADDR,
+    Instruction::SHR_INT_2ADDR,
+    Instruction::USHR_INT_2ADDR,
+    Instruction::ADD_LONG_2ADDR,
+    Instruction::SUB_LONG_2ADDR,
+    Instruction::MUL_LONG_2ADDR,
+    Instruction::DIV_LONG_2ADDR,
+    Instruction::REM_LONG_2ADDR,
+    Instruction::AND_LONG_2ADDR,
+    Instruction::OR_LONG_2ADDR,
+    Instruction::XOR_LONG_2ADDR,
+    Instruction::SHL_LONG_2ADDR,
+    Instruction::SHR_LONG_2ADDR,
+    Instruction::USHR_LONG_2ADDR,
+    Instruction::ADD_FLOAT_2ADDR,
+    Instruction::SUB_FLOAT_2ADDR,
+    Instruction::MUL_FLOAT_2ADDR,
+    Instruction::DIV_FLOAT_2ADDR,
+    Instruction::REM_FLOAT_2ADDR,
+    Instruction::ADD_DOUBLE_2ADDR,
+    Instruction::SUB_DOUBLE_2ADDR,
+    Instruction::MUL_DOUBLE_2ADDR,
+    Instruction::DIV_DOUBLE_2ADDR,
+    Instruction::REM_DOUBLE_2ADDR,
+    Instruction::ADD_INT_LIT16,
+    Instruction::RSUB_INT,
+    Instruction::MUL_INT_LIT16,
+    Instruction::DIV_INT_LIT16,
+    Instruction::REM_INT_LIT16,
+    Instruction::AND_INT_LIT16,
+    Instruction::OR_INT_LIT16,
+    Instruction::XOR_INT_LIT16,
+    Instruction::ADD_INT_LIT8,
+    Instruction::RSUB_INT_LIT8,
+    Instruction::MUL_INT_LIT8,
+    Instruction::DIV_INT_LIT8,
+    Instruction::REM_INT_LIT8,
+    Instruction::AND_INT_LIT8,
+    Instruction::OR_INT_LIT8,
+    Instruction::XOR_INT_LIT8,
+    Instruction::SHL_INT_LIT8,
+    Instruction::SHR_INT_LIT8,
+    Instruction::USHR_INT_LIT8,
+    Instruction::IGET_QUICK,
+    Instruction::IGET_WIDE_QUICK,
+    Instruction::IGET_OBJECT_QUICK,
+    Instruction::IPUT_QUICK,
+    Instruction::IPUT_WIDE_QUICK,
+    Instruction::IPUT_OBJECT_QUICK,
+    Instruction::INVOKE_VIRTUAL_QUICK,
+    Instruction::INVOKE_VIRTUAL_RANGE_QUICK,
+    Instruction::IPUT_BOOLEAN_QUICK,
+    Instruction::IPUT_BYTE_QUICK,
+    Instruction::IPUT_CHAR_QUICK,
+    Instruction::IPUT_SHORT_QUICK,
+    Instruction::UNUSED_EF,
+    Instruction::UNUSED_F0,
+    Instruction::UNUSED_F1,
+    Instruction::UNUSED_F2,
+    Instruction::UNUSED_F3,
+    Instruction::UNUSED_F4,
+    Instruction::UNUSED_F5,
+    Instruction::UNUSED_F6,
+    Instruction::UNUSED_F7,
+    Instruction::UNUSED_F8,
+    Instruction::UNUSED_F9,
+    Instruction::UNUSED_FA,
+    Instruction::UNUSED_FB,
+    Instruction::UNUSED_FC,
+    Instruction::UNUSED_FD,
+    Instruction::UNUSED_FE,
+    Instruction::UNUSED_FF,
+    // ----- ExtendedMIROpcode -----
+    kMirOpPhi,
+    kMirOpCopy,
+    kMirOpFusedCmplFloat,
+    kMirOpFusedCmpgFloat,
+    kMirOpFusedCmplDouble,
+    kMirOpFusedCmpgDouble,
+    kMirOpFusedCmpLong,
+    kMirOpNop,
+    kMirOpNullCheck,
+    kMirOpRangeCheck,
+    kMirOpDivZeroCheck,
+    kMirOpCheck,
+    kMirOpCheckPart2,
+    kMirOpSelect,
+};
+
+// Unsupported opcodes. nullptr can be used when everything is supported. Size of the lists is
+// recorded below.
+static const int* kUnsupportedOpcodes[] = {
+    // 0 = kNone.
+    kAllOpcodes,
+    // 1 = kArm, unused (will use kThumb2).
+    kAllOpcodes,
+    // 2 = kArm64.
+    nullptr,
+    // 3 = kThumb2.
+    nullptr,
+    // 4 = kX86.
+    nullptr,
+    // 5 = kX86_64.
+    nullptr,
+    // 6 = kMips.
+    nullptr,
+    // 7 = kMips64.
+    kAllOpcodes
+};
+COMPILE_ASSERT(sizeof(kUnsupportedOpcodes) == 8 * sizeof(int*), kUnsupportedOpcodes_unexp);
+
+// Size of the arrays stored above.
+static const size_t kUnsupportedOpcodesSize[] = {
+    // 0 = kNone.
+    arraysize(kAllOpcodes),
+    // 1 = kArm, unused (will use kThumb2).
+    arraysize(kAllOpcodes),
+    // 2 = kArm64.
+    0,
+    // 3 = kThumb2.
+    0,
+    // 4 = kX86.
+    0,
+    // 5 = kX86_64.
+    0,
+    // 6 = kMips.
+    0,
+    // 7 = kMips64.
+    arraysize(kAllOpcodes),
+};
+COMPILE_ASSERT(sizeof(kUnsupportedOpcodesSize) == 8 * sizeof(size_t),
+               kUnsupportedOpcodesSize_unexp);
+
+// The maximum number of Dalvik registers in a method for which we will start compiling. Tries to
+// avoid an abort when we need to manage more SSA registers than we can.
+static constexpr size_t kMaxAllowedDalvikRegisters = INT16_MAX / 2;
+
+static bool CanCompileShorty(const char* shorty, InstructionSet instruction_set) {
+  const char* supported_types = kSupportedTypes[instruction_set];
+  if (supported_types == nullptr) {
+    // Everything available.
+    return true;
+  }
+
+  uint32_t shorty_size = strlen(shorty);
+  CHECK_GE(shorty_size, 1u);
+
+  for (uint32_t i = 0; i < shorty_size; i++) {
+    if (strchr(supported_types, shorty[i]) == nullptr) {
+      return false;
+    }
+  }
+  return true;
+};
+
+// Skip the method that we do not support currently.
+bool QuickCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
+                                     CompilationUnit* cu) const {
+  // This is a limitation in mir_graph. See MirGraph::SetNumSSARegs.
+  if (cu->mir_graph->GetNumOfCodeAndTempVRs() > kMaxAllowedDalvikRegisters) {
+    VLOG(compiler) << "Too many dalvik registers : " << cu->mir_graph->GetNumOfCodeAndTempVRs();
+    return false;
+  }
+
+  // Check whether we do have limitations at all.
+  if (kSupportedTypes[cu->instruction_set] == nullptr &&
+      kUnsupportedOpcodesSize[cu->instruction_set] == 0U) {
+    return true;
+  }
+
+  // Check if we can compile the prototype.
+  const char* shorty = dex_file.GetMethodShorty(dex_file.GetMethodId(method_idx));
+  if (!CanCompileShorty(shorty, cu->instruction_set)) {
+    VLOG(compiler) << "Unsupported shorty : " << shorty;
+    return false;
+  }
+
+  const int *unsupport_list = kUnsupportedOpcodes[cu->instruction_set];
+  int unsupport_list_size = kUnsupportedOpcodesSize[cu->instruction_set];
+
+  for (unsigned int idx = 0; idx < cu->mir_graph->GetNumBlocks(); idx++) {
+    BasicBlock* bb = cu->mir_graph->GetBasicBlock(idx);
+    if (bb == NULL) continue;
+    if (bb->block_type == kDead) continue;
+    for (MIR* mir = bb->first_mir_insn; mir != nullptr; mir = mir->next) {
+      int opcode = mir->dalvikInsn.opcode;
+      // Check if we support the byte code.
+      if (std::find(unsupport_list, unsupport_list + unsupport_list_size,
+                    opcode) != unsupport_list + unsupport_list_size) {
+        if (!MIR::DecodedInstruction::IsPseudoMirOp(opcode)) {
+          VLOG(compiler) << "Unsupported dalvik byte code : "
+              << mir->dalvikInsn.opcode;
+        } else {
+          VLOG(compiler) << "Unsupported extended MIR opcode : "
+              << MIRGraph::extended_mir_op_names_[opcode - kMirOpFirst];
+        }
+        return false;
+      }
+      // Check if it invokes a prototype that we cannot support.
+      if (Instruction::INVOKE_VIRTUAL == opcode ||
+          Instruction::INVOKE_SUPER == opcode ||
+          Instruction::INVOKE_DIRECT == opcode ||
+          Instruction::INVOKE_STATIC == opcode ||
+          Instruction::INVOKE_INTERFACE == opcode) {
+        uint32_t invoke_method_idx = mir->dalvikInsn.vB;
+        const char* invoke_method_shorty = dex_file.GetMethodShorty(
+            dex_file.GetMethodId(invoke_method_idx));
+        if (!CanCompileShorty(invoke_method_shorty, cu->instruction_set)) {
+          VLOG(compiler) << "Unsupported to invoke '"
+              << PrettyMethod(invoke_method_idx, dex_file)
+              << "' with shorty : " << invoke_method_shorty;
+          return false;
+        }
+      }
+    }
+  }
+  return true;
+}
+
+// Per-CompilationUnit setup hook: ORs the instruction-set-specific mask of
+// disabled optimizations into cu.disable_opt so ISA-unsupported passes are
+// skipped for this method.
+void QuickCompiler::InitCompilationUnit(CompilationUnit& cu) const {
+  // Disable optimizations according to instruction set.
+  cu.disable_opt |= kDisabledOptimizationsPerISA[cu.instruction_set];
+}
+
+// One-time backend initialization. Quick keeps no shared compiler context on
+// the driver, so this only asserts that none has been installed.
+void QuickCompiler::Init() const {
+  CHECK(GetCompilerDriver()->GetCompilerContext() == nullptr);
+}
+
+// Backend teardown counterpart of Init(); verifies no compiler context was
+// left behind on the driver (Quick never sets one).
+void QuickCompiler::UnInit() const {
+  CHECK(GetCompilerDriver()->GetCompilerContext() == nullptr);
+}
+
+// Compiles one dex method to a CompiledMethod.
+//
+// First offers the method to the (experimental) SeaIR backend via
+// TryCompileWithSeaIR; if that returns a result it is used as-is. Otherwise
+// falls back to the regular Quick pipeline through CompileOneMethod.
+// Returns nullptr when the method could not be compiled (caller falls back
+// to interpretation).
+CompiledMethod* QuickCompiler::Compile(const DexFile::CodeItem* code_item,
+                                       uint32_t access_flags,
+                                       InvokeType invoke_type,
+                                       uint16_t class_def_idx,
+                                       uint32_t method_idx,
+                                       jobject class_loader,
+                                       const DexFile& dex_file) const {
+  CompiledMethod* method = TryCompileWithSeaIR(code_item,
+                                               access_flags,
+                                               invoke_type,
+                                               class_def_idx,
+                                               method_idx,
+                                               class_loader,
+                                               dex_file);
+  if (method != nullptr) {
+    return method;
+  }
+
+  // TODO: check method fingerprint here to determine appropriate backend type.  Until then, use
+  // build default.
+  CompilerDriver* driver = GetCompilerDriver();
+  return CompileOneMethod(driver, this, code_item, access_flags, invoke_type, class_def_idx,
+                          method_idx, class_loader, dex_file, nullptr /* use thread llvm_info */);
+}
+
+// Compiles a JNI stub for a native method by delegating to the shared
+// Quick JNI compiler.
+CompiledMethod* QuickCompiler::JniCompile(uint32_t access_flags,
+                                          uint32_t method_idx,
+                                          const DexFile& dex_file) const {
+  return ArtQuickJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
+}
+
+// Returns the method's quick-compiled-code entry point as a raw address.
+uintptr_t QuickCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
+  return reinterpret_cast<uintptr_t>(method->GetEntryPointFromQuickCompiledCode());
+}
+
+// Emits the oat output as an ELF file for the given dex files.
+// NOTE(review): hard-codes the 32-bit writer (ElfWriterQuick32) regardless of
+// target ISA — presumably 64-bit output is handled elsewhere or not yet
+// supported on this branch; confirm against callers.
+bool QuickCompiler::WriteElf(art::File* file,
+                             OatWriter* oat_writer,
+                             const std::vector<const art::DexFile*>& dex_files,
+                             const std::string& android_root,
+                             bool is_host) const {
+  return art::ElfWriterQuick32::Create(file, oat_writer, dex_files, android_root, is_host,
+                                       *GetCompilerDriver());
+}
+
+// Instantiates the per-ISA Mir2Lir code generator for this compilation unit.
+//
+// x86 and x86_64 deliberately share one generator (the kX86 case falls
+// through). An unknown instruction set hits LOG(FATAL), which aborts, so the
+// nullptr guard below only matters if FATAL logging were ever non-fatal.
+// After construction, tells the MIRGraph how many non-special compiler temps
+// this backend can supply, since that limit is backend-dependent.
+Backend* QuickCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+  Mir2Lir* mir_to_lir = nullptr;
+  switch (cu->instruction_set) {
+    case kThumb2:
+      mir_to_lir = ArmCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+      break;
+    case kArm64:
+      mir_to_lir = Arm64CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+      break;
+    case kMips:
+      mir_to_lir = MipsCodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+      break;
+    case kX86:
+      // Fall-through.
+    case kX86_64:
+      mir_to_lir = X86CodeGenerator(cu, cu->mir_graph.get(), &cu->arena);
+      break;
+    default:
+      LOG(FATAL) << "Unexpected instruction set: " << cu->instruction_set;
+  }
+
+  /* The number of compiler temporaries depends on backend so set it up now if possible */
+  if (mir_to_lir) {
+    size_t max_temps = mir_to_lir->GetMaxPossibleCompilerTemps();
+    bool set_max = cu->mir_graph->SetMaxAvailableNonSpecialCompilerTemps(max_temps);
+    CHECK(set_max);
+  }
+  return mir_to_lir;
+}
+
+
+// Factory entry point (declared in quick_compiler.h) so callers can create a
+// QuickCompiler without depending on its class definition. Caller owns the
+// returned object.
+Compiler* CreateQuickCompiler(CompilerDriver* driver) {
+  return new QuickCompiler(driver);
+}
+
+}  // namespace art
diff --git a/compiler/dex/quick/quick_compiler.h b/compiler/dex/quick/quick_compiler.h
new file mode 100644 (file)
index 0000000..10de5fb
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_QUICK_COMPILER_H_
+#define ART_COMPILER_DEX_QUICK_QUICK_COMPILER_H_
+
+namespace art {
+
+class Compiler;
+class CompilerDriver;
+
+// Creates a new QuickCompiler bound to |driver|. Defined in
+// quick_compiler.cc; forward declarations keep this header dependency-free.
+// Caller takes ownership of the returned Compiler.
+Compiler* CreateQuickCompiler(CompilerDriver* driver);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_QUICK_COMPILER_H_
index bed86d8..195da0d 100644 (file)
@@ -252,20 +252,7 @@ int Mir2Lir::SRegToPMap(int s_reg) {
   DCHECK_LT(s_reg, mir_graph_->GetNumSSARegs());
   DCHECK_GE(s_reg, 0);
   int v_reg = mir_graph_->SRegToVReg(s_reg);
-  if (v_reg >= 0) {
-    DCHECK_LT(v_reg, cu_->num_dalvik_registers);
-    return v_reg;
-  } else {
-    /*
-     * It must be the case that the v_reg for temporary is less than or equal to the
-     * base reg for temps. For that reason, "position" must be zero or positive.
-     */
-    unsigned int position = std::abs(v_reg) - std::abs(static_cast<int>(kVRegTempBaseReg));
-
-    // The temporaries are placed after dalvik registers in the promotion map
-    DCHECK_LT(position, mir_graph_->GetNumUsedCompilerTemps());
-    return cu_->num_dalvik_registers + position;
-  }
+  return v_reg;
 }
 
 // TODO: refactor following Alloc/Record routines - much commonality.
@@ -1207,8 +1194,7 @@ void Mir2Lir::DumpCounts(const RefCounts* arr, int size, const char* msg) {
  * optimization is disabled.
  */
 void Mir2Lir::DoPromotion() {
-  int dalvik_regs = cu_->num_dalvik_registers;
-  int num_regs = dalvik_regs + mir_graph_->GetNumUsedCompilerTemps();
+  int num_regs = mir_graph_->GetNumOfCodeAndTempVRs();
   const int promotion_threshold = 1;
   // Allocate the promotion map - one entry for each Dalvik vReg or compiler temp
   promotion_map_ = static_cast<PromotionMap*>
@@ -1237,17 +1223,10 @@ void Mir2Lir::DoPromotion() {
       static_cast<RefCounts *>(arena_->Alloc(sizeof(RefCounts) * fp_reg_count_size,
                                              kArenaAllocRegAlloc));
   // Set ssa names for original Dalvik registers
-  for (int i = 0; i < dalvik_regs; i++) {
+  for (int i = 0; i < num_regs; i++) {
     core_regs[i].s_reg = fp_regs[i].s_reg = i;
   }
 
-  // Set ssa names for compiler temporaries
-  for (unsigned int ct_idx = 0; ct_idx < mir_graph_->GetNumUsedCompilerTemps(); ct_idx++) {
-    CompilerTemp* ct = mir_graph_->GetCompilerTemp(ct_idx);
-    core_regs[dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
-    fp_regs[dalvik_regs + ct_idx].s_reg = ct->s_reg_low;
-  }
-
   // Duplicate in upper half to represent possible wide starting sregs.
   for (size_t i = num_regs; i < fp_reg_count_size; i++) {
     fp_regs[i].s_reg = fp_regs[i - num_regs].s_reg | STARTING_WIDE_SREG;
index 8ebe55c..46f5dd3 100644 (file)
@@ -188,6 +188,8 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
 
   { kX86Mov32MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { 0,             0, 0x89, 0, 0, 0, 0, 0, false }, "Mov32MR", "[!0r+!1d],!2r" },
   { kX86Mov32AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { 0,             0, 0x89, 0, 0, 0, 0, 0, false }, "Mov32AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Movnti32MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,   { 0x0F,          0, 0xC3, 0, 0, 0, 0, 0, false }, "Movnti32MR", "[!0r+!1d],!2r" },
+  { kX86Movnti32AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,  { 0x0F,          0, 0xC3, 0, 0, 0, 0, 0, false }, "Movnti32AR", "[!0r+!1r<<!2d+!3d],!4r" },
   { kX86Mov32TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, 0, 0x89, 0, 0, 0, 0, 0, false }, "Mov32TR", "fs:[!0d],!1r" },
   { kX86Mov32RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RR", "!0r,!1r" },
   { kX86Mov32RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { 0,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov32RM", "!0r,[!1r+!2d]" },
@@ -203,6 +205,8 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
 
   { kX86Mov64MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,      { REX_W,             0, 0x89, 0, 0, 0, 0, 0, false }, "Mov64MR", "[!0r+!1d],!2r" },
   { kX86Mov64AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,     { REX_W,             0, 0x89, 0, 0, 0, 0, 0, false }, "Mov64AR", "[!0r+!1r<<!2d+!3d],!4r" },
+  { kX86Movnti64MR, kMemReg,    IS_STORE | IS_TERTIARY_OP | REG_USE02,   { 0x0F,              0, 0xC3, 0, 0, 0, 0, 0, false }, "Movnti64MR", "[!0r+!1d],!2r" },
+  { kX86Movnti64AR, kArrayReg,  IS_STORE | IS_QUIN_OP     | REG_USE014,  { 0x0F,              0, 0xC3, 0, 0, 0, 0, 0, false }, "Movnti64AR", "[!0r+!1r<<!2d+!3d],!4r" },
   { kX86Mov64TR, kThreadReg, IS_STORE | IS_BINARY_OP   | REG_USE1,       { THREAD_PREFIX, REX_W, 0x89, 0, 0, 0, 0, 0, false }, "Mov64TR", "fs:[!0d],!1r" },
   { kX86Mov64RR, kRegReg,               IS_BINARY_OP   | REG_DEF0_USE1,  { REX_W,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RR", "!0r,!1r" },
   { kX86Mov64RM, kRegMem,    IS_LOAD  | IS_TERTIARY_OP | REG_DEF0_USE1,  { REX_W,             0, 0x8B, 0, 0, 0, 0, 0, false }, "Mov64RM", "!0r,[!1r+!2d]" },
@@ -263,8 +267,10 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
 
   { kX86Cmc, kNullary, NO_OPERAND, { 0, 0, 0xF5, 0, 0, 0, 0, 0, false }, "Cmc", "" },
   { kX86Shld32RRI,  kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES,            { 0,    0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld32RRI", "!0r,!1r,!2d" },
+  { kX86Shld32RRC,  kShiftRegRegCl,  IS_TERTIARY_OP | REG_DEF0_USE01  | REG_USEC | SETS_CCODES, { 0,    0, 0x0F, 0xA5, 0, 0, 0, 0, false }, "Shld32RRC", "!0r,!1r,cl" },
   { kX86Shld32MRI,  kMemRegImm,      IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0,    0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld32MRI", "[!0r+!1d],!2r,!3d" },
   { kX86Shrd32RRI,  kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES,            { 0,    0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd32RRI", "!0r,!1r,!2d" },
+  { kX86Shrd32RRC,  kShiftRegRegCl,  IS_TERTIARY_OP | REG_DEF0_USE01  | REG_USEC | SETS_CCODES, { 0,    0, 0x0F, 0xAD, 0, 0, 0, 0, false }, "Shrd32RRC", "!0r,!1r,cl" },
   { kX86Shrd32MRI,  kMemRegImm,      IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { 0,    0, 0x0F, 0xAC, 0, 0, 0, 1, false }, "Shrd32MRI", "[!0r+!1d],!2r,!3d" },
   { kX86Shld64RRI,  kRegRegImmStore, IS_TERTIARY_OP | REG_DEF0_USE01  | SETS_CCODES,            { REX_W,    0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld64RRI", "!0r,!1r,!2d" },
   { kX86Shld64MRI,  kMemRegImm,      IS_QUAD_OP | REG_USE02 | IS_LOAD | IS_STORE | SETS_CCODES, { REX_W,    0, 0x0F, 0xA4, 0, 0, 0, 1, false }, "Shld64MRI", "[!0r+!1d],!2r,!3d" },
@@ -484,7 +490,9 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
 
   // TODO: load/store?
   // Encode the modrm opcode as an extra opcode byte to avoid computation during assembly.
+  { kX86Lfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 5, 0, 0, false }, "Lfence", "" },
   { kX86Mfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 6, 0, 0, false }, "Mfence", "" },
+  { kX86Sfence, kReg,                 NO_OPERAND,     { 0, 0, 0x0F, 0xAE, 0, 7, 0, 0, false }, "Sfence", "" },
 
   EXT_0F_ENCODING_MAP(Imul16,  0x66, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
   EXT_0F_ENCODING_MAP(Imul32,  0x00, 0xAF, REG_USE0 | REG_DEF0 | SETS_CCODES),
@@ -526,7 +534,7 @@ ENCODING_MAP(Cmp, IS_LOAD, 0, 0,
 
   { kX86StartOfMethod, kMacro,  IS_UNARY_OP | SETS_CCODES,             { 0, 0, 0,    0, 0, 0, 0, 0, false }, "StartOfMethod", "!0r" },
   { kX86PcRelLoadRA,   kPcRel,  IS_LOAD | IS_QUIN_OP | REG_DEF0_USE12, { 0, 0, 0x8B, 0, 0, 0, 0, 0, false }, "PcRelLoadRA",   "!0r,[!1r+!2r<<!3d+!4p]" },
-  { kX86PcRelAdr,      kPcRel,  IS_LOAD | IS_BINARY_OP | REG_DEF0,     { 0, 0, 0xB8, 0, 0, 0, 0, 4, false }, "PcRelAdr",      "!0r,!1d" },
+  { kX86PcRelAdr,      kPcRel,  IS_LOAD | IS_BINARY_OP | REG_DEF0,     { 0, 0, 0xB8, 0, 0, 0, 0, 4, false }, "PcRelAdr",      "!0r,!1p" },
   { kX86RepneScasw,    kNullary, NO_OPERAND | REG_USEA | REG_USEC | SETS_CCODES, { 0x66, 0xF2, 0xAF, 0, 0, 0, 0, 0, false }, "RepNE ScasW", "" },
 };
 
@@ -591,6 +599,7 @@ static bool ModrmIsRegReg(const X86EncodingMap* entry) {
     case kShiftRegCl: return true;
     case kRegCond: return true;
     case kRegRegCond: return true;
+    case kShiftRegRegCl: return true;
     case kJmp:
       switch (entry->opcode) {
         case kX86JmpR: return true;
@@ -768,6 +777,9 @@ size_t X86Mir2Lir::GetInsnSize(LIR* lir) {
       DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[4]));
       return ComputeSize(entry, lir->operands[4], lir->operands[1], lir->operands[0],
                          lir->operands[3]);
+    case kShiftRegRegCl:  // lir operands - 0: reg1, 1: reg2, 2: cl
+      DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(lir->operands[2]));
+      return ComputeSize(entry, lir->operands[0], NO_REG, lir->operands[1], 0);
     case kRegCond:  // lir operands - 0: reg, 1: cond
       return ComputeSize(entry, NO_REG, NO_REG, lir->operands[0], 0);
     case kMemCond:  // lir operands - 0: base, 1: disp, 2: cond
@@ -1336,6 +1348,19 @@ void X86Mir2Lir::EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base,
   DCHECK_EQ(0, entry->skeleton.immediate_bytes);
 }
 
+// Emits a register-register double-precision shift (SHLD/SHRD r/m32, r32, CL).
+// The shift count is implicit in CL, so raw_cl is only DCHECKed to be rCX.
+// Builds a mod=11 ModRM byte with raw_reg1 in the reg field and raw_reg2 in
+// the r/m field; callers must pass operands pre-ordered for that encoding.
+void X86Mir2Lir::EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2, int32_t raw_cl) {
+  DCHECK_EQ(false, entry->skeleton.r8_form);
+  DCHECK_EQ(rs_rCX.GetRegNum(), RegStorage::RegNum(raw_cl));
+  EmitPrefixAndOpcode(entry, raw_reg1, NO_REG, raw_reg2);
+  uint8_t low_reg1 = LowRegisterBits(raw_reg1);
+  uint8_t low_reg2 = LowRegisterBits(raw_reg2);
+  // ModRM: mod=11 (register direct), reg=low_reg1, rm=low_reg2.
+  uint8_t modrm = (3 << 6) | (low_reg1 << 3) | low_reg2;
+  code_buffer_.push_back(modrm);
+  // SHLD/SHRD CL forms carry no modrm opcode extension, AX-form, or immediate.
+  DCHECK_EQ(0, entry->skeleton.modrm_opcode);
+  DCHECK_EQ(0, entry->skeleton.ax_opcode);
+  DCHECK_EQ(0, entry->skeleton.immediate_bytes);
+}
+
 void X86Mir2Lir::EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp,
                                  int32_t imm) {
   DCHECK_EQ(false, entry->skeleton.r8_form);
@@ -1829,6 +1854,9 @@ AssemblerStatus X86Mir2Lir::AssembleInstructions(CodeOffset start_addr) {
       case kShiftMemCl:  // lir operands - 0: base, 1:displacement, 2: cl
         EmitShiftMemCl(entry, lir->operands[0], lir->operands[1], lir->operands[2]);
         break;
+      case kShiftRegRegCl:  // lir operands - 0: reg1, 1: reg2, 2: cl
+        EmitShiftRegRegCl(entry, lir->operands[1], lir->operands[0], lir->operands[2]);
+        break;
       case kRegCond:  // lir operands - 0: reg, 1: condition
         EmitRegCond(entry, lir->operands[0], lir->operands[1]);
         break;
diff --git a/compiler/dex/quick/x86/backend_x86.h b/compiler/dex/quick/x86/backend_x86.h
new file mode 100644 (file)
index 0000000..f73db94
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_DEX_QUICK_X86_BACKEND_X86_H_
+#define ART_COMPILER_DEX_QUICK_X86_BACKEND_X86_H_
+
+namespace art {
+
+struct CompilationUnit;
+class Mir2Lir;
+class MIRGraph;
+class ArenaAllocator;
+
+// Factory for the x86/x86_64 Mir2Lir code generator (see
+// QuickCompiler::GetCodeGenerator, which uses one generator for both ISAs).
+// Forward declarations keep this header free of heavy includes.
+Mir2Lir* X86CodeGenerator(CompilationUnit* const cu, MIRGraph* const mir_graph,
+                          ArenaAllocator* const arena);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_DEX_QUICK_X86_BACKEND_X86_H_
index 996689a..482c430 100644 (file)
@@ -28,7 +28,7 @@ namespace art {
  * pairs.
  */
 void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpSparseSwitchTable(table);
   }
@@ -61,7 +61,7 @@ void X86Mir2Lir::GenLargeSparseSwitch(MIR* mir, DexOffset table_offset, RegLocat
  * done:
  */
 void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   if (cu_->verbose) {
     DumpPackedSwitchTable(table);
   }
@@ -134,8 +134,8 @@ void X86Mir2Lir::GenLargePackedSwitch(MIR* mir, DexOffset table_offset, RegLocat
  *
  * Total size is 4+(width * size + 1)/2 16-bit code units.
  */
-void X86Mir2Lir::GenFillArrayData(DexOffset table_offset, RegLocation rl_src) {
-  const uint16_t* table = cu_->insns + current_dalvik_offset_ + table_offset;
+void X86Mir2Lir::GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) {
+  const uint16_t* table = mir_graph_->GetTable(mir, table_offset);
   // Add the table to the list - we'll process it later
   FillArrayData* tab_rec =
       static_cast<FillArrayData*>(arena_->Alloc(sizeof(FillArrayData), kArenaAllocData));
index d74caae..7ad917d 100644 (file)
@@ -18,6 +18,7 @@
 #define ART_COMPILER_DEX_QUICK_X86_CODEGEN_X86_H_
 
 #include "dex/compiler_internals.h"
+#include "dex/quick/mir_to_lir.h"
 #include "x86_lir.h"
 
 #include <map>
@@ -159,6 +160,7 @@ class X86Mir2Lir : public Mir2Lir {
   bool GenInlinedCas(CallInfo* info, bool is_long, bool is_object) OVERRIDE;
   bool GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) OVERRIDE;
   bool GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double) OVERRIDE;
+  bool GenInlinedReverseBits(CallInfo* info, OpSize size) OVERRIDE;
   bool GenInlinedSqrt(CallInfo* info) OVERRIDE;
   bool GenInlinedAbsFloat(CallInfo* info) OVERRIDE;
   bool GenInlinedAbsDouble(CallInfo* info) OVERRIDE;
@@ -233,7 +235,7 @@ class X86Mir2Lir : public Mir2Lir {
   void GenEntrySequence(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
   void GenExitSequence() OVERRIDE;
   void GenSpecialExitSequence() OVERRIDE;
-  void GenFillArrayData(DexOffset table_offset, RegLocation rl_src) OVERRIDE;
+  void GenFillArrayData(MIR* mir, DexOffset table_offset, RegLocation rl_src) OVERRIDE;
   void GenFusedFPCmpBranch(BasicBlock* bb, MIR* mir, bool gt_bias, bool is_double) OVERRIDE;
   void GenFusedLongCmpBranch(BasicBlock* bb, MIR* mir) OVERRIDE;
   void GenSelect(BasicBlock* bb, MIR* mir) OVERRIDE;
@@ -319,11 +321,13 @@ class X86Mir2Lir : public Mir2Lir {
 
   /*
    * @brief Load the Class* of a Dex Class type into the register.
+   * @param dex DexFile that contains the class type.
    * @param type How the method will be invoked.
    * @param register that will contain the code address.
    * @note register will be passed to TargetReg to get physical register.
    */
-  void LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) OVERRIDE;
+  void LoadClassType(const DexFile& dex_file, uint32_t type_idx,
+                     SpecialTargetRegister symbolic_reg) OVERRIDE;
 
   void FlushIns(RegLocation* ArgLocs, RegLocation rl_method) OVERRIDE;
 
@@ -355,16 +359,10 @@ class X86Mir2Lir : public Mir2Lir {
   void InstallLiteralPools() OVERRIDE;
 
   /*
-   * @brief Generate the debug_frame CFI information.
-   * @returns pointer to vector containing CFE information
-   */
-  static std::vector<uint8_t>* ReturnCommonCallFrameInformation(bool is_x86_64);
-
-  /*
    * @brief Generate the debug_frame FDE information.
    * @returns pointer to vector containing CFE information
    */
-  std::vector<uint8_t>* ReturnCallFrameInformation() OVERRIDE;
+  std::vector<uint8_t>* ReturnFrameDescriptionEntry() OVERRIDE;
 
   LIR* InvokeTrampoline(OpKind op, RegStorage r_tgt, QuickEntrypointEnum trampoline) OVERRIDE;
 
@@ -410,7 +408,7 @@ class X86Mir2Lir : public Mir2Lir {
   LIR* LoadBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
                            RegStorage r_dest, OpSize size);
   LIR* StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale, int displacement,
-                            RegStorage r_src, OpSize size);
+                            RegStorage r_src, OpSize size, int opt_flags = 0);
 
   RegStorage GetCoreArgMappingToPhysicalReg(int core_arg_num);
 
@@ -460,6 +458,8 @@ class X86Mir2Lir : public Mir2Lir {
   void EmitShiftRegImm(const X86EncodingMap* entry, int32_t raw_reg, int32_t imm);
   void EmitShiftRegCl(const X86EncodingMap* entry, int32_t raw_reg, int32_t raw_cl);
   void EmitShiftMemCl(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t raw_cl);
+  void EmitShiftRegRegCl(const X86EncodingMap* entry, int32_t raw_reg1, int32_t raw_reg2,
+                         int32_t raw_cl);
   void EmitShiftMemImm(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t imm);
   void EmitRegCond(const X86EncodingMap* entry, int32_t raw_reg, int32_t cc);
   void EmitMemCond(const X86EncodingMap* entry, int32_t raw_base, int32_t disp, int32_t cc);
@@ -481,8 +481,10 @@ class X86Mir2Lir : public Mir2Lir {
   void GenConstWide(RegLocation rl_dest, int64_t value);
   void GenMultiplyVectorSignedByte(BasicBlock *bb, MIR *mir);
   void GenShiftByteVector(BasicBlock *bb, MIR *mir);
-  void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
-  void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3, uint32_t m4);
+  void AndMaskVectorRegister(RegStorage rs_src1, uint32_t m1, uint32_t m2, uint32_t m3,
+                             uint32_t m4);
+  void MaskVectorRegister(X86OpCode opcode, RegStorage rs_src1, uint32_t m1, uint32_t m2,
+                          uint32_t m3, uint32_t m4);
   void AppendOpcodeWithConst(X86OpCode opcode, int reg, MIR* mir);
 
   static bool ProvidesFullMemoryBarrier(X86OpCode opcode);
@@ -554,7 +556,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenMoveVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed multiply of units in two vector registers: vB = vB .* @note vC using vA to know the type of the vector.
+   * @brief Packed multiply of units in two vector registers: vB = vB .* @note vC using vA to know
+   * the type of the vector.
    * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
@@ -564,7 +567,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenMultiplyVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the type of the vector.
+   * @brief Packed addition of units in two vector registers: vB = vB .+ vC using vA to know the
+   * type of the vector.
    * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
@@ -574,7 +578,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenAddVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the type of the vector.
+   * @brief Packed subtraction of units in two vector registers: vB = vB .- vC using vA to know the
+   * type of the vector.
    * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
@@ -584,7 +589,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenSubtractVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the type of the vector.
+   * @brief Packed shift left of units in two vector registers: vB = vB .<< vC using vA to know the
+   * type of the vector.
    * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
@@ -594,7 +600,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenShiftLeftVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to know the type of the vector.
+   * @brief Packed signed shift right of units in two vector registers: vB = vB .>> vC using vA to
+   * know the type of the vector.
    * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
@@ -604,7 +611,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenSignedShiftRightVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA to know the type of the vector.
+   * @brief Packed unsigned shift right of units in two vector registers: vB = vB .>>> vC using vA
+   * to know the type of the vector.
    * @param bb The basic block in which the MIR is from..
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
@@ -614,7 +622,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenUnsignedShiftRightVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the type of the vector.
+   * @brief Packed bitwise and of units in two vector registers: vB = vB .& vC using vA to know the
+   * type of the vector.
    * @note vA: TypeSize
    * @note vB: destination and source
    * @note vC: source
@@ -622,7 +631,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenAndVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the type of the vector.
+   * @brief Packed bitwise or of units in two vector registers: vB = vB .| vC using vA to know the
+   * type of the vector.
    * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
@@ -632,7 +642,8 @@ class X86Mir2Lir : public Mir2Lir {
   void GenOrVector(BasicBlock *bb, MIR *mir);
 
   /*
-   * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the type of the vector.
+   * @brief Packed bitwise xor of units in two vector registers: vB = vB .^ vC using vA to know the
+   * type of the vector.
    * @param bb The basic block in which the MIR is from.
    * @param mir The MIR whose opcode is kMirConstVector.
    * @note vA: TypeSize
@@ -961,6 +972,9 @@ class X86Mir2Lir : public Mir2Lir {
  private:
   // The number of vector registers [0..N] reserved by a call to ReserveVectorRegisters
   int num_reserved_vector_regs_;
+
+  void SwapBits(RegStorage result_reg, int shift, int32_t value);
+  void SwapBits64(RegStorage result_reg, int shift, int64_t value);
 };
 
 }  // namespace art
index 2920fb6..21d1a5c 100755 (executable)
@@ -730,6 +730,25 @@ bool X86Mir2Lir::GenInlinedMinMaxFP(CallInfo* info, bool is_min, bool is_double)
     // Handle NaN.
     branch_nan->target = NewLIR0(kPseudoTargetLabel);
     LoadConstantWide(rl_result.reg, INT64_C(0x7ff8000000000000));
+
+    // The base_of_code_ compiler temp is non-null when it is reserved
+    // for being able to do data accesses relative to method start.
+    if (base_of_code_ != nullptr) {
+      // Loading from the constant pool may have used base of code register.
+      // However, the code here generates logic in diamond shape and not all
+      // paths load base of code register. Therefore, we ensure it is clobbered so
+      // that the temp caching system does not believe it is live at merge point.
+      RegLocation rl_method = mir_graph_->GetRegLocation(base_of_code_->s_reg_low);
+      if (rl_method.wide) {
+        rl_method = UpdateLocWide(rl_method);
+      } else {
+        rl_method = UpdateLoc(rl_method);
+      }
+      if (rl_method.location == kLocPhysReg) {
+        Clobber(rl_method.reg);
+      }
+    }
+
     LIR* branch_exit_nan = NewLIR1(kX86Jmp8, 0);
     // Handle Min/Max. Copy greater/lesser value from src2.
     branch_cond1->target = NewLIR0(kPseudoTargetLabel);
index fdc46e2..d08ea7c 100755 (executable)
@@ -793,8 +793,115 @@ RegLocation X86Mir2Lir::GenDivRem(RegLocation rl_dest, RegLocation rl_src1,
 bool X86Mir2Lir::GenInlinedMinMax(CallInfo* info, bool is_min, bool is_long) {
   DCHECK(cu_->instruction_set == kX86 || cu_->instruction_set == kX86_64);
 
-  if (is_long && cu_->instruction_set == kX86) {
-    return false;
+  if (is_long && !cu_->target64) {
+   /*
+    * We want to implement the following algorithm
+    * mov eax, low part of arg1
+    * mov edx, high part of arg1
+    * mov ebx, low part of arg2
+    * mov ecx, high part of arg2
+    * mov edi, eax
+    * sub edi, ebx
+    * mov edi, edx
+    * sbb edi, ecx
+    * is_min ? "cmovgel eax, ebx" : "cmovll eax, ebx"
+    * is_min ? "cmovgel edx, ecx" : "cmovll edx, ecx"
+    *
+    * The algorithm above needs 5 registers: a pair for the first operand
+    * (which later will be used as result), a pair for the second operand
+    * and a temp register (e.g. 'edi') for intermediate calculations.
+    * Ideally we have 6 GP caller-save registers in 32-bit mode. They are:
+    * 'eax', 'ebx', 'ecx', 'edx', 'esi' and 'edi'. So there should be
+    * always enough registers to operate on. Practically, there is a pair
+    * of registers 'edi' and 'esi' which holds promoted values and
+    * sometimes should be treated as 'callee save'. If one of the operands
+    * is in the promoted registers then we have enough register to
+    * operate on. Otherwise there is lack of resources and we have to
+    * save 'edi' before calculations and restore after.
+    */
+
+    RegLocation rl_src1 = info->args[0];
+    RegLocation rl_src2 = info->args[2];
+    RegLocation rl_dest = InlineTargetWide(info);
+    int res_vreg, src1_vreg, src2_vreg;
+
+    /*
+     * If the result register is the same as the second element, then we
+     * need to be careful. The reason is that the first copy will
+     * inadvertently clobber the second element with the first one thus
+     * yielding the wrong result. Thus we do a swap in that case.
+     */
+    res_vreg = mir_graph_->SRegToVReg(rl_dest.s_reg_low);
+    src2_vreg = mir_graph_->SRegToVReg(rl_src2.s_reg_low);
+    if (res_vreg == src2_vreg) {
+      std::swap(rl_src1, rl_src2);
+    }
+
+    rl_src1 = LoadValueWide(rl_src1, kCoreReg);
+    RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+
+    // Pick the first integer as min/max.
+    OpRegCopyWide(rl_result.reg, rl_src1.reg);
+
+    /*
+     * If the integers are both in the same register, then there is
+     * nothing else to do because they are equal and we have already
+     * moved one into the result.
+     */
+    src1_vreg = mir_graph_->SRegToVReg(rl_src1.s_reg_low);
+    src2_vreg = mir_graph_->SRegToVReg(rl_src2.s_reg_low);
+    if (src1_vreg == src2_vreg) {
+      StoreValueWide(rl_dest, rl_result);
+      return true;
+    }
+
+    // Free registers to make some room for the second operand.
+    // But don't try to free ourselves or promoted registers.
+    if (res_vreg != src1_vreg &&
+        IsTemp(rl_src1.reg.GetLow()) && IsTemp(rl_src1.reg.GetHigh())) {
+      FreeTemp(rl_src1.reg);
+    }
+    rl_src2 = LoadValueWide(rl_src2, kCoreReg);
+
+    // Do we have a free register for intermediate calculations?
+    RegStorage tmp = AllocTemp(false);
+    if (tmp == RegStorage::InvalidReg()) {
+       /*
+        * No, will use 'edi'.
+        *
+        * As mentioned above we have 4 temporary and 2 promotable
+        * caller-save registers. Therefore, we assume that a free
+        * register can be allocated only if 'esi' and 'edi' are
+        * already used as operands. If number of promotable registers
+        * increases from 2 to 4 then our assumption fails and operand
+        * data is corrupted.
+        * Let's DCHECK it.
+        */
+       DCHECK(IsTemp(rl_src2.reg.GetLow()) &&
+              IsTemp(rl_src2.reg.GetHigh()) &&
+              IsTemp(rl_result.reg.GetLow()) &&
+              IsTemp(rl_result.reg.GetHigh()));
+       tmp = rs_rDI;
+       NewLIR1(kX86Push32R, tmp.GetReg());
+    }
+
+    // Now we are ready to do calculations.
+    OpRegReg(kOpMov, tmp, rl_result.reg.GetLow());
+    OpRegReg(kOpSub, tmp, rl_src2.reg.GetLow());
+    OpRegReg(kOpMov, tmp, rl_result.reg.GetHigh());
+    OpRegReg(kOpSbc, tmp, rl_src2.reg.GetHigh());
+
+    // Let's put pop 'edi' here to break a bit the dependency chain.
+    if (tmp == rs_rDI) {
+      NewLIR1(kX86Pop32R, tmp.GetReg());
+    }
+
+    // Conditionally move the other integer into the destination register.
+    ConditionCode cc = is_min ? kCondGe : kCondLt;
+    OpCondRegReg(kOpCmov, cc, rl_result.reg.GetLow(), rl_src2.reg.GetLow());
+    OpCondRegReg(kOpCmov, cc, rl_result.reg.GetHigh(), rl_src2.reg.GetHigh());
+    StoreValueWide(rl_dest, rl_result);
+    return true;
   }
 
   // Get the two arguments to the invoke and place them in GP registers.
@@ -1061,6 +1168,83 @@ bool X86Mir2Lir::GenInlinedCas(CallInfo* info, bool is_long, bool is_object) {
   return true;
 }
 
+void X86Mir2Lir::SwapBits(RegStorage result_reg, int shift, int32_t value) {
+  RegStorage r_temp = AllocTemp();
+  OpRegCopy(r_temp, result_reg);
+  OpRegImm(kOpLsr, result_reg, shift);
+  OpRegImm(kOpAnd, r_temp, value);
+  OpRegImm(kOpAnd, result_reg, value);
+  OpRegImm(kOpLsl, r_temp, shift);
+  OpRegReg(kOpOr, result_reg, r_temp);
+  FreeTemp(r_temp);
+}
+
+void X86Mir2Lir::SwapBits64(RegStorage result_reg, int shift, int64_t value) {
+  RegStorage r_temp = AllocTempWide();
+  OpRegCopy(r_temp, result_reg);
+  OpRegImm(kOpLsr, result_reg, shift);
+  RegStorage r_value = AllocTempWide();
+  LoadConstantWide(r_value, value);
+  OpRegReg(kOpAnd, r_temp, r_value);
+  OpRegReg(kOpAnd, result_reg, r_value);
+  OpRegImm(kOpLsl, r_temp, shift);
+  OpRegReg(kOpOr, result_reg, r_temp);
+  FreeTemp(r_temp);
+  FreeTemp(r_value);
+}
+
+bool X86Mir2Lir::GenInlinedReverseBits(CallInfo* info, OpSize size) {
+  RegLocation rl_src_i = info->args[0];
+  RegLocation rl_i = (size == k64) ? LoadValueWide(rl_src_i, kCoreReg)
+                                   : LoadValue(rl_src_i, kCoreReg);
+  RegLocation rl_dest = (size == k64) ? InlineTargetWide(info) : InlineTarget(info);
+  RegLocation rl_result = EvalLoc(rl_dest, kCoreReg, true);
+  if (size == k64) {
+    if (cu_->instruction_set == kX86_64) {
+      /* Use one bswap instruction to reverse byte order first and then use 3 rounds of
+         swapping bits to reverse bits in a long number x. Using bswap to save instructions
+         compared to generic luni implementation which has 5 rounds of swapping bits.
+         x = bswap x
+         x = (x & 0x5555555555555555) << 1 | (x >> 1) & 0x5555555555555555;
+         x = (x & 0x3333333333333333) << 2 | (x >> 2) & 0x3333333333333333;
+         x = (x & 0x0F0F0F0F0F0F0F0F) << 4 | (x >> 4) & 0x0F0F0F0F0F0F0F0F;
+      */
+      OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
+      SwapBits64(rl_result.reg, 1, 0x5555555555555555);
+      SwapBits64(rl_result.reg, 2, 0x3333333333333333);
+      SwapBits64(rl_result.reg, 4, 0x0f0f0f0f0f0f0f0f);
+      StoreValueWide(rl_dest, rl_result);
+      return true;
+    }
+    RegStorage r_i_low = rl_i.reg.GetLow();
+    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
+      // First REV shall clobber rl_result.reg.GetLowReg(), save the value in a temp for the second
+      // REV.
+      r_i_low = AllocTemp();
+      OpRegCopy(r_i_low, rl_i.reg);
+    }
+    OpRegReg(kOpRev, rl_result.reg.GetLow(), rl_i.reg.GetHigh());
+    OpRegReg(kOpRev, rl_result.reg.GetHigh(), r_i_low);
+    if (rl_i.reg.GetLowReg() == rl_result.reg.GetLowReg()) {
+      FreeTemp(r_i_low);
+    }
+    SwapBits(rl_result.reg.GetLow(), 1, 0x55555555);
+    SwapBits(rl_result.reg.GetLow(), 2, 0x33333333);
+    SwapBits(rl_result.reg.GetLow(), 4, 0x0f0f0f0f);
+    SwapBits(rl_result.reg.GetHigh(), 1, 0x55555555);
+    SwapBits(rl_result.reg.GetHigh(), 2, 0x33333333);
+    SwapBits(rl_result.reg.GetHigh(), 4, 0x0f0f0f0f);
+    StoreValueWide(rl_dest, rl_result);
+  } else {
+    OpRegReg(kOpRev, rl_result.reg, rl_i.reg);
+    SwapBits(rl_result.reg, 1, 0x55555555);
+    SwapBits(rl_result.reg, 2, 0x33333333);
+    SwapBits(rl_result.reg, 4, 0x0f0f0f0f);
+    StoreValue(rl_dest, rl_result);
+  }
+  return true;
+}
+
 LIR* X86Mir2Lir::OpPcRelLoad(RegStorage reg, LIR* target) {
   CHECK(base_of_code_ != nullptr);
 
@@ -2140,9 +2324,9 @@ void X86Mir2Lir::GenArrayPut(int opt_flags, OpSize size, RegLocation rl_array,
   if ((size == kSignedByte || size == kUnsignedByte) && !IsByteRegister(rl_src.reg)) {
     RegStorage temp = AllocTemp();
     OpRegCopy(temp, rl_src.reg);
-    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp, size);
+    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, temp, size, opt_flags);
   } else {
-    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg, size);
+    StoreBaseIndexedDisp(rl_array.reg, rl_index.reg, scale, data_offset, rl_src.reg, size, opt_flags);
   }
   if (card_mark) {
     // Free rl_index if its a temp. Ensures there are 2 free regs for card mark.
@@ -2956,7 +3140,53 @@ void X86Mir2Lir::GenIntToLong(RegLocation rl_dest, RegLocation rl_src) {
 void X86Mir2Lir::GenShiftOpLong(Instruction::Code opcode, RegLocation rl_dest,
                         RegLocation rl_src1, RegLocation rl_shift) {
   if (!cu_->target64) {
-    Mir2Lir::GenShiftOpLong(opcode, rl_dest, rl_src1, rl_shift);
+    // Long shift operations in 32-bit. Use shld or shrd to create a 32-bit register filled from
+    // the other half, shift the other half, if the shift amount is less than 32 we're done,
+    // otherwise move one register to the other and place zero or sign bits in the other.
+    LIR* branch;
+    FlushAllRegs();
+    LockCallTemps();
+    LoadValueDirectFixed(rl_shift, rs_rCX);
+    RegStorage r_tmp = RegStorage::MakeRegPair(rs_rAX, rs_rDX);
+    LoadValueDirectWideFixed(rl_src1, r_tmp);
+    switch (opcode) {
+      case Instruction::SHL_LONG:
+      case Instruction::SHL_LONG_2ADDR:
+        NewLIR3(kX86Shld32RRC, r_tmp.GetHighReg(), r_tmp.GetLowReg(), rs_rCX.GetReg());
+        NewLIR2(kX86Sal32RC, r_tmp.GetLowReg(), rs_rCX.GetReg());
+        NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+        OpRegCopy(r_tmp.GetHigh(), r_tmp.GetLow());
+        LoadConstant(r_tmp.GetLow(), 0);
+        branch->target = NewLIR0(kPseudoTargetLabel);
+        break;
+      case Instruction::SHR_LONG:
+      case Instruction::SHR_LONG_2ADDR:
+        NewLIR3(kX86Shrd32RRC, r_tmp.GetLowReg(), r_tmp.GetHighReg(), rs_rCX.GetReg());
+        NewLIR2(kX86Sar32RC, r_tmp.GetHighReg(), rs_rCX.GetReg());
+        NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+        OpRegCopy(r_tmp.GetLow(), r_tmp.GetHigh());
+        NewLIR2(kX86Sar32RI, r_tmp.GetHighReg(), 31);
+        branch->target = NewLIR0(kPseudoTargetLabel);
+        break;
+      case Instruction::USHR_LONG:
+      case Instruction::USHR_LONG_2ADDR:
+        NewLIR3(kX86Shrd32RRC, r_tmp.GetLowReg(), r_tmp.GetHighReg(),
+               rs_rCX.GetReg());
+        NewLIR2(kX86Shr32RC, r_tmp.GetHighReg(), rs_rCX.GetReg());
+        NewLIR2(kX86Test8RI, rs_rCX.GetReg(), 32);
+        branch = NewLIR2(kX86Jcc8, 0, kX86CondZ);
+        OpRegCopy(r_tmp.GetLow(), r_tmp.GetHigh());
+        LoadConstant(r_tmp.GetHigh(), 0);
+        branch->target = NewLIR0(kPseudoTargetLabel);
+        break;
+      default:
+        LOG(FATAL) << "Unexpected case: " << opcode;
+        return;
+    }
+    RegLocation rl_result = LocCReturnWide();
+    StoreValueWide(rl_dest, rl_result);
     return;
   }
 
index 75279d7..68c1633 100755 (executable)
@@ -17,6 +17,7 @@
 #include <string>
 #include <inttypes.h>
 
+#include "backend_x86.h"
 #include "codegen_x86.h"
 #include "dex/compiler_internals.h"
 #include "dex/quick/mir_to_lir-inl.h"
@@ -24,6 +25,7 @@
 #include "mirror/array.h"
 #include "mirror/string.h"
 #include "x86_lir.h"
+#include "utils/dwarf_cfi.h"
 
 namespace art {
 
@@ -362,6 +364,7 @@ std::string X86Mir2Lir::BuildInsnString(const char *fmt, LIR *lir, unsigned char
              int64_t value = static_cast<int64_t>(static_cast<int64_t>(operand) << 32 |
                              static_cast<uint32_t>(lir->operands[operand_number+1]));
              buf +=StringPrintf("%" PRId64, value);
+             break;
           }
           case 'p': {
             EmbeddedData *tab_rec = reinterpret_cast<EmbeddedData*>(UnwrapPointer(operand));
@@ -594,6 +597,9 @@ bool X86Mir2Lir::GenMemBarrier(MemBarrierKind barrier_kind) {
       mem_barrier = NewLIR0(kX86Mfence);
       ret = true;
     }
+  } else if (barrier_kind == kNTStoreStore) {
+      mem_barrier = NewLIR0(kX86Sfence);
+      ret = true;
   }
 
   // Now ensure that a scheduling barrier is in place.
@@ -758,10 +764,7 @@ bool X86Mir2Lir::IsUnconditionalBranch(LIR* lir) {
 RegisterClass X86Mir2Lir::RegClassForFieldLoadStore(OpSize size, bool is_volatile) {
   // X86_64 can handle any size.
   if (cu_->target64) {
-    if (size == kReference) {
-      return kRefReg;
-    }
-    return kCoreReg;
+    return RegClassBySize(size);
   }
 
   if (UNLIKELY(is_volatile)) {
@@ -975,19 +978,21 @@ void X86Mir2Lir::LoadMethodAddress(const MethodReference& target_method, InvokeT
   method_address_insns_.Insert(move);
 }
 
-void X86Mir2Lir::LoadClassType(uint32_t type_idx, SpecialTargetRegister symbolic_reg) {
+void X86Mir2Lir::LoadClassType(const DexFile& dex_file, uint32_t type_idx,
+                               SpecialTargetRegister symbolic_reg) {
   /*
    * For x86, just generate a 32 bit move immediate instruction, that will be filled
    * in at 'link time'.  For now, put a unique value based on target to ensure that
    * code deduplication works.
    */
-  const DexFile::TypeId& id = cu_->dex_file->GetTypeId(type_idx);
+  const DexFile::TypeId& id = dex_file.GetTypeId(type_idx);
   uintptr_t ptr = reinterpret_cast<uintptr_t>(&id);
 
   // Generate the move instruction with the unique pointer and save index and type.
   LIR *move = RawLIR(current_dalvik_offset_, kX86Mov32RI,
                      TargetReg(symbolic_reg, kNotWide).GetReg(),
-                     static_cast<int>(ptr), type_idx);
+                     static_cast<int>(ptr), type_idx,
+                     WrapPointer(const_cast<DexFile*>(&dex_file)));
   AppendLIR(move);
   class_type_address_insns_.Insert(move);
 }
@@ -1011,19 +1016,6 @@ LIR *X86Mir2Lir::CallWithLinkerFixup(const MethodReference& target_method, Invok
   return call;
 }
 
-/*
- * @brief Enter a 32 bit quantity into a buffer
- * @param buf buffer.
- * @param data Data value.
- */
-
-static void PushWord(std::vector<uint8_t>&buf, int32_t data) {
-  buf.push_back(data & 0xff);
-  buf.push_back((data >> 8) & 0xff);
-  buf.push_back((data >> 16) & 0xff);
-  buf.push_back((data >> 24) & 0xff);
-}
-
 void X86Mir2Lir::InstallLiteralPools() {
   // These are handled differently for x86.
   DCHECK(code_literal_list_ == nullptr);
@@ -1044,10 +1036,10 @@ void X86Mir2Lir::InstallLiteralPools() {
       align_size--;
     }
     for (LIR *p = const_vectors_; p != nullptr; p = p->next) {
-      PushWord(code_buffer_, p->operands[0]);
-      PushWord(code_buffer_, p->operands[1]);
-      PushWord(code_buffer_, p->operands[2]);
-      PushWord(code_buffer_, p->operands[3]);
+      PushWord(&code_buffer_, p->operands[0]);
+      PushWord(&code_buffer_, p->operands[1]);
+      PushWord(&code_buffer_, p->operands[2]);
+      PushWord(&code_buffer_, p->operands[3]);
     }
   }
 
@@ -1072,12 +1064,16 @@ void X86Mir2Lir::InstallLiteralPools() {
   for (uint32_t i = 0; i < class_type_address_insns_.Size(); i++) {
       LIR* p = class_type_address_insns_.Get(i);
       DCHECK_EQ(p->opcode, kX86Mov32RI);
+
+      const DexFile* class_dex_file =
+        reinterpret_cast<const DexFile*>(UnwrapPointer(p->operands[3]));
       uint32_t target_method_idx = p->operands[2];
 
       // The offset to patch is the last 4 bytes of the instruction.
       int patch_offset = p->offset + p->flags.size - 4;
       cu_->compiler_driver->AddClassPatch(cu_->dex_file, cu_->class_def_idx,
-                                          cu_->method_idx, target_method_idx, patch_offset);
+                                          cu_->method_idx, target_method_idx, class_dex_file,
+                                          patch_offset);
   }
 
   // And now the PC-relative calls to methods.
@@ -1221,7 +1217,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   RegLocation rl_obj = info->args[0];
   RegLocation rl_char = info->args[1];
   RegLocation rl_start;  // Note: only present in III flavor or IndexOf.
-  // RBX is callee-save register in 64-bit mode.
+  // RBX is promotable in 64-bit mode.
   RegStorage rs_tmp = cu_->target64 ? rs_r11 : rs_rBX;
   int start_value = -1;
 
@@ -1241,23 +1237,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   // EBX or R11: temporary during execution (depending on mode).
   // REP SCASW: search instruction.
 
-  FlushReg(rs_rAX);
-  Clobber(rs_rAX);
-  LockTemp(rs_rAX);
-  FlushReg(rs_rCX);
-  Clobber(rs_rCX);
-  LockTemp(rs_rCX);
-  FlushReg(rs_rDX);
-  Clobber(rs_rDX);
-  LockTemp(rs_rDX);
-  FlushReg(rs_tmp);
-  Clobber(rs_tmp);
-  LockTemp(rs_tmp);
-  if (cu_->target64) {
-    FlushReg(rs_rDI);
-    Clobber(rs_rDI);
-    LockTemp(rs_rDI);
-  }
+  FlushAllRegs();
 
   RegLocation rl_return = GetReturn(kCoreReg);
   RegLocation rl_dest = InlineTarget(info);
@@ -1299,7 +1279,7 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   MarkPossibleNullPointerException(0);
 
   if (!cu_->target64) {
-    // EDI is callee-save register in 32-bit mode.
+    // EDI is promotable in 32-bit mode.
     NewLIR1(kX86Push32R, rs_rDI.GetReg());
   }
 
@@ -1406,142 +1386,9 @@ bool X86Mir2Lir::GenInlinedIndexOf(CallInfo* info, bool zero_based) {
   }
 
   StoreValue(rl_dest, rl_return);
-
-  FreeTemp(rs_rAX);
-  FreeTemp(rs_rCX);
-  FreeTemp(rs_rDX);
-  FreeTemp(rs_tmp);
-  if (cu_->target64) {
-    FreeTemp(rs_rDI);
-  }
-
   return true;
 }
 
-/*
- * @brief Enter an 'advance LOC' into the FDE buffer
- * @param buf FDE buffer.
- * @param increment Amount by which to increase the current location.
- */
-static void AdvanceLoc(std::vector<uint8_t>&buf, uint32_t increment) {
-  if (increment < 64) {
-    // Encoding in opcode.
-    buf.push_back(0x1 << 6 | increment);
-  } else if (increment < 256) {
-    // Single byte delta.
-    buf.push_back(0x02);
-    buf.push_back(increment);
-  } else if (increment < 256 * 256) {
-    // Two byte delta.
-    buf.push_back(0x03);
-    buf.push_back(increment & 0xff);
-    buf.push_back((increment >> 8) & 0xff);
-  } else {
-    // Four byte delta.
-    buf.push_back(0x04);
-    PushWord(buf, increment);
-  }
-}
-
-
-std::vector<uint8_t>* X86CFIInitialization(bool is_x86_64) {
-  return X86Mir2Lir::ReturnCommonCallFrameInformation(is_x86_64);
-}
-
-static void EncodeUnsignedLeb128(std::vector<uint8_t>& buf, uint32_t value) {
-  uint8_t buffer[12];
-  uint8_t *ptr = EncodeUnsignedLeb128(buffer, value);
-  for (uint8_t *p = buffer; p < ptr; p++) {
-    buf.push_back(*p);
-  }
-}
-
-static void EncodeSignedLeb128(std::vector<uint8_t>& buf, int32_t value) {
-  uint8_t buffer[12];
-  uint8_t *ptr = EncodeSignedLeb128(buffer, value);
-  for (uint8_t *p = buffer; p < ptr; p++) {
-    buf.push_back(*p);
-  }
-}
-
-std::vector<uint8_t>* X86Mir2Lir::ReturnCommonCallFrameInformation(bool is_x86_64) {
-  std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
-
-  // Length (will be filled in later in this routine).
-  PushWord(*cfi_info, 0);
-
-  // CIE id: always 0.
-  PushWord(*cfi_info, 0);
-
-  // Version: always 1.
-  cfi_info->push_back(0x01);
-
-  // Augmentation: 'zR\0'
-  cfi_info->push_back(0x7a);
-  cfi_info->push_back(0x52);
-  cfi_info->push_back(0x0);
-
-  // Code alignment: 1.
-  EncodeUnsignedLeb128(*cfi_info, 1);
-
-  // Data alignment.
-  if (is_x86_64) {
-    EncodeSignedLeb128(*cfi_info, -8);
-  } else {
-    EncodeSignedLeb128(*cfi_info, -4);
-  }
-
-  // Return address register.
-  if (is_x86_64) {
-    // R16(RIP)
-    cfi_info->push_back(0x10);
-  } else {
-    // R8(EIP)
-    cfi_info->push_back(0x08);
-  }
-
-  // Augmentation length: 1.
-  cfi_info->push_back(1);
-
-  // Augmentation data: 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
-  cfi_info->push_back(0x03);
-
-  // Initial instructions.
-  if (is_x86_64) {
-    // DW_CFA_def_cfa R7(RSP) 8.
-    cfi_info->push_back(0x0c);
-    cfi_info->push_back(0x07);
-    cfi_info->push_back(0x08);
-
-    // DW_CFA_offset R16(RIP) 1 (* -8).
-    cfi_info->push_back(0x90);
-    cfi_info->push_back(0x01);
-  } else {
-    // DW_CFA_def_cfa R4(ESP) 4.
-    cfi_info->push_back(0x0c);
-    cfi_info->push_back(0x04);
-    cfi_info->push_back(0x04);
-
-    // DW_CFA_offset R8(EIP) 1 (* -4).
-    cfi_info->push_back(0x88);
-    cfi_info->push_back(0x01);
-  }
-
-  // Padding to a multiple of 4
-  while ((cfi_info->size() & 3) != 0) {
-    // DW_CFA_nop is encoded as 0.
-    cfi_info->push_back(0);
-  }
-
-  // Set the length of the CIE inside the generated bytes.
-  uint32_t length = cfi_info->size() - 4;
-  (*cfi_info)[0] = length;
-  (*cfi_info)[1] = length >> 8;
-  (*cfi_info)[2] = length >> 16;
-  (*cfi_info)[3] = length >> 24;
-  return cfi_info;
-}
-
 static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_id) {
   if (is_x86_64) {
     switch (art_reg_id) {
@@ -1564,36 +1411,23 @@ static bool ARTRegIDToDWARFRegID(bool is_x86_64, int art_reg_id, int* dwarf_reg_
   }
 }
 
-std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
-  std::vector<uint8_t>*cfi_info = new std::vector<uint8_t>;
+std::vector<uint8_t>* X86Mir2Lir::ReturnFrameDescriptionEntry() {
+  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
 
   // Generate the FDE for the method.
   DCHECK_NE(data_offset_, 0U);
 
-  // Length (will be filled in later in this routine).
-  PushWord(*cfi_info, 0);
-
-  // 'CIE_pointer' (filled in by linker).
-  PushWord(*cfi_info, 0);
-
-  // 'initial_location' (filled in by linker).
-  PushWord(*cfi_info, 0);
-
-  // 'address_range' (number of bytes in the method).
-  PushWord(*cfi_info, data_offset_);
-
-  // Augmentation length: 0
-  cfi_info->push_back(0);
+  WriteFDEHeader(cfi_info, cu_->target64);
+  WriteFDEAddressRange(cfi_info, data_offset_, cu_->target64);
 
   // The instructions in the FDE.
   if (stack_decrement_ != nullptr) {
     // Advance LOC to just past the stack decrement.
     uint32_t pc = NEXT_LIR(stack_decrement_)->offset;
-    AdvanceLoc(*cfi_info, pc);
+    DW_CFA_advance_loc(cfi_info, pc);
 
     // Now update the offset to the call frame: DW_CFA_def_cfa_offset frame_size.
-    cfi_info->push_back(0x0e);
-    EncodeUnsignedLeb128(*cfi_info, frame_size_);
+    DW_CFA_def_cfa_offset(cfi_info, frame_size_);
 
     // Handle register spills
     const uint32_t kSpillInstLen = (cu_->target64) ? 5 : 4;
@@ -1605,14 +1439,12 @@ std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
         pc += kSpillInstLen;
 
         // Advance LOC to pass this instruction
-        AdvanceLoc(*cfi_info, kSpillInstLen);
+        DW_CFA_advance_loc(cfi_info, kSpillInstLen);
 
         int dwarf_reg_id;
         if (ARTRegIDToDWARFRegID(cu_->target64, reg, &dwarf_reg_id)) {
-          // DW_CFA_offset_extended_sf reg_no offset
-          cfi_info->push_back(0x11);
-          EncodeUnsignedLeb128(*cfi_info, dwarf_reg_id);
-          EncodeSignedLeb128(*cfi_info, offset / kDataAlignmentFactor);
+          // DW_CFA_offset_extended_sf reg offset
+          DW_CFA_offset_extended_sf(cfi_info, dwarf_reg_id, offset / kDataAlignmentFactor);
         }
 
         offset += GetInstructionSetPointerSize(cu_->instruction_set);
@@ -1622,16 +1454,15 @@ std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
     // We continue with that stack until the epilogue.
     if (stack_increment_ != nullptr) {
       uint32_t new_pc = NEXT_LIR(stack_increment_)->offset;
-      AdvanceLoc(*cfi_info, new_pc - pc);
+      DW_CFA_advance_loc(cfi_info, new_pc - pc);
 
       // We probably have code snippets after the epilogue, so save the
       // current state: DW_CFA_remember_state.
-      cfi_info->push_back(0x0a);
+      DW_CFA_remember_state(cfi_info);
 
       // We have now popped the stack: DW_CFA_def_cfa_offset 4/8.
       // There is only the return PC on the stack now.
-      cfi_info->push_back(0x0e);
-      EncodeUnsignedLeb128(*cfi_info, GetInstructionSetPointerSize(cu_->instruction_set));
+      DW_CFA_def_cfa_offset(cfi_info, GetInstructionSetPointerSize(cu_->instruction_set));
 
       // Everything after that is the same as before the epilogue.
       // Stack bump was followed by RET instruction.
@@ -1639,25 +1470,16 @@ std::vector<uint8_t>* X86Mir2Lir::ReturnCallFrameInformation() {
       if (post_ret_insn != nullptr) {
         pc = new_pc;
         new_pc = post_ret_insn->offset;
-        AdvanceLoc(*cfi_info, new_pc - pc);
+        DW_CFA_advance_loc(cfi_info, new_pc - pc);
         // Restore the state: DW_CFA_restore_state.
-        cfi_info->push_back(0x0b);
+        DW_CFA_restore_state(cfi_info);
       }
     }
   }
 
-  // Padding to a multiple of 4
-  while ((cfi_info->size() & 3) != 0) {
-    // DW_CFA_nop is encoded as 0.
-    cfi_info->push_back(0);
-  }
+  PadCFI(cfi_info);
+  WriteCFILength(cfi_info, cu_->target64);
 
-  // Set the length of the FDE inside the generated bytes.
-  uint32_t length = cfi_info->size() - 4;
-  (*cfi_info)[0] = length;
-  (*cfi_info)[1] = length >> 8;
-  (*cfi_info)[2] = length >> 16;
-  (*cfi_info)[3] = length >> 24;
   return cfi_info;
 }
 
@@ -1711,6 +1533,9 @@ void X86Mir2Lir::GenMachineSpecificExtendedMethodMIR(BasicBlock* bb, MIR* mir) {
     case kMirOpPackedSet:
       GenSetVector(bb, mir);
       break;
+    case kMirOpMemBarrier:
+      GenMemBarrier(static_cast<MemBarrierKind>(mir->dalvikInsn.vA));
+      break;
     default:
       break;
   }
@@ -2409,11 +2234,11 @@ RegStorage X86Mir2Lir::GetArgMappingToPhysicalReg(int arg_num) {
   }
 
   if (!in_to_reg_storage_mapping_.IsInitialized()) {
-    int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
+    int start_vreg = cu_->mir_graph->GetFirstInVR();
     RegLocation* arg_locs = &mir_graph_->reg_location_[start_vreg];
 
     InToRegStorageX86_64Mapper mapper(this);
-    in_to_reg_storage_mapping_.Initialize(arg_locs, cu_->num_ins, &mapper);
+    in_to_reg_storage_mapping_.Initialize(arg_locs, mir_graph_->GetNumOfInVRs(), &mapper);
   }
   return in_to_reg_storage_mapping_.Get(arg_num);
 }
@@ -2462,11 +2287,11 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
     StoreRefDisp(rs_rX86_SP, 0, As32BitReg(TargetReg(kArg0, kRef)), kNotVolatile);
   }
 
-  if (cu_->num_ins == 0) {
+  if (mir_graph_->GetNumOfInVRs() == 0) {
     return;
   }
 
-  int start_vreg = cu_->num_dalvik_registers - cu_->num_ins;
+  int start_vreg = cu_->mir_graph->GetFirstInVR();
   /*
    * Copy incoming arguments to their proper home locations.
    * NOTE: an older version of dx had an issue in which
@@ -2480,7 +2305,7 @@ void X86Mir2Lir::FlushIns(RegLocation* ArgLocs, RegLocation rl_method) {
    * half to memory as well.
    */
   ScopedMemRefType mem_ref_type(this, ResourceMask::kDalvikReg);
-  for (int i = 0; i < cu_->num_ins; i++) {
+  for (uint32_t i = 0; i < mir_graph_->GetNumOfInVRs(); i++) {
     // get reg corresponding to input
     RegStorage reg = GetArgMappingToPhysicalReg(i);
 
@@ -2612,9 +2437,6 @@ int X86Mir2Lir::GenDalvikArgsRange(CallInfo* info, int call_state,
       }
     }
 
-    // Logic below assumes that Method pointer is at offset zero from SP.
-    DCHECK_EQ(VRegOffset(static_cast<int>(kVRegMethodPtrBaseReg)), 0);
-
     // The rest can be copied together
     int start_offset = SRegOffset(info->args[last_mapped_in + size_of_the_last_mapped].s_reg_low);
     int outs_offset = StackVisitor::GetOutVROffset(last_mapped_in + size_of_the_last_mapped,
index a48613f..4f65a0f 100644 (file)
@@ -592,7 +592,6 @@ LIR* X86Mir2Lir::LoadConstantWide(RegStorage r_dest, int64_t value) {
                            kDouble, kNotVolatile);
         res->target = data_target;
         res->flags.fixup = kFixupLoad;
-        Clobber(rl_method.reg);
         store_method_addr_used_ = true;
       } else {
         if (r_dest.IsPair()) {
@@ -779,15 +778,20 @@ LIR* X86Mir2Lir::LoadBaseDisp(RegStorage r_base, int displacement, RegStorage r_
 }
 
 LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int scale,
-                                      int displacement, RegStorage r_src, OpSize size) {
+                                      int displacement, RegStorage r_src, OpSize size,
+                                      int opt_flags) {
   LIR *store = NULL;
   LIR *store2 = NULL;
   bool is_array = r_index.Valid();
   bool pair = r_src.IsPair();
   bool is64bit = (size == k64) || (size == kDouble);
+  bool consider_non_temporal = false;
+
   X86OpCode opcode = kX86Nop;
   switch (size) {
     case k64:
+      consider_non_temporal = true;
+      // Fall through!
     case kDouble:
       if (r_src.IsFloat()) {
         opcode = is_array ? kX86MovsdAR : kX86MovsdMR;
@@ -804,6 +808,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
         opcode = is_array ? kX86Mov64AR  : kX86Mov64MR;
         CHECK_EQ(is_array, false);
         CHECK_EQ(r_src.IsFloat(), false);
+        consider_non_temporal = true;
         break;
       }  // else fall-through to k32 case
     case k32:
@@ -815,6 +820,7 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
         DCHECK(r_src.IsSingle());
       }
       DCHECK_EQ((displacement & 0x3), 0);
+      consider_non_temporal = true;
       break;
     case kUnsignedHalf:
     case kSignedHalf:
@@ -829,6 +835,28 @@ LIR* X86Mir2Lir::StoreBaseIndexedDisp(RegStorage r_base, RegStorage r_index, int
       LOG(FATAL) << "Bad case in StoreBaseIndexedDispBody";
   }
 
+  // Handle non temporal hint here.
+  if (consider_non_temporal && ((opt_flags & MIR_STORE_NON_TEMPORAL) != 0)) {
+    switch (opcode) {
+      // We currently only handle 32/64 bit moves here.
+      case kX86Mov64AR:
+        opcode = kX86Movnti64AR;
+        break;
+      case kX86Mov64MR:
+        opcode = kX86Movnti64MR;
+        break;
+      case kX86Mov32AR:
+        opcode = kX86Movnti32AR;
+        break;
+      case kX86Mov32MR:
+        opcode = kX86Movnti32MR;
+        break;
+      default:
+        // Do nothing here.
+        break;
+    }
+  }
+
   if (!is_array) {
     if (!pair) {
       store = NewLIR3(opcode, r_base.GetReg(), displacement + LOWORD_OFFSET, r_src.GetReg());
@@ -875,6 +903,17 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r
 
   // StoreBaseDisp() will emit correct insn for atomic store on x86
   // assuming r_dest is correctly prepared using RegClassForFieldLoadStore().
+  // x86 only allows registers EAX-EDX to be used as byte registers, if the input src is not
+  // valid, allocate a temp.
+  bool allocated_temp = false;
+  if (size == kUnsignedByte || size == kSignedByte) {
+    if (!cu_->target64 && !r_src.Low4()) {
+      RegStorage r_input = r_src;
+      r_src = AllocateByteRegister();
+      OpRegCopy(r_src, r_input);
+      allocated_temp = true;
+    }
+  }
 
   LIR* store = StoreBaseIndexedDisp(r_base, RegStorage::InvalidReg(), 0, displacement, r_src, size);
 
@@ -884,6 +923,10 @@ LIR* X86Mir2Lir::StoreBaseDisp(RegStorage r_base, int displacement, RegStorage r
     GenMemBarrier(kAnyAny);
   }
 
+  if (allocated_temp) {
+    FreeTemp(r_src);
+  }
+
   return store;
 }
 
@@ -913,7 +956,8 @@ void X86Mir2Lir::AnalyzeMIR() {
 
   // Did we need a pointer to the method code?
   if (store_method_addr_) {
-    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempVR, cu_->target64 == true);
+    base_of_code_ = mir_graph_->GetNewCompilerTemp(kCompilerTempBackend, cu_->target64 == true);
+    DCHECK(base_of_code_ != nullptr);
   } else {
     base_of_code_ = nullptr;
   }
@@ -980,6 +1024,7 @@ void X86Mir2Lir::AnalyzeMIR(int opcode, BasicBlock * bb, MIR *mir) {
       store_method_addr_ = true;
       break;
     case Instruction::INVOKE_STATIC:
+    case Instruction::INVOKE_STATIC_RANGE:
       AnalyzeInvokeStatic(opcode, bb, mir);
       break;
     default:
index 500c6b8..e3ef8c1 100644 (file)
@@ -75,33 +75,36 @@ namespace art {
  *  ST1 .. ST7: caller save
  *
  *  Stack frame diagram (stack grows down, higher addresses at top):
+ *  For a more detailed view of each region see stack.h.
  *
- * +------------------------+
- * | IN[ins-1]              |  {Note: resides in caller's frame}
- * |       .                |
- * | IN[0]                  |
- * | caller's Method*       |
- * +========================+  {Note: start of callee's frame}
- * | return address         |  {pushed by call}
- * | spill region           |  {variable sized}
- * +------------------------+
- * | ...filler word...      |  {Note: used as 2nd word of V[locals-1] if long]
- * +------------------------+
- * | V[locals-1]            |
- * | V[locals-2]            |
- * |      .                 |
- * |      .                 |
- * | V[1]                   |
- * | V[0]                   |
- * +------------------------+
- * |  0 to 3 words padding  |
- * +------------------------+
- * | OUT[outs-1]            |
- * | OUT[outs-2]            |
- * |       .                |
- * | OUT[0]                 |
- * | cur_method*            | <<== sp w/ 16-byte alignment
- * +========================+
+ * +---------------------------+
+ * | IN[ins-1]                 |  {Note: resides in caller's frame}
+ * |       .                   |
+ * | IN[0]                     |
+ * | caller's Method*          |
+ * +===========================+  {Note: start of callee's frame}
+ * | return address            |  {pushed by call}
+ * | spill region              |  {variable sized}
+ * +---------------------------+
+ * | ...filler 4-bytes...      |  {Note: used as 2nd word of V[locals-1] if long]
+ * +---------------------------+
+ * | V[locals-1]               |
+ * | V[locals-2]               |
+ * |      .                    |
+ * |      .                    |
+ * | V[1]                      |
+ * | V[0]                      |
+ * +---------------------------+
+ * | 0 to 12-bytes padding     |
+ * +---------------------------+
+ * | compiler temp region      |
+ * +---------------------------+
+ * | OUT[outs-1]               |
+ * | OUT[outs-2]               |
+ * |       .                   |
+ * | OUT[0]                    |
+ * | StackReference<ArtMethod> | <<== sp w/ 16-byte alignment
+ * +===========================+
  */
 
 enum X86ResourceEncodingPos {
@@ -440,12 +443,12 @@ enum X86OpCode {
   kX86Mov16MR, kX86Mov16AR, kX86Mov16TR,
   kX86Mov16RR, kX86Mov16RM, kX86Mov16RA, kX86Mov16RT,
   kX86Mov16RI, kX86Mov16MI, kX86Mov16AI, kX86Mov16TI,
-  kX86Mov32MR, kX86Mov32AR, kX86Mov32TR,
+  kX86Mov32MR, kX86Mov32AR, kX86Movnti32MR, kX86Movnti32AR, kX86Mov32TR,
   kX86Mov32RR, kX86Mov32RM, kX86Mov32RA, kX86Mov32RT,
   kX86Mov32RI, kX86Mov32MI, kX86Mov32AI, kX86Mov32TI,
   kX86Lea32RM,
   kX86Lea32RA,
-  kX86Mov64MR, kX86Mov64AR, kX86Mov64TR,
+  kX86Mov64MR, kX86Mov64AR, kX86Movnti64MR, kX86Movnti64AR, kX86Mov64TR,
   kX86Mov64RR, kX86Mov64RM, kX86Mov64RA, kX86Mov64RT,
   kX86Mov64RI32, kX86Mov64RI64, kX86Mov64MI, kX86Mov64AI, kX86Mov64TI,
   kX86Lea64RM,
@@ -484,8 +487,10 @@ enum X86OpCode {
 #undef BinaryShiftOpcode
   kX86Cmc,
   kX86Shld32RRI,
+  kX86Shld32RRC,
   kX86Shld32MRI,
   kX86Shrd32RRI,
+  kX86Shrd32RRC,
   kX86Shrd32MRI,
   kX86Shld64RRI,
   kX86Shld64MRI,
@@ -618,7 +623,12 @@ enum X86OpCode {
   kX86MovdrxRR, kX86MovdrxMR, kX86MovdrxAR,  // move into reg from xmm
   kX86MovsxdRR, kX86MovsxdRM, kX86MovsxdRA,  // move 32 bit to 64 bit with sign extension
   kX86Set8R, kX86Set8M, kX86Set8A,  // set byte depending on condition operand
-  kX86Mfence,                   // memory barrier
+  kX86Lfence,                   // memory barrier to serialize all previous
+                                // load-from-memory instructions
+  kX86Mfence,                   // memory barrier to serialize all previous
+                                // load-from-memory and store-to-memory instructions
+  kX86Sfence,                   // memory barrier to serialize all previous
+                                // store-to-memory instructions
   Binary0fOpCode(kX86Imul16),   // 16bit multiply
   Binary0fOpCode(kX86Imul32),   // 32bit multiply
   Binary0fOpCode(kX86Imul64),   // 64bit multiply
@@ -675,6 +685,7 @@ enum X86EncodingKind {
   kMemRegImm,                               // MRI instruction kinds.
   kShiftRegImm, kShiftMemImm, kShiftArrayImm,  // Shift opcode with immediate.
   kShiftRegCl, kShiftMemCl, kShiftArrayCl,     // Shift opcode with register CL.
+  kShiftRegRegCl,
   // kRegRegReg, kRegRegMem, kRegRegArray,    // RRR, RRM, RRA instruction kinds.
   kRegCond, kMemCond, kArrayCond,          // R, M, A instruction kinds following by a condition.
   kRegRegCond,                             // RR instruction kind followed by a condition.
index e26745a..7bf7a65 100644 (file)
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "base/bit_vector-inl.h"
 #include "compiler_internals.h"
 #include "dataflow_iterator-inl.h"
 #include "utils/scoped_arena_containers.h"
@@ -146,8 +147,8 @@ bool MIRGraph::FillDefBlockMatrix(BasicBlock* bb) {
 }
 
 void MIRGraph::ComputeDefBlockMatrix() {
-  int num_registers = cu_->num_dalvik_registers;
-  /* Allocate num_dalvik_registers bit vector pointers */
+  int num_registers = GetNumOfCodeAndTempVRs();
+  /* Allocate num_registers bit vector pointers */
   def_block_matrix_ = static_cast<ArenaBitVector**>
       (arena_->Alloc(sizeof(ArenaBitVector *) * num_registers,
                      kArenaAllocDFInfo));
@@ -158,6 +159,7 @@ void MIRGraph::ComputeDefBlockMatrix() {
     def_block_matrix_[i] =
         new (arena_) ArenaBitVector(arena_, GetNumBlocks(), false, kBitMapBMatrix);
   }
+
   AllNodesIterator iter(this);
   for (BasicBlock* bb = iter.Next(); bb != NULL; bb = iter.Next()) {
     FindLocalLiveIn(bb);
@@ -171,8 +173,8 @@ void MIRGraph::ComputeDefBlockMatrix() {
    * Also set the incoming parameters as defs in the entry block.
    * Only need to handle the parameters for the outer method.
    */
-  int num_regs = cu_->num_dalvik_registers;
-  int in_reg = num_regs - cu_->num_ins;
+  int num_regs = GetNumOfCodeVRs();
+  int in_reg = GetFirstInVR();
   for (; in_reg < num_regs; in_reg++) {
     def_block_matrix_[in_reg]->SetBit(GetEntryBlock()->id);
   }
@@ -456,7 +458,7 @@ void MIRGraph::ComputeSuccLineIn(ArenaBitVector* dest, const ArenaBitVector* src
  * insert a phi node if the variable is live-in to the block.
  */
 bool MIRGraph::ComputeBlockLiveIns(BasicBlock* bb) {
-  DCHECK_EQ(temp_bit_vector_size_, cu_->num_dalvik_registers);
+  DCHECK_EQ(temp_bit_vector_size_, cu_->mir_graph.get()->GetNumOfCodeAndTempVRs());
   ArenaBitVector* temp_dalvik_register_v = temp_bit_vector_;
 
   if (bb->data_flow_info == NULL) {
@@ -507,7 +509,7 @@ void MIRGraph::InsertPhiNodes() {
   }
 
   /* Iterate through each Dalvik register */
-  for (dalvik_reg = cu_->num_dalvik_registers - 1; dalvik_reg >= 0; dalvik_reg--) {
+  for (dalvik_reg = GetNumOfCodeAndTempVRs() - 1; dalvik_reg >= 0; dalvik_reg--) {
     input_blocks->Copy(def_block_matrix_[dalvik_reg]);
     phi_blocks->ClearAllBits();
     do {
@@ -586,7 +588,7 @@ void MIRGraph::DoDFSPreOrderSSARename(BasicBlock* block) {
 
   /* Process this block */
   DoSSAConversion(block);
-  int map_size = sizeof(int) * cu_->num_dalvik_registers;
+  int map_size = sizeof(int) * GetNumOfCodeAndTempVRs();
 
   /* Save SSA map snapshot */
   ScopedArenaAllocator allocator(&cu_->arena_stack);
index 4a3e071..bdfab13 100644 (file)
@@ -252,7 +252,7 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
     // Special-case handling for format 35c/3rc invokes
     Instruction::Code opcode = mir->dalvikInsn.opcode;
     int flags = MIR::DecodedInstruction::IsPseudoMirOp(opcode) ?
-                  0 : Instruction::FlagsOf(mir->dalvikInsn.opcode);
+                  0 : mir->dalvikInsn.FlagsOf();
     if ((flags & Instruction::kInvoke) &&
         (attrs & (DF_FORMAT_35C | DF_FORMAT_3RC))) {
       DCHECK_EQ(next, 0);
@@ -401,33 +401,18 @@ bool MIRGraph::InferTypeAndSize(BasicBlock* bb, MIR* mir, bool changed) {
   return changed;
 }
 
-static const char* storage_name[] = {" Frame ", "PhysReg", " Spill "};
+static const char* storage_name[] = {" Frame ", "PhysReg", " CompilerTemp "};
 
 void MIRGraph::DumpRegLocTable(RegLocation* table, int count) {
-  // FIXME: Quick-specific.  Move to Quick (and make a generic version for MIRGraph?
-  Mir2Lir* cg = static_cast<Mir2Lir*>(cu_->cg.get());
-  if (cg != NULL) {
-    for (int i = 0; i < count; i++) {
-      LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c 0x%04x S%d",
-          table[i].orig_sreg, storage_name[table[i].location],
-          table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
-          table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
-          table[i].is_const ? 'c' : 'n',
-          table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
-          table[i].reg.GetRawBits(),
-          table[i].s_reg_low);
-    }
-  } else {
-    // Either pre-regalloc or Portable.
-    for (int i = 0; i < count; i++) {
-      LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c S%d",
-          table[i].orig_sreg, storage_name[table[i].location],
-          table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
-          table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
-          table[i].is_const ? 'c' : 'n',
-          table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
-          table[i].s_reg_low);
-    }
+  for (int i = 0; i < count; i++) {
+    LOG(INFO) << StringPrintf("Loc[%02d] : %s, %c %c %c %c %c %c 0x%04x S%d",
+                              table[i].orig_sreg, storage_name[table[i].location],
+                              table[i].wide ? 'W' : 'N', table[i].defined ? 'D' : 'U',
+                              table[i].fp ? 'F' : table[i].ref ? 'R' :'C',
+                              table[i].is_const ? 'c' : 'n',
+                              table[i].high_word ? 'H' : 'L', table[i].home ? 'h' : 't',
+                              table[i].reg.GetRawBits(),
+                              table[i].s_reg_low);
   }
 }
 
@@ -436,7 +421,9 @@ static const RegLocation fresh_loc = {kLocDalvikFrame, 0, 0, 0, 0, 0, 0, 0, 0,
                                       RegStorage(), INVALID_SREG, INVALID_SREG};
 
 void MIRGraph::InitRegLocations() {
-  /* Allocate the location map */
+  // Allocate the location map. We also include the maximum possible temps because
+  // the temp allocation initializes reg location as well (in order to deal with
+  // case when it will be called after this pass).
   int max_regs = GetNumSSARegs() + GetMaxPossibleCompilerTemps();
   RegLocation* loc = static_cast<RegLocation*>(arena_->Alloc(max_regs * sizeof(*loc),
                                                              kArenaAllocRegAlloc));
@@ -447,22 +434,18 @@ void MIRGraph::InitRegLocations() {
     loc[i].wide = false;
   }
 
-  /* Patch up the locations for the compiler temps */
-  GrowableArray<CompilerTemp*>::Iterator iter(&compiler_temps_);
-  for (CompilerTemp* ct = iter.Next(); ct != NULL; ct = iter.Next()) {
-    loc[ct->s_reg_low].location = kLocCompilerTemp;
-    loc[ct->s_reg_low].defined = true;
-  }
-
   /* Treat Method* as a normal reference */
-  loc[GetMethodSReg()].ref = true;
+  int method_sreg = GetMethodSReg();
+  loc[method_sreg].ref = true;
+  loc[method_sreg].location = kLocCompilerTemp;
+  loc[method_sreg].defined = true;
 
   reg_location_ = loc;
 
-  int num_regs = cu_->num_dalvik_registers;
+  int num_regs = GetNumOfCodeVRs();
 
   /* Add types of incoming arguments based on signature */
-  int num_ins = cu_->num_ins;
+  int num_ins = GetNumOfInVRs();
   if (num_ins > 0) {
     int s_reg = num_regs - num_ins;
     if ((cu_->access_flags & kAccStatic) == 0) {
@@ -517,11 +500,9 @@ void MIRGraph::InitRegLocations() {
  */
 void MIRGraph::RemapRegLocations() {
   for (int i = 0; i < GetNumSSARegs(); i++) {
-    if (reg_location_[i].location != kLocCompilerTemp) {
-      int orig_sreg = reg_location_[i].s_reg_low;
-      reg_location_[i].orig_sreg = orig_sreg;
-      reg_location_[i].s_reg_low = SRegToVReg(orig_sreg);
-    }
+    int orig_sreg = reg_location_[i].s_reg_low;
+    reg_location_[i].orig_sreg = orig_sreg;
+    reg_location_[i].s_reg_low = SRegToVReg(orig_sreg);
   }
 }
 
index 022ec6b..4d5d253 100644 (file)
@@ -20,6 +20,7 @@
 #include "compiler_driver.h"
 
 #include "dex/compiler_ir.h"
+#include "dex_compilation_unit.h"
 #include "field_helper.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method-inl.h"
index 91c7aec..32a7676 100644 (file)
@@ -355,8 +355,8 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
       compiler_enable_auto_elf_loading_(NULL),
       compiler_get_method_code_addr_(NULL),
       support_boot_image_fixup_(instruction_set != kMips),
-      cfi_info_(nullptr),
       dedupe_code_("dedupe code"),
+      dedupe_src_mapping_table_("dedupe source mapping table"),
       dedupe_mapping_table_("dedupe mapping table"),
       dedupe_vmap_table_("dedupe vmap table"),
       dedupe_gc_map_("dedupe gc map"),
@@ -378,11 +378,6 @@ CompilerDriver::CompilerDriver(const CompilerOptions* compiler_options,
     CHECK(image_classes_.get() == nullptr);
   }
 
-  // Are we generating CFI information?
-  if (compiler_options->GetGenerateGDBInformation()) {
-    cfi_info_.reset(compiler_->GetCallFrameInformationInitialization(*this));
-  }
-
   // Read the profile file if one is provided.
   if (!profile_file.empty()) {
     profile_present_ = profile_file_.LoadFile(profile_file);
@@ -398,6 +393,10 @@ std::vector<uint8_t>* CompilerDriver::DeduplicateCode(const std::vector<uint8_t>
   return dedupe_code_.Add(Thread::Current(), code);
 }
 
+SrcMap* CompilerDriver::DeduplicateSrcMappingTable(const SrcMap& src_map) {
+  return dedupe_src_mapping_table_.Add(Thread::Current(), src_map);
+}
+
 std::vector<uint8_t>* CompilerDriver::DeduplicateMappingTable(const std::vector<uint8_t>& code) {
   return dedupe_mapping_table_.Add(Thread::Current(), code);
 }
@@ -447,7 +446,7 @@ CompilerTls* CompilerDriver::GetTls() {
   // Lazily create thread-local storage
   CompilerTls* res = static_cast<CompilerTls*>(pthread_getspecific(tls_key_));
   if (res == NULL) {
-    res = new CompilerTls();
+    res = compiler_->CreateNewCompilerTls();
     CHECK_PTHREAD_CALL(pthread_setspecific, (tls_key_, res), "compiler tls");
   }
   return res;
@@ -973,6 +972,43 @@ bool CompilerDriver::CanEmbedTypeInCode(const DexFile& dex_file, uint32_t type_i
   }
 }
 
+bool CompilerDriver::CanEmbedReferenceTypeInCode(ClassReference* ref,
+                                                 bool* use_direct_ptr,
+                                                 uintptr_t* direct_type_ptr) {
+  CHECK(ref != nullptr);
+  CHECK(use_direct_ptr != nullptr);
+  CHECK(direct_type_ptr != nullptr);
+
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::Class* reference_class = mirror::Reference::GetJavaLangRefReference();
+  bool is_initialized;
+  bool unused_finalizable;
+  // Make sure we have a finished Reference class object before attempting to use it.
+  if (!CanEmbedTypeInCode(*reference_class->GetDexCache()->GetDexFile(),
+                          reference_class->GetDexTypeIndex(), &is_initialized,
+                          use_direct_ptr, direct_type_ptr, &unused_finalizable) ||
+      !is_initialized) {
+    return false;
+  }
+  ref->first = &reference_class->GetDexFile();
+  ref->second = reference_class->GetDexClassDefIndex();
+  return true;
+}
+
+uint32_t CompilerDriver::GetReferenceSlowFlagOffset() const {
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
+  DCHECK(klass->IsInitialized());
+  return klass->GetSlowPathFlagOffset().Uint32Value();
+}
+
+uint32_t CompilerDriver::GetReferenceDisableFlagOffset() const {
+  ScopedObjectAccess soa(Thread::Current());
+  mirror::Class* klass = mirror::Reference::GetJavaLangRefReference();
+  DCHECK(klass->IsInitialized());
+  return klass->GetDisableIntrinsicFlagOffset().Uint32Value();
+}
+
 void CompilerDriver::ProcessedInstanceField(bool resolved) {
   if (!resolved) {
     stats_->UnresolvedInstanceField();
@@ -1344,12 +1380,14 @@ void CompilerDriver::AddClassPatch(const DexFile* dex_file,
                                     uint16_t referrer_class_def_idx,
                                     uint32_t referrer_method_idx,
                                     uint32_t target_type_idx,
+                                    const DexFile* target_type_dex_file,
                                     size_t literal_offset) {
   MutexLock mu(Thread::Current(), compiled_methods_lock_);
   classes_to_patch_.push_back(new TypePatchInformation(dex_file,
                                                        referrer_class_def_idx,
                                                        referrer_method_idx,
                                                        target_type_idx,
+                                                       target_type_dex_file,
                                                        literal_offset));
 }
 
index 669fb34..624947d 100644 (file)
@@ -76,20 +76,6 @@ enum DexToDexCompilationLevel {
   kOptimize               // Perform required transformation and peep-hole optimizations.
 };
 
-// Thread-local storage compiler worker threads
-class CompilerTls {
-  public:
-    CompilerTls() : llvm_info_(NULL) {}
-    ~CompilerTls() {}
-
-    void* GetLLVMInfo() { return llvm_info_; }
-
-    void SetLLVMInfo(void* llvm_info) { llvm_info_ = llvm_info; }
-
-  private:
-    void* llvm_info_;
-};
-
 class CompilerDriver {
  public:
   // Create a compiler targeting the requested "instruction_set".
@@ -211,6 +197,12 @@ class CompilerDriver {
                           bool* is_type_initialized, bool* use_direct_type_ptr,
                           uintptr_t* direct_type_ptr, bool* out_is_finalizable);
 
+  // Query methods for the java.lang.ref.Reference class.
+  bool CanEmbedReferenceTypeInCode(ClassReference* ref,
+                                   bool* use_direct_type_ptr, uintptr_t* direct_type_ptr);
+  uint32_t GetReferenceSlowFlagOffset() const;
+  uint32_t GetReferenceDisableFlagOffset() const;
+
   // Get the DexCache for the
   mirror::DexCache* GetDexCache(const DexCompilationUnit* mUnit)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -354,6 +346,7 @@ class CompilerDriver {
                      uint16_t referrer_class_def_idx,
                      uint32_t referrer_method_idx,
                      uint32_t target_method_idx,
+                     const DexFile* target_dex_file,
                      size_t literal_offset)
       LOCKS_EXCLUDED(compiled_methods_lock_);
 
@@ -400,10 +393,6 @@ class CompilerDriver {
     return dump_passes_;
   }
 
-  bool DidIncludeDebugSymbols() const {
-    return compiler_options_->GetIncludeDebugSymbols();
-  }
-
   CumulativeLogger* GetTimingsLogger() const {
     return timings_logger_;
   }
@@ -547,6 +536,10 @@ class CompilerDriver {
 
   class TypePatchInformation : public PatchInformation {
    public:
+    const DexFile& GetTargetTypeDexFile() const {
+      return *target_type_dex_file_;
+    }
+
     uint32_t GetTargetTypeIdx() const {
       return target_type_idx_;
     }
@@ -563,13 +556,15 @@ class CompilerDriver {
                          uint16_t referrer_class_def_idx,
                          uint32_t referrer_method_idx,
                          uint32_t target_type_idx,
+                         const DexFile* target_type_dex_file,
                          size_t literal_offset)
         : PatchInformation(dex_file, referrer_class_def_idx,
                            referrer_method_idx, literal_offset),
-          target_type_idx_(target_type_idx) {
+          target_type_idx_(target_type_idx), target_type_dex_file_(target_type_dex_file) {
     }
 
     const uint32_t target_type_idx_;
+    const DexFile* target_type_dex_file_;
 
     friend class CompilerDriver;
     DISALLOW_COPY_AND_ASSIGN(TypePatchInformation);
@@ -592,19 +587,12 @@ class CompilerDriver {
       LOCKS_EXCLUDED(compiled_classes_lock_);
 
   std::vector<uint8_t>* DeduplicateCode(const std::vector<uint8_t>& code);
+  SrcMap* DeduplicateSrcMappingTable(const SrcMap& src_map);
   std::vector<uint8_t>* DeduplicateMappingTable(const std::vector<uint8_t>& code);
   std::vector<uint8_t>* DeduplicateVMapTable(const std::vector<uint8_t>& code);
   std::vector<uint8_t>* DeduplicateGCMap(const std::vector<uint8_t>& code);
   std::vector<uint8_t>* DeduplicateCFIInfo(const std::vector<uint8_t>* cfi_info);
 
-  /*
-   * @brief return the pointer to the Call Frame Information.
-   * @return pointer to call frame information for this compilation.
-   */
-  std::vector<uint8_t>* GetCallFrameInformation() const {
-    return cfi_info_.get();
-  }
-
   ProfileFile profile_file_;
   bool profile_present_;
 
@@ -768,19 +756,17 @@ class CompilerDriver {
 
   bool support_boot_image_fixup_;
 
-  // Call Frame Information, which might be generated to help stack tracebacks.
-  std::unique_ptr<std::vector<uint8_t>> cfi_info_;
-
   // DeDuplication data structures, these own the corresponding byte arrays.
+  template <typename ByteArray>
   class DedupeHashFunc {
    public:
-    size_t operator()(const std::vector<uint8_t>& array) const {
+    size_t operator()(const ByteArray& array) const {
       // For small arrays compute a hash using every byte.
       static const size_t kSmallArrayThreshold = 16;
       size_t hash = 0x811c9dc5;
       if (array.size() <= kSmallArrayThreshold) {
-        for (uint8_t b : array) {
-          hash = (hash * 16777619) ^ b;
+        for (auto b : array) {
+          hash = (hash * 16777619) ^ static_cast<uint8_t>(b);
         }
       } else {
         // For larger arrays use the 2 bytes at 6 bytes (the location of a push registers
@@ -788,12 +774,12 @@ class CompilerDriver {
         // values at random.
         static const size_t kRandomHashCount = 16;
         for (size_t i = 0; i < 2; ++i) {
-          uint8_t b = array[i + 6];
+          uint8_t b = static_cast<uint8_t>(array[i + 6]);
           hash = (hash * 16777619) ^ b;
         }
         for (size_t i = 2; i < kRandomHashCount; ++i) {
           size_t r = i * 1103515245 + 12345;
-          uint8_t b = array[r % array.size()];
+          uint8_t b = static_cast<uint8_t>(array[r % array.size()]);
           hash = (hash * 16777619) ^ b;
         }
       }
@@ -805,11 +791,13 @@ class CompilerDriver {
       return hash;
     }
   };
-  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_code_;
-  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_mapping_table_;
-  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_vmap_table_;
-  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_gc_map_;
-  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc, 4> dedupe_cfi_info_;
+
+  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_code_;
+  DedupeSet<SrcMap, size_t, DedupeHashFunc<SrcMap>, 4> dedupe_src_mapping_table_;
+  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_mapping_table_;
+  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_vmap_table_;
+  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_gc_map_;
+  DedupeSet<std::vector<uint8_t>, size_t, DedupeHashFunc<std::vector<uint8_t>>, 4> dedupe_cfi_info_;
 
   DISALLOW_COPY_AND_ASSIGN(CompilerDriver);
 };
index 60f76ef..bbfbc6e 100644 (file)
@@ -38,28 +38,32 @@ bool ElfFixup::Fixup(File* file, uintptr_t oat_data_begin) {
   Elf32_Off base_address = oat_data_begin - oatdata_address;
 
   if (!FixupDynamic(*elf_file.get(), base_address)) {
-      LOG(WARNING) << "Failed fo fixup .dynamic in " << file->GetPath();
-      return false;
+    LOG(WARNING) << "Failed to fixup .dynamic in " << file->GetPath();
+    return false;
   }
   if (!FixupSectionHeaders(*elf_file.get(), base_address)) {
-      LOG(WARNING) << "Failed fo fixup section headers in " << file->GetPath();
-      return false;
+    LOG(WARNING) << "Failed to fixup section headers in " << file->GetPath();
+    return false;
   }
   if (!FixupProgramHeaders(*elf_file.get(), base_address)) {
-      LOG(WARNING) << "Failed fo fixup program headers in " << file->GetPath();
-      return false;
+    LOG(WARNING) << "Failed to fixup program headers in " << file->GetPath();
+    return false;
   }
   if (!FixupSymbols(*elf_file.get(), base_address, true)) {
-      LOG(WARNING) << "Failed fo fixup .dynsym in " << file->GetPath();
-      return false;
+    LOG(WARNING) << "Failed to fixup .dynsym in " << file->GetPath();
+    return false;
   }
   if (!FixupSymbols(*elf_file.get(), base_address, false)) {
-      LOG(WARNING) << "Failed fo fixup .symtab in " << file->GetPath();
-      return false;
+    LOG(WARNING) << "Failed to fixup .symtab in " << file->GetPath();
+    return false;
   }
   if (!FixupRelocations(*elf_file.get(), base_address)) {
-      LOG(WARNING) << "Failed fo fixup .rel.dyn in " << file->GetPath();
-      return false;
+    LOG(WARNING) << "Failed to fixup .rel.dyn in " << file->GetPath();
+    return false;
+  }
+  if (!elf_file->FixupDebugSections(base_address)) {
+    LOG(WARNING) << "Failed to fixup debug sections in " << file->GetPath();
+    return false;
   }
   return true;
 }
index 137110f..72bf7d3 100644 (file)
@@ -19,6 +19,7 @@
 #include <vector>
 #include <set>
 
+#include "class_linker.h"
 #include "elf_file.h"
 #include "elf_utils.h"
 #include "mirror/art_field-inl.h"
@@ -99,11 +100,13 @@ mirror::ArtMethod* ElfPatcher::GetTargetMethod(const CompilerDriver::CallPatchIn
 mirror::Class* ElfPatcher::GetTargetType(const CompilerDriver::TypePatchInformation* patch) {
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
   StackHandleScope<2> hs(Thread::Current());
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(patch->GetDexFile())));
-  mirror::Class* klass = class_linker->ResolveType(patch->GetDexFile(), patch->GetTargetTypeIdx(),
+  Handle<mirror::DexCache> dex_cache(hs.NewHandle(class_linker->FindDexCache(
+          patch->GetTargetTypeDexFile())));
+  mirror::Class* klass = class_linker->ResolveType(patch->GetTargetTypeDexFile(),
+                                                   patch->GetTargetTypeIdx(),
                                                    dex_cache, NullHandle<mirror::ClassLoader>());
   CHECK(klass != NULL)
-    << patch->GetDexFile().GetLocation() << " " << patch->GetTargetTypeIdx();
+    << patch->GetTargetTypeDexFile().GetLocation() << " " << patch->GetTargetTypeIdx();
   CHECK(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx()) == klass)
     << patch->GetDexFile().GetLocation() << " " << patch->GetReferrerMethodIdx() << " "
     << PrettyClass(dex_cache->GetResolvedTypes()->Get(patch->GetTargetTypeIdx())) << " "
@@ -152,7 +155,7 @@ void ElfPatcher::SetPatchLocation(const CompilerDriver::PatchInformation* patch,
     }
     if (patch->IsType()) {
       const CompilerDriver::TypePatchInformation* tpatch = patch->AsType();
-      const DexFile::TypeId& id = tpatch->GetDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
+      const DexFile::TypeId& id = tpatch->GetTargetTypeDexFile().GetTypeId(tpatch->GetTargetTypeIdx());
       uint32_t expected = reinterpret_cast<uintptr_t>(&id) & 0xFFFFFFFF;
       uint32_t actual = *patch_location;
       CHECK(actual == expected || actual == value) << "Patching type failed: " << std::hex
index 1fde12e..4c69fc8 100644 (file)
 
 #include "elf_writer_quick.h"
 
+#include <unordered_map>
+
 #include "base/logging.h"
 #include "base/unix_file/fd_file.h"
 #include "buffered_output_stream.h"
 #include "driver/compiler_driver.h"
 #include "dwarf.h"
+#include "elf_file.h"
 #include "elf_utils.h"
 #include "file_output_stream.h"
 #include "globals.h"
+#include "leb128.h"
 #include "oat.h"
 #include "oat_writer.h"
 #include "utils.h"
 
 namespace art {
 
-static constexpr Elf32_Word NextOffset(const Elf32_Shdr& cur, const Elf32_Shdr& prev) {
+template <typename Elf_Word, typename Elf_Shdr>
+static constexpr Elf_Word NextOffset(const Elf_Shdr& cur, const Elf_Shdr& prev) {
   return RoundUp(prev.sh_size + prev.sh_offset, cur.sh_addralign);
 }
 
@@ -38,22 +43,62 @@ static uint8_t MakeStInfo(uint8_t binding, uint8_t type) {
   return ((binding) << 4) + ((type) & 0xf);
 }
 
-bool ElfWriterQuick::ElfBuilder::Write() {
+static void PushByte(std::vector<uint8_t>* buf, int data) {
+  buf->push_back(data & 0xff);
+}
+
+static uint32_t PushStr(std::vector<uint8_t>* buf, const char* str, const char* def = nullptr) {
+  if (str == nullptr) {
+    str = def;
+  }
+
+  uint32_t offset = buf->size();
+  for (size_t i = 0; str[i] != '\0'; ++i) {
+    buf->push_back(str[i]);
+  }
+  buf->push_back('\0');
+  return offset;
+}
+
+static uint32_t PushStr(std::vector<uint8_t>* buf, const std::string &str) {
+  uint32_t offset = buf->size();
+  buf->insert(buf->end(), str.begin(), str.end());
+  buf->push_back('\0');
+  return offset;
+}
+
+static void UpdateWord(std::vector<uint8_t>* buf, int offset, int data) {
+  (*buf)[offset+0] = data;
+  (*buf)[offset+1] = data >> 8;
+  (*buf)[offset+2] = data >> 16;
+  (*buf)[offset+3] = data >> 24;
+}
+
+static void PushHalf(std::vector<uint8_t>* buf, int data) {
+  buf->push_back(data & 0xff);
+  buf->push_back((data >> 8) & 0xff);
+}
+
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfBuilder::Init() {
   // The basic layout of the elf file. Order may be different in final output.
   // +-------------------------+
-  // | Elf32_Ehdr              |
+  // | Elf_Ehdr                |
   // +-------------------------+
-  // | Elf32_Phdr PHDR         |
-  // | Elf32_Phdr LOAD R       | .dynsym .dynstr .hash .rodata
-  // | Elf32_Phdr LOAD R X     | .text
-  // | Elf32_Phdr LOAD RW      | .dynamic
-  // | Elf32_Phdr DYNAMIC      | .dynamic
+  // | Elf_Phdr PHDR           |
+  // | Elf_Phdr LOAD R         | .dynsym .dynstr .hash .rodata
+  // | Elf_Phdr LOAD R X       | .text
+  // | Elf_Phdr LOAD RW        | .dynamic
+  // | Elf_Phdr DYNAMIC        | .dynamic
   // +-------------------------+
   // | .dynsym                 |
-  // | Elf32_Sym  STN_UNDEF    |
-  // | Elf32_Sym  oatdata      |
-  // | Elf32_Sym  oatexec      |
-  // | Elf32_Sym  oatlastword  |
+  // | Elf_Sym  STN_UNDEF      |
+  // | Elf_Sym  oatdata        |
+  // | Elf_Sym  oatexec        |
+  // | Elf_Sym  oatlastword    |
   // +-------------------------+
   // | .dynstr                 |
   // | \0                      |
@@ -63,14 +108,14 @@ bool ElfWriterQuick::ElfBuilder::Write() {
   // | boot.oat\0              |
   // +-------------------------+
   // | .hash                   |
-  // | Elf32_Word nbucket = b  |
-  // | Elf32_Word nchain  = c  |
-  // | Elf32_Word bucket[0]    |
+  // | Elf_Word nbucket = b    |
+  // | Elf_Word nchain  = c    |
+  // | Elf_Word bucket[0]      |
   // |         ...             |
-  // | Elf32_Word bucket[b - 1]|
-  // | Elf32_Word chain[0]     |
+  // | Elf_Word bucket[b - 1]  |
+  // | Elf_Word chain[0]       |
   // |         ...             |
-  // | Elf32_Word chain[c - 1] |
+  // | Elf_Word chain[c - 1]   |
   // +-------------------------+
   // | .rodata                 |
   // | oatdata..oatexec-4      |
@@ -79,13 +124,13 @@ bool ElfWriterQuick::ElfBuilder::Write() {
   // | oatexec..oatlastword    |
   // +-------------------------+
   // | .dynamic                |
-  // | Elf32_Dyn DT_SONAME     |
-  // | Elf32_Dyn DT_HASH       |
-  // | Elf32_Dyn DT_SYMTAB     |
-  // | Elf32_Dyn DT_SYMENT     |
-  // | Elf32_Dyn DT_STRTAB     |
-  // | Elf32_Dyn DT_STRSZ      |
-  // | Elf32_Dyn DT_NULL       |
+  // | Elf_Dyn DT_SONAME       |
+  // | Elf_Dyn DT_HASH         |
+  // | Elf_Dyn DT_SYMTAB       |
+  // | Elf_Dyn DT_SYMENT       |
+  // | Elf_Dyn DT_STRTAB       |
+  // | Elf_Dyn DT_STRSZ        |
+  // | Elf_Dyn DT_NULL         |
   // +-------------------------+  (Optional)
   // | .strtab                 |  (Optional)
   // | program symbol names    |  (Optional)
@@ -107,254 +152,187 @@ bool ElfWriterQuick::ElfBuilder::Write() {
   // | .debug_str\0            |  (Optional)
   // | .debug_info\0           |  (Optional)
   // | .eh_frame\0             |  (Optional)
+  // | .debug_line\0           |  (Optional)
   // | .debug_abbrev\0         |  (Optional)
   // +-------------------------+  (Optional)
-  // | .debug_str              |  (Optional)
-  // +-------------------------+  (Optional)
   // | .debug_info             |  (Optional)
   // +-------------------------+  (Optional)
+  // | .debug_abbrev           |  (Optional)
+  // +-------------------------+  (Optional)
   // | .eh_frame               |  (Optional)
   // +-------------------------+  (Optional)
-  // | .debug_abbrev           |  (Optional)
-  // +-------------------------+
-  // | Elf32_Shdr NULL         |
-  // | Elf32_Shdr .dynsym      |
-  // | Elf32_Shdr .dynstr      |
-  // | Elf32_Shdr .hash        |
-  // | Elf32_Shdr .text        |
-  // | Elf32_Shdr .rodata      |
-  // | Elf32_Shdr .dynamic     |
-  // | Elf32_Shdr .shstrtab    |
-  // | Elf32_Shdr .debug_str   |  (Optional)
-  // | Elf32_Shdr .debug_info  |  (Optional)
-  // | Elf32_Shdr .eh_frame    |  (Optional)
-  // | Elf32_Shdr .debug_abbrev|  (Optional)
+  // | .debug_line             |  (Optional)
+  // +-------------------------+  (Optional)
+  // | .debug_str              |  (Optional)
+  // +-------------------------+  (Optional)
+  // | Elf_Shdr NULL           |
+  // | Elf_Shdr .dynsym        |
+  // | Elf_Shdr .dynstr        |
+  // | Elf_Shdr .hash          |
+  // | Elf_Shdr .text          |
+  // | Elf_Shdr .rodata        |
+  // | Elf_Shdr .dynamic       |
+  // | Elf_Shdr .shstrtab      |
+  // | Elf_Shdr .debug_info    |  (Optional)
+  // | Elf_Shdr .debug_abbrev  |  (Optional)
+  // | Elf_Shdr .eh_frame      |  (Optional)
+  // | Elf_Shdr .debug_line    |  (Optional)
+  // | Elf_Shdr .debug_str     |  (Optional)
   // +-------------------------+
 
-
   if (fatal_error_) {
     return false;
   }
   // Step 1. Figure out all the offsets.
 
-  // What phdr is.
-  uint32_t phdr_offset = sizeof(Elf32_Ehdr);
-  const uint8_t PH_PHDR     = 0;
-  const uint8_t PH_LOAD_R__ = 1;
-  const uint8_t PH_LOAD_R_X = 2;
-  const uint8_t PH_LOAD_RW_ = 3;
-  const uint8_t PH_DYNAMIC  = 4;
-  const uint8_t PH_NUM      = 5;
-  uint32_t phdr_size = sizeof(Elf32_Phdr) * PH_NUM;
   if (debug_logging_) {
-    LOG(INFO) << "phdr_offset=" << phdr_offset << std::hex << " " << phdr_offset;
-    LOG(INFO) << "phdr_size=" << phdr_size << std::hex << " " << phdr_size;
-  }
-  Elf32_Phdr program_headers[PH_NUM];
-  memset(&program_headers, 0, sizeof(program_headers));
-  program_headers[PH_PHDR].p_type    = PT_PHDR;
-  program_headers[PH_PHDR].p_offset  = phdr_offset;
-  program_headers[PH_PHDR].p_vaddr   = phdr_offset;
-  program_headers[PH_PHDR].p_paddr   = phdr_offset;
-  program_headers[PH_PHDR].p_filesz  = sizeof(program_headers);
-  program_headers[PH_PHDR].p_memsz   = sizeof(program_headers);
-  program_headers[PH_PHDR].p_flags   = PF_R;
-  program_headers[PH_PHDR].p_align   = sizeof(Elf32_Word);
-
-  program_headers[PH_LOAD_R__].p_type    = PT_LOAD;
-  program_headers[PH_LOAD_R__].p_offset  = 0;
-  program_headers[PH_LOAD_R__].p_vaddr   = 0;
-  program_headers[PH_LOAD_R__].p_paddr   = 0;
-  program_headers[PH_LOAD_R__].p_flags   = PF_R;
-
-  program_headers[PH_LOAD_R_X].p_type    = PT_LOAD;
-  program_headers[PH_LOAD_R_X].p_flags   = PF_R | PF_X;
-
-  program_headers[PH_LOAD_RW_].p_type    = PT_LOAD;
-  program_headers[PH_LOAD_RW_].p_flags   = PF_R | PF_W;
-
-  program_headers[PH_DYNAMIC].p_type    = PT_DYNAMIC;
-  program_headers[PH_DYNAMIC].p_flags   = PF_R | PF_W;
+    LOG(INFO) << "phdr_offset=" << PHDR_OFFSET << std::hex << " " << PHDR_OFFSET;
+    LOG(INFO) << "phdr_size=" << PHDR_SIZE << std::hex << " " << PHDR_SIZE;
+  }
+
+  memset(&program_headers_, 0, sizeof(program_headers_));
+  program_headers_[PH_PHDR].p_type    = PT_PHDR;
+  program_headers_[PH_PHDR].p_offset  = PHDR_OFFSET;
+  program_headers_[PH_PHDR].p_vaddr   = PHDR_OFFSET;
+  program_headers_[PH_PHDR].p_paddr   = PHDR_OFFSET;
+  program_headers_[PH_PHDR].p_filesz  = sizeof(program_headers_);
+  program_headers_[PH_PHDR].p_memsz   = sizeof(program_headers_);
+  program_headers_[PH_PHDR].p_flags   = PF_R;
+  program_headers_[PH_PHDR].p_align   = sizeof(Elf_Word);
+
+  program_headers_[PH_LOAD_R__].p_type    = PT_LOAD;
+  program_headers_[PH_LOAD_R__].p_offset  = 0;
+  program_headers_[PH_LOAD_R__].p_vaddr   = 0;
+  program_headers_[PH_LOAD_R__].p_paddr   = 0;
+  program_headers_[PH_LOAD_R__].p_flags   = PF_R;
+
+  program_headers_[PH_LOAD_R_X].p_type    = PT_LOAD;
+  program_headers_[PH_LOAD_R_X].p_flags   = PF_R | PF_X;
+
+  program_headers_[PH_LOAD_RW_].p_type    = PT_LOAD;
+  program_headers_[PH_LOAD_RW_].p_flags   = PF_R | PF_W;
+
+  program_headers_[PH_DYNAMIC].p_type    = PT_DYNAMIC;
+  program_headers_[PH_DYNAMIC].p_flags   = PF_R | PF_W;
 
   // Get the dynstr string.
-  std::string dynstr(dynsym_builder_.GenerateStrtab());
+  dynstr_ = dynsym_builder_.GenerateStrtab();
 
   // Add the SONAME to the dynstr.
-  uint32_t dynstr_soname_offset = dynstr.size();
+  dynstr_soname_offset_ = dynstr_.size();
   std::string file_name(elf_file_->GetPath());
   size_t directory_separator_pos = file_name.rfind('/');
   if (directory_separator_pos != std::string::npos) {
     file_name = file_name.substr(directory_separator_pos + 1);
   }
-  dynstr += file_name;
-  dynstr += '\0';
+  dynstr_ += file_name;
+  dynstr_ += '\0';
   if (debug_logging_) {
-    LOG(INFO) << "dynstr size (bytes)   =" << dynstr.size()
-              << std::hex << " " << dynstr.size();
+    LOG(INFO) << "dynstr size (bytes)   =" << dynstr_.size()
+              << std::hex << " " << dynstr_.size();
     LOG(INFO) << "dynsym size (elements)=" << dynsym_builder_.GetSize()
               << std::hex << " " << dynsym_builder_.GetSize();
   }
 
-  // get the strtab
-  std::string strtab;
-  if (IncludingDebugSymbols()) {
-    strtab = symtab_builder_.GenerateStrtab();
-    if (debug_logging_) {
-      LOG(INFO) << "strtab size (bytes)    =" << strtab.size()
-                << std::hex << " " << strtab.size();
-      LOG(INFO) << "symtab size (elements) =" << symtab_builder_.GetSize()
-                << std::hex << " " << symtab_builder_.GetSize();
-    }
-  }
-
   // Get the section header string table.
-  std::vector<Elf32_Shdr*> section_ptrs;
-  std::string shstrtab;
-  shstrtab += '\0';
+  shstrtab_ += '\0';
 
   // Setup sym_undef
-  Elf32_Shdr null_hdr;
-  memset(&null_hdr, 0, sizeof(null_hdr));
-  null_hdr.sh_type = SHT_NULL;
-  null_hdr.sh_link = SHN_UNDEF;
-  section_ptrs.push_back(&null_hdr);
+  memset(&null_hdr_, 0, sizeof(null_hdr_));
+  null_hdr_.sh_type = SHT_NULL;
+  null_hdr_.sh_link = SHN_UNDEF;
+  section_ptrs_.push_back(&null_hdr_);
 
-  uint32_t section_index = 1;
+  section_index_ = 1;
 
   // setup .dynsym
-  section_ptrs.push_back(&dynsym_builder_.section_);
-  AssignSectionStr(&dynsym_builder_, &shstrtab);
-  dynsym_builder_.section_index_ = section_index++;
+  section_ptrs_.push_back(&dynsym_builder_.section_);
+  AssignSectionStr(&dynsym_builder_, &shstrtab_);
+  dynsym_builder_.section_index_ = section_index_++;
 
   // Setup .dynstr
-  section_ptrs.push_back(&dynsym_builder_.strtab_.section_);
-  AssignSectionStr(&dynsym_builder_.strtab_, &shstrtab);
-  dynsym_builder_.strtab_.section_index_ = section_index++;
+  section_ptrs_.push_back(&dynsym_builder_.strtab_.section_);
+  AssignSectionStr(&dynsym_builder_.strtab_, &shstrtab_);
+  dynsym_builder_.strtab_.section_index_ = section_index_++;
 
   // Setup .hash
-  section_ptrs.push_back(&hash_builder_.section_);
-  AssignSectionStr(&hash_builder_, &shstrtab);
-  hash_builder_.section_index_ = section_index++;
+  section_ptrs_.push_back(&hash_builder_.section_);
+  AssignSectionStr(&hash_builder_, &shstrtab_);
+  hash_builder_.section_index_ = section_index_++;
 
   // Setup .rodata
-  section_ptrs.push_back(&rodata_builder_.section_);
-  AssignSectionStr(&rodata_builder_, &shstrtab);
-  rodata_builder_.section_index_ = section_index++;
+  section_ptrs_.push_back(&rodata_builder_.section_);
+  AssignSectionStr(&rodata_builder_, &shstrtab_);
+  rodata_builder_.section_index_ = section_index_++;
 
   // Setup .text
-  section_ptrs.push_back(&text_builder_.section_);
-  AssignSectionStr(&text_builder_, &shstrtab);
-  text_builder_.section_index_ = section_index++;
+  section_ptrs_.push_back(&text_builder_.section_);
+  AssignSectionStr(&text_builder_, &shstrtab_);
+  text_builder_.section_index_ = section_index_++;
 
   // Setup .dynamic
-  section_ptrs.push_back(&dynamic_builder_.section_);
-  AssignSectionStr(&dynamic_builder_, &shstrtab);
-  dynamic_builder_.section_index_ = section_index++;
-
-  if (IncludingDebugSymbols()) {
-    // Setup .symtab
-    section_ptrs.push_back(&symtab_builder_.section_);
-    AssignSectionStr(&symtab_builder_, &shstrtab);
-    symtab_builder_.section_index_ = section_index++;
-
-    // Setup .strtab
-    section_ptrs.push_back(&symtab_builder_.strtab_.section_);
-    AssignSectionStr(&symtab_builder_.strtab_, &shstrtab);
-    symtab_builder_.strtab_.section_index_ = section_index++;
-  }
-  ElfRawSectionBuilder* it = other_builders_.data();
-  for (uint32_t cnt = 0; cnt < other_builders_.size(); ++it, ++cnt) {
-    // Setup all the other sections.
-    section_ptrs.push_back(&it->section_);
-    AssignSectionStr(it, &shstrtab);
-    it->section_index_ = section_index++;
-  }
-
-  // Setup shstrtab
-  section_ptrs.push_back(&shstrtab_builder_.section_);
-  AssignSectionStr(&shstrtab_builder_, &shstrtab);
-  shstrtab_builder_.section_index_ = section_index++;
-
-  if (debug_logging_) {
-    LOG(INFO) << ".shstrtab size    (bytes)   =" << shstrtab.size()
-              << std::hex << " " << shstrtab.size();
-    LOG(INFO) << "section list size (elements)=" << section_ptrs.size()
-              << std::hex << " " << section_ptrs.size();
-  }
+  section_ptrs_.push_back(&dynamic_builder_.section_);
+  AssignSectionStr(&dynamic_builder_, &shstrtab_);
+  dynamic_builder_.section_index_ = section_index_++;
 
   // Fill in the hash section.
-  std::vector<Elf32_Word> hash = dynsym_builder_.GenerateHashContents();
+  hash_ = dynsym_builder_.GenerateHashContents();
 
   if (debug_logging_) {
-    LOG(INFO) << ".hash size (bytes)=" << hash.size() * sizeof(Elf32_Word)
-              << std::hex << " " << hash.size() * sizeof(Elf32_Word);
+    LOG(INFO) << ".hash size (bytes)=" << hash_.size() * sizeof(Elf_Word)
+              << std::hex << " " << hash_.size() * sizeof(Elf_Word);
   }
 
-  Elf32_Word base_offset = sizeof(Elf32_Ehdr) + sizeof(program_headers);
-  std::vector<ElfFilePiece> pieces;
+  Elf_Word base_offset = sizeof(Elf_Ehdr) + sizeof(program_headers_);
 
   // Get the layout in the sections.
   //
   // Get the layout of the dynsym section.
   dynsym_builder_.section_.sh_offset = RoundUp(base_offset, dynsym_builder_.section_.sh_addralign);
   dynsym_builder_.section_.sh_addr = dynsym_builder_.section_.sh_offset;
-  dynsym_builder_.section_.sh_size = dynsym_builder_.GetSize() * sizeof(Elf32_Sym);
+  dynsym_builder_.section_.sh_size = dynsym_builder_.GetSize() * sizeof(Elf_Sym);
   dynsym_builder_.section_.sh_link = dynsym_builder_.GetLink();
 
   // Get the layout of the dynstr section.
-  dynsym_builder_.strtab_.section_.sh_offset = NextOffset(dynsym_builder_.strtab_.section_,
-                                                          dynsym_builder_.section_);
+  dynsym_builder_.strtab_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
+                                               (dynsym_builder_.strtab_.section_,
+                                                dynsym_builder_.section_);
   dynsym_builder_.strtab_.section_.sh_addr = dynsym_builder_.strtab_.section_.sh_offset;
-  dynsym_builder_.strtab_.section_.sh_size = dynstr.size();
+  dynsym_builder_.strtab_.section_.sh_size = dynstr_.size();
   dynsym_builder_.strtab_.section_.sh_link = dynsym_builder_.strtab_.GetLink();
 
   // Get the layout of the hash section
-  hash_builder_.section_.sh_offset = NextOffset(hash_builder_.section_,
-                                                dynsym_builder_.strtab_.section_);
+  hash_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
+                                     (hash_builder_.section_,
+                                      dynsym_builder_.strtab_.section_);
   hash_builder_.section_.sh_addr = hash_builder_.section_.sh_offset;
-  hash_builder_.section_.sh_size = hash.size() * sizeof(Elf32_Word);
+  hash_builder_.section_.sh_size = hash_.size() * sizeof(Elf_Word);
   hash_builder_.section_.sh_link = hash_builder_.GetLink();
 
   // Get the layout of the rodata section.
-  rodata_builder_.section_.sh_offset = NextOffset(rodata_builder_.section_,
-                                                  hash_builder_.section_);
+  rodata_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
+                                       (rodata_builder_.section_,
+                                        hash_builder_.section_);
   rodata_builder_.section_.sh_addr = rodata_builder_.section_.sh_offset;
   rodata_builder_.section_.sh_size = rodata_builder_.size_;
   rodata_builder_.section_.sh_link = rodata_builder_.GetLink();
 
   // Get the layout of the text section.
-  text_builder_.section_.sh_offset = NextOffset(text_builder_.section_, rodata_builder_.section_);
+  text_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
+                                     (text_builder_.section_, rodata_builder_.section_);
   text_builder_.section_.sh_addr = text_builder_.section_.sh_offset;
   text_builder_.section_.sh_size = text_builder_.size_;
   text_builder_.section_.sh_link = text_builder_.GetLink();
   CHECK_ALIGNED(rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size, kPageSize);
 
   // Get the layout of the dynamic section.
-  dynamic_builder_.section_.sh_offset = NextOffset(dynamic_builder_.section_,
-                                                   text_builder_.section_);
+  dynamic_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
+                                        (dynamic_builder_.section_,
+                                         text_builder_.section_);
   dynamic_builder_.section_.sh_addr = dynamic_builder_.section_.sh_offset;
-  dynamic_builder_.section_.sh_size = dynamic_builder_.GetSize() * sizeof(Elf32_Dyn);
+  dynamic_builder_.section_.sh_size = dynamic_builder_.GetSize() * sizeof(Elf_Dyn);
   dynamic_builder_.section_.sh_link = dynamic_builder_.GetLink();
 
-  Elf32_Shdr prev = dynamic_builder_.section_;
-  if (IncludingDebugSymbols()) {
-    // Get the layout of the symtab section.
-    symtab_builder_.section_.sh_offset = NextOffset(symtab_builder_.section_,
-                                                    dynamic_builder_.section_);
-    symtab_builder_.section_.sh_addr = 0;
-    // Add to leave space for the null symbol.
-    symtab_builder_.section_.sh_size = symtab_builder_.GetSize() * sizeof(Elf32_Sym);
-    symtab_builder_.section_.sh_link = symtab_builder_.GetLink();
-
-    // Get the layout of the dynstr section.
-    symtab_builder_.strtab_.section_.sh_offset = NextOffset(symtab_builder_.strtab_.section_,
-                                                            symtab_builder_.section_);
-    symtab_builder_.strtab_.section_.sh_addr = 0;
-    symtab_builder_.strtab_.section_.sh_size = strtab.size();
-    symtab_builder_.strtab_.section_.sh_link = symtab_builder_.strtab_.GetLink();
-
-    prev = symtab_builder_.strtab_.section_;
-  }
   if (debug_logging_) {
     LOG(INFO) << "dynsym off=" << dynsym_builder_.section_.sh_offset
               << " dynsym size=" << dynsym_builder_.section_.sh_size;
@@ -368,17 +346,92 @@ bool ElfWriterQuick::ElfBuilder::Write() {
               << " text size=" << text_builder_.section_.sh_size;
     LOG(INFO) << "dynamic off=" << dynamic_builder_.section_.sh_offset
               << " dynamic size=" << dynamic_builder_.section_.sh_size;
-    if (IncludingDebugSymbols()) {
+  }
+
+  return true;
+}
+
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfBuilder::Write() {
+  std::vector<ElfFilePiece> pieces;
+  Elf_Shdr prev = dynamic_builder_.section_;
+  std::string strtab;
+
+  if (IncludingDebugSymbols()) {
+    // Setup .symtab
+    section_ptrs_.push_back(&symtab_builder_.section_);
+    AssignSectionStr(&symtab_builder_, &shstrtab_);
+    symtab_builder_.section_index_ = section_index_++;
+
+    // Setup .strtab
+    section_ptrs_.push_back(&symtab_builder_.strtab_.section_);
+    AssignSectionStr(&symtab_builder_.strtab_, &shstrtab_);
+    symtab_builder_.strtab_.section_index_ = section_index_++;
+
+    strtab = symtab_builder_.GenerateStrtab();
+    if (debug_logging_) {
+      LOG(INFO) << "strtab size (bytes)    =" << strtab.size()
+                << std::hex << " " << strtab.size();
+      LOG(INFO) << "symtab size (elements) =" << symtab_builder_.GetSize()
+                << std::hex << " " << symtab_builder_.GetSize();
+    }
+  }
+
+  // Setup all the other sections.
+  for (ElfRawSectionBuilder *builder = other_builders_.data(),
+                            *end = builder + other_builders_.size();
+       builder != end; ++builder) {
+    section_ptrs_.push_back(&builder->section_);
+    AssignSectionStr(builder, &shstrtab_);
+    builder->section_index_ = section_index_++;
+  }
+
+  // Setup shstrtab
+  section_ptrs_.push_back(&shstrtab_builder_.section_);
+  AssignSectionStr(&shstrtab_builder_, &shstrtab_);
+  shstrtab_builder_.section_index_ = section_index_++;
+
+  if (debug_logging_) {
+    LOG(INFO) << ".shstrtab size    (bytes)   =" << shstrtab_.size()
+              << std::hex << " " << shstrtab_.size();
+    LOG(INFO) << "section list size (elements)=" << section_ptrs_.size()
+              << std::hex << " " << section_ptrs_.size();
+  }
+
+  if (IncludingDebugSymbols()) {
+    // Get the layout of the symtab section.
+    symtab_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
+                                         (symtab_builder_.section_,
+                                          dynamic_builder_.section_);
+    symtab_builder_.section_.sh_addr = 0;
+    // Add to leave space for the null symbol.
+    symtab_builder_.section_.sh_size = symtab_builder_.GetSize() * sizeof(Elf_Sym);
+    symtab_builder_.section_.sh_link = symtab_builder_.GetLink();
+
+    // Get the layout of the dynstr section.
+    symtab_builder_.strtab_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
+                                                 (symtab_builder_.strtab_.section_,
+                                                  symtab_builder_.section_);
+    symtab_builder_.strtab_.section_.sh_addr = 0;
+    symtab_builder_.strtab_.section_.sh_size = strtab.size();
+    symtab_builder_.strtab_.section_.sh_link = symtab_builder_.strtab_.GetLink();
+
+    prev = symtab_builder_.strtab_.section_;
+    if (debug_logging_) {
       LOG(INFO) << "symtab off=" << symtab_builder_.section_.sh_offset
                 << " symtab size=" << symtab_builder_.section_.sh_size;
       LOG(INFO) << "strtab off=" << symtab_builder_.strtab_.section_.sh_offset
                 << " strtab size=" << symtab_builder_.strtab_.section_.sh_size;
     }
   }
+
   // Get the layout of the extra sections. (This will deal with the debug
   // sections if they are there)
   for (auto it = other_builders_.begin(); it != other_builders_.end(); ++it) {
-    it->section_.sh_offset = NextOffset(it->section_, prev);
+    it->section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>(it->section_, prev);
     it->section_.sh_addr = 0;
     it->section_.sh_size = it->GetBuffer()->size();
     it->section_.sh_link = it->GetLink();
@@ -390,10 +443,12 @@ bool ElfWriterQuick::ElfBuilder::Write() {
                 << " " << it->name_ << " size=" << it->section_.sh_size;
     }
   }
+
   // Get the layout of the shstrtab section
-  shstrtab_builder_.section_.sh_offset = NextOffset(shstrtab_builder_.section_, prev);
+  shstrtab_builder_.section_.sh_offset = NextOffset<Elf_Word, Elf_Shdr>
+                                         (shstrtab_builder_.section_, prev);
   shstrtab_builder_.section_.sh_addr = 0;
-  shstrtab_builder_.section_.sh_size = shstrtab.size();
+  shstrtab_builder_.section_.sh_size = shstrtab_.size();
   shstrtab_builder_.section_.sh_link = shstrtab_builder_.GetLink();
   if (debug_logging_) {
       LOG(INFO) << "shstrtab off=" << shstrtab_builder_.section_.sh_offset
@@ -401,92 +456,92 @@ bool ElfWriterQuick::ElfBuilder::Write() {
   }
 
   // The section list comes after come after.
-  Elf32_Word sections_offset = RoundUp(
+  Elf_Word sections_offset = RoundUp(
       shstrtab_builder_.section_.sh_offset + shstrtab_builder_.section_.sh_size,
-      sizeof(Elf32_Word));
+      sizeof(Elf_Word));
 
   // Setup the actual symbol arrays.
-  std::vector<Elf32_Sym> dynsym = dynsym_builder_.GenerateSymtab();
-  CHECK_EQ(dynsym.size() * sizeof(Elf32_Sym), dynsym_builder_.section_.sh_size);
-  std::vector<Elf32_Sym> symtab;
+  std::vector<Elf_Sym> dynsym = dynsym_builder_.GenerateSymtab();
+  CHECK_EQ(dynsym.size() * sizeof(Elf_Sym), dynsym_builder_.section_.sh_size);
+  std::vector<Elf_Sym> symtab;
   if (IncludingDebugSymbols()) {
     symtab = symtab_builder_.GenerateSymtab();
-    CHECK_EQ(symtab.size() * sizeof(Elf32_Sym), symtab_builder_.section_.sh_size);
+    CHECK_EQ(symtab.size() * sizeof(Elf_Sym), symtab_builder_.section_.sh_size);
   }
 
   // Setup the dynamic section.
   // This will add the 2 values we cannot know until now time, namely the size
   // and the soname_offset.
-  std::vector<Elf32_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr.size(),
-                                                                dynstr_soname_offset);
-  CHECK_EQ(dynamic.size() * sizeof(Elf32_Dyn), dynamic_builder_.section_.sh_size);
+  std::vector<Elf_Dyn> dynamic = dynamic_builder_.GetDynamics(dynstr_.size(),
+                                                              dynstr_soname_offset_);
+  CHECK_EQ(dynamic.size() * sizeof(Elf_Dyn), dynamic_builder_.section_.sh_size);
 
   // Finish setup of the program headers now that we know the layout of the
   // whole file.
-  Elf32_Word load_r_size = rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size;
-  program_headers[PH_LOAD_R__].p_filesz = load_r_size;
-  program_headers[PH_LOAD_R__].p_memsz =  load_r_size;
-  program_headers[PH_LOAD_R__].p_align =  rodata_builder_.section_.sh_addralign;
-
-  Elf32_Word load_rx_size = text_builder_.section_.sh_size;
-  program_headers[PH_LOAD_R_X].p_offset = text_builder_.section_.sh_offset;
-  program_headers[PH_LOAD_R_X].p_vaddr  = text_builder_.section_.sh_offset;
-  program_headers[PH_LOAD_R_X].p_paddr  = text_builder_.section_.sh_offset;
-  program_headers[PH_LOAD_R_X].p_filesz = load_rx_size;
-  program_headers[PH_LOAD_R_X].p_memsz  = load_rx_size;
-  program_headers[PH_LOAD_R_X].p_align  = text_builder_.section_.sh_addralign;
-
-  program_headers[PH_LOAD_RW_].p_offset = dynamic_builder_.section_.sh_offset;
-  program_headers[PH_LOAD_RW_].p_vaddr  = dynamic_builder_.section_.sh_offset;
-  program_headers[PH_LOAD_RW_].p_paddr  = dynamic_builder_.section_.sh_offset;
-  program_headers[PH_LOAD_RW_].p_filesz = dynamic_builder_.section_.sh_size;
-  program_headers[PH_LOAD_RW_].p_memsz  = dynamic_builder_.section_.sh_size;
-  program_headers[PH_LOAD_RW_].p_align  = dynamic_builder_.section_.sh_addralign;
-
-  program_headers[PH_DYNAMIC].p_offset = dynamic_builder_.section_.sh_offset;
-  program_headers[PH_DYNAMIC].p_vaddr  = dynamic_builder_.section_.sh_offset;
-  program_headers[PH_DYNAMIC].p_paddr  = dynamic_builder_.section_.sh_offset;
-  program_headers[PH_DYNAMIC].p_filesz = dynamic_builder_.section_.sh_size;
-  program_headers[PH_DYNAMIC].p_memsz  = dynamic_builder_.section_.sh_size;
-  program_headers[PH_DYNAMIC].p_align  = dynamic_builder_.section_.sh_addralign;
+  Elf_Word load_r_size = rodata_builder_.section_.sh_offset + rodata_builder_.section_.sh_size;
+  program_headers_[PH_LOAD_R__].p_filesz = load_r_size;
+  program_headers_[PH_LOAD_R__].p_memsz =  load_r_size;
+  program_headers_[PH_LOAD_R__].p_align =  rodata_builder_.section_.sh_addralign;
+
+  Elf_Word load_rx_size = text_builder_.section_.sh_size;
+  program_headers_[PH_LOAD_R_X].p_offset = text_builder_.section_.sh_offset;
+  program_headers_[PH_LOAD_R_X].p_vaddr  = text_builder_.section_.sh_offset;
+  program_headers_[PH_LOAD_R_X].p_paddr  = text_builder_.section_.sh_offset;
+  program_headers_[PH_LOAD_R_X].p_filesz = load_rx_size;
+  program_headers_[PH_LOAD_R_X].p_memsz  = load_rx_size;
+  program_headers_[PH_LOAD_R_X].p_align  = text_builder_.section_.sh_addralign;
+
+  program_headers_[PH_LOAD_RW_].p_offset = dynamic_builder_.section_.sh_offset;
+  program_headers_[PH_LOAD_RW_].p_vaddr  = dynamic_builder_.section_.sh_offset;
+  program_headers_[PH_LOAD_RW_].p_paddr  = dynamic_builder_.section_.sh_offset;
+  program_headers_[PH_LOAD_RW_].p_filesz = dynamic_builder_.section_.sh_size;
+  program_headers_[PH_LOAD_RW_].p_memsz  = dynamic_builder_.section_.sh_size;
+  program_headers_[PH_LOAD_RW_].p_align  = dynamic_builder_.section_.sh_addralign;
+
+  program_headers_[PH_DYNAMIC].p_offset = dynamic_builder_.section_.sh_offset;
+  program_headers_[PH_DYNAMIC].p_vaddr  = dynamic_builder_.section_.sh_offset;
+  program_headers_[PH_DYNAMIC].p_paddr  = dynamic_builder_.section_.sh_offset;
+  program_headers_[PH_DYNAMIC].p_filesz = dynamic_builder_.section_.sh_size;
+  program_headers_[PH_DYNAMIC].p_memsz  = dynamic_builder_.section_.sh_size;
+  program_headers_[PH_DYNAMIC].p_align  = dynamic_builder_.section_.sh_addralign;
 
   // Finish setup of the Ehdr values.
-  elf_header_.e_phoff = phdr_offset;
+  elf_header_.e_phoff = PHDR_OFFSET;
   elf_header_.e_shoff = sections_offset;
   elf_header_.e_phnum = PH_NUM;
-  elf_header_.e_shnum = section_ptrs.size();
+  elf_header_.e_shnum = section_ptrs_.size();
   elf_header_.e_shstrndx = shstrtab_builder_.section_index_;
 
   // Add the rest of the pieces to the list.
   pieces.push_back(ElfFilePiece("Elf Header", 0, &elf_header_, sizeof(elf_header_)));
-  pieces.push_back(ElfFilePiece("Program headers", phdr_offset,
-                                &program_headers, sizeof(program_headers)));
+  pieces.push_back(ElfFilePiece("Program headers", PHDR_OFFSET,
+                                &program_headers_, sizeof(program_headers_)));
   pieces.push_back(ElfFilePiece(".dynamic", dynamic_builder_.section_.sh_offset,
                                 dynamic.data(), dynamic_builder_.section_.sh_size));
   pieces.push_back(ElfFilePiece(".dynsym", dynsym_builder_.section_.sh_offset,
-                                dynsym.data(), dynsym.size() * sizeof(Elf32_Sym)));
+                                dynsym.data(), dynsym.size() * sizeof(Elf_Sym)));
   pieces.push_back(ElfFilePiece(".dynstr", dynsym_builder_.strtab_.section_.sh_offset,
-                                dynstr.c_str(), dynstr.size()));
+                                dynstr_.c_str(), dynstr_.size()));
   pieces.push_back(ElfFilePiece(".hash", hash_builder_.section_.sh_offset,
-                                hash.data(), hash.size() * sizeof(Elf32_Word)));
+                                hash_.data(), hash_.size() * sizeof(Elf_Word)));
   pieces.push_back(ElfFilePiece(".rodata", rodata_builder_.section_.sh_offset,
                                 nullptr, rodata_builder_.section_.sh_size));
   pieces.push_back(ElfFilePiece(".text", text_builder_.section_.sh_offset,
                                 nullptr, text_builder_.section_.sh_size));
   if (IncludingDebugSymbols()) {
     pieces.push_back(ElfFilePiece(".symtab", symtab_builder_.section_.sh_offset,
-                                  symtab.data(), symtab.size() * sizeof(Elf32_Sym)));
+                                  symtab.data(), symtab.size() * sizeof(Elf_Sym)));
     pieces.push_back(ElfFilePiece(".strtab", symtab_builder_.strtab_.section_.sh_offset,
                                   strtab.c_str(), strtab.size()));
   }
   pieces.push_back(ElfFilePiece(".shstrtab", shstrtab_builder_.section_.sh_offset,
-                                &shstrtab[0], shstrtab.size()));
-  for (uint32_t i = 0; i < section_ptrs.size(); ++i) {
+                                &shstrtab_[0], shstrtab_.size()));
+  for (uint32_t i = 0; i < section_ptrs_.size(); ++i) {
     // Just add all the sections in induvidually since they are all over the
     // place on the heap/stack.
-    Elf32_Word cur_off = sections_offset + i * sizeof(Elf32_Shdr);
+    Elf_Word cur_off = sections_offset + i * sizeof(Elf_Shdr);
     pieces.push_back(ElfFilePiece("section table piece", cur_off,
-                                  section_ptrs[i], sizeof(Elf32_Shdr)));
+                                  section_ptrs_[i], sizeof(Elf_Shdr)));
   }
 
   if (!WriteOutFile(pieces)) {
@@ -494,7 +549,7 @@ bool ElfWriterQuick::ElfBuilder::Write() {
     return false;
   }
   // write out the actual oat file data.
-  Elf32_Word oat_data_offset = rodata_builder_.section_.sh_offset;
+  Elf_Word oat_data_offset = rodata_builder_.section_.sh_offset;
   if (static_cast<off_t>(oat_data_offset) != lseek(elf_file_->Fd(), oat_data_offset, SEEK_SET)) {
     PLOG(ERROR) << "Failed to seek to .rodata offset " << oat_data_offset
                 << " for " << elf_file_->GetPath();
@@ -510,7 +565,11 @@ bool ElfWriterQuick::ElfBuilder::Write() {
   return true;
 }
 
-bool ElfWriterQuick::ElfBuilder::WriteOutFile(const std::vector<ElfFilePiece>& pieces) {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfBuilder::WriteOutFile(const std::vector<ElfFilePiece>& pieces) {
   // TODO It would be nice if this checked for overlap.
   for (auto it = pieces.begin(); it != pieces.end(); ++it) {
     if (it->data_) {
@@ -528,14 +587,22 @@ bool ElfWriterQuick::ElfBuilder::WriteOutFile(const std::vector<ElfFilePiece>& p
   return true;
 }
 
-void ElfWriterQuick::ElfBuilder::SetupDynamic() {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfBuilder::SetupDynamic() {
   dynamic_builder_.AddDynamicTag(DT_HASH, 0, &hash_builder_);
   dynamic_builder_.AddDynamicTag(DT_STRTAB, 0, &dynsym_builder_.strtab_);
   dynamic_builder_.AddDynamicTag(DT_SYMTAB, 0, &dynsym_builder_);
-  dynamic_builder_.AddDynamicTag(DT_SYMENT, sizeof(Elf32_Sym));
+  dynamic_builder_.AddDynamicTag(DT_SYMENT, sizeof(Elf_Sym));
 }
 
-void ElfWriterQuick::ElfBuilder::SetupRequiredSymbols() {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfBuilder::SetupRequiredSymbols() {
   dynsym_builder_.AddSymbol("oatdata", &rodata_builder_, 0, true,
                             rodata_builder_.size_, STB_GLOBAL, STT_OBJECT);
   dynsym_builder_.AddSymbol("oatexec", &text_builder_, 0, true,
@@ -544,14 +611,22 @@ void ElfWriterQuick::ElfBuilder::SetupRequiredSymbols() {
                             true, 4, STB_GLOBAL, STT_OBJECT);
 }
 
-void ElfWriterQuick::ElfDynamicBuilder::AddDynamicTag(Elf32_Sword tag, Elf32_Word d_un) {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfDynamicBuilder::AddDynamicTag(Elf_Sword tag, Elf_Word d_un) {
   if (tag == DT_NULL) {
     return;
   }
   dynamics_.push_back({nullptr, tag, d_un});
 }
 
-void ElfWriterQuick::ElfDynamicBuilder::AddDynamicTag(Elf32_Sword tag, Elf32_Word d_un,
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfDynamicBuilder::AddDynamicTag(Elf_Sword tag, Elf_Word d_un,
                                                       ElfSectionBuilder* section) {
   if (tag == DT_NULL) {
     return;
@@ -559,9 +634,13 @@ void ElfWriterQuick::ElfDynamicBuilder::AddDynamicTag(Elf32_Sword tag, Elf32_Wor
   dynamics_.push_back({section, tag, d_un});
 }
 
-std::vector<Elf32_Dyn> ElfWriterQuick::ElfDynamicBuilder::GetDynamics(Elf32_Word strsz,
-                                                                      Elf32_Word soname) {
-  std::vector<Elf32_Dyn> ret;
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+std::vector<Elf_Dyn> ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfDynamicBuilder::GetDynamics(Elf_Word strsz,
+                                                                    Elf_Word soname) {
+  std::vector<Elf_Dyn> ret;
   for (auto it = dynamics_.cbegin(); it != dynamics_.cend(); ++it) {
     if (it->section_) {
       // We are adding an address relative to a section.
@@ -577,15 +656,19 @@ std::vector<Elf32_Dyn> ElfWriterQuick::ElfDynamicBuilder::GetDynamics(Elf32_Word
   return ret;
 }
 
-std::vector<Elf32_Sym> ElfWriterQuick::ElfSymtabBuilder::GenerateSymtab() {
-  std::vector<Elf32_Sym> ret;
-  Elf32_Sym undef_sym;
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+std::vector<Elf_Sym> ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfSymtabBuilder::GenerateSymtab() {
+  std::vector<Elf_Sym> ret;
+  Elf_Sym undef_sym;
   memset(&undef_sym, 0, sizeof(undef_sym));
   undef_sym.st_shndx = SHN_UNDEF;
   ret.push_back(undef_sym);
 
   for (auto it = symbols_.cbegin(); it != symbols_.cend(); ++it) {
-    Elf32_Sym sym;
+    Elf_Sym sym;
     memset(&sym, 0, sizeof(sym));
     sym.st_name = it->name_idx_;
     if (it->is_relative_) {
@@ -603,7 +686,11 @@ std::vector<Elf32_Sym> ElfWriterQuick::ElfSymtabBuilder::GenerateSymtab() {
   return ret;
 }
 
-std::string ElfWriterQuick::ElfSymtabBuilder::GenerateStrtab() {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+std::string ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfSymtabBuilder::GenerateStrtab() {
   std::string tab;
   tab += '\0';
   for (auto it = symbols_.begin(); it != symbols_.end(); ++it) {
@@ -615,7 +702,11 @@ std::string ElfWriterQuick::ElfSymtabBuilder::GenerateStrtab() {
   return tab;
 }
 
-void ElfWriterQuick::ElfBuilder::AssignSectionStr(
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfBuilder::AssignSectionStr(
     ElfSectionBuilder* builder, std::string* strtab) {
   builder->section_.sh_name = strtab->size();
   *strtab += builder->name_;
@@ -640,8 +731,11 @@ static unsigned elfhash(const char *_name) {
   return h;
 }
 
-
-std::vector<Elf32_Word> ElfWriterQuick::ElfSymtabBuilder::GenerateHashContents() {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+std::vector<Elf_Word> ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfSymtabBuilder::GenerateHashContents() {
   // Here is how The ELF hash table works.
   // There are 3 arrays to worry about.
   // * The symbol table where the symbol information is.
@@ -651,7 +745,7 @@ std::vector<Elf32_Word> ElfWriterQuick::ElfSymtabBuilder::GenerateHashContents()
   // Lets say the state is something like this.
   // +--------+       +--------+      +-----------+
   // | symtab |       | bucket |      |   chain   |
-  // |  nullptr  |       | 1      |      | STN_UNDEF |
+  // |  null  |       | 1      |      | STN_UNDEF |
   // | <sym1> |       | 4      |      | 2         |
   // | <sym2> |       |        |      | 5         |
   // | <sym3> |       |        |      | STN_UNDEF |
@@ -675,8 +769,8 @@ std::vector<Elf32_Word> ElfWriterQuick::ElfSymtabBuilder::GenerateHashContents()
 
   // Select number of buckets.
   // This is essentially arbitrary.
-  Elf32_Word nbuckets;
-  Elf32_Word chain_size = GetSize();
+  Elf_Word nbuckets;
+  Elf_Word chain_size = GetSize();
   if (symbols_.size() < 8) {
     nbuckets = 2;
   } else if (symbols_.size() < 32) {
@@ -687,22 +781,22 @@ std::vector<Elf32_Word> ElfWriterQuick::ElfSymtabBuilder::GenerateHashContents()
     // Have about 32 ids per bucket.
     nbuckets = RoundUp(symbols_.size()/32, 2);
   }
-  std::vector<Elf32_Word> hash;
+  std::vector<Elf_Word> hash;
   hash.push_back(nbuckets);
   hash.push_back(chain_size);
   uint32_t bucket_offset = hash.size();
   uint32_t chain_offset = bucket_offset + nbuckets;
   hash.resize(hash.size() + nbuckets + chain_size, 0);
 
-  Elf32_Word* buckets = hash.data() + bucket_offset;
-  Elf32_Word* chain   = hash.data() + chain_offset;
+  Elf_Word* buckets = hash.data() + bucket_offset;
+  Elf_Word* chain   = hash.data() + chain_offset;
 
   // Set up the actual hash table.
-  for (Elf32_Word i = 0; i < symbols_.size(); i++) {
+  for (Elf_Word i = 0; i < symbols_.size(); i++) {
     // Add 1 since we need to have the null symbol that is not in the symbols
     // list.
-    Elf32_Word index = i + 1;
-    Elf32_Word hash_val = static_cast<Elf32_Word>(elfhash(symbols_[i].name_.c_str())) % nbuckets;
+    Elf_Word index = i + 1;
+    Elf_Word hash_val = static_cast<Elf_Word>(elfhash(symbols_[i].name_.c_str())) % nbuckets;
     if (buckets[hash_val] == 0) {
       buckets[hash_val] = index;
     } else {
@@ -716,14 +810,18 @@ std::vector<Elf32_Word> ElfWriterQuick::ElfSymtabBuilder::GenerateHashContents()
       // Check for loops. Works because if this is non-empty then there must be
       // another cell which already contains the same symbol index as this one,
       // which means some symbol has more then one name, which isn't allowed.
-      CHECK_EQ(chain[index], static_cast<Elf32_Word>(0));
+      CHECK_EQ(chain[index], static_cast<Elf_Word>(0));
     }
   }
 
   return hash;
 }
 
-void ElfWriterQuick::ElfBuilder::SetupEhdr() {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfBuilder::SetupEhdr() {
   memset(&elf_header_, 0, sizeof(elf_header_));
   elf_header_.e_ident[EI_MAG0]       = ELFMAG0;
   elf_header_.e_ident[EI_MAG1]       = ELFMAG1;
@@ -737,13 +835,17 @@ void ElfWriterQuick::ElfBuilder::SetupEhdr() {
   elf_header_.e_type = ET_DYN;
   elf_header_.e_version = 1;
   elf_header_.e_entry = 0;
-  elf_header_.e_ehsize = sizeof(Elf32_Ehdr);
-  elf_header_.e_phentsize = sizeof(Elf32_Phdr);
-  elf_header_.e_shentsize = sizeof(Elf32_Shdr);
-  elf_header_.e_phoff = sizeof(Elf32_Ehdr);
+  elf_header_.e_ehsize = sizeof(Elf_Ehdr);
+  elf_header_.e_phentsize = sizeof(Elf_Phdr);
+  elf_header_.e_shentsize = sizeof(Elf_Shdr);
+  elf_header_.e_phoff = sizeof(Elf_Ehdr);
 }
 
-void ElfWriterQuick::ElfBuilder::SetISA(InstructionSet isa) {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfBuilder::SetISA(InstructionSet isa) {
   switch (isa) {
     case kArm:
       // Fall through.
@@ -784,16 +886,24 @@ void ElfWriterQuick::ElfBuilder::SetISA(InstructionSet isa) {
   }
 }
 
-void ElfWriterQuick::ElfSymtabBuilder::AddSymbol(
-    const std::string& name, const ElfSectionBuilder* section, Elf32_Addr addr,
-    bool is_relative, Elf32_Word size, uint8_t binding, uint8_t type, uint8_t other) {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ElfSymtabBuilder::AddSymbol(
+    const std::string& name, const ElfSectionBuilder* section, Elf_Addr addr,
+    bool is_relative, Elf_Word size, uint8_t binding, uint8_t type, uint8_t other) {
   CHECK(section);
   ElfSymtabBuilder::ElfSymbolState state {name, section, addr, size, is_relative,
                                           MakeStInfo(binding, type), other, 0};
   symbols_.push_back(state);
 }
 
-bool ElfWriterQuick::Create(File* elf_file,
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Create(File* elf_file,
                             OatWriter* oat_writer,
                             const std::vector<const DexFile*>& dex_files,
                             const std::string& android_root,
@@ -803,9 +913,13 @@ bool ElfWriterQuick::Create(File* elf_file,
   return elf_writer.Write(oat_writer, dex_files, android_root, is_host);
 }
 
-// Add patch information to this section. Each patch is a Elf32_Word that
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+// Add patch information to this section. Each patch is an Elf_Word that
 // identifies an offset from the start of the text section
-void ElfWriterQuick::ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug) {
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug) {
   size_t size =
       compiler_driver_->GetCodeToPatch().size() +
       compiler_driver_->GetMethodsToPatch().size() +
@@ -822,131 +936,502 @@ void ElfWriterQuick::ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug)
   }
 }
 
-bool ElfWriterQuick::Write(OatWriter* oat_writer,
+std::vector<uint8_t>* ConstructCIEFrameX86(bool is_x86_64) {
+  std::vector<uint8_t>* cfi_info = new std::vector<uint8_t>;
+
+  // Length (will be filled in later in this routine).
+  if (is_x86_64) {
+    PushWord(cfi_info, 0xffffffff);  // Indicates 64bit
+    PushWord(cfi_info, 0);
+    PushWord(cfi_info, 0);
+  } else {
+    PushWord(cfi_info, 0);
+  }
+
+  // CIE id: always 0.
+  if (is_x86_64) {
+    PushWord(cfi_info, 0);
+    PushWord(cfi_info, 0);
+  } else {
+    PushWord(cfi_info, 0);
+  }
+
+  // Version: always 1.
+  cfi_info->push_back(0x01);
+
+  // Augmentation: 'zR\0'
+  cfi_info->push_back(0x7a);
+  cfi_info->push_back(0x52);
+  cfi_info->push_back(0x0);
+
+  // Code alignment: 1.
+  EncodeUnsignedLeb128(1, cfi_info);
+
+  // Data alignment.
+  if (is_x86_64) {
+    EncodeSignedLeb128(-8, cfi_info);
+  } else {
+    EncodeSignedLeb128(-4, cfi_info);
+  }
+
+  // Return address register.
+  if (is_x86_64) {
+    // R16(RIP)
+    cfi_info->push_back(0x10);
+  } else {
+    // R8(EIP)
+    cfi_info->push_back(0x08);
+  }
+
+  // Augmentation length: 1.
+  cfi_info->push_back(1);
+
+  // Augmentation data.
+  if (is_x86_64) {
+    // 0x04 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata8).
+    cfi_info->push_back(0x04);
+  } else {
+    // 0x03 ((DW_EH_PE_absptr << 4) | DW_EH_PE_udata4).
+    cfi_info->push_back(0x03);
+  }
+
+  // Initial instructions.
+  if (is_x86_64) {
+    // DW_CFA_def_cfa R7(RSP) 8.
+    cfi_info->push_back(0x0c);
+    cfi_info->push_back(0x07);
+    cfi_info->push_back(0x08);
+
+    // DW_CFA_offset R16(RIP) 1 (* -8).
+    cfi_info->push_back(0x90);
+    cfi_info->push_back(0x01);
+  } else {
+    // DW_CFA_def_cfa R4(ESP) 4.
+    cfi_info->push_back(0x0c);
+    cfi_info->push_back(0x04);
+    cfi_info->push_back(0x04);
+
+    // DW_CFA_offset R8(EIP) 1 (* -4).
+    cfi_info->push_back(0x88);
+    cfi_info->push_back(0x01);
+  }
+
+  // Padding to a multiple of 4
+  while ((cfi_info->size() & 3) != 0) {
+    // DW_CFA_nop is encoded as 0.
+    cfi_info->push_back(0);
+  }
+
+  // Set the length of the CIE inside the generated bytes.
+  if (is_x86_64) {
+    uint32_t length = cfi_info->size() - 12;
+    UpdateWord(cfi_info, 4, length);
+  } else {
+    uint32_t length = cfi_info->size() - 4;
+    UpdateWord(cfi_info, 0, length);
+  }
+  return cfi_info;
+}
+
+std::vector<uint8_t>* ConstructCIEFrame(InstructionSet isa) {
+  switch (isa) {
+    case kX86:
+      return ConstructCIEFrameX86(false);
+    case kX86_64:
+      return ConstructCIEFrameX86(true);
+
+    default:
+      // Not implemented.
+      return nullptr;
+  }
+}
+
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+bool ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::Write(OatWriter* oat_writer,
                            const std::vector<const DexFile*>& dex_files_unused,
                            const std::string& android_root_unused,
                            bool is_host_unused) {
-  const bool debug = false;
-  const bool add_symbols = oat_writer->DidAddSymbols();
+  constexpr bool debug = false;
   const OatHeader& oat_header = oat_writer->GetOatHeader();
-  Elf32_Word oat_data_size = oat_header.GetExecutableOffset();
+  Elf_Word oat_data_size = oat_header.GetExecutableOffset();
   uint32_t oat_exec_size = oat_writer->GetSize() - oat_data_size;
 
-  ElfBuilder builder(oat_writer, elf_file_, compiler_driver_->GetInstructionSet(), 0,
-                     oat_data_size, oat_data_size, oat_exec_size, add_symbols, debug);
-
-  if (add_symbols) {
-    AddDebugSymbols(builder, oat_writer, debug);
+  std::unique_ptr<ElfBuilder> builder(new ElfBuilder(
+      oat_writer,
+      elf_file_,
+      compiler_driver_->GetInstructionSet(),
+      0,
+      oat_data_size,
+      oat_data_size,
+      oat_exec_size,
+      compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols(),
+      debug));
+
+  if (!builder->Init()) {
+    return false;
   }
 
-  bool generateDebugInformation = compiler_driver_->GetCallFrameInformation() != nullptr;
-  if (generateDebugInformation) {
-    ElfRawSectionBuilder debug_info(".debug_info",   SHT_PROGBITS, 0, nullptr, 0, 1, 0);
-    ElfRawSectionBuilder debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
-    ElfRawSectionBuilder debug_str(".debug_str",    SHT_PROGBITS, 0, nullptr, 0, 1, 0);
-    ElfRawSectionBuilder eh_frame(".eh_frame",  SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
-    eh_frame.SetBuffer(*compiler_driver_->GetCallFrameInformation());
-
-    FillInCFIInformation(oat_writer, debug_info.GetBuffer(),
-                         debug_abbrev.GetBuffer(), debug_str.GetBuffer());
-    builder.RegisterRawSection(debug_info);
-    builder.RegisterRawSection(debug_abbrev);
-    builder.RegisterRawSection(eh_frame);
-    builder.RegisterRawSection(debug_str);
+  if (compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
+    WriteDebugSymbols(builder.get(), oat_writer);
   }
 
   if (compiler_driver_->GetCompilerOptions().GetIncludePatchInformation()) {
     ElfRawSectionBuilder oat_patches(".oat_patches", SHT_OAT_PATCH, 0, NULL, 0,
-                                     sizeof(size_t), sizeof(size_t));
+                                     sizeof(uintptr_t), sizeof(uintptr_t));
     ReservePatchSpace(oat_patches.GetBuffer(), debug);
-    builder.RegisterRawSection(oat_patches);
+    builder->RegisterRawSection(oat_patches);
   }
 
-  return builder.Write();
+  return builder->Write();
 }
 
-void ElfWriterQuick::AddDebugSymbols(ElfBuilder& builder, OatWriter* oat_writer, bool debug) {
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::WriteDebugSymbols(ElfBuilder* builder, OatWriter* oat_writer) {
+  std::unique_ptr<std::vector<uint8_t>> cfi_info(
+      ConstructCIEFrame(compiler_driver_->GetInstructionSet()));
+
+  Elf_Addr text_section_address = builder->text_builder_.section_.sh_addr;
+
+  // Iterate over the compiled methods.
   const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
-  ElfSymtabBuilder* symtab = &builder.symtab_builder_;
+  ElfSymtabBuilder* symtab = &builder->symtab_builder_;
   for (auto it = method_info.begin(); it != method_info.end(); ++it) {
-    symtab->AddSymbol(it->method_name_, &builder.text_builder_, it->low_pc_, true,
+    symtab->AddSymbol(it->method_name_, &builder->text_builder_, it->low_pc_, true,
                       it->high_pc_ - it->low_pc_, STB_GLOBAL, STT_FUNC);
+
+    // Include CFI for compiled method, if possible.
+    if (cfi_info.get() != nullptr) {
+      DCHECK(it->compiled_method_ != nullptr);
+
+      // Copy in the FDE, if present
+      const std::vector<uint8_t>* fde = it->compiled_method_->GetCFIInfo();
+      if (fde != nullptr) {
+        // Copy the information into cfi_info and then fix the address in the new copy.
+        int cur_offset = cfi_info->size();
+        cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
+
+        bool is_64bit = *(reinterpret_cast<const uint32_t*>(fde->data())) == 0xffffffff;
+
+        // Set the 'CIE_pointer' field.
+        uint64_t CIE_pointer = cur_offset + (is_64bit ? 12 : 4);
+        uint64_t offset_to_update = CIE_pointer;
+        if (is_64bit) {
+          (*cfi_info)[offset_to_update+0] = CIE_pointer;
+          (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
+          (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
+          (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+          (*cfi_info)[offset_to_update+4] = CIE_pointer >> 32;
+          (*cfi_info)[offset_to_update+5] = CIE_pointer >> 40;
+          (*cfi_info)[offset_to_update+6] = CIE_pointer >> 48;
+          (*cfi_info)[offset_to_update+7] = CIE_pointer >> 56;
+        } else {
+          (*cfi_info)[offset_to_update+0] = CIE_pointer;
+          (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
+          (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
+          (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
+        }
+
+        // Set the 'initial_location' field.
+        offset_to_update += is_64bit ? 8 : 4;
+        if (is_64bit) {
+          const uint64_t quick_code_start = it->low_pc_ + text_section_address;
+          (*cfi_info)[offset_to_update+0] = quick_code_start;
+          (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
+          (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
+          (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+          (*cfi_info)[offset_to_update+4] = quick_code_start >> 32;
+          (*cfi_info)[offset_to_update+5] = quick_code_start >> 40;
+          (*cfi_info)[offset_to_update+6] = quick_code_start >> 48;
+          (*cfi_info)[offset_to_update+7] = quick_code_start >> 56;
+        } else {
+          const uint32_t quick_code_start = it->low_pc_ + text_section_address;
+          (*cfi_info)[offset_to_update+0] = quick_code_start;
+          (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
+          (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
+          (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
+        }
+      }
+    }
   }
-}
 
-static void UpdateWord(std::vector<uint8_t>*buf, int offset, int data) {
-  (*buf)[offset+0] = data;
-  (*buf)[offset+1] = data >> 8;
-  (*buf)[offset+2] = data >> 16;
-  (*buf)[offset+3] = data >> 24;
-}
+  bool hasCFI = (cfi_info.get() != nullptr);
+  bool hasLineInfo = false;
+  for (auto& dbg_info : oat_writer->GetCFIMethodInfo()) {
+    if (dbg_info.dbgstream_ != nullptr &&
+        !dbg_info.compiled_method_->GetSrcMappingTable().empty()) {
+      hasLineInfo = true;
+      break;
+    }
+  }
 
-static void PushWord(std::vector<uint8_t>*buf, int data) {
-  buf->push_back(data & 0xff);
-  buf->push_back((data >> 8) & 0xff);
-  buf->push_back((data >> 16) & 0xff);
-  buf->push_back((data >> 24) & 0xff);
+  if (hasLineInfo || hasCFI) {
+    ElfRawSectionBuilder debug_info(".debug_info",     SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+    ElfRawSectionBuilder debug_abbrev(".debug_abbrev", SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+    ElfRawSectionBuilder debug_str(".debug_str",       SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+    ElfRawSectionBuilder debug_line(".debug_line",     SHT_PROGBITS, 0, nullptr, 0, 1, 0);
+
+    FillInCFIInformation(oat_writer, debug_info.GetBuffer(),
+                         debug_abbrev.GetBuffer(), debug_str.GetBuffer(),
+                         hasLineInfo ? debug_line.GetBuffer() : nullptr,
+                         text_section_address);
+
+    builder->RegisterRawSection(debug_info);
+    builder->RegisterRawSection(debug_abbrev);
+
+    if (hasCFI) {
+      ElfRawSectionBuilder eh_frame(".eh_frame",  SHT_PROGBITS, SHF_ALLOC, nullptr, 0, 4, 0);
+      eh_frame.SetBuffer(std::move(*cfi_info.get()));
+      builder->RegisterRawSection(eh_frame);
+    }
+
+    if (hasLineInfo) {
+      builder->RegisterRawSection(debug_line);
+    }
+
+    builder->RegisterRawSection(debug_str);
+  }
 }
 
-static void PushHalf(std::vector<uint8_t>*buf, int data) {
-  buf->push_back(data & 0xff);
-  buf->push_back((data >> 8) & 0xff);
+class LineTableGenerator FINAL : public Leb128Encoder {
+ public:
+  LineTableGenerator(int line_base, int line_range, int opcode_base,
+                     std::vector<uint8_t>* data, uintptr_t current_address,
+                     size_t current_line)
+    : Leb128Encoder(data), line_base_(line_base), line_range_(line_range),
+      opcode_base_(opcode_base), current_address_(current_address),
+      current_line_(current_line), current_file_index_(0) {}
+
+  void PutDelta(unsigned delta_addr, int delta_line) {
+    current_line_ += delta_line;
+    current_address_ += delta_addr;
+
+    if (delta_line >= line_base_ && delta_line < line_base_ + line_range_) {
+      unsigned special_opcode = (delta_line - line_base_) +
+                                (line_range_ * delta_addr) + opcode_base_;
+      if (special_opcode <= 255) {
+        PushByte(data_, special_opcode);
+        return;
+      }
+    }
+
+    // generate standard opcode for address advance
+    if (delta_addr != 0) {
+      PushByte(data_, DW_LNS_advance_pc);
+      PushBackUnsigned(delta_addr);
+    }
+
+    // generate standard opcode for line delta
+    if (delta_line != 0) {
+      PushByte(data_, DW_LNS_advance_line);
+      PushBackSigned(delta_line);
+    }
+
+    // generate standard opcode for new line table entry
+    PushByte(data_, DW_LNS_copy);
+  }
+
+  void SetAddr(uintptr_t addr) {
+    if (current_address_ == addr) {
+      return;
+    }
+
+    current_address_ = addr;
+
+    PushByte(data_, 0);  // extended opcode:
+    PushByte(data_, 1 + 4);  // length: opcode_size + address_size
+    PushByte(data_, DW_LNE_set_address);
+    PushWord(data_, addr);
+  }
+
+  void SetLine(unsigned line) {
+    int delta_line = line - current_line_;
+    if (delta_line) {
+      current_line_ = line;
+      PushByte(data_, DW_LNS_advance_line);
+      PushBackSigned(delta_line);
+    }
+  }
+
+  void SetFile(unsigned file_index) {
+    if (current_file_index_ != file_index) {
+      current_file_index_ = file_index;
+      PushByte(data_, DW_LNS_set_file);
+      PushBackUnsigned(file_index);
+    }
+  }
+
+  void EndSequence() {
+    // End of Line Table Program
+    // 0(=ext), 1(len), DW_LNE_end_sequence
+    PushByte(data_, 0);
+    PushByte(data_, 1);
+    PushByte(data_, DW_LNE_end_sequence);
+  }
+
+ private:
+  const int line_base_;
+  const int line_range_;
+  const int opcode_base_;
+  uintptr_t current_address_;
+  size_t current_line_;
+  unsigned current_file_index_;
+
+  DISALLOW_COPY_AND_ASSIGN(LineTableGenerator);
+};
+
+// TODO: rewriting it using DexFile::DecodeDebugInfo needs unneeded stuff.
+static void GetLineInfoForJava(const uint8_t* dbgstream, const SrcMap& pc2dex,
+                               SrcMap* result, uint32_t start_pc = 0) {
+  if (dbgstream == nullptr) {
+    return;
+  }
+
+  int adjopcode;
+  uint32_t dex_offset = 0;
+  uint32_t java_line = DecodeUnsignedLeb128(&dbgstream);
+
+  // skip parameters
+  for (uint32_t param_count = DecodeUnsignedLeb128(&dbgstream); param_count != 0; --param_count) {
+    DecodeUnsignedLeb128(&dbgstream);
+  }
+
+  for (bool is_end = false; is_end == false; ) {
+    uint8_t opcode = *dbgstream;
+    dbgstream++;
+    switch (opcode) {
+    case DexFile::DBG_END_SEQUENCE:
+      is_end = true;
+      break;
+
+    case DexFile::DBG_ADVANCE_PC:
+      dex_offset += DecodeUnsignedLeb128(&dbgstream);
+      break;
+
+    case DexFile::DBG_ADVANCE_LINE:
+      java_line += DecodeSignedLeb128(&dbgstream);
+      break;
+
+    case DexFile::DBG_START_LOCAL:
+    case DexFile::DBG_START_LOCAL_EXTENDED:
+      DecodeUnsignedLeb128(&dbgstream);
+      DecodeUnsignedLeb128(&dbgstream);
+      DecodeUnsignedLeb128(&dbgstream);
+
+      if (opcode == DexFile::DBG_START_LOCAL_EXTENDED) {
+        DecodeUnsignedLeb128(&dbgstream);
+      }
+      break;
+
+    case DexFile::DBG_END_LOCAL:
+    case DexFile::DBG_RESTART_LOCAL:
+      DecodeUnsignedLeb128(&dbgstream);
+      break;
+
+    case DexFile::DBG_SET_PROLOGUE_END:
+    case DexFile::DBG_SET_EPILOGUE_BEGIN:
+    case DexFile::DBG_SET_FILE:
+      break;
+
+    default:
+      adjopcode = opcode - DexFile::DBG_FIRST_SPECIAL;
+      dex_offset += adjopcode / DexFile::DBG_LINE_RANGE;
+      java_line += DexFile::DBG_LINE_BASE + (adjopcode % DexFile::DBG_LINE_RANGE);
+
+      for (SrcMap::const_iterator found = pc2dex.FindByTo(dex_offset);
+          found != pc2dex.end() && found->to_ == static_cast<int32_t>(dex_offset);
+          found++) {
+        result->push_back({found->from_ + start_pc, static_cast<int32_t>(java_line)});
+      }
+      break;
+    }
+  }
 }
 
-void ElfWriterQuick::FillInCFIInformation(OatWriter* oat_writer,
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
+void ElfWriterQuick<Elf_Word, Elf_Sword, Elf_Addr, Elf_Dyn,
+  Elf_Sym, Elf_Ehdr, Elf_Phdr, Elf_Shdr>::FillInCFIInformation(OatWriter* oat_writer,
                                           std::vector<uint8_t>* dbg_info,
                                           std::vector<uint8_t>* dbg_abbrev,
-                                          std::vector<uint8_t>* dbg_str) {
+                                          std::vector<uint8_t>* dbg_str,
+                                          std::vector<uint8_t>* dbg_line,
+                                          uint32_t text_section_offset) {
+  const std::vector<OatWriter::DebugInfo>& method_info = oat_writer->GetCFIMethodInfo();
+
+  uint32_t producer_str_offset = PushStr(dbg_str, "Android dex2oat");
+
   // Create the debug_abbrev section with boilerplate information.
   // We only care about low_pc and high_pc right now for the compilation
   // unit and methods.
 
   // Tag 1: Compilation unit: DW_TAG_compile_unit.
-  dbg_abbrev->push_back(1);
-  dbg_abbrev->push_back(DW_TAG_compile_unit);
+  PushByte(dbg_abbrev, 1);
+  PushByte(dbg_abbrev, DW_TAG_compile_unit);
 
   // There are children (the methods).
-  dbg_abbrev->push_back(DW_CHILDREN_yes);
+  PushByte(dbg_abbrev, DW_CHILDREN_yes);
+
+  // DW_AT_producer DW_FORM_data1.
+  // REVIEW: we can get rid of dbg_str section if
+  // DW_FORM_string (immediate string) was used everywhere instead of
+  // DW_FORM_strp (ref to string from .debug_str section).
+  // DW_FORM_strp makes sense only if we reuse the strings.
+  PushByte(dbg_abbrev, DW_AT_producer);
+  PushByte(dbg_abbrev, DW_FORM_strp);
 
   // DW_LANG_Java DW_FORM_data1.
-  dbg_abbrev->push_back(DW_AT_language);
-  dbg_abbrev->push_back(DW_FORM_data1);
+  PushByte(dbg_abbrev, DW_AT_language);
+  PushByte(dbg_abbrev, DW_FORM_data1);
 
   // DW_AT_low_pc DW_FORM_addr.
-  dbg_abbrev->push_back(DW_AT_low_pc);
-  dbg_abbrev->push_back(DW_FORM_addr);
+  PushByte(dbg_abbrev, DW_AT_low_pc);
+  PushByte(dbg_abbrev, DW_FORM_addr);
 
   // DW_AT_high_pc DW_FORM_addr.
-  dbg_abbrev->push_back(DW_AT_high_pc);
-  dbg_abbrev->push_back(DW_FORM_addr);
+  PushByte(dbg_abbrev, DW_AT_high_pc);
+  PushByte(dbg_abbrev, DW_FORM_addr);
+
+  if (dbg_line != nullptr) {
+    // DW_AT_stmt_list DW_FORM_sec_offset.
+    PushByte(dbg_abbrev, DW_AT_stmt_list);
+    PushByte(dbg_abbrev, DW_FORM_sec_offset);
+  }
 
   // End of DW_TAG_compile_unit.
   PushHalf(dbg_abbrev, 0);
 
   // Tag 2: Compilation unit: DW_TAG_subprogram.
-  dbg_abbrev->push_back(2);
-  dbg_abbrev->push_back(DW_TAG_subprogram);
+  PushByte(dbg_abbrev, 2);
+  PushByte(dbg_abbrev, DW_TAG_subprogram);
 
   // There are no children.
-  dbg_abbrev->push_back(DW_CHILDREN_no);
+  PushByte(dbg_abbrev, DW_CHILDREN_no);
 
   // Name of the method.
-  dbg_abbrev->push_back(DW_AT_name);
-  dbg_abbrev->push_back(DW_FORM_strp);
+  PushByte(dbg_abbrev, DW_AT_name);
+  PushByte(dbg_abbrev, DW_FORM_strp);
 
   // DW_AT_low_pc DW_FORM_addr.
-  dbg_abbrev->push_back(DW_AT_low_pc);
-  dbg_abbrev->push_back(DW_FORM_addr);
+  PushByte(dbg_abbrev, DW_AT_low_pc);
+  PushByte(dbg_abbrev, DW_FORM_addr);
 
   // DW_AT_high_pc DW_FORM_addr.
-  dbg_abbrev->push_back(DW_AT_high_pc);
-  dbg_abbrev->push_back(DW_FORM_addr);
+  PushByte(dbg_abbrev, DW_AT_high_pc);
+  PushByte(dbg_abbrev, DW_FORM_addr);
 
   // End of DW_TAG_subprogram.
   PushHalf(dbg_abbrev, 0);
 
   // Start the debug_info section with the header information
   // 'unit_length' will be filled in later.
+  int cunit_length = dbg_info->size();
   PushWord(dbg_info, 0);
 
   // 'version' - 3.
@@ -956,55 +1441,160 @@ void ElfWriterQuick::FillInCFIInformation(OatWriter* oat_writer,
   PushWord(dbg_info, 0);
 
   // Address size: 4.
-  dbg_info->push_back(4);
+  PushByte(dbg_info, 4);
 
   // Start the description for the compilation unit.
   // This uses tag 1.
-  dbg_info->push_back(1);
+  PushByte(dbg_info, 1);
+
+  // The producer is Android dex2oat.
+  PushWord(dbg_info, producer_str_offset);
 
   // The language is Java.
-  dbg_info->push_back(DW_LANG_Java);
+  PushByte(dbg_info, DW_LANG_Java);
 
-  // Leave space for low_pc and high_pc.
-  int low_pc_offset = dbg_info->size();
+  // low_pc and high_pc.
+  uint32_t cunit_low_pc = 0 - 1;
+  uint32_t cunit_high_pc = 0;
+  int cunit_low_pc_pos = dbg_info->size();
   PushWord(dbg_info, 0);
   PushWord(dbg_info, 0);
 
-  // Walk through the information in the method table, and enter into dbg_info.
-  const std::vector<OatWriter::DebugInfo>& dbg = oat_writer->GetCFIMethodInfo();
-  uint32_t low_pc = 0xFFFFFFFFU;
-  uint32_t high_pc = 0;
+  if (dbg_line == nullptr) {
+    for (size_t i = 0; i < method_info.size(); ++i) {
+      const OatWriter::DebugInfo &dbg = method_info[i];
+
+      cunit_low_pc = std::min(cunit_low_pc, dbg.low_pc_);
+      cunit_high_pc = std::max(cunit_high_pc, dbg.high_pc_);
+
+      // Start a new TAG: subroutine (2).
+      PushByte(dbg_info, 2);
 
-  for (uint32_t i = 0; i < dbg.size(); i++) {
-    const OatWriter::DebugInfo& info = dbg[i];
-    if (info.low_pc_ < low_pc) {
-      low_pc = info.low_pc_;
+      // Enter name, low_pc, high_pc.
+      PushWord(dbg_info, PushStr(dbg_str, dbg.method_name_));
+      PushWord(dbg_info, dbg.low_pc_ + text_section_offset);
+      PushWord(dbg_info, dbg.high_pc_ + text_section_offset);
     }
-    if (info.high_pc_ > high_pc) {
-      high_pc = info.high_pc_;
+  } else {
+    // TODO: in gdb info functions <regexp> - reports Java functions, but
+    // source file is <unknown> because .debug_line is formed as one
+    // compilation unit. To fix this it is possible to generate
+    // a separate compilation unit for every distinct Java source.
+    // Each of the these compilation units can have several non-adjacent
+    // method ranges.
+
+    // Line number table offset
+    PushWord(dbg_info, dbg_line->size());
+
+    size_t lnt_length = dbg_line->size();
+    PushWord(dbg_line, 0);
+
+    PushHalf(dbg_line, 4);  // LNT Version DWARF v4 => 4
+
+    size_t lnt_hdr_length = dbg_line->size();
+    PushWord(dbg_line, 0);  // TODO: 64-bit uses 8-byte here
+
+    PushByte(dbg_line, 1);  // minimum_instruction_length (ubyte)
+    PushByte(dbg_line, 1);  // maximum_operations_per_instruction (ubyte) = always 1
+    PushByte(dbg_line, 1);  // default_is_stmt (ubyte)
+
+    const int8_t LINE_BASE = -5;
+    PushByte(dbg_line, LINE_BASE);  // line_base (sbyte)
+
+    const uint8_t LINE_RANGE = 14;
+    PushByte(dbg_line, LINE_RANGE);  // line_range (ubyte)
+
+    const uint8_t OPCODE_BASE = 13;
+    PushByte(dbg_line, OPCODE_BASE);  // opcode_base (ubyte)
+
+    // Standard_opcode_lengths (array of ubyte).
+    PushByte(dbg_line, 0); PushByte(dbg_line, 1); PushByte(dbg_line, 1);
+    PushByte(dbg_line, 1); PushByte(dbg_line, 1); PushByte(dbg_line, 0);
+    PushByte(dbg_line, 0); PushByte(dbg_line, 0); PushByte(dbg_line, 1);
+    PushByte(dbg_line, 0); PushByte(dbg_line, 0); PushByte(dbg_line, 1);
+
+    PushByte(dbg_line, 0);  // include_directories (sequence of path names) = EMPTY
+
+    // File_names (sequence of file entries).
+    std::unordered_map<const char*, size_t> files;
+    for (size_t i = 0; i < method_info.size(); ++i) {
+      const OatWriter::DebugInfo &dbg = method_info[i];
+      // TODO: add package directory to the file name
+      const char* file_name = dbg.src_file_name_ == nullptr ? "null" : dbg.src_file_name_;
+      auto found = files.find(file_name);
+      if (found == files.end()) {
+        size_t file_index = 1 + files.size();
+        files[file_name] = file_index;
+        PushStr(dbg_line, file_name);
+        PushByte(dbg_line, 0);  // include directory index = LEB128(0) - no directory
+        PushByte(dbg_line, 0);  // modification time = LEB128(0) - NA
+        PushByte(dbg_line, 0);  // file length = LEB128(0) - NA
+      }
+    }
+    PushByte(dbg_line, 0);  // End of file_names.
+
+    // Set lnt header length.
+    UpdateWord(dbg_line, lnt_hdr_length, dbg_line->size() - lnt_hdr_length - 4);
+
+    // Generate Line Number Program code, one long program for all methods.
+    LineTableGenerator line_table_generator(LINE_BASE, LINE_RANGE, OPCODE_BASE,
+                                            dbg_line, 0, 1);
+
+    SrcMap pc2java_map;
+    for (size_t i = 0; i < method_info.size(); ++i) {
+      const OatWriter::DebugInfo &dbg = method_info[i];
+      const char* file_name = (dbg.src_file_name_ == nullptr) ? "null" : dbg.src_file_name_;
+      size_t file_index = files[file_name];
+      DCHECK_NE(file_index, 0U) << file_name;
+
+      cunit_low_pc = std::min(cunit_low_pc, dbg.low_pc_);
+      cunit_high_pc = std::max(cunit_high_pc, dbg.high_pc_);
+
+      // Start a new TAG: subroutine (2).
+      PushByte(dbg_info, 2);
+
+      // Enter name, low_pc, high_pc.
+      PushWord(dbg_info, PushStr(dbg_str, dbg.method_name_));
+      PushWord(dbg_info, dbg.low_pc_ + text_section_offset);
+      PushWord(dbg_info, dbg.high_pc_ + text_section_offset);
+
+      GetLineInfoForJava(dbg.dbgstream_, dbg.compiled_method_->GetSrcMappingTable(),
+                         &pc2java_map, dbg.low_pc_);
+      pc2java_map.DeltaFormat({dbg.low_pc_, 1}, dbg.high_pc_);
+      if (!pc2java_map.empty()) {
+        line_table_generator.SetFile(file_index);
+        line_table_generator.SetAddr(dbg.low_pc_ + text_section_offset);
+        line_table_generator.SetLine(1);
+        for (auto& src_map_elem : pc2java_map) {
+          line_table_generator.PutDelta(src_map_elem.from_, src_map_elem.to_);
+        }
+        pc2java_map.clear();
+      }
     }
 
-    // Start a new TAG: subroutine (2).
-    dbg_info->push_back(2);
-
-    // Enter the name into the string table (and NUL terminate).
-    uint32_t str_offset = dbg_str->size();
-    dbg_str->insert(dbg_str->end(), info.method_name_.begin(), info.method_name_.end());
-    dbg_str->push_back('\0');
+    // End Sequence should have the highest address set.
+    line_table_generator.SetAddr(cunit_high_pc + text_section_offset);
+    line_table_generator.EndSequence();
 
-    // Enter name, low_pc, high_pc.
-    PushWord(dbg_info, str_offset);
-    PushWord(dbg_info, info.low_pc_);
-    PushWord(dbg_info, info.high_pc_);
+    // set lnt length
+    UpdateWord(dbg_line, lnt_length, dbg_line->size() - lnt_length - 4);
   }
 
   // One byte terminator
-  dbg_info->push_back(0);
+  PushByte(dbg_info, 0);
+
+  // Fill in cunit's low_pc and high_pc.
+  UpdateWord(dbg_info, cunit_low_pc_pos, cunit_low_pc + text_section_offset);
+  UpdateWord(dbg_info, cunit_low_pc_pos + 4, cunit_high_pc + text_section_offset);
 
-  // We have now walked all the methods.  Fill in lengths and low/high PCs.
-  UpdateWord(dbg_info, 0, dbg_info->size() - 4);
-  UpdateWord(dbg_info, low_pc_offset, low_pc);
-  UpdateWord(dbg_info, low_pc_offset + 4, high_pc);
+  // We have now walked all the methods.  Fill in lengths.
+  UpdateWord(dbg_info, cunit_length, dbg_info->size() - cunit_length - 4);
 }
 
+// Explicit instantiations
+template class ElfWriterQuick<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
+                              Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr>;
+template class ElfWriterQuick<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
+                              Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr>;
+
 }  // namespace art
index a0d36df..890528e 100644 (file)
@@ -23,6 +23,9 @@
 
 namespace art {
 
+template <typename Elf_Word, typename Elf_Sword, typename Elf_Addr,
+          typename Elf_Dyn, typename Elf_Sym, typename Elf_Ehdr,
+          typename Elf_Phdr, typename Elf_Shdr>
 class ElfWriterQuick FINAL : public ElfWriter {
  public:
   // Write an ELF file. Returns true on success, false on failure.
@@ -48,16 +51,14 @@ class ElfWriterQuick FINAL : public ElfWriter {
   ~ElfWriterQuick() {}
 
   class ElfBuilder;
-  void AddDebugSymbols(ElfBuilder& builder,
-                       OatWriter* oat_writer,
-                       bool debug);
+  void WriteDebugSymbols(ElfBuilder* builder, OatWriter* oat_writer);
   void ReservePatchSpace(std::vector<uint8_t>* buffer, bool debug);
 
   class ElfSectionBuilder {
    public:
-    ElfSectionBuilder(const std::string& sec_name, Elf32_Word type, Elf32_Word flags,
-                      const ElfSectionBuilder *link, Elf32_Word info, Elf32_Word align,
-                      Elf32_Word entsize)
+    ElfSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
+                      const ElfSectionBuilder *link, Elf_Word info, Elf_Word align,
+                      Elf_Word entsize)
         : name_(sec_name), link_(link) {
       memset(&section_, 0, sizeof(section_));
       section_.sh_type = type;
@@ -69,14 +70,14 @@ class ElfWriterQuick FINAL : public ElfWriter {
 
     virtual ~ElfSectionBuilder() {}
 
-    Elf32_Shdr section_;
-    Elf32_Word section_index_ = 0;
+    Elf_Shdr section_;
+    Elf_Word section_index_ = 0;
 
    protected:
     const std::string name_;
     const ElfSectionBuilder* link_;
 
-    Elf32_Word GetLink() {
+    Elf_Word GetLink() {
       return (link_) ? link_->section_index_ : 0;
     }
 
@@ -86,22 +87,22 @@ class ElfWriterQuick FINAL : public ElfWriter {
 
   class ElfDynamicBuilder : public ElfSectionBuilder {
    public:
-    void AddDynamicTag(Elf32_Sword tag, Elf32_Word d_un);
-    void AddDynamicTag(Elf32_Sword tag, Elf32_Word offset, ElfSectionBuilder* section);
+    void AddDynamicTag(Elf_Sword tag, Elf_Word d_un);
+    void AddDynamicTag(Elf_Sword tag, Elf_Word offset, ElfSectionBuilder* section);
 
     ElfDynamicBuilder(const std::string& sec_name, ElfSectionBuilder *link)
         : ElfSectionBuilder(sec_name, SHT_DYNAMIC, SHF_ALLOC | SHF_ALLOC, link,
-                            0, kPageSize, sizeof(Elf32_Dyn)) {}
+                            0, kPageSize, sizeof(Elf_Dyn)) {}
     ~ElfDynamicBuilder() {}
 
    protected:
     struct ElfDynamicState {
       ElfSectionBuilder* section_;
-      Elf32_Sword tag_;
-      Elf32_Word off_;
+      Elf_Sword tag_;
+      Elf_Word off_;
     };
     std::vector<ElfDynamicState> dynamics_;
-    Elf32_Word GetSize() {
+    Elf_Word GetSize() {
       // Add 1 for the DT_NULL, 1 for DT_STRSZ, and 1 for DT_SONAME. All of
       // these must be added when we actually put the file together because
       // their values are very dependent on state.
@@ -112,7 +113,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
     // table and soname_off should be the offset of the soname in .dynstr.
     // Since niether can be found prior to final layout we will wait until here
     // to add them.
-    std::vector<Elf32_Dyn> GetDynamics(Elf32_Word strsz, Elf32_Word soname_off);
+    std::vector<Elf_Dyn> GetDynamics(Elf_Word strsz, Elf_Word soname_off);
 
    private:
     friend class ElfBuilder;
@@ -120,13 +121,13 @@ class ElfWriterQuick FINAL : public ElfWriter {
 
   class ElfRawSectionBuilder : public ElfSectionBuilder {
    public:
-    ElfRawSectionBuilder(const std::string& sec_name, Elf32_Word type, Elf32_Word flags,
-                         const ElfSectionBuilder* link, Elf32_Word info, Elf32_Word align,
-                         Elf32_Word entsize)
+    ElfRawSectionBuilder(const std::string& sec_name, Elf_Word type, Elf_Word flags,
+                         const ElfSectionBuilder* link, Elf_Word info, Elf_Word align,
+                         Elf_Word entsize)
         : ElfSectionBuilder(sec_name, type, flags, link, info, align, entsize) {}
     ~ElfRawSectionBuilder() {}
     std::vector<uint8_t>* GetBuffer() { return &buf_; }
-    void SetBuffer(std::vector<uint8_t> buf) { buf_ = buf; }
+    void SetBuffer(std::vector<uint8_t>&& buf) { buf_ = buf; }
 
    protected:
     std::vector<uint8_t> buf_;
@@ -137,17 +138,17 @@ class ElfWriterQuick FINAL : public ElfWriter {
 
   class ElfOatSectionBuilder : public ElfSectionBuilder {
    public:
-    ElfOatSectionBuilder(const std::string& sec_name, Elf32_Word size, Elf32_Word offset,
-                         Elf32_Word type, Elf32_Word flags)
+    ElfOatSectionBuilder(const std::string& sec_name, Elf_Word size, Elf_Word offset,
+                         Elf_Word type, Elf_Word flags)
         : ElfSectionBuilder(sec_name, type, flags, NULL, 0, kPageSize, 0),
           offset_(offset), size_(size) {}
     ~ElfOatSectionBuilder() {}
 
    protected:
     // Offset of the content within the file.
-    Elf32_Word offset_;
+    Elf_Word offset_;
     // Size of the content within the file.
-    Elf32_Word size_;
+    Elf_Word size_;
 
    private:
     friend class ElfBuilder;
@@ -159,27 +160,27 @@ class ElfWriterQuick FINAL : public ElfWriter {
     // 'relative_addr' within the given section and has the given attributes.
     void AddSymbol(const std::string& name,
                    const ElfSectionBuilder* section,
-                   Elf32_Addr addr,
+                   Elf_Addr addr,
                    bool is_relative,
-                   Elf32_Word size,
+                   Elf_Word size,
                    uint8_t binding,
                    uint8_t type,
                    uint8_t other = 0);
 
-    ElfSymtabBuilder(const std::string& sec_name, Elf32_Word type,
-                     const std::string& str_name, Elf32_Word str_type, bool alloc)
+    ElfSymtabBuilder(const std::string& sec_name, Elf_Word type,
+                     const std::string& str_name, Elf_Word str_type, bool alloc)
         : ElfSectionBuilder(sec_name, type, ((alloc) ? SHF_ALLOC : 0U), &strtab_, 0,
-                            sizeof(Elf32_Word), sizeof(Elf32_Sym)),
+                            sizeof(Elf_Word), sizeof(Elf_Sym)),
           str_name_(str_name), str_type_(str_type),
           strtab_(str_name, str_type, ((alloc) ? SHF_ALLOC : 0U), NULL, 0, 1, 1) {}
     ~ElfSymtabBuilder() {}
 
    protected:
-    std::vector<Elf32_Word> GenerateHashContents();
+    std::vector<Elf_Word> GenerateHashContents();
     std::string GenerateStrtab();
-    std::vector<Elf32_Sym> GenerateSymtab();
+    std::vector<Elf_Sym> GenerateSymtab();
 
-    Elf32_Word GetSize() {
+    Elf_Word GetSize() {
       // 1 is for the implicit NULL symbol.
       return symbols_.size() + 1;
     }
@@ -187,18 +188,18 @@ class ElfWriterQuick FINAL : public ElfWriter {
     struct ElfSymbolState {
       const std::string name_;
       const ElfSectionBuilder* section_;
-      Elf32_Addr addr_;
-      Elf32_Word size_;
+      Elf_Addr addr_;
+      Elf_Word size_;
       bool is_relative_;
       uint8_t info_;
       uint8_t other_;
       // Used during Write() to temporarially hold name index in the strtab.
-      Elf32_Word name_idx_;
+      Elf_Word name_idx_;
     };
 
     // Information for the strsym for dynstr sections.
     const std::string str_name_;
-    Elf32_Word str_type_;
+    Elf_Word str_type_;
     // The symbols in the same order they will be in the symbol table.
     std::vector<ElfSymbolState> symbols_;
     ElfSectionBuilder strtab_;
@@ -212,10 +213,10 @@ class ElfWriterQuick FINAL : public ElfWriter {
     ElfBuilder(OatWriter* oat_writer,
                File* elf_file,
                InstructionSet isa,
-               Elf32_Word rodata_relative_offset,
-               Elf32_Word rodata_size,
-               Elf32_Word text_relative_offset,
-               Elf32_Word text_size,
+               Elf_Word rodata_relative_offset,
+               Elf_Word rodata_size,
+               Elf_Word text_relative_offset,
+               Elf_Word text_size,
                const bool add_symbols,
                bool debug = false)
         : oat_writer_(oat_writer),
@@ -229,7 +230,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
           dynsym_builder_(".dynsym", SHT_DYNSYM, ".dynstr", SHT_STRTAB, true),
           symtab_builder_(".symtab", SHT_SYMTAB, ".strtab", SHT_STRTAB, false),
           hash_builder_(".hash", SHT_HASH, SHF_ALLOC, &dynsym_builder_, 0,
-                        sizeof(Elf32_Word), sizeof(Elf32_Word)),
+                        sizeof(Elf_Word), sizeof(Elf_Word)),
           dynamic_builder_(".dynamic", &dynsym_builder_),
           shstrtab_builder_(".shstrtab", SHT_STRTAB, 0, NULL, 0, 1, 1) {
       SetupEhdr();
@@ -239,6 +240,7 @@ class ElfWriterQuick FINAL : public ElfWriter {
     }
     ~ElfBuilder() {}
 
+    bool Init();
     bool Write();
 
     // Adds the given raw section to the builder. This will copy it. The caller
@@ -255,7 +257,28 @@ class ElfWriterQuick FINAL : public ElfWriter {
 
     bool fatal_error_ = false;
 
-    Elf32_Ehdr elf_header_;
+    // What phdr is.
+    static const uint32_t PHDR_OFFSET = sizeof(Elf_Ehdr);
+    enum : uint8_t {
+      PH_PHDR     = 0,
+      PH_LOAD_R__ = 1,
+      PH_LOAD_R_X = 2,
+      PH_LOAD_RW_ = 3,
+      PH_DYNAMIC  = 4,
+      PH_NUM      = 5,
+    };
+    static const uint32_t PHDR_SIZE = sizeof(Elf_Phdr) * PH_NUM;
+    Elf_Phdr program_headers_[PH_NUM];
+
+    Elf_Ehdr elf_header_;
+
+    Elf_Shdr null_hdr_;
+    std::string shstrtab_;
+    uint32_t section_index_;
+    std::string dynstr_;
+    uint32_t dynstr_soname_offset_;
+    std::vector<Elf_Shdr*> section_ptrs_;
+    std::vector<Elf_Word> hash_;
 
    public:
     ElfOatSectionBuilder text_builder_;
@@ -292,14 +315,14 @@ class ElfWriterQuick FINAL : public ElfWriter {
     void SetupRequiredSymbols();
     void AssignSectionStr(ElfSectionBuilder *builder, std::string* strtab);
     struct ElfFilePiece {
-      ElfFilePiece(const std::string& name, Elf32_Word offset, const void* data, Elf32_Word size)
+      ElfFilePiece(const std::string& name, Elf_Word offset, const void* data, Elf_Word size)
           : dbg_name_(name), offset_(offset), data_(data), size_(size) {}
       ~ElfFilePiece() {}
 
       const std::string& dbg_name_;
-      Elf32_Word offset_;
+      Elf_Word offset_;
       const void *data_;
-      Elf32_Word size_;
+      Elf_Word size_;
       static bool Compare(ElfFilePiece a, ElfFilePiece b) {
         return a.offset_ < b.offset_;
       }
@@ -318,11 +341,18 @@ class ElfWriterQuick FINAL : public ElfWriter {
    * @param dbg_str Debug strings.
    */
   void FillInCFIInformation(OatWriter* oat_writer, std::vector<uint8_t>* dbg_info,
-                            std::vector<uint8_t>* dbg_abbrev, std::vector<uint8_t>* dbg_str);
+                            std::vector<uint8_t>* dbg_abbrev, std::vector<uint8_t>* dbg_str,
+                            std::vector<uint8_t>* dbg_line, uint32_t text_section_offset);
 
   DISALLOW_IMPLICIT_CONSTRUCTORS(ElfWriterQuick);
 };
 
+// Explicitly instantiated in elf_writer_quick.cc
+typedef ElfWriterQuick<Elf32_Word, Elf32_Sword, Elf32_Addr, Elf32_Dyn,
+                       Elf32_Sym, Elf32_Ehdr, Elf32_Phdr, Elf32_Shdr> ElfWriterQuick32;
+typedef ElfWriterQuick<Elf64_Word, Elf64_Sword, Elf64_Addr, Elf64_Dyn,
+                       Elf64_Sym, Elf64_Ehdr, Elf64_Phdr, Elf64_Shdr> ElfWriterQuick64;
+
 }  // namespace art
 
 #endif  // ART_COMPILER_ELF_WRITER_QUICK_H_
index 3d119bb..f2a16e5 100644 (file)
@@ -21,6 +21,7 @@
 #include <vector>
 
 #include "base/unix_file/fd_file.h"
+#include "class_linker.h"
 #include "common_compiler_test.h"
 #include "elf_fixup.h"
 #include "gc/space/image_space.h"
index ba7e13f..9c9cdf2 100644 (file)
@@ -232,7 +232,7 @@ bool ImageWriter::AllocMemory() {
   size_t length = RoundUp(Runtime::Current()->GetHeap()->GetTotalMemory(), kPageSize);
   std::string error_msg;
   image_.reset(MemMap::MapAnonymous("image writer image", NULL, length, PROT_READ | PROT_WRITE,
-                                    true, &error_msg));
+                                    false, &error_msg));
   if (UNLIKELY(image_.get() == nullptr)) {
     LOG(ERROR) << "Failed to allocate memory for image file generation: " << error_msg;
     return false;
index 4626f38..a21004c 100644 (file)
@@ -223,13 +223,9 @@ void JniCompilerTest::CompileAndRunIntMethodThroughStubImpl() {
   SetUpForTest(false, "bar", "(I)I", nullptr);
   // calling through stub will link with &Java_MyClassNatives_bar
 
-  ScopedObjectAccess soa(Thread::Current());
   std::string reason;
-  StackHandleScope<1> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
-  ASSERT_TRUE(
-      Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
+  ASSERT_TRUE(Runtime::Current()->GetJavaVM()->LoadNativeLibrary(env_, "", class_loader_, &reason))
+      << reason;
 
   jint result = env_->CallNonvirtualIntMethod(jobj_, jklass_, jmethod_, 24);
   EXPECT_EQ(25, result);
@@ -242,13 +238,9 @@ void JniCompilerTest::CompileAndRunStaticIntMethodThroughStubImpl() {
   SetUpForTest(true, "sbar", "(I)I", nullptr);
   // calling through stub will link with &Java_MyClassNatives_sbar
 
-  ScopedObjectAccess soa(Thread::Current());
   std::string reason;
-  StackHandleScope<1> hs(soa.Self());
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(soa.Decode<mirror::ClassLoader*>(class_loader_)));
-  ASSERT_TRUE(
-      Runtime::Current()->GetJavaVM()->LoadNativeLibrary("", class_loader, &reason)) << reason;
+  ASSERT_TRUE(Runtime::Current()->GetJavaVM()->LoadNativeLibrary(env_, "", class_loader_, &reason))
+      << reason;
 
   jint result = env_->CallStaticIntMethod(jklass_, jmethod_, 42);
   EXPECT_EQ(43, result);
@@ -976,10 +968,10 @@ void JniCompilerTest::UpcallReturnTypeChecking_InstanceImpl() {
   check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()");
 
   // Here, we just call the method incorrectly; we should catch that too.
-  env_->CallVoidMethod(jobj_, jmethod_);
+  env_->CallObjectMethod(jobj_, jmethod_);
   check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass()");
-  env_->CallStaticVoidMethod(jklass_, jmethod_);
-  check_jni_abort_catcher.Check("calling non-static method java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass() with CallStaticVoidMethodV");
+  env_->CallStaticObjectMethod(jklass_, jmethod_);
+  check_jni_abort_catcher.Check("calling non-static method java.lang.Class MyClassNatives.instanceMethodThatShouldReturnClass() with CallStaticObjectMethodV");
 }
 
 JNI_TEST(UpcallReturnTypeChecking_Instance)
@@ -996,10 +988,10 @@ void JniCompilerTest::UpcallReturnTypeChecking_StaticImpl() {
   check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()");
 
   // Here, we just call the method incorrectly; we should catch that too.
-  env_->CallStaticVoidMethod(jklass_, jmethod_);
+  env_->CallStaticObjectMethod(jklass_, jmethod_);
   check_jni_abort_catcher.Check("attempt to return an instance of java.lang.String from java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass()");
-  env_->CallVoidMethod(jobj_, jmethod_);
-  check_jni_abort_catcher.Check("calling static method java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass() with CallVoidMethodV");
+  env_->CallObjectMethod(jobj_, jmethod_);
+  check_jni_abort_catcher.Check("calling static method java.lang.Class MyClassNatives.staticMethodThatShouldReturnClass() with CallObjectMethodV");
 }
 
 JNI_TEST(UpcallReturnTypeChecking_Static)
index c38cfaf..78a228b 100644 (file)
@@ -14,6 +14,8 @@
  * limitations under the License.
  */
 
+#include "jni_compiler.h"
+
 #include <algorithm>
 #include <memory>
 #include <vector>
@@ -27,7 +29,7 @@
 #include "dex_file-inl.h"
 #include "driver/compiler_driver.h"
 #include "entrypoints/quick/quick_entrypoints.h"
-#include "jni_internal.h"
+#include "jni_env_ext.h"
 #include "mirror/art_method.h"
 #include "utils/assembler.h"
 #include "utils/managed_register.h"
@@ -90,6 +92,7 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
 
   // Assembler that holds generated instructions
   std::unique_ptr<Assembler> jni_asm(Assembler::Create(instruction_set));
+  jni_asm->InitializeFrameDescriptionEntry();
 
   // Offsets into data structures
   // TODO: if cross compiling these offsets are for the host not the target
@@ -432,12 +435,14 @@ CompiledMethod* ArtJniCompileMethodInternal(CompilerDriver* driver,
   std::vector<uint8_t> managed_code(cs);
   MemoryRegion code(&managed_code[0], managed_code.size());
   __ FinalizeInstructions(code);
+  jni_asm->FinalizeFrameDescriptionEntry();
   return new CompiledMethod(driver,
                             instruction_set,
                             managed_code,
                             frame_size,
                             main_jni_conv->CoreSpillMask(),
-                            main_jni_conv->FpSpillMask());
+                            main_jni_conv->FpSpillMask(),
+                            jni_asm->GetFrameDescriptionEntry());
 }
 
 // Copy a single parameter from the managed to the JNI calling convention
@@ -543,10 +548,9 @@ static void SetNativeParameter(Assembler* jni_asm,
   }
 }
 
-}  // namespace art
-
-extern "C" art::CompiledMethod* ArtQuickJniCompileMethod(art::CompilerDriver* compiler,
-                                                         uint32_t access_flags, uint32_t method_idx,
-                                                         const art::DexFile& dex_file) {
+CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags,
+                                         uint32_t method_idx, const DexFile& dex_file) {
   return ArtJniCompileMethodInternal(compiler, access_flags, method_idx, dex_file);
 }
+
+}  // namespace art
diff --git a/compiler/jni/quick/jni_compiler.h b/compiler/jni/quick/jni_compiler.h
new file mode 100644 (file)
index 0000000..46277f1
--- /dev/null
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
+#define ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
+
+#include "dex_file.h"
+
+namespace art {
+
+class CompilerDriver;
+class CompiledMethod;
+
+CompiledMethod* ArtQuickJniCompileMethod(CompilerDriver* compiler, uint32_t access_flags,
+                                         uint32_t method_idx, const DexFile& dex_file);
+
+}  // namespace art
+
+#endif  // ART_COMPILER_JNI_QUICK_JNI_COMPILER_H_
index 5990e8c..3aeecad 100644 (file)
@@ -141,7 +141,7 @@ CompileDexMethod(DexCompilationUnit* dex_compilation_unit, InvokeType invoke_typ
   cunit->SetDexCompilationUnit(dex_compilation_unit);
   cunit->SetCompilerDriver(compiler_driver_);
   // TODO: consolidate ArtCompileMethods
-  CompileOneMethod(*compiler_driver_,
+  CompileOneMethod(compiler_driver_,
                    compiler_driver_->GetCompiler(),
                    dex_compilation_unit->GetCodeItem(),
                    dex_compilation_unit->GetAccessFlags(),
@@ -172,68 +172,62 @@ CompileNativeMethod(DexCompilationUnit* dex_compilation_unit) {
 }
 
 
-}  // namespace llvm
-}  // namespace art
-
-static art::llvm::CompilerLLVM* ContextOf(art::CompilerDriver* driver) {
+static CompilerLLVM* ContextOf(art::CompilerDriver* driver) {
   void *compiler_context = driver->GetCompilerContext();
   CHECK(compiler_context != NULL);
-  return reinterpret_cast<art::llvm::CompilerLLVM*>(compiler_context);
+  return reinterpret_cast<CompilerLLVM*>(compiler_context);
 }
 
-static art::llvm::CompilerLLVM* ContextOf(const art::CompilerDriver& driver) {
+static CompilerLLVM* ContextOf(const art::CompilerDriver& driver) {
   void *compiler_context = driver.GetCompilerContext();
   CHECK(compiler_context != NULL);
-  return reinterpret_cast<art::llvm::CompilerLLVM*>(compiler_context);
+  return reinterpret_cast<CompilerLLVM*>(compiler_context);
 }
 
-extern "C" void ArtInitCompilerContext(art::CompilerDriver* driver) {
+void ArtInitCompilerContext(CompilerDriver* driver) {
   CHECK(driver->GetCompilerContext() == nullptr);
 
-  art::llvm::CompilerLLVM* compiler_llvm = new art::llvm::CompilerLLVM(driver,
-                                                                       driver->GetInstructionSet());
+  CompilerLLVM* compiler_llvm = new CompilerLLVM(driver, driver->GetInstructionSet());
 
   driver->SetCompilerContext(compiler_llvm);
 }
 
-extern "C" void ArtUnInitCompilerContext(art::CompilerDriver* driver) {
+void ArtUnInitCompilerContext(CompilerDriver* driver) {
   delete ContextOf(driver);
   driver->SetCompilerContext(nullptr);
 }
-extern "C" art::CompiledMethod* ArtCompileMethod(art::CompilerDriver* driver,
-                                                 const art::DexFile::CodeItem* code_item,
-                                                 uint32_t access_flags,
-                                                 art::InvokeType invoke_type,
-                                                 uint16_t class_def_idx,
-                                                 uint32_t method_idx,
-                                                 jobject class_loader,
-                                                 const art::DexFile& dex_file) {
+
+CompiledMethod* ArtCompileMethod(CompilerDriver* driver, const DexFile::CodeItem* code_item,
+                                 uint32_t access_flags, InvokeType invoke_type,
+                                 uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
+                                 const DexFile& dex_file) {
   UNUSED(class_def_idx);  // TODO: this is used with Compiler::RequiresConstructorBarrier.
-  art::ClassLinker *class_linker = art::Runtime::Current()->GetClassLinker();
+  ClassLinker *class_linker = Runtime::Current()->GetClassLinker();
 
-  art::DexCompilationUnit dex_compilation_unit(
-    NULL, class_loader, class_linker, dex_file, code_item,
-    class_def_idx, method_idx, access_flags, driver->GetVerifiedMethod(&dex_file, method_idx));
-  art::llvm::CompilerLLVM* compiler_llvm = ContextOf(driver);
-  art::CompiledMethod* result = compiler_llvm->CompileDexMethod(&dex_compilation_unit, invoke_type);
+  DexCompilationUnit dex_compilation_unit(nullptr, class_loader, class_linker, dex_file, code_item,
+                                          class_def_idx, method_idx, access_flags,
+                                          driver->GetVerifiedMethod(&dex_file, method_idx));
+  CompilerLLVM* compiler_llvm = ContextOf(driver);
+  CompiledMethod* result = compiler_llvm->CompileDexMethod(&dex_compilation_unit, invoke_type);
   return result;
 }
 
-extern "C" art::CompiledMethod* ArtLLVMJniCompileMethod(art::CompilerDriver* driver,
-                                                        uint32_t access_flags, uint32_t method_idx,
-                                                        const art::DexFile& dex_file) {
-  art::ClassLinker *class_linker = art::Runtime::Current()->GetClassLinker();
+CompiledMethod* ArtLLVMJniCompileMethod(CompilerDriver* driver, uint32_t access_flags,
+                                        uint32_t method_idx, const DexFile& dex_file) {
+  ClassLinker *class_linker = Runtime::Current()->GetClassLinker();
 
-  art::DexCompilationUnit dex_compilation_unit(
-      nullptr, nullptr, class_linker, dex_file, nullptr,
-      0, method_idx, access_flags, nullptr);
+  DexCompilationUnit dex_compilation_unit(nullptr, nullptr, class_linker, dex_file, nullptr,
+                                          0, method_idx, access_flags, nullptr);
 
-  art::llvm::CompilerLLVM* compiler_llvm = ContextOf(driver);
-  art::CompiledMethod* result = compiler_llvm->CompileNativeMethod(&dex_compilation_unit);
+  CompilerLLVM* compiler_llvm = ContextOf(driver);
+  CompiledMethod* result = compiler_llvm->CompileNativeMethod(&dex_compilation_unit);
   return result;
 }
 
-extern "C" void compilerLLVMSetBitcodeFileName(const art::CompilerDriver& driver,
-                                               const std::string& filename) {
+void compilerLLVMSetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
   ContextOf(driver)->SetBitcodeFileName(filename);
 }
+
+}  // namespace llvm
+}  // namespace art
+
index cc74deb..7d29198 100644 (file)
@@ -95,6 +95,19 @@ class CompilerLLVM {
   DISALLOW_COPY_AND_ASSIGN(CompilerLLVM);
 };
 
+void ArtInitCompilerContext(CompilerDriver* driver);
+
+void ArtUnInitCompilerContext(CompilerDriver* driver);
+
+CompiledMethod* ArtCompileMethod(CompilerDriver* driver, const DexFile::CodeItem* code_item,
+                                 uint32_t access_flags, InvokeType invoke_type,
+                                 uint16_t class_def_idx, uint32_t method_idx, jobject class_loader,
+                                 const DexFile& dex_file);
+
+CompiledMethod* ArtLLVMJniCompileMethod(CompilerDriver* driver, uint32_t access_flags,
+                                        uint32_t method_idx, const DexFile& dex_file);
+
+void compilerLLVMSetBitcodeFileName(const CompilerDriver& driver, const std::string& filename);
 
 }  // namespace llvm
 }  // namespace art
diff --git a/compiler/llvm/llvm_compiler.cc b/compiler/llvm/llvm_compiler.cc
new file mode 100644 (file)
index 0000000..55af614
--- /dev/null
@@ -0,0 +1,161 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "llvm_compiler.h"
+
+#ifdef ART_USE_PORTABLE_COMPILER
+#include "compiler.h"
+#include "compiler_llvm.h"
+#include "dex/portable/mir_to_gbc.h"
+#include "dex_file.h"
+#include "elf_writer_mclinker.h"
+#include "mirror/art_method-inl.h"
+#endif
+
+namespace art {
+
+#ifdef ART_USE_PORTABLE_COMPILER
+
+namespace llvm {
+
+// Thread-local storage compiler worker threads
+class LLVMCompilerTls : public CompilerTls {
+  public:
+    LLVMCompilerTls() : llvm_info_(nullptr) {}
+    ~LLVMCompilerTls() {}
+
+    void* GetLLVMInfo() { return llvm_info_; }
+
+    void SetLLVMInfo(void* llvm_info) { llvm_info_ = llvm_info; }
+
+  private:
+    void* llvm_info_;
+};
+
+
+
+class LLVMCompiler FINAL : public Compiler {
+ public:
+  explicit LLVMCompiler(CompilerDriver* driver) : Compiler(driver, 1000) {}
+
+  CompilerTls* CreateNewCompilerTls() {
+    return new LLVMCompilerTls();
+  }
+
+  void Init() const OVERRIDE {
+    ArtInitCompilerContext(GetCompilerDriver());
+  }
+
+  void UnInit() const OVERRIDE {
+    ArtUnInitCompilerContext(GetCompilerDriver());
+  }
+
+  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
+      OVERRIDE {
+    return true;
+  }
+
+  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+                          uint32_t access_flags,
+                          InvokeType invoke_type,
+                          uint16_t class_def_idx,
+                          uint32_t method_idx,
+                          jobject class_loader,
+                          const DexFile& dex_file) const OVERRIDE {
+    CompiledMethod* method = TryCompileWithSeaIR(code_item,
+                                                 access_flags,
+                                                 invoke_type,
+                                                 class_def_idx,
+                                                 method_idx,
+                                                 class_loader,
+                                                 dex_file);
+    if (method != nullptr) {
+      return method;
+    }
+
+    return ArtCompileMethod(GetCompilerDriver(),
+                            code_item,
+                            access_flags,
+                            invoke_type,
+                            class_def_idx,
+                            method_idx,
+                            class_loader,
+                            dex_file);
+  }
+
+  CompiledMethod* JniCompile(uint32_t access_flags,
+                             uint32_t method_idx,
+                             const DexFile& dex_file) const OVERRIDE {
+    return ArtLLVMJniCompileMethod(GetCompilerDriver(), access_flags, method_idx, dex_file);
+  }
+
+  uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const {
+    return reinterpret_cast<uintptr_t>(method->GetEntryPointFromPortableCompiledCode());
+  }
+
+  bool WriteElf(art::File* file,
+                OatWriter* oat_writer,
+                const std::vector<const art::DexFile*>& dex_files,
+                const std::string& android_root,
+                bool is_host, const CompilerDriver& driver) const
+      OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return art::ElfWriterMclinker::Create(
+        file, oat_writer, dex_files, android_root, is_host, driver);
+  }
+
+  Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+    return PortableCodeGenerator(
+        cu, cu->mir_graph.get(), &cu->arena,
+        reinterpret_cast<art::llvm::LlvmCompilationUnit*>(compilation_unit));
+  }
+
+  void InitCompilationUnit(CompilationUnit& cu) const {
+      // Fused long branches not currently useful in bitcode.
+    cu.disable_opt |=
+        (1 << kBranchFusing) |
+        (1 << kSuppressExceptionEdges);
+  }
+
+  bool IsPortable() const OVERRIDE {
+    return true;
+  }
+
+  void SetBitcodeFileName(const CompilerDriver& driver, const std::string& filename) {
+    typedef void (*SetBitcodeFileNameFn)(const CompilerDriver&, const std::string&);
+
+    SetBitcodeFileNameFn set_bitcode_file_name =
+      reinterpret_cast<SetBitcodeFileNameFn>(compilerLLVMSetBitcodeFileName);
+
+    set_bitcode_file_name(driver, filename);
+  }
+
+ private:
+  DISALLOW_COPY_AND_ASSIGN(LLVMCompiler);
+};
+
+}  // namespace llvm
+#endif
+
+Compiler* CreateLLVMCompiler(CompilerDriver* driver) {
+#ifdef ART_USE_PORTABLE_COMPILER
+      return new llvm::LLVMCompiler(driver);
+#else
+      return nullptr;
+#endif
+}
+
+}  // namespace art
diff --git a/compiler/llvm/llvm_compiler.h b/compiler/llvm/llvm_compiler.h
new file mode 100644 (file)
index 0000000..da6d0e9
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2012 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_LLVM_LLVM_COMPILER_H_
+#define ART_COMPILER_LLVM_LLVM_COMPILER_H_
+
+namespace art {
+
+class Compiler;
+class CompilerDriver;
+
+Compiler* CreateLLVMCompiler(CompilerDriver* driver);
+
+}
+
+#endif  // ART_COMPILER_LLVM_LLVM_COMPILER_H_
index 11d1728..e858a7b 100644 (file)
@@ -14,6 +14,7 @@
  * limitations under the License.
  */
 
+#include "class_linker.h"
 #include "common_compiler_test.h"
 #include "compiler.h"
 #include "dex/verification_results.h"
@@ -187,7 +188,7 @@ TEST_F(OatTest, OatHeaderSizeCheck) {
   EXPECT_EQ(84U, sizeof(OatHeader));
   EXPECT_EQ(8U, sizeof(OatMethodOffsets));
   EXPECT_EQ(24U, sizeof(OatQuickMethodHeader));
-  EXPECT_EQ(79 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
+  EXPECT_EQ(91 * GetInstructionSetPointerSize(kRuntimeISA), sizeof(QuickEntryPoints));
 }
 
 TEST_F(OatTest, OatHeaderIsValid) {
index 9da59ab..680ce0a 100644 (file)
@@ -18,6 +18,7 @@
 
 #include <zlib.h>
 
+#include "base/allocator.h"
 #include "base/bit_vector.h"
 #include "base/stl_util.h"
 #include "base/unix_file/fd_file.h"
@@ -357,7 +358,6 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
         uint32_t thumb_offset = compiled_method->CodeDelta();
         quick_code_offset = offset_ + sizeof(OatQuickMethodHeader) + thumb_offset;
 
-        bool force_debug_capture = false;
         bool deduped = false;
 
         // Deduplicate code arrays.
@@ -400,47 +400,24 @@ class OatWriter::InitCodeMethodVisitor : public OatDexMethodVisitor {
           offset_ += code_size;
         }
 
-        uint32_t quick_code_start = quick_code_offset - writer_->oat_header_->GetExecutableOffset();
-        std::vector<uint8_t>* cfi_info = writer_->compiler_driver_->GetCallFrameInformation();
-        if (cfi_info != nullptr) {
-          // Copy in the FDE, if present
-          const std::vector<uint8_t>* fde = compiled_method->GetCFIInfo();
-          if (fde != nullptr) {
-            // Copy the information into cfi_info and then fix the address in the new copy.
-            int cur_offset = cfi_info->size();
-            cfi_info->insert(cfi_info->end(), fde->begin(), fde->end());
-
-            // Set the 'CIE_pointer' field to cur_offset+4.
-            uint32_t CIE_pointer = cur_offset + 4;
-            uint32_t offset_to_update = cur_offset + sizeof(uint32_t);
-            (*cfi_info)[offset_to_update+0] = CIE_pointer;
-            (*cfi_info)[offset_to_update+1] = CIE_pointer >> 8;
-            (*cfi_info)[offset_to_update+2] = CIE_pointer >> 16;
-            (*cfi_info)[offset_to_update+3] = CIE_pointer >> 24;
-
-            // Set the 'initial_location' field to address the start of the method.
-            offset_to_update = cur_offset + 2*sizeof(uint32_t);
-            (*cfi_info)[offset_to_update+0] = quick_code_start;
-            (*cfi_info)[offset_to_update+1] = quick_code_start >> 8;
-            (*cfi_info)[offset_to_update+2] = quick_code_start >> 16;
-            (*cfi_info)[offset_to_update+3] = quick_code_start >> 24;
-            force_debug_capture = true;
-          }
-        }
-
+        if (writer_->compiler_driver_->GetCompilerOptions().GetIncludeDebugSymbols()) {
+          // Record debug information for this function if we are doing that.
 
-        if (writer_->compiler_driver_->DidIncludeDebugSymbols() || force_debug_capture) {
-          // Record debug information for this function if we are doing that or
-          // we have CFI and so need it.
           std::string name = PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
           if (deduped) {
-            // TODO We should place the DEDUPED tag on the first instance of a
-            // deduplicated symbol so that it will show up in a debuggerd crash
-            // report.
+            // TODO We should place the DEDUPED tag on the first instance of a deduplicated symbol
+            // so that it will show up in a debuggerd crash report.
             name += " [ DEDUPED ]";
           }
-          writer_->method_info_.push_back(DebugInfo(name, quick_code_start,
-                                                    quick_code_start + code_size));
+
+          const uint32_t quick_code_start = quick_code_offset -
+              writer_->oat_header_->GetExecutableOffset();
+          const DexFile::CodeItem *code_item = it.GetMethodCodeItem();
+          writer_->method_info_.push_back(DebugInfo(name,
+                dex_file_->GetSourceFile(dex_file_->GetClassDef(class_def_index_)),
+                quick_code_start, quick_code_start + code_size,
+                code_item == nullptr ? nullptr : dex_file_->GetDebugInfoStream(code_item),
+                compiled_method));
         }
       }
 
@@ -550,7 +527,14 @@ class OatWriter::InitImageMethodVisitor : public OatDexMethodVisitor {
                                                       NullHandle<mirror::ClassLoader>(),
                                                       NullHandle<mirror::ArtMethod>(),
                                                       invoke_type);
-    CHECK(method != NULL) << PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
+    if (method == nullptr) {
+      LOG(ERROR) << "Unexpected failure to resolve a method: "
+                 << PrettyMethod(it.GetMemberIndex(), *dex_file_, true);
+      soa.Self()->AssertPendingException();
+      mirror::Throwable* exc = soa.Self()->GetException(nullptr);
+      std::string dump = exc->Dump();
+      LOG(FATAL) << dump;
+    }
     // Portable code offsets are set by ElfWriterMclinker::FixupCompiledCodeOffset after linking.
     method->SetQuickOatCodeOffset(offsets.code_offset_);
     method->SetOatNativeGcMapOffset(offsets.gc_map_offset_);
index 945048e..11f8bff 100644 (file)
@@ -30,6 +30,7 @@
 namespace art {
 
 class BitVector;
+class CompiledMethod;
 class OutputStream;
 
 // OatHeader         variable length with count of D OatDexFiles
@@ -97,22 +98,25 @@ class OatWriter {
   ~OatWriter();
 
   struct DebugInfo {
-    DebugInfo(const std::string& method_name, uint32_t low_pc, uint32_t high_pc)
-      : method_name_(method_name), low_pc_(low_pc), high_pc_(high_pc) {
+    DebugInfo(const std::string& method_name, const char* src_file_name,
+              uint32_t low_pc, uint32_t high_pc, const uint8_t* dbgstream,
+              CompiledMethod* compiled_method)
+      : method_name_(method_name), src_file_name_(src_file_name),
+        low_pc_(low_pc), high_pc_(high_pc), dbgstream_(dbgstream),
+        compiled_method_(compiled_method) {
     }
-    std::string method_name_;
+    std::string method_name_;  // Note: this name is a pretty-printed name.
+    const char* src_file_name_;
     uint32_t    low_pc_;
     uint32_t    high_pc_;
+    const uint8_t* dbgstream_;
+    CompiledMethod* compiled_method_;
   };
 
   const std::vector<DebugInfo>& GetCFIMethodInfo() const {
     return method_info_;
   }
 
-  bool DidAddSymbols() const {
-    return compiler_driver_->DidIncludeDebugSymbols();
-  }
-
  private:
   // The DataAccess classes are helper classes that provide access to members related to
   // a given map, i.e. GC map, mapping table or vmap table. By abstracting these away
index bd8c27e..7269fff 100644 (file)
@@ -19,6 +19,7 @@
 #include "code_generator_arm.h"
 #include "code_generator_x86.h"
 #include "code_generator_x86_64.h"
+#include "compiled_method.h"
 #include "dex/verified_method.h"
 #include "driver/dex_compilation_unit.h"
 #include "gc_map_builder.h"
@@ -297,7 +298,7 @@ void CodeGenerator::BuildNativeGCMap(
   }
 }
 
-void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
+void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data, SrcMap* src_map) const {
   uint32_t pc2dex_data_size = 0u;
   uint32_t pc2dex_entries = pc_infos_.Size();
   uint32_t pc2dex_offset = 0u;
@@ -305,6 +306,10 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
   uint32_t dex2pc_data_size = 0u;
   uint32_t dex2pc_entries = 0u;
 
+  if (src_map != nullptr) {
+    src_map->reserve(pc2dex_entries);
+  }
+
   // We currently only have pc2dex entries.
   for (size_t i = 0; i < pc2dex_entries; i++) {
     struct PcInfo pc_info = pc_infos_.Get(i);
@@ -312,6 +317,9 @@ void CodeGenerator::BuildMappingTable(std::vector<uint8_t>* data) const {
     pc2dex_data_size += SignedLeb128Size(pc_info.dex_pc - pc2dex_dalvik_offset);
     pc2dex_offset = pc_info.native_pc;
     pc2dex_dalvik_offset = pc_info.dex_pc;
+    if (src_map != nullptr) {
+      src_map->push_back(SrcMapElem({pc2dex_offset, pc2dex_dalvik_offset}));
+    }
   }
 
   uint32_t total_entries = pc2dex_entries + dex2pc_entries;
index b31c3a3..12337c9 100644 (file)
@@ -32,6 +32,7 @@ static size_t constexpr kUninitializedFrameSize = 0;
 
 class CodeGenerator;
 class DexCompilationUnit;
+class SrcMap;
 
 class CodeAllocator {
  public:
@@ -126,7 +127,7 @@ class CodeGenerator : public ArenaObject {
 
   void GenerateSlowPaths();
 
-  void BuildMappingTable(std::vector<uint8_t>* vector) const;
+  void BuildMappingTable(std::vector<uint8_t>* vector, SrcMap* src_map) const;
   void BuildVMapTable(std::vector<uint8_t>* vector) const;
   void BuildNativeGCMap(
       std::vector<uint8_t>* vector, const DexCompilationUnit& dex_compilation_unit) const;
@@ -142,6 +143,7 @@ class CodeGenerator : public ArenaObject {
  protected:
   CodeGenerator(HGraph* graph, size_t number_of_registers)
       : frame_size_(kUninitializedFrameSize),
+        core_spill_mask_(0),
         graph_(graph),
         block_labels_(graph->GetArena(), 0),
         pc_infos_(graph->GetArena(), 32),
index f544d47..35b8116 100644 (file)
  */
 
 #include "code_generator_x86.h"
-#include "gc/accounting/card_table.h"
-#include "utils/assembler.h"
-#include "utils/stack_checks.h"
-#include "utils/x86/assembler_x86.h"
-#include "utils/x86/managed_register_x86.h"
 
 #include "entrypoints/quick/quick_entrypoints.h"
+#include "gc/accounting/card_table.h"
 #include "mirror/array.h"
 #include "mirror/art_method.h"
 #include "thread.h"
+#include "utils/assembler.h"
+#include "utils/stack_checks.h"
+#include "utils/x86/assembler_x86.h"
+#include "utils/x86/managed_register_x86.h"
 
 namespace art {
 
index e1807dc..c4571ca 100644 (file)
@@ -35,7 +35,7 @@ x86_64::X86_64ManagedRegister Location::AsX86_64() const {
 
 namespace x86_64 {
 
-static constexpr bool kExplicitStackOverflowCheck = true;
+static constexpr bool kExplicitStackOverflowCheck = false;
 
 // Some x86_64 instructions require a register to be available as temp.
 static constexpr Register TMP = R11;
@@ -208,25 +208,26 @@ void CodeGeneratorX86_64::GenerateFrameEntry() {
   static const int kFakeReturnRegister = 16;
   core_spill_mask_ |= (1 << kFakeReturnRegister);
 
+  bool skip_overflow_check = IsLeafMethod()
+      && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
+
+  if (!skip_overflow_check && !kExplicitStackOverflowCheck) {
+    __ testq(CpuRegister(RAX), Address(
+        CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
+    RecordPcInfo(0);
+  }
+
   // The return PC has already been pushed on the stack.
   __ subq(CpuRegister(RSP),
           Immediate(GetFrameSize() - kNumberOfPushedRegistersAtEntry * kX86_64WordSize));
 
-  bool skip_overflow_check = IsLeafMethod()
-      && !FrameNeedsStackCheck(GetFrameSize(), InstructionSet::kX86_64);
-
-  if (!skip_overflow_check) {
-    if (kExplicitStackOverflowCheck) {
-      SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
-      AddSlowPath(slow_path);
+  if (!skip_overflow_check && kExplicitStackOverflowCheck) {
+    SlowPathCode* slow_path = new (GetGraph()->GetArena()) StackOverflowCheckSlowPathX86_64();
+    AddSlowPath(slow_path);
 
-      __ gs()->cmpq(CpuRegister(RSP),
-                    Address::Absolute(Thread::StackEndOffset<kX86_64WordSize>(), true));
-      __ j(kLess, slow_path->GetEntryLabel());
-    } else {
-      __ testq(CpuRegister(RAX), Address(
-          CpuRegister(RSP), -static_cast<int32_t>(GetStackOverflowReservedBytes(kX86_64))));
-    }
+    __ gs()->cmpq(CpuRegister(RSP),
+                  Address::Absolute(Thread::StackEndOffset<kX86_64WordSize>(), true));
+    __ j(kLess, slow_path->GetEntryLabel());
   }
 
   __ movl(Address(CpuRegister(RSP), kCurrentMethodStackOffset), CpuRegister(RDI));
index cb3dd0f..bb699e4 100644 (file)
@@ -501,6 +501,7 @@ class HInstruction : public ArenaObject {
   void SetBlock(HBasicBlock* block) { block_ = block; }
   bool IsInBlock() const { return block_ != nullptr; }
   bool IsInLoop() const { return block_->IsInLoop(); }
+  bool IsLoopHeaderPhi() { return IsPhi() && block_->IsLoopHeader(); }
 
   virtual size_t InputCount() const  = 0;
   virtual HInstruction* InputAt(size_t i) const = 0;
index 8a5077b..3461276 100644 (file)
  * limitations under the License.
  */
 
+#include "optimizing_compiler.h"
+
 #include <fstream>
 #include <stdint.h>
 
 #include "builder.h"
 #include "code_generator.h"
-#include "compilers.h"
+#include "compiler.h"
 #include "driver/compiler_driver.h"
 #include "driver/dex_compilation_unit.h"
 #include "graph_visualizer.h"
@@ -65,12 +67,105 @@ static bool kIsVisualizerEnabled = false;
  */
 static const char* kStringFilter = "";
 
-OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver) : QuickCompiler(driver) {
+class OptimizingCompiler FINAL : public Compiler {
+ public:
+  explicit OptimizingCompiler(CompilerDriver* driver);
+
+  bool CanCompileMethod(uint32_t method_idx, const DexFile& dex_file, CompilationUnit* cu) const
+      OVERRIDE;
+
+  CompiledMethod* Compile(const DexFile::CodeItem* code_item,
+                          uint32_t access_flags,
+                          InvokeType invoke_type,
+                          uint16_t class_def_idx,
+                          uint32_t method_idx,
+                          jobject class_loader,
+                          const DexFile& dex_file) const OVERRIDE;
+
+  CompiledMethod* TryCompile(const DexFile::CodeItem* code_item,
+                             uint32_t access_flags,
+                             InvokeType invoke_type,
+                             uint16_t class_def_idx,
+                             uint32_t method_idx,
+                             jobject class_loader,
+                             const DexFile& dex_file) const;
+
+  // For the following methods we will use the fallback. This is a delegation pattern.
+  CompiledMethod* JniCompile(uint32_t access_flags,
+                             uint32_t method_idx,
+                             const DexFile& dex_file) const OVERRIDE;
+
+  uintptr_t GetEntryPointOf(mirror::ArtMethod* method) const OVERRIDE
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool WriteElf(art::File* file,
+                OatWriter* oat_writer,
+                const std::vector<const art::DexFile*>& dex_files,
+                const std::string& android_root,
+                bool is_host) const OVERRIDE SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  Backend* GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const OVERRIDE;
+
+  void InitCompilationUnit(CompilationUnit& cu) const OVERRIDE;
+
+  void Init() const OVERRIDE;
+
+  void UnInit() const OVERRIDE;
+
+ private:
+  std::unique_ptr<std::ostream> visualizer_output_;
+
+  // Delegate to another compiler in case the optimizing compiler cannot compile a method.
+  // Currently the fallback is the quick compiler.
+  std::unique_ptr<Compiler> delegate_;
+
+  DISALLOW_COPY_AND_ASSIGN(OptimizingCompiler);
+};
+
+OptimizingCompiler::OptimizingCompiler(CompilerDriver* driver) : Compiler(driver, 100),
+    delegate_(Create(driver, Compiler::Kind::kQuick)) {
   if (kIsVisualizerEnabled) {
     visualizer_output_.reset(new std::ofstream("art.cfg"));
   }
 }
 
+void OptimizingCompiler::Init() const {
+  delegate_->Init();
+}
+
+void OptimizingCompiler::UnInit() const {
+  delegate_->UnInit();
+}
+
+bool OptimizingCompiler::CanCompileMethod(uint32_t method_idx, const DexFile& dex_file,
+                                          CompilationUnit* cu) const {
+  return delegate_->CanCompileMethod(method_idx, dex_file, cu);
+}
+
+CompiledMethod* OptimizingCompiler::JniCompile(uint32_t access_flags,
+                                               uint32_t method_idx,
+                                               const DexFile& dex_file) const {
+  return delegate_->JniCompile(access_flags, method_idx, dex_file);
+}
+
+uintptr_t OptimizingCompiler::GetEntryPointOf(mirror::ArtMethod* method) const {
+  return delegate_->GetEntryPointOf(method);
+}
+
+bool OptimizingCompiler::WriteElf(art::File* file, OatWriter* oat_writer,
+                                  const std::vector<const art::DexFile*>& dex_files,
+                                  const std::string& android_root, bool is_host) const {
+  return delegate_->WriteElf(file, oat_writer, dex_files, android_root, is_host);
+}
+
+Backend* OptimizingCompiler::GetCodeGenerator(CompilationUnit* cu, void* compilation_unit) const {
+  return delegate_->GetCodeGenerator(cu, compilation_unit);
+}
+
+void OptimizingCompiler::InitCompilationUnit(CompilationUnit& cu) const {
+  delegate_->InitCompilationUnit(cu);
+}
+
 CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_item,
                                                uint32_t access_flags,
                                                InvokeType invoke_type,
@@ -161,7 +256,10 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
   }
 
   std::vector<uint8_t> mapping_table;
-  codegen->BuildMappingTable(&mapping_table);
+  SrcMap src_mapping_table;
+  codegen->BuildMappingTable(&mapping_table,
+          GetCompilerDriver()->GetCompilerOptions().GetIncludeDebugSymbols() ?
+               &src_mapping_table : nullptr);
   std::vector<uint8_t> vmap_table;
   codegen->BuildVMapTable(&vmap_table);
   std::vector<uint8_t> gc_map;
@@ -173,10 +271,32 @@ CompiledMethod* OptimizingCompiler::TryCompile(const DexFile::CodeItem* code_ite
                             codegen->GetFrameSize(),
                             codegen->GetCoreSpillMask(),
                             0, /* FPR spill mask, unused */
+                            &src_mapping_table,
                             mapping_table,
                             vmap_table,
                             gc_map,
                             nullptr);
 }
 
+CompiledMethod* OptimizingCompiler::Compile(const DexFile::CodeItem* code_item,
+                                            uint32_t access_flags,
+                                            InvokeType invoke_type,
+                                            uint16_t class_def_idx,
+                                            uint32_t method_idx,
+                                            jobject class_loader,
+                                            const DexFile& dex_file) const {
+  CompiledMethod* method = TryCompile(code_item, access_flags, invoke_type, class_def_idx,
+                                      method_idx, class_loader, dex_file);
+  if (method != nullptr) {
+    return method;
+  }
+
+  return delegate_->Compile(code_item, access_flags, invoke_type, class_def_idx, method_idx,
+                            class_loader, dex_file);
+}
+
+Compiler* CreateOptimizingCompiler(CompilerDriver* driver) {
+  return new OptimizingCompiler(driver);
+}
+
 }  // namespace art
diff --git a/compiler/optimizing/optimizing_compiler.h b/compiler/optimizing/optimizing_compiler.h
new file mode 100644 (file)
index 0000000..a415eca
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
+#define ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
+
+namespace art {
+
+class Compiler;
+class CompilerDriver;
+
+Compiler* CreateOptimizingCompiler(CompilerDriver* driver);
+
+}
+
+#endif  // ART_COMPILER_OPTIMIZING_OPTIMIZING_COMPILER_H_
index bd3a7d9..da13b1e 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "register_allocator.h"
 
+#include "base/bit_vector-inl.h"
 #include "code_generator.h"
 #include "ssa_liveness_analysis.h"
 
index a7283ab..bafe577 100644 (file)
@@ -22,6 +22,7 @@
 #include "optimizing_unit_test.h"
 #include "register_allocator.h"
 #include "ssa_liveness_analysis.h"
+#include "ssa_phi_elimination.h"
 #include "utils/arena_allocator.h"
 
 #include "gtest/gtest.h"
@@ -356,4 +357,38 @@ TEST(RegisterAllocatorTest, FirstRegisterUse) {
   ASSERT_EQ(new_interval->FirstRegisterUse(), last_add->GetLifetimePosition() + 1);
 }
 
+TEST(RegisterAllocatorTest, DeadPhi) {
+  /* Test for a dead loop phi taking as back-edge input a phi that also has
+   * this loop phi as input. Walking backwards in SsaDeadPhiElimination
+   * does not solve the problem because the loop phi will be visited last.
+   *
+   * Test the following snippet:
+   *  int a = 0
+   *  do {
+   *    if (true) {
+   *      a = 2;
+   *    }
+   *  } while (true);
+   */
+
+  const uint16_t data[] = TWO_REGISTERS_CODE_ITEM(
+    Instruction::CONST_4 | 0 | 0,
+    Instruction::CONST_4 | 1 << 8 | 0,
+    Instruction::IF_NE | 1 << 8 | 1 << 12, 3,
+    Instruction::CONST_4 | 2 << 12 | 0 << 8,
+    Instruction::GOTO | 0xFD00,
+    Instruction::RETURN_VOID);
+
+  ArenaPool pool;
+  ArenaAllocator allocator(&pool);
+  HGraph* graph = BuildSSAGraph(data, &allocator);
+  SsaDeadPhiElimination(graph).Run();
+  CodeGenerator* codegen = CodeGenerator::Create(&allocator, graph, kX86);
+  SsaLivenessAnalysis liveness(*graph, codegen);
+  liveness.Analyze();
+  RegisterAllocator register_allocator(&allocator, codegen, liveness);
+  register_allocator.AllocateRegisters();
+  ASSERT_TRUE(register_allocator.Validate(false));
+}
+
 }  // namespace art
index fbdc0b9..5de1ab9 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "ssa_liveness_analysis.h"
 
+#include "base/bit_vector-inl.h"
 #include "code_generator.h"
 #include "nodes.h"
 
index 13fa03f..a079954 100644 (file)
@@ -53,8 +53,9 @@ void SsaDeadPhiElimination::Run() {
     }
   }
 
-  // Remove phis that are not live. Visit in post order to ensure
-  // we only remove phis with no users (dead phis might use dead phis).
+  // Remove phis that are not live. Visit in post order so that phis
+  // that are not inputs of loop phis can be removed when they have
+  // no users left (dead phis might use dead phis).
   for (HPostOrderIterator it(*graph_); !it.Done(); it.Advance()) {
     HBasicBlock* block = it.Current();
     HInstruction* current = block->GetFirstPhi();
@@ -62,6 +63,17 @@ void SsaDeadPhiElimination::Run() {
     while (current != nullptr) {
       next = current->GetNext();
       if (current->AsPhi()->IsDead()) {
+        if (current->HasUses()) {
+          for (HUseIterator<HInstruction> it(current->GetUses()); !it.Done(); it.Advance()) {
+            HUseListNode<HInstruction>* user_node = it.Current();
+            HInstruction* user = user_node->GetUser();
+            DCHECK(user->IsLoopHeaderPhi());
+            DCHECK(user->AsPhi()->IsDead());
+            // Just put itself as an input. The phi will be removed in this loop anyway.
+            user->SetRawInputAt(user_node->GetIndex(), user);
+            current->RemoveUser(user, user_node->GetIndex());
+          }
+        }
         block->RemovePhi(current->AsPhi());
       }
       current = next;
index d5225c1..6da375a 100644 (file)
@@ -16,7 +16,7 @@
 
 #include "trampoline_compiler.h"
 
-#include "jni_internal.h"
+#include "jni_env_ext.h"
 #include "utils/arm/assembler_arm.h"
 #include "utils/arm64/assembler_arm64.h"
 #include "utils/mips/assembler_mips.h"
index 39f7d18..de35f3d 100644 (file)
 
 #include "arena_allocator.h"
 #include "arena_bit_vector.h"
+#include "base/allocator.h"
 
 namespace art {
 
 template <typename ArenaAlloc>
-class ArenaBitVectorAllocator : public Allocator {
+class ArenaBitVectorAllocator FINAL : public Allocator {
  public:
   explicit ArenaBitVectorAllocator(ArenaAlloc* arena) : arena_(arena) {}
   ~ArenaBitVectorAllocator() {}
@@ -37,7 +38,7 @@ class ArenaBitVectorAllocator : public Allocator {
   static void operator delete(void* p) {}  // Nop.
 
  private:
-  ArenaAlloc* arena_;
+  ArenaAlloc* const arena_;
   DISALLOW_COPY_AND_ASSIGN(ArenaBitVectorAllocator);
 };
 
index 485ed76..c92658f 100644 (file)
@@ -51,23 +51,23 @@ std::ostream& operator<<(std::ostream& os, const OatBitMapKind& kind);
  * A BitVector implementation that uses Arena allocation.
  */
 class ArenaBitVector : public BitVector {
 public:
-    ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
-                   OatBitMapKind kind = kBitMapMisc);
-    ArenaBitVector(ScopedArenaAllocator* arena, uint32_t start_bits, bool expandable,
-                   OatBitMapKind kind = kBitMapMisc);
-    ~ArenaBitVector() {}
+ public:
+  ArenaBitVector(ArenaAllocator* arena, uint32_t start_bits, bool expandable,
+                 OatBitMapKind kind = kBitMapMisc);
+  ArenaBitVector(ScopedArenaAllocator* arena, uint32_t start_bits, bool expandable,
+                 OatBitMapKind kind = kBitMapMisc);
+  ~ArenaBitVector() {}
 
   static void* operator new(size_t size, ArenaAllocator* arena) {
-     return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
+    return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
   }
   static void* operator new(size_t size, ScopedArenaAllocator* arena) {
-     return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
+    return arena->Alloc(sizeof(ArenaBitVector), kArenaAllocGrowableBitMap);
   }
   static void operator delete(void* p) {}  // Nop.
 
 private:
-    const OatBitMapKind kind_;      // for memory use tuning. TODO: currently unused.
+ private:
+  const OatBitMapKind kind_;      // for memory use tuning. TODO: currently unused.
 };
 
 
index 3f90f21..3edf59b 100644 (file)
@@ -21,6 +21,8 @@
 #include "thread.h"
 #include "utils.h"
 
+using namespace vixl;  // NOLINT(build/namespaces)
+
 namespace art {
 namespace arm64 {
 
@@ -75,7 +77,7 @@ void Arm64Assembler::AddConstant(Register rd, int32_t value, Condition cond) {
 
 void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
                                  Condition cond) {
-  if ((cond == AL) || (cond == NV)) {
+  if ((cond == al) || (cond == nv)) {
     // VIXL macro-assembler handles all variants.
     ___ Add(reg_x(rd), reg_x(rn), value);
   } else {
@@ -85,7 +87,7 @@ void Arm64Assembler::AddConstant(Register rd, Register rn, int32_t value,
     temps.Exclude(reg_x(rd), reg_x(rn));
     vixl::Register temp = temps.AcquireX();
     ___ Add(temp, reg_x(rn), value);
-    ___ Csel(reg_x(rd), temp, reg_x(rd), COND_OP(cond));
+    ___ Csel(reg_x(rd), temp, reg_x(rd), cond);
   }
 }
 
@@ -195,7 +197,7 @@ void Arm64Assembler::StoreSpanning(FrameOffset dest_off, ManagedRegister m_sourc
 // Load routines.
 void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
                                    Condition cond) {
-  if ((cond == AL) || (cond == NV)) {
+  if ((cond == al) || (cond == nv)) {
     ___ Mov(reg_x(dest), value);
   } else {
     // temp = value
@@ -205,9 +207,9 @@ void Arm64Assembler::LoadImmediate(Register dest, int32_t value,
       temps.Exclude(reg_x(dest));
       vixl::Register temp = temps.AcquireX();
       ___ Mov(temp, value);
-      ___ Csel(reg_x(dest), temp, reg_x(dest), COND_OP(cond));
+      ___ Csel(reg_x(dest), temp, reg_x(dest), cond);
     } else {
-      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), COND_OP(cond));
+      ___ Csel(reg_x(dest), reg_x(XZR), reg_x(dest), cond);
     }
   }
 }
@@ -557,11 +559,11 @@ void Arm64Assembler::CreateHandleScopeEntry(ManagedRegister m_out_reg, FrameOffs
     }
     ___ Cmp(reg_w(in_reg.AsOverlappingCoreRegisterLow()), 0);
     if (!out_reg.Equals(in_reg)) {
-      LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+      LoadImmediate(out_reg.AsCoreRegister(), 0, eq);
     }
-    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), NE);
+    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), ne);
   } else {
-    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), AL);
+    AddConstant(out_reg.AsCoreRegister(), SP, handle_scope_offs.Int32Value(), al);
   }
 }
 
@@ -577,9 +579,9 @@ void Arm64Assembler::CreateHandleScopeEntry(FrameOffset out_off, FrameOffset han
     // e.g. scratch = (scratch == 0) ? 0 : (SP+handle_scope_offset)
     ___ Cmp(reg_w(scratch.AsOverlappingCoreRegisterLow()), 0);
     // Move this logic in add constants with flags.
-    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), NE);
+    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), ne);
   } else {
-    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), AL);
+    AddConstant(scratch.AsCoreRegister(), SP, handle_scope_offset.Int32Value(), al);
   }
   StoreToOffset(scratch.AsCoreRegister(), SP, out_off.Int32Value());
 }
@@ -593,7 +595,7 @@ void Arm64Assembler::LoadReferenceFromHandleScope(ManagedRegister m_out_reg,
   vixl::Label exit;
   if (!out_reg.Equals(in_reg)) {
     // FIXME: Who sets the flags here?
-    LoadImmediate(out_reg.AsCoreRegister(), 0, EQ);
+    LoadImmediate(out_reg.AsCoreRegister(), 0, eq);
   }
   ___ Cbz(reg_x(in_reg.AsCoreRegister()), &exit);
   LoadFromOffset(out_reg.AsCoreRegister(), in_reg.AsCoreRegister(), 0);
index ab4999a..788950b 100644 (file)
@@ -34,28 +34,6 @@ namespace art {
 namespace arm64 {
 
 #define MEM_OP(x...)      vixl::MemOperand(x)
-#define COND_OP(x)        static_cast<vixl::Condition>(x)
-
-enum Condition {
-  kNoCondition = -1,
-  EQ = 0,
-  NE = 1,
-  HS = 2,
-  LO = 3,
-  MI = 4,
-  PL = 5,
-  VS = 6,
-  VC = 7,
-  HI = 8,
-  LS = 9,
-  GE = 10,
-  LT = 11,
-  GT = 12,
-  LE = 13,
-  AL = 14,    // Always.
-  NV = 15,    // Behaves as always/al.
-  kMaxCondition = 16,
-};
 
 enum LoadOperandType {
   kLoadSignedByte,
@@ -225,15 +203,15 @@ class Arm64Assembler FINAL : public Assembler {
   void StoreSToOffset(SRegister source, Register base, int32_t offset);
   void StoreDToOffset(DRegister source, Register base, int32_t offset);
 
-  void LoadImmediate(Register dest, int32_t value, Condition cond = AL);
+  void LoadImmediate(Register dest, int32_t value, vixl::Condition cond = vixl::al);
   void Load(Arm64ManagedRegister dst, Register src, int32_t src_offset, size_t size);
   void LoadWFromOffset(LoadOperandType type, WRegister dest,
                       Register base, int32_t offset);
   void LoadFromOffset(Register dest, Register base, int32_t offset);
   void LoadSFromOffset(SRegister dest, Register base, int32_t offset);
   void LoadDFromOffset(DRegister dest, Register base, int32_t offset);
-  void AddConstant(Register rd, int32_t value, Condition cond = AL);
-  void AddConstant(Register rd, Register rn, int32_t value, Condition cond = AL);
+  void AddConstant(Register rd, int32_t value, vixl::Condition cond = vixl::al);
+  void AddConstant(Register rd, Register rn, int32_t value, vixl::Condition cond = vixl::al);
 
   // Vixl buffer.
   byte* vixl_buf_;
index f72f5e5..4addfa0 100644 (file)
@@ -499,6 +499,10 @@ class Assembler {
   // and branch to a ExceptionSlowPath if it is.
   virtual void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) = 0;
 
+  virtual void InitializeFrameDescriptionEntry() {}
+  virtual void FinalizeFrameDescriptionEntry() {}
+  virtual std::vector<uint8_t>* GetFrameDescriptionEntry() { return nullptr; }
+
   virtual ~Assembler() {}
 
  protected:
diff --git a/compiler/utils/dwarf_cfi.cc b/compiler/utils/dwarf_cfi.cc
new file mode 100644 (file)
index 0000000..83e5f5a
--- /dev/null
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "leb128.h"
+#include "utils.h"
+
+#include "dwarf_cfi.h"
+
+namespace art {
+
+void DW_CFA_advance_loc(std::vector<uint8_t>* buf, uint32_t increment) {
+  if (increment < 64) {
+    // Encoding in opcode.
+    buf->push_back(0x1 << 6 | increment);
+  } else if (increment < 256) {
+    // Single byte delta.
+    buf->push_back(0x02);
+    buf->push_back(increment);
+  } else if (increment < 256 * 256) {
+    // Two byte delta.
+    buf->push_back(0x03);
+    buf->push_back(increment & 0xff);
+    buf->push_back((increment >> 8) & 0xff);
+  } else {
+    // Four byte delta.
+    buf->push_back(0x04);
+    PushWord(buf, increment);
+  }
+}
+
+void DW_CFA_offset_extended_sf(std::vector<uint8_t>* buf, int reg, int32_t offset) {
+  buf->push_back(0x11);
+  EncodeUnsignedLeb128(reg, buf);
+  EncodeSignedLeb128(offset, buf);
+}
+
+void DW_CFA_offset(std::vector<uint8_t>* buf, int reg, uint32_t offset) {
+  buf->push_back((0x2 << 6) | reg);
+  EncodeUnsignedLeb128(offset, buf);
+}
+
+void DW_CFA_def_cfa_offset(std::vector<uint8_t>* buf, int32_t offset) {
+  buf->push_back(0x0e);
+  EncodeUnsignedLeb128(offset, buf);
+}
+
+void DW_CFA_remember_state(std::vector<uint8_t>* buf) {
+  buf->push_back(0x0a);
+}
+
+void DW_CFA_restore_state(std::vector<uint8_t>* buf) {
+  buf->push_back(0x0b);
+}
+
+void WriteFDEHeader(std::vector<uint8_t>* buf, bool is_64bit) {
+  // 'length' (filled in by other functions).
+  if (is_64bit) {
+    PushWord(buf, 0xffffffff);  // Indicates 64bit
+    PushWord(buf, 0);
+    PushWord(buf, 0);
+  } else {
+    PushWord(buf, 0);
+  }
+
+  // 'CIE_pointer' (filled in by linker).
+  if (is_64bit) {
+    PushWord(buf, 0);
+    PushWord(buf, 0);
+  } else {
+    PushWord(buf, 0);
+  }
+
+  // 'initial_location' (filled in by linker).
+  if (is_64bit) {
+    PushWord(buf, 0);
+    PushWord(buf, 0);
+  } else {
+    PushWord(buf, 0);
+  }
+
+  // 'address_range' (filled in by other functions).
+  if (is_64bit) {
+    PushWord(buf, 0);
+    PushWord(buf, 0);
+  } else {
+    PushWord(buf, 0);
+  }
+
+  // Augmentation length: 0
+  buf->push_back(0);
+}
+
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint64_t data, bool is_64bit) {
+  const size_t kOffsetOfAddressRange = is_64bit? 28 : 12;
+  CHECK(buf->size() >= kOffsetOfAddressRange + (is_64bit? 8 : 4));
+
+  uint8_t *p = buf->data() + kOffsetOfAddressRange;
+  if (is_64bit) {
+    p[0] = data;
+    p[1] = data >> 8;
+    p[2] = data >> 16;
+    p[3] = data >> 24;
+    p[4] = data >> 32;
+    p[5] = data >> 40;
+    p[6] = data >> 48;
+    p[7] = data >> 56;
+  } else {
+    p[0] = data;
+    p[1] = data >> 8;
+    p[2] = data >> 16;
+    p[3] = data >> 24;
+  }
+}
+
+void WriteCFILength(std::vector<uint8_t>* buf, bool is_64bit) {
+  uint64_t length = is_64bit ? buf->size() - 12 : buf->size() - 4;
+  DCHECK_EQ((length & 0x3), 0U);
+
+  uint8_t *p = is_64bit? buf->data() + 4 : buf->data();
+  if (is_64bit) {
+    p[0] = length;
+    p[1] = length >> 8;
+    p[2] = length >> 16;
+    p[3] = length >> 24;
+    p[4] = length >> 32;
+    p[5] = length >> 40;
+    p[6] = length >> 48;
+    p[7] = length >> 56;
+  } else {
+    p[0] = length;
+    p[1] = length >> 8;
+    p[2] = length >> 16;
+    p[3] = length >> 24;
+  }
+}
+
+void PadCFI(std::vector<uint8_t>* buf) {
+  while (buf->size() & 0x3) {
+    buf->push_back(0);
+  }
+}
+
+}  // namespace art
diff --git a/compiler/utils/dwarf_cfi.h b/compiler/utils/dwarf_cfi.h
new file mode 100644 (file)
index 0000000..0c8b151
--- /dev/null
@@ -0,0 +1,95 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_COMPILER_UTILS_DWARF_CFI_H_
+#define ART_COMPILER_UTILS_DWARF_CFI_H_
+
+#include <vector>
+
+namespace art {
+
+/**
+ * @brief Enter a 'DW_CFA_advance_loc' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param increment Amount by which to increase the current location.
+ */
+void DW_CFA_advance_loc(std::vector<uint8_t>* buf, uint32_t increment);
+
+/**
+ * @brief Enter a 'DW_CFA_offset_extended_sf' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param reg Register number.
+ * @param offset Offset of register address from CFA.
+ */
+void DW_CFA_offset_extended_sf(std::vector<uint8_t>* buf, int reg, int32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_offset' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param reg Register number.
+ * @param offset Offset of register address from CFA.
+ */
+void DW_CFA_offset(std::vector<uint8_t>* buf, int reg, uint32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_def_cfa_offset' into an FDE buffer
+ * @param buf FDE buffer.
+ * @param offset New offset of CFA.
+ */
+void DW_CFA_def_cfa_offset(std::vector<uint8_t>* buf, int32_t offset);
+
+/**
+ * @brief Enter a 'DW_CFA_remember_state' into an FDE buffer
+ * @param buf FDE buffer.
+ */
+void DW_CFA_remember_state(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Enter a 'DW_CFA_restore_state' into an FDE buffer
+ * @param buf FDE buffer.
+ */
+void DW_CFA_restore_state(std::vector<uint8_t>* buf);
+
+/**
+ * @brief Write FDE header into an FDE buffer
+ * @param buf FDE buffer.
+ * @param is_64bit If FDE is for 64bit application.
+ */
+void WriteFDEHeader(std::vector<uint8_t>* buf, bool is_64bit);
+
+/**
+ * @brief Set 'address_range' field of an FDE buffer
+ * @param buf FDE buffer.
+ * @param data Data value.
+ * @param is_64bit If FDE is for 64bit application.
+ */
+void WriteFDEAddressRange(std::vector<uint8_t>* buf, uint64_t data, bool is_64bit);
+
+/**
+ * @brief Set 'length' field of an FDE buffer
+ * @param buf FDE buffer.
+ * @param is_64bit If FDE is for 64bit application.
+ */
+void WriteCFILength(std::vector<uint8_t>* buf, bool is_64bit);
+
+/**
+ * @brief Pad an FDE buffer with 0 until its size is a multiple of 4
+ * @param buf FDE buffer.
+ */
+void PadCFI(std::vector<uint8_t>* buf);
+}  // namespace art
+
+#endif  // ART_COMPILER_UTILS_DWARF_CFI_H_
index b6a5c20..2c9bc28 100644 (file)
@@ -20,6 +20,7 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "memory_region.h"
 #include "thread.h"
+#include "utils/dwarf_cfi.h"
 
 namespace art {
 namespace x86 {
@@ -1407,20 +1408,61 @@ void X86Assembler::EmitGenericShift(int reg_or_opcode,
   EmitOperand(reg_or_opcode, Operand(operand));
 }
 
+void X86Assembler::InitializeFrameDescriptionEntry() {
+  WriteFDEHeader(&cfi_info_, false /* is_64bit */);
+}
+
+void X86Assembler::FinalizeFrameDescriptionEntry() {
+  WriteFDEAddressRange(&cfi_info_, buffer_.Size(), false /* is_64bit */);
+  PadCFI(&cfi_info_);
+  WriteCFILength(&cfi_info_, false /* is_64bit */);
+}
+
 constexpr size_t kFramePointerSize = 4;
 
 void X86Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                               const std::vector<ManagedRegister>& spill_regs,
                               const ManagedRegisterEntrySpills& entry_spills) {
+  cfi_cfa_offset_ = kFramePointerSize;  // Only return address on stack
+  cfi_pc_ = buffer_.Size();  // Nothing emitted yet
+  DCHECK_EQ(cfi_pc_, 0U);
+
+  uint32_t reg_offset = 1;
   CHECK_ALIGNED(frame_size, kStackAlignment);
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
     pushl(spill_regs.at(i).AsX86().AsCpuRegister());
+
+    // DW_CFA_advance_loc
+    DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+    cfi_pc_ = buffer_.Size();
+    // DW_CFA_def_cfa_offset
+    cfi_cfa_offset_ += kFramePointerSize;
+    DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+    // DW_CFA_offset reg offset
+    reg_offset++;
+    DW_CFA_offset(&cfi_info_, spill_regs.at(i).AsX86().DWARFRegId(), reg_offset);
   }
+
   // return address then method on stack
-  addl(ESP, Immediate(-frame_size + (spill_regs.size() * kFramePointerSize) +
-                      sizeof(StackReference<mirror::ArtMethod>) /*method*/ +
-                      kFramePointerSize /*return address*/));
+  int32_t adjust = frame_size - (spill_regs.size() * kFramePointerSize) -
+                   sizeof(StackReference<mirror::ArtMethod>) /*method*/ -
+                   kFramePointerSize /*return address*/;
+  addl(ESP, Immediate(-adjust));
+  // DW_CFA_advance_loc
+  DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+  cfi_pc_ = buffer_.Size();
+  // DW_CFA_def_cfa_offset
+  cfi_cfa_offset_ += adjust;
+  DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
   pushl(method_reg.AsX86().AsCpuRegister());
+  // DW_CFA_advance_loc
+  DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+  cfi_pc_ = buffer_.Size();
+  // DW_CFA_def_cfa_offset
+  cfi_cfa_offset_ += kFramePointerSize;
+  DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
   for (size_t i = 0; i < entry_spills.size(); ++i) {
     movl(Address(ESP, frame_size + sizeof(StackReference<mirror::ArtMethod>) +
                  (i * kFramePointerSize)),
@@ -1442,6 +1484,12 @@ void X86Assembler::RemoveFrame(size_t frame_size,
 void X86Assembler::IncreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   addl(ESP, Immediate(-adjust));
+  // DW_CFA_advance_loc
+  DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+  cfi_pc_ = buffer_.Size();
+  // DW_CFA_def_cfa_offset
+  cfi_cfa_offset_ += adjust;
+  DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
 }
 
 void X86Assembler::DecreaseFrameSize(size_t adjust) {
index ce20768..5c4e34f 100644 (file)
@@ -571,6 +571,12 @@ class X86Assembler FINAL : public Assembler {
   // and branch to a ExceptionSlowPath if it is.
   void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
 
+  void InitializeFrameDescriptionEntry() OVERRIDE;
+  void FinalizeFrameDescriptionEntry() OVERRIDE;
+  std::vector<uint8_t>* GetFrameDescriptionEntry() OVERRIDE {
+    return &cfi_info_;
+  }
+
  private:
   inline void EmitUint8(uint8_t value);
   inline void EmitInt32(int32_t value);
@@ -589,6 +595,9 @@ class X86Assembler FINAL : public Assembler {
   void EmitGenericShift(int rm, Register reg, const Immediate& imm);
   void EmitGenericShift(int rm, Register operand, Register shifter);
 
+  std::vector<uint8_t> cfi_info_;
+  uint32_t cfi_cfa_offset_, cfi_pc_;
+
   DISALLOW_COPY_AND_ASSIGN(X86Assembler);
 };
 
index 09d2b49..5d46ee2 100644 (file)
@@ -88,6 +88,14 @@ const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
 // There is a one-to-one mapping between ManagedRegister and register id.
 class X86ManagedRegister : public ManagedRegister {
  public:
+  int DWARFRegId() const {
+    CHECK(IsCpuRegister());
+    // For all the X86 registers we care about:
+    // EAX, ECX, EDX, EBX, ESP, EBP, ESI, EDI,
+    // DWARF register id is the same as id_.
+    return static_cast<int>(id_);
+  }
+
   ByteRegister AsByteRegister() const {
     CHECK(IsCpuRegister());
     CHECK_LT(AsCpuRegister(), ESP);  // ESP, EBP, ESI and EDI cannot be encoded as byte registers.
index 7684271..1e2884a 100644 (file)
@@ -20,6 +20,7 @@
 #include "entrypoints/quick/quick_entrypoints.h"
 #include "memory_region.h"
 #include "thread.h"
+#include "utils/dwarf_cfi.h"
 
 namespace art {
 namespace x86_64 {
@@ -1714,11 +1715,26 @@ void X86_64Assembler::EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const
   }
 }
 
+void X86_64Assembler::InitializeFrameDescriptionEntry() {
+  WriteFDEHeader(&cfi_info_, true /* is_64bit */);
+}
+
+void X86_64Assembler::FinalizeFrameDescriptionEntry() {
+  WriteFDEAddressRange(&cfi_info_, buffer_.Size(), true /* is_64bit */);
+  PadCFI(&cfi_info_);
+  WriteCFILength(&cfi_info_, true /* is_64bit */);
+}
+
 constexpr size_t kFramePointerSize = 8;
 
 void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                                  const std::vector<ManagedRegister>& spill_regs,
                                  const ManagedRegisterEntrySpills& entry_spills) {
+  cfi_cfa_offset_ = kFramePointerSize;  // Only return address on stack
+  cfi_pc_ = buffer_.Size();  // Nothing emitted yet
+  DCHECK_EQ(cfi_pc_, 0U);
+
+  uint32_t reg_offset = 1;
   CHECK_ALIGNED(frame_size, kStackAlignment);
   int gpr_count = 0;
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
@@ -1726,6 +1742,16 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
     if (spill.IsCpuRegister()) {
       pushq(spill.AsCpuRegister());
       gpr_count++;
+
+      // DW_CFA_advance_loc
+      DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+      cfi_pc_ = buffer_.Size();
+      // DW_CFA_def_cfa_offset
+      cfi_cfa_offset_ += kFramePointerSize;
+      DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+      // DW_CFA_offset reg offset
+      reg_offset++;
+      DW_CFA_offset(&cfi_info_, spill.DWARFRegId(), reg_offset);
     }
   }
   // return address then method on stack
@@ -1733,6 +1759,13 @@ void X86_64Assembler::BuildFrame(size_t frame_size, ManagedRegister method_reg,
                           - (gpr_count * kFramePointerSize)
                           - kFramePointerSize /*return address*/;
   subq(CpuRegister(RSP), Immediate(rest_of_frame));
+  // DW_CFA_advance_loc
+  DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+  cfi_pc_ = buffer_.Size();
+  // DW_CFA_def_cfa_offset
+  cfi_cfa_offset_ += rest_of_frame;
+  DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
+
   // spill xmms
   int64_t offset = rest_of_frame;
   for (int i = spill_regs.size() - 1; i >= 0; --i) {
@@ -1796,6 +1829,12 @@ void X86_64Assembler::RemoveFrame(size_t frame_size,
 void X86_64Assembler::IncreaseFrameSize(size_t adjust) {
   CHECK_ALIGNED(adjust, kStackAlignment);
   addq(CpuRegister(RSP), Immediate(-static_cast<int64_t>(adjust)));
+  // DW_CFA_advance_loc
+  DW_CFA_advance_loc(&cfi_info_, buffer_.Size() - cfi_pc_);
+  cfi_pc_ = buffer_.Size();
+  // DW_CFA_def_cfa_offset
+  cfi_cfa_offset_ += adjust;
+  DW_CFA_def_cfa_offset(&cfi_info_, cfi_cfa_offset_);
 }
 
 void X86_64Assembler::DecreaseFrameSize(size_t adjust) {
index 9a6df1c..3f9f007 100644 (file)
@@ -614,6 +614,12 @@ class X86_64Assembler FINAL : public Assembler {
   // and branch to a ExceptionSlowPath if it is.
   void ExceptionPoll(ManagedRegister scratch, size_t stack_adjust) OVERRIDE;
 
+  void InitializeFrameDescriptionEntry() OVERRIDE;
+  void FinalizeFrameDescriptionEntry() OVERRIDE;
+  std::vector<uint8_t>* GetFrameDescriptionEntry() OVERRIDE {
+    return &cfi_info_;
+  }
+
  private:
   void EmitUint8(uint8_t value);
   void EmitInt32(int32_t value);
@@ -655,6 +661,9 @@ class X86_64Assembler FINAL : public Assembler {
   void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, CpuRegister src);
   void EmitOptionalByteRegNormalizingRex32(CpuRegister dst, const Operand& operand);
 
+  std::vector<uint8_t> cfi_info_;
+  uint32_t cfi_cfa_offset_, cfi_pc_;
+
   DISALLOW_COPY_AND_ASSIGN(X86_64Assembler);
 };
 
index 822659f..3a96ad0 100644 (file)
@@ -87,6 +87,21 @@ const int kNumberOfAllocIds = kNumberOfCpuAllocIds + kNumberOfXmmAllocIds +
 // There is a one-to-one mapping between ManagedRegister and register id.
 class X86_64ManagedRegister : public ManagedRegister {
  public:
+  int DWARFRegId() const {
+    CHECK(IsCpuRegister());
+    switch (id_) {
+      case RAX: return  0;
+      case RDX: return  1;
+      case RCX: return  2;
+      case RBX: return  3;
+      case RSI: return  4;
+      case RDI: return  5;
+      case RBP: return  6;
+      case RSP: return  7;
+      default: return static_cast<int>(id_);  // R8 ~ R15
+    }
+  }
+
   CpuRegister AsCpuRegister() const {
     CHECK(IsCpuRegister());
     return CpuRegister(static_cast<Register>(id_));
index 28db711..2ef826b 100644 (file)
@@ -37,9 +37,9 @@ ifeq ($(ART_BUILD_TARGET_DEBUG),true)
 endif
 
 # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
   $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libart-compiler,art/compiler,host,ndebug))
 endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
   $(eval $(call build-art-executable,dex2oat,$(DEX2OAT_SRC_FILES),libartd-compiler,art/compiler,host,debug))
 endif
index 19b37af..aef235b 100644 (file)
@@ -226,6 +226,15 @@ static void Usage(const char* fmt, ...) {
   UsageError("  --disable-passes=<pass-names>:  disable one or more passes separated by comma.");
   UsageError("      Example: --disable-passes=UseCount,BBOptimizations");
   UsageError("");
+  UsageError("  --print-pass-options: print a list of passes that have configurable options along "
+             "with the setting.");
+  UsageError("      Will print default if no overridden setting exists.");
+  UsageError("");
+  UsageError("  --pass-options=Pass1Name:Pass1OptionName:Pass1Option#,"
+             "Pass2Name:Pass2OptionName:Pass2Option#");
+  UsageError("      Used to specify a pass specific option. The setting itself must be integer.");
+  UsageError("      Separator used between options is a comma.");
+  UsageError("");
   std::cerr << "See log for usage error information\n";
   exit(EXIT_FAILURE);
 }
@@ -847,6 +856,7 @@ static int dex2oat(int argc, char** argv) {
   bool dump_stats = false;
   bool dump_timing = false;
   bool dump_passes = false;
+  bool print_pass_options = false;
   bool include_patch_information = CompilerOptions::kDefaultIncludePatchInformation;
   bool include_debug_symbols = kIsDebugBuild;
   bool dump_slow_timing = kIsDebugBuild;
@@ -1033,6 +1043,11 @@ static int dex2oat(int argc, char** argv) {
     } else if (option.starts_with("--dump-cfg-passes=")) {
       std::string dump_passes = option.substr(strlen("--dump-cfg-passes=")).data();
       PassDriverMEOpts::SetDumpPassList(dump_passes);
+    } else if (option == "--print-pass-options") {
+      print_pass_options = true;
+    } else if (option.starts_with("--pass-options=")) {
+      std::string options = option.substr(strlen("--pass-options=")).data();
+      PassDriverMEOpts::SetOverriddenPassOptions(options);
     } else if (option == "--include-patch-information") {
       include_patch_information = true;
     } else if (option == "--no-include-patch-information") {
@@ -1179,6 +1194,10 @@ static int dex2oat(int argc, char** argv) {
       break;
   }
 
+  if (print_pass_options) {
+    PassDriverMEOpts::PrintPassOptions();
+  }
+
   std::unique_ptr<CompilerOptions> compiler_options(new CompilerOptions(compiler_filter,
                                                                         huge_method_threshold,
                                                                         large_method_threshold,
index a0abc9e..d67c169 100644 (file)
@@ -99,9 +99,9 @@ ifeq ($(ART_BUILD_TARGET_DEBUG),true)
   $(eval $(call build-libart-disassembler,target,debug))
 endif
 # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
   $(eval $(call build-libart-disassembler,host,ndebug))
 endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
   $(eval $(call build-libart-disassembler,host,debug))
 endif
index 0ca8962..7551add 100644 (file)
@@ -702,12 +702,24 @@ DISASSEMBLER_ENTRY(cmp,
         load = true;
         immediate_bytes = 1;
         break;
+      case 0xA5:
+        opcode << "shld";
+        has_modrm = true;
+        load = true;
+        cx = true;
+        break;
       case 0xAC:
         opcode << "shrd";
         has_modrm = true;
         load = true;
         immediate_bytes = 1;
         break;
+      case 0xAD:
+        opcode << "shrd";
+        has_modrm = true;
+        load = true;
+        cx = true;
+        break;
       case 0xAE:
         if (prefix[0] == 0xF3) {
           prefix[0] = 0;  // clear prefix now it's served its purpose as part of the opcode
@@ -752,6 +764,7 @@ DISASSEMBLER_ENTRY(cmp,
       case 0xB7: opcode << "movzxw"; has_modrm = true; load = true; break;
       case 0xBE: opcode << "movsxb"; has_modrm = true; load = true; byte_second_operand = true; rex |= (rex == 0 ? 0 : 0b1000); break;
       case 0xBF: opcode << "movsxw"; has_modrm = true; load = true; break;
+      case 0xC3: opcode << "movnti"; store = true; has_modrm = true; break;
       case 0xC5:
         if (prefix[2] == 0x66) {
           opcode << "pextrw";
index c1fb267..1feb27d 100644 (file)
@@ -430,9 +430,9 @@ class OatDumper {
         StackHandleScope<1> hs(soa.Self());
         Handle<mirror::DexCache> dex_cache(
             hs.NewHandle(runtime->GetClassLinker()->FindDexCache(dex_file)));
-        NullHandle<mirror::ClassLoader> class_loader;
-        verifier::MethodVerifier verifier(&dex_file, &dex_cache, &class_loader, &class_def,
-                                          code_item, dex_method_idx, nullptr, method_access_flags,
+        verifier::MethodVerifier verifier(&dex_file, dex_cache, NullHandle<mirror::ClassLoader>(),
+                                          &class_def, code_item, dex_method_idx,
+                                          NullHandle<mirror::ArtMethod>(), method_access_flags,
                                           true, true, true);
         verifier.Verify();
         DumpCode(*indent2_os, &verifier, oat_method, code_item);
@@ -698,12 +698,12 @@ class OatDumper {
                     uint32_t method_access_flags) {
     if ((method_access_flags & kAccNative) == 0) {
       ScopedObjectAccess soa(Thread::Current());
-      StackHandleScope<2> hs(soa.Self());
+      StackHandleScope<1> hs(soa.Self());
       Handle<mirror::DexCache> dex_cache(
           hs.NewHandle(Runtime::Current()->GetClassLinker()->FindDexCache(*dex_file)));
-      auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
       verifier::MethodVerifier::VerifyMethodAndDump(os, dex_method_idx, dex_file, dex_cache,
-                                                    class_loader, &class_def, code_item, nullptr,
+                                                    NullHandle<mirror::ClassLoader>(), &class_def,
+                                                    code_item, NullHandle<mirror::ArtMethod>(),
                                                     method_access_flags);
     }
   }
@@ -939,15 +939,26 @@ class ImageDumper {
       StackHandleScope<1> hs(Thread::Current());
       FieldHelper fh(hs.NewHandle(field));
       mirror::Class* type = fh.GetType();
+      DCHECK(type->IsPrimitive());
       if (type->IsPrimitiveLong()) {
         os << StringPrintf("%" PRId64 " (0x%" PRIx64 ")\n", field->Get64(obj), field->Get64(obj));
       } else if (type->IsPrimitiveDouble()) {
         os << StringPrintf("%f (%a)\n", field->GetDouble(obj), field->GetDouble(obj));
       } else if (type->IsPrimitiveFloat()) {
         os << StringPrintf("%f (%a)\n", field->GetFloat(obj), field->GetFloat(obj));
-      } else {
-        DCHECK(type->IsPrimitive());
+      } else if (type->IsPrimitiveInt()) {
         os << StringPrintf("%d (0x%x)\n", field->Get32(obj), field->Get32(obj));
+      } else if (type->IsPrimitiveChar()) {
+        os << StringPrintf("%u (0x%x)\n", field->GetChar(obj), field->GetChar(obj));
+      } else if (type->IsPrimitiveShort()) {
+        os << StringPrintf("%d (0x%x)\n", field->GetShort(obj), field->GetShort(obj));
+      } else if (type->IsPrimitiveBoolean()) {
+        os << StringPrintf("%s (0x%x)\n", field->GetBoolean(obj)? "true" : "false",
+            field->GetBoolean(obj));
+      } else if (type->IsPrimitiveByte()) {
+        os << StringPrintf("%d (0x%x)\n", field->GetByte(obj), field->GetByte(obj));
+      } else {
+        LOG(FATAL) << "Unknown type: " << PrettyClass(type);
       }
     } else {
       // Get the value, don't compute the type unless it is non-null as we don't want
index 8b6b9ad..1e16096 100644 (file)
@@ -37,9 +37,9 @@ ifeq ($(ART_BUILD_TARGET_DEBUG),true)
 endif
 
 # We always build patchoat and dependencies, even if the host build is otherwise disabled, since they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
   $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),,art/compiler,host,ndebug))
 endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
   $(eval $(call build-art-executable,patchoat,$(PATCHOAT_SRC_FILES),,art/compiler,host,debug))
 endif
index 55e8141..bbdf3a3 100644 (file)
@@ -466,16 +466,13 @@ bool PatchOat::Patch(File* input_oat, off_t delta, File* output_oat, TimingLogge
   return true;
 }
 
-bool PatchOat::CheckOatFile() {
-  Elf32_Shdr* patches_sec = oat_file_->FindSectionByName(".oat_patches");
-  if (patches_sec == nullptr) {
-    return false;
-  }
-  if (patches_sec->sh_type != SHT_OAT_PATCH) {
+template <typename ptr_t>
+bool PatchOat::CheckOatFile(const Elf32_Shdr& patches_sec) {
+  if (patches_sec.sh_type != SHT_OAT_PATCH) {
     return false;
   }
-  uintptr_t* patches = reinterpret_cast<uintptr_t*>(oat_file_->Begin() + patches_sec->sh_offset);
-  uintptr_t* patches_end = patches + (patches_sec->sh_size/sizeof(uintptr_t));
+  ptr_t* patches = reinterpret_cast<ptr_t*>(oat_file_->Begin() + patches_sec.sh_offset);
+  ptr_t* patches_end = patches + (patches_sec.sh_size / sizeof(ptr_t));
   Elf32_Shdr* oat_data_sec = oat_file_->FindSectionByName(".rodata");
   Elf32_Shdr* oat_text_sec = oat_file_->FindSectionByName(".text");
   if (oat_data_sec == nullptr) {
@@ -572,6 +569,11 @@ bool PatchOat::PatchElf() {
     }
   }
 
+  t.NewTiming("Fixup Debug Sections");
+  if (!oat_file_->FixupDebugSections(delta_)) {
+    return false;
+  }
+
   return true;
 }
 
@@ -599,10 +601,28 @@ bool PatchOat::PatchTextSection() {
     LOG(ERROR) << ".oat_patches section not found. Aborting patch";
     return false;
   }
-  DCHECK(CheckOatFile()) << "Oat file invalid";
-  CHECK_EQ(patches_sec->sh_type, SHT_OAT_PATCH) << "Unexpected type of .oat_patches";
-  uintptr_t* patches = reinterpret_cast<uintptr_t*>(oat_file_->Begin() + patches_sec->sh_offset);
-  uintptr_t* patches_end = patches + (patches_sec->sh_size/sizeof(uintptr_t));
+  if (patches_sec->sh_type != SHT_OAT_PATCH) {
+    LOG(ERROR) << "Unexpected type of .oat_patches";
+    return false;
+  }
+
+  switch (patches_sec->sh_entsize) {
+    case sizeof(uint32_t):
+      return PatchTextSection<uint32_t>(*patches_sec);
+    case sizeof(uint64_t):
+      return PatchTextSection<uint64_t>(*patches_sec);
+    default:
+      LOG(ERROR) << ".oat_patches Entsize of " << patches_sec->sh_entsize << "bits "
+                 << "is not valid";
+      return false;
+  }
+}
+
+template <typename ptr_t>
+bool PatchOat::PatchTextSection(const Elf32_Shdr& patches_sec) {
+  DCHECK(CheckOatFile<ptr_t>(patches_sec)) << "Oat file invalid";
+  ptr_t* patches = reinterpret_cast<ptr_t*>(oat_file_->Begin() + patches_sec.sh_offset);
+  ptr_t* patches_end = patches + (patches_sec.sh_size / sizeof(ptr_t));
   Elf32_Shdr* oat_text_sec = oat_file_->FindSectionByName(".text");
   CHECK(oat_text_sec != nullptr);
   byte* to_patch = oat_file_->Begin() + oat_text_sec->sh_offset;
@@ -614,7 +634,6 @@ bool PatchOat::PatchTextSection() {
     CHECK_LT(reinterpret_cast<uintptr_t>(patch_loc), to_patch_end);
     *patch_loc += delta_;
   }
-
   return true;
 }
 
index 6960d3b..326333e 100644 (file)
@@ -74,11 +74,12 @@ class PatchOat {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool InHeap(mirror::Object*);
 
-  bool CheckOatFile();
-
   // Patches oat in place, modifying the oat_file given to the constructor.
   bool PatchElf();
   bool PatchTextSection();
+  // Templatized version to actually do the patching with the right sized offsets.
+  template <typename ptr_t> bool PatchTextSection(const Elf32_Shdr& patches_sec);
+  template <typename ptr_t> bool CheckOatFile(const Elf32_Shdr& patches_sec);
   bool PatchOatHeader();
   bool PatchSymbols(Elf32_Shdr* section);
 
index 0ab6626..6511dfa 100644 (file)
@@ -80,6 +80,7 @@ LIBART_COMMON_SRC_FILES := \
   interpreter/interpreter.cc \
   interpreter/interpreter_common.cc \
   interpreter/interpreter_switch_impl.cc \
+  java_vm_ext.cc \
   jdwp/jdwp_event.cc \
   jdwp/jdwp_expand_buf.cc \
   jdwp/jdwp_handler.cc \
@@ -87,6 +88,7 @@ LIBART_COMMON_SRC_FILES := \
   jdwp/jdwp_request.cc \
   jdwp/jdwp_socket.cc \
   jdwp/object_registry.cc \
+  jni_env_ext.cc \
   jni_internal.cc \
   jobject_comparator.cc \
   mem_map.cc \
@@ -470,10 +472,10 @@ endef
 
 # We always build dex2oat and dependencies, even if the host build is otherwise disabled, since
 # they are used to cross compile for the target.
-ifeq ($(ART_BUILD_NDEBUG),true)
+ifeq ($(ART_BUILD_HOST_NDEBUG),true)
   $(eval $(call build-libart,host,ndebug))
 endif
-ifeq ($(ART_BUILD_DEBUG),true)
+ifeq ($(ART_BUILD_HOST_DEBUG),true)
   $(eval $(call build-libart,host,debug))
 endif
 
index 8c6afd6..38a88c5 100644 (file)
@@ -48,12 +48,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
 extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Field entrypoints.
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
+extern "C" int art_quick_set8_static(uint32_t, int8_t);
+extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
+extern "C" int art_quick_set16_static(uint32_t, int16_t);
 extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
 extern "C" int art_quick_set32_static(uint32_t, int32_t);
 extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
 extern "C" int art_quick_set64_static(uint32_t, int64_t);
 extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
 extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_static(uint32_t);
+extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
+extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
+extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
+extern "C" int16_t art_quick_get_short_static(uint32_t);
+extern "C" uint16_t art_quick_get_char_static(uint32_t);
 extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
 extern "C" int32_t art_quick_get32_static(uint32_t);
 extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
@@ -154,15 +166,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
   qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
+  qpoints->pSet8Instance = art_quick_set8_instance;
+  qpoints->pSet8Static = art_quick_set8_static;
+  qpoints->pSet16Instance = art_quick_set16_instance;
+  qpoints->pSet16Static = art_quick_set16_static;
   qpoints->pSet32Instance = art_quick_set32_instance;
   qpoints->pSet32Static = art_quick_set32_static;
   qpoints->pSet64Instance = art_quick_set64_instance;
   qpoints->pSet64Static = art_quick_set64_static;
   qpoints->pSetObjInstance = art_quick_set_obj_instance;
   qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGetByteInstance = art_quick_get_byte_instance;
+  qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
+  qpoints->pGetShortInstance = art_quick_get_short_instance;
+  qpoints->pGetCharInstance = art_quick_get_char_instance;
   qpoints->pGet32Instance = art_quick_get32_instance;
   qpoints->pGet64Instance = art_quick_get64_instance;
   qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGetByteStatic = art_quick_get_byte_static;
+  qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
+  qpoints->pGetShortStatic = art_quick_get_short_static;
+  qpoints->pGetCharStatic = art_quick_get_char_static;
   qpoints->pGet32Static = art_quick_get32_static;
   qpoints->pGet64Static = art_quick_get64_static;
   qpoints->pGetObjStatic = art_quick_get_obj_static;
index 1b30c9c..51bcd3c 100644 (file)
@@ -203,6 +203,77 @@ ENTRY \c_name
 END \c_name
 .endm
 
+.macro  RETURN_OR_DELIVER_PENDING_EXCEPTION_REG reg
+    ldr \reg, [r9, #THREAD_EXCEPTION_OFFSET]   // Get exception field.
+    cbnz \reg, 1f
+    bx lr
+1:
+    DELIVER_PENDING_EXCEPTION
+.endm
+
+.macro  RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+    RETURN_OR_DELIVER_PENDING_EXCEPTION_REG r1
+.endm
+
+.macro RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+    RETURN_IF_RESULT_IS_ZERO
+    DELIVER_PENDING_EXCEPTION
+.endm
+
+// Macros taking opportunity of code similarities for downcalls with referrer for non-wide fields.
+.macro  ONE_ARG_REF_DOWNCALL name, entrypoint, return
+    .extern \entrypoint
+ENTRY \name
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r1, [sp, #32]                 @ pass referrer
+    mov    r2, r9                        @ pass Thread::Current
+    mov    r3, sp                        @ pass SP
+    bl     \entrypoint                   @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    \return
+END \name
+.endm
+
+.macro  TWO_ARG_REF_DOWNCALL name, entrypoint, return
+    .extern \entrypoint
+ENTRY \name
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r2, [sp, #32]                 @ pass referrer
+    mov    r3, r9                        @ pass Thread::Current
+    mov    r12, sp
+    str    r12, [sp, #-16]!              @ expand the frame and pass SP
+    .pad #16
+    .cfi_adjust_cfa_offset 16
+    bl     \entrypoint                   @ (field_idx, Object*, referrer, Thread*, SP)
+    add    sp, #16                       @ strip the extra frame
+    .cfi_adjust_cfa_offset -16
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
+    \return
+END \name
+.endm
+
+.macro  THREE_ARG_REF_DOWNCALL name, entrypoint, return
+    .extern \entrypoint
+ENTRY \name
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
+    ldr    r3, [sp, #32]                 @ pass referrer
+    mov    r12, sp                       @ save SP
+    sub    sp, #8                        @ grow frame for alignment with stack args
+    .pad #8
+    .cfi_adjust_cfa_offset 8
+    push   {r9, r12}                     @ pass Thread::Current and SP
+    .save {r9, r12}
+    .cfi_adjust_cfa_offset 8
+    .cfi_rel_offset r9, 0
+    .cfi_rel_offset r12, 4
+    bl     \entrypoint                   @ (field_idx, Object*, new_val, referrer, Thread*, SP)
+    add    sp, #16                       @ release out args
+    .cfi_adjust_cfa_offset -16
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME   @ TODO: we can clearly save an add here
+    \return
+END \name
+.endm
+
     /*
      * Called by managed code, saves callee saves and then calls artThrowException
      * that will place a mock Method* at the bottom of the stack. Arg1 holds the exception.
@@ -601,23 +672,14 @@ ENTRY art_quick_initialize_type_and_verify_access
 END art_quick_initialize_type_and_verify_access
 
     /*
-     * Called by managed code to resolve a static field and load a 32-bit primitive value.
+     * Called by managed code to resolve a static field and load a non-wide value.
      */
-    .extern artGet32StaticFromCode
-ENTRY art_quick_get32_static
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
-    ldr    r1, [sp, #32]                 @ pass referrer
-    mov    r2, r9                        @ pass Thread::Current
-    mov    r3, sp                        @ pass SP
-    bl     artGet32StaticFromCode        @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
-    ldr    r1, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
-    cbnz   r1, 1f                        @ success if no exception pending
-    bx     lr                            @ return on success
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_get32_static
-
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
     /*
      * Called by managed code to resolve a static field and load a 64-bit primitive value.
      */
@@ -637,43 +699,14 @@ ENTRY art_quick_get64_static
 END art_quick_get64_static
 
     /*
-     * Called by managed code to resolve a static field and load an object reference.
+     * Called by managed code to resolve an instance field and load a non-wide value.
      */
-    .extern artGetObjStaticFromCode
-ENTRY art_quick_get_obj_static
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
-    ldr    r1, [sp, #32]                 @ pass referrer
-    mov    r2, r9                        @ pass Thread::Current
-    mov    r3, sp                        @ pass SP
-    bl     artGetObjStaticFromCode       @ (uint32_t field_idx, const Method* referrer, Thread*, SP)
-    ldr    r1, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
-    cbnz   r1, 1f                        @ success if no exception pending
-    bx     lr                            @ return on success
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_get_obj_static
-
-    /*
-     * Called by managed code to resolve an instance field and load a 32-bit primitive value.
-     */
-    .extern artGet32InstanceFromCode
-ENTRY art_quick_get32_instance
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
-    ldr    r2, [sp, #32]                 @ pass referrer
-    mov    r3, r9                        @ pass Thread::Current
-    mov    r12, sp
-    str    r12, [sp, #-16]!              @ expand the frame and pass SP
-    bl     artGet32InstanceFromCode      @ (field_idx, Object*, referrer, Thread*, SP)
-    add    sp, #16                       @ strip the extra frame
-    ldr    r1, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
-    cbnz   r1, 1f                        @ success if no exception pending
-    bx     lr                            @ return on success
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_get32_instance
-
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_R1
     /*
      * Called by managed code to resolve an instance field and load a 64-bit primitive value.
      */
@@ -698,48 +731,12 @@ ENTRY art_quick_get64_instance
 END art_quick_get64_instance
 
     /*
-     * Called by managed code to resolve an instance field and load an object reference.
-     */
-    .extern artGetObjInstanceFromCode
-ENTRY art_quick_get_obj_instance
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
-    ldr    r2, [sp, #32]                 @ pass referrer
-    mov    r3, r9                        @ pass Thread::Current
-    mov    r12, sp
-    str    r12, [sp, #-16]!              @ expand the frame and pass SP
-    .pad #16
-    .cfi_adjust_cfa_offset 16
-    bl     artGetObjInstanceFromCode     @ (field_idx, Object*, referrer, Thread*, SP)
-    add    sp, #16                       @ strip the extra frame
-    .cfi_adjust_cfa_offset -16
-    ldr    r1, [r9, #THREAD_EXCEPTION_OFFSET]  @ load Thread::Current()->exception_
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
-    cbnz   r1, 1f                        @ success if no exception pending
-    bx     lr                            @ return on success
-1:
-    DELIVER_PENDING_EXCEPTION
-END art_quick_get_obj_instance
-
-    /*
-     * Called by managed code to resolve a static field and store a 32-bit primitive value.
+     * Called by managed code to resolve a static field and store a non-wide value.
      */
-    .extern artSet32StaticFromCode
-ENTRY art_quick_set32_static
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
-    ldr    r2, [sp, #32]                 @ pass referrer
-    mov    r3, r9                        @ pass Thread::Current
-    mov    r12, sp
-    str    r12, [sp, #-16]!              @ expand the frame and pass SP
-    .pad #16
-    .cfi_adjust_cfa_offset 16
-    bl     artSet32StaticFromCode        @ (field_idx, new_val, referrer, Thread*, SP)
-    add    sp, #16                       @ strip the extra frame
-    .cfi_adjust_cfa_offset -16
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
-    RETURN_IF_RESULT_IS_ZERO
-    DELIVER_PENDING_EXCEPTION
-END art_quick_set32_static
-
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
     /*
      * Called by managed code to resolve a static field and store a 64-bit primitive value.
      * On entry r0 holds field index, r1:r2 hold new_val
@@ -767,53 +764,16 @@ ENTRY art_quick_set64_static
 END art_quick_set64_static
 
     /*
-     * Called by managed code to resolve a static field and store an object reference.
-     */
-    .extern artSetObjStaticFromCode
-ENTRY art_quick_set_obj_static
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
-    ldr    r2, [sp, #32]                 @ pass referrer
-    mov    r3, r9                        @ pass Thread::Current
-    mov    r12, sp
-    str    r12, [sp, #-16]!              @ expand the frame and pass SP
-    .pad #16
-    .cfi_adjust_cfa_offset 16
-    bl     artSetObjStaticFromCode       @ (field_idx, new_val, referrer, Thread*, SP)
-    add    sp, #16                       @ strip the extra frame
-    .cfi_adjust_cfa_offset -16
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME
-    RETURN_IF_RESULT_IS_ZERO
-    DELIVER_PENDING_EXCEPTION
-END art_quick_set_obj_static
-
-    /*
-     * Called by managed code to resolve an instance field and store a 32-bit primitive value.
+     * Called by managed code to resolve an instance field and store a non-wide value.
      */
-    .extern artSet32InstanceFromCode
-ENTRY art_quick_set32_instance
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
-    ldr    r3, [sp, #32]                 @ pass referrer
-    mov    r12, sp                       @ save SP
-    sub    sp, #8                        @ grow frame for alignment with stack args
-    .pad #8
-    .cfi_adjust_cfa_offset 8
-    push   {r9, r12}                     @ pass Thread::Current and SP
-    .save {r9, r12}
-    .cfi_adjust_cfa_offset 8
-    .cfi_rel_offset r9, 0
-    .cfi_rel_offset r12, 4
-    bl     artSet32InstanceFromCode      @ (field_idx, Object*, new_val, referrer, Thread*, SP)
-    add    sp, #16                       @ release out args
-    .cfi_adjust_cfa_offset -16
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME   @ TODO: we can clearly save an add here
-    RETURN_IF_RESULT_IS_ZERO
-    DELIVER_PENDING_EXCEPTION
-END art_quick_set32_instance
-
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_RESULT_IS_ZERO_OR_DELIVER
     /*
      * Called by managed code to resolve an instance field and store a 64-bit primitive value.
      */
-    .extern artSet32InstanceFromCode
+    .extern artSet64InstanceFromCode
 ENTRY art_quick_set64_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
     mov    r12, sp                       @ save SP
@@ -833,29 +793,6 @@ ENTRY art_quick_set64_instance
 END art_quick_set64_instance
 
     /*
-     * Called by managed code to resolve an instance field and store an object reference.
-     */
-    .extern artSetObjInstanceFromCode
-ENTRY art_quick_set_obj_instance
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     @ save callee saves in case of GC
-    ldr    r3, [sp, #32]                 @ pass referrer
-    mov    r12, sp                       @ save SP
-    sub    sp, #8                        @ grow frame for alignment with stack args
-    .pad #8
-    .cfi_adjust_cfa_offset 8
-    push   {r9, r12}                     @ pass Thread::Current and SP
-    .save {r9, r12}
-    .cfi_adjust_cfa_offset 8
-    .cfi_rel_offset r9, 0
-    bl     artSetObjInstanceFromCode     @ (field_idx, Object*, new_val, referrer, Thread*, SP)
-    add    sp, #16                       @ release out args
-    .cfi_adjust_cfa_offset -16
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME   @ TODO: we can clearly save an add here
-    RETURN_IF_RESULT_IS_ZERO
-    DELIVER_PENDING_EXCEPTION
-END art_quick_set_obj_instance
-
-    /*
      * Entry from managed code to resolve a string, this stub will allocate a String and deliver an
      * exception on error. On success the String is returned. R0 holds the referring method,
      * R1 holds the string index. The fast path check for hit in strings cache has already been
index 0c33d9c..70e93b3 100644 (file)
@@ -47,12 +47,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
 extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Field entrypoints.
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
+extern "C" int art_quick_set8_static(uint32_t, int8_t);
+extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
+extern "C" int art_quick_set16_static(uint32_t, int16_t);
 extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
 extern "C" int art_quick_set32_static(uint32_t, int32_t);
 extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
 extern "C" int art_quick_set64_static(uint32_t, int64_t);
 extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
 extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
+extern "C" int8_t art_quick_get_byte_static(uint32_t);
+extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
+extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
+extern "C" uint16_t art_quick_get_char_static(uint32_t);
+extern "C" int16_t art_quick_get_short_static(uint32_t);
 extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
 extern "C" int32_t art_quick_get32_static(uint32_t);
 extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
@@ -136,15 +148,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
   qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
+  qpoints->pSet8Instance = art_quick_set8_instance;
+  qpoints->pSet8Static = art_quick_set8_static;
+  qpoints->pSet16Instance = art_quick_set16_instance;
+  qpoints->pSet16Static = art_quick_set16_static;
   qpoints->pSet32Instance = art_quick_set32_instance;
   qpoints->pSet32Static = art_quick_set32_static;
   qpoints->pSet64Instance = art_quick_set64_instance;
   qpoints->pSet64Static = art_quick_set64_static;
   qpoints->pSetObjInstance = art_quick_set_obj_instance;
   qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
+  qpoints->pGetByteInstance = art_quick_get_byte_instance;
+  qpoints->pGetCharInstance = art_quick_get_char_instance;
+  qpoints->pGetShortInstance = art_quick_get_short_instance;
   qpoints->pGet32Instance = art_quick_get32_instance;
   qpoints->pGet64Instance = art_quick_get64_instance;
   qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
+  qpoints->pGetByteStatic = art_quick_get_byte_static;
+  qpoints->pGetCharStatic = art_quick_get_char_static;
+  qpoints->pGetShortStatic = art_quick_get_short_static;
   qpoints->pGet32Static = art_quick_get32_static;
   qpoints->pGet64Static = art_quick_get64_static;
   qpoints->pGetObjStatic = art_quick_get_obj_static;
index 2a19e27..606816a 100644 (file)
@@ -1266,17 +1266,29 @@ TWO_ARG_DOWNCALL art_quick_initialize_static_storage, artInitializeStaticStorage
 TWO_ARG_DOWNCALL art_quick_initialize_type, artInitializeTypeFromCode, RETURN_IF_RESULT_IS_NON_ZERO
 TWO_ARG_DOWNCALL art_quick_initialize_type_and_verify_access, artInitializeTypeAndVerifyAccessFromCode, RETURN_IF_RESULT_IS_NON_ZERO
 
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
 ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
 ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
 ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
 
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
 TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
 TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
 TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION_X1
 
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
 TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
 TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
 
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
 THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
 THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
 THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_W0_IS_ZERO_OR_DELIVER
index 4db5ea6..6add93b 100644 (file)
@@ -22,9 +22,9 @@
 // Offset of field Thread::tls32_.state_and_flags verified in InitCpu
 #define THREAD_FLAGS_OFFSET 0
 // Offset of field Thread::tlsPtr_.card_table verified in InitCpu
-#define THREAD_CARD_TABLE_OFFSET 112
+#define THREAD_CARD_TABLE_OFFSET 120
 // Offset of field Thread::tlsPtr_.exception verified in InitCpu
-#define THREAD_EXCEPTION_OFFSET 116
+#define THREAD_EXCEPTION_OFFSET 124
 
 #define FRAME_SIZE_SAVE_ALL_CALLEE_SAVE 64
 #define FRAME_SIZE_REFS_ONLY_CALLEE_SAVE 64
index d3e7d5e..25e911d 100644 (file)
@@ -49,12 +49,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
 extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Field entrypoints.
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
+extern "C" int art_quick_set8_static(uint32_t, int8_t);
+extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
+extern "C" int art_quick_set16_static(uint32_t, int16_t);
 extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
 extern "C" int art_quick_set32_static(uint32_t, int32_t);
 extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
 extern "C" int art_quick_set64_static(uint32_t, int64_t);
 extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
 extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
+extern "C" int8_t art_quick_get_byte_static(uint32_t);
+extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
+extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
+extern "C" uint16_t art_quick_get_char_static(uint32_t);
+extern "C" int16_t art_quick_get_short_static(uint32_t);
 extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
 extern "C" int32_t art_quick_get32_static(uint32_t);
 extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
@@ -159,15 +171,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
   qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
+  qpoints->pSet8Instance = art_quick_set8_instance;
+  qpoints->pSet8Static = art_quick_set8_static;
+  qpoints->pSet16Instance = art_quick_set16_instance;
+  qpoints->pSet16Static = art_quick_set16_static;
   qpoints->pSet32Instance = art_quick_set32_instance;
   qpoints->pSet32Static = art_quick_set32_static;
   qpoints->pSet64Instance = art_quick_set64_instance;
   qpoints->pSet64Static = art_quick_set64_static;
   qpoints->pSetObjInstance = art_quick_set_obj_instance;
   qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
+  qpoints->pGetByteInstance = art_quick_get_byte_instance;
+  qpoints->pGetCharInstance = art_quick_get_char_instance;
+  qpoints->pGetShortInstance = art_quick_get_short_instance;
   qpoints->pGet32Instance = art_quick_get32_instance;
   qpoints->pGet64Instance = art_quick_get64_instance;
   qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
+  qpoints->pGetByteStatic = art_quick_get_byte_static;
+  qpoints->pGetCharStatic = art_quick_get_char_static;
+  qpoints->pGetShortStatic = art_quick_get_short_static;
   qpoints->pGet32Static = art_quick_get32_static;
   qpoints->pGet64Static = art_quick_get64_static;
   qpoints->pGetObjStatic = art_quick_get_obj_static;
index 8786222..9e9e523 100644 (file)
@@ -739,6 +739,59 @@ ENTRY art_quick_initialize_type_and_verify_access
     move    $a3, $sp                           # pass $sp
     RETURN_IF_RESULT_IS_NON_ZERO
 END art_quick_initialize_type_and_verify_access
+    /*
+     * Called by managed code to resolve a static field and load a boolean primitive value.
+     */
+    .extern artGetBooleanStaticFromCode
+ENTRY art_quick_get_boolean_static
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a1, 64($sp)                  # pass referrer's Method*
+    move   $a2, rSELF                    # pass Thread::Current
+    jal    artGetBooleanStaticFromCode   # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
+    move   $a3, $sp                      # pass $sp
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_static
+    /*
+     * Called by managed code to resolve a static field and load a byte primitive value.
+     */
+    .extern artGetByteStaticFromCode
+ENTRY art_quick_get_byte_static
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a1, 64($sp)                  # pass referrer's Method*
+    move   $a2, rSELF                    # pass Thread::Current
+    jal    artGetByteStaticFromCode      # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
+    move   $a3, $sp                      # pass $sp
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_static
+
+    /*
+     * Called by managed code to resolve a static field and load a char primitive value.
+     */
+    .extern artGetCharStaticFromCode
+ENTRY art_quick_get_char_static
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a1, 64($sp)                  # pass referrer's Method*
+    move   $a2, rSELF                    # pass Thread::Current
+    jal    artGetCharStaticFromCode      # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
+    move   $a3, $sp                      # pass $sp
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_static
+    /*
+     * Called by managed code to resolve a static field and load a short primitive value.
+     */
+    .extern artGetShortStaticFromCode
+ENTRY art_quick_get_short_static
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a1, 64($sp)                  # pass referrer's Method*
+    move   $a2, rSELF                    # pass Thread::Current
+    jal    artGetShortStaticFromCode     # (uint32_t field_idx, const Method* referrer, Thread*, $sp)
+    move   $a3, $sp                      # pass $sp
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_static
 
     /*
      * Called by managed code to resolve a static field and load a 32-bit primitive value.
@@ -783,6 +836,60 @@ ENTRY art_quick_get_obj_static
 END art_quick_get_obj_static
 
     /*
+     * Called by managed code to resolve an instance field and load a boolean primitive value.
+     */
+    .extern artGetBooleanInstanceFromCode
+ENTRY art_quick_get_boolean_instance
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a2, 64($sp)                  # pass referrer's Method*
+    move   $a3, rSELF                    # pass Thread::Current
+    jal    artGetBooleanInstanceFromCode # (field_idx, Object*, referrer, Thread*, $sp)
+    sw     $sp, 16($sp)                  # pass $sp
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_boolean_instance
+    /*
+     * Called by managed code to resolve an instance field and load a byte primitive value.
+     */
+    .extern artGetByteInstanceFromCode
+ENTRY art_quick_get_byte_instance
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a2, 64($sp)                  # pass referrer's Method*
+    move   $a3, rSELF                    # pass Thread::Current
+    jal    artGetByteInstanceFromCode    # (field_idx, Object*, referrer, Thread*, $sp)
+    sw     $sp, 16($sp)                  # pass $sp
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_byte_instance
+
+    /*
+     * Called by managed code to resolve an instance field and load a char primitive value.
+     */
+    .extern artGetCharInstanceFromCode
+ENTRY art_quick_get_char_instance
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a2, 64($sp)                  # pass referrer's Method*
+    move   $a3, rSELF                    # pass Thread::Current
+    jal    artGetCharInstanceFromCode    # (field_idx, Object*, referrer, Thread*, $sp)
+    sw     $sp, 16($sp)                  # pass $sp
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_char_instance
+    /*
+     * Called by managed code to resolve an instance field and load a short primitive value.
+     */
+    .extern artGetShortInstanceFromCode
+ENTRY art_quick_get_short_instance
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a2, 64($sp)                  # pass referrer's Method*
+    move   $a3, rSELF                    # pass Thread::Current
+    jal    artGetShortInstanceFromCode   # (field_idx, Object*, referrer, Thread*, $sp)
+    sw     $sp, 16($sp)                  # pass $sp
+    RETURN_IF_NO_EXCEPTION
+END art_quick_get_short_instance
+
+    /*
      * Called by managed code to resolve an instance field and load a 32-bit primitive value.
      */
     .extern artGet32InstanceFromCode
@@ -825,6 +932,34 @@ ENTRY art_quick_get_obj_instance
 END art_quick_get_obj_instance
 
     /*
+     * Called by managed code to resolve a static field and store an 8-bit primitive value.
+     */
+    .extern artSet8StaticFromCode
+ENTRY art_quick_set8_static
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a2, 64($sp)                  # pass referrer's Method*
+    move   $a3, rSELF                    # pass Thread::Current
+    jal    artSet8StaticFromCode         # (field_idx, new_val, referrer, Thread*, $sp)
+    sw     $sp, 16($sp)                  # pass $sp
+    RETURN_IF_ZERO
+END art_quick_set8_static
+
+    /*
+     * Called by managed code to resolve a static field and store a 16-bit primitive value.
+     */
+    .extern artSet16StaticFromCode
+ENTRY art_quick_set16_static
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a2, 64($sp)                  # pass referrer's Method*
+    move   $a3, rSELF                    # pass Thread::Current
+    jal    artSet16StaticFromCode        # (field_idx, new_val, referrer, Thread*, $sp)
+    sw     $sp, 16($sp)                  # pass $sp
+    RETURN_IF_ZERO
+END art_quick_set16_static
+
+    /*
      * Called by managed code to resolve a static field and store a 32-bit primitive value.
      */
     .extern artSet32StaticFromCode
@@ -841,7 +976,7 @@ END art_quick_set32_static
     /*
      * Called by managed code to resolve a static field and store a 64-bit primitive value.
      */
-    .extern artSet32StaticFromCode
+    .extern artSet64StaticFromCode
 ENTRY art_quick_set64_static
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
@@ -867,6 +1002,34 @@ ENTRY art_quick_set_obj_static
 END art_quick_set_obj_static
 
     /*
+     * Called by managed code to resolve an instance field and store an 8-bit primitive value.
+     */
+    .extern artSet8InstanceFromCode
+ENTRY art_quick_set8_instance
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a3, 64($sp)                  # pass referrer's Method*
+    sw     rSELF, 16($sp)                # pass Thread::Current
+    jal    artSet8InstanceFromCode      # (field_idx, Object*, new_val, referrer, Thread*, $sp)
+    sw     $sp, 20($sp)                  # pass $sp
+    RETURN_IF_ZERO
+END art_quick_set8_instance
+
+    /*
+     * Called by managed code to resolve an instance field and store a 16-bit primitive value.
+     */
+    .extern artSet16InstanceFromCode
+ENTRY art_quick_set16_instance
+    GENERATE_GLOBAL_POINTER
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
+    lw     $a3, 64($sp)                  # pass referrer's Method*
+    sw     rSELF, 16($sp)                # pass Thread::Current
+    jal    artSet16InstanceFromCode      # (field_idx, Object*, new_val, referrer, Thread*, $sp)
+    sw     $sp, 20($sp)                  # pass $sp
+    RETURN_IF_ZERO
+END art_quick_set16_instance
+
+    /*
      * Called by managed code to resolve an instance field and store a 32-bit primitive value.
      */
     .extern artSet32InstanceFromCode
@@ -883,7 +1046,7 @@ END art_quick_set32_instance
     /*
      * Called by managed code to resolve an instance field and store a 64-bit primitive value.
      */
-    .extern artSet32InstanceFromCode
+    .extern artSet64InstanceFromCode
 ENTRY art_quick_set64_instance
     GENERATE_GLOBAL_POINTER
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME     # save callee saves in case of GC
index 864e3f7..6b74a1b 100644 (file)
@@ -309,7 +309,7 @@ class StubTest : public CommonRuntimeTest {
         "addl $16, %%esp"           // Pop referrer
         : "=a" (result)
           // Use the result from eax
-        : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"m"(referrer), [hidden]"r"(hidden)
+        : "a"(arg0), "c"(arg1), "d"(arg2), "D"(code), [referrer]"r"(referrer), [hidden]"m"(hidden)
           // This places code into edi, arg0 into eax, arg1 into ecx, and arg2 into edx
         : "memory");  // clobber.
     // TODO: Should we clobber the other registers? EBX gets clobbered by some of the stubs,
@@ -398,7 +398,7 @@ class StubTest : public CommonRuntimeTest {
         // Load call params into the right registers.
         "ldp x0, x1, [sp]\n\t"
         "ldp x2, x3, [sp, #16]\n\t"
-        "ldp x18, x12, [sp, #32]\n\t"
+        "ldp x18, x17, [sp, #32]\n\t"
         "add sp, sp, #48\n\t"
         ".cfi_adjust_cfa_offset -48\n\t"
 
@@ -489,19 +489,17 @@ class StubTest : public CommonRuntimeTest {
     // Note: Uses the native convention
     // TODO: Set the thread?
     __asm__ __volatile__(
-        "movq %[hidden], %%r9\n\t"     // No need to save r9, listed as clobbered
-        "movd %%r9, %%xmm0\n\t"
         "pushq %[referrer]\n\t"        // Push referrer
         "pushq (%%rsp)\n\t"            // & 16B alignment padding
         ".cfi_adjust_cfa_offset 16\n\t"
-        "call *%%rax\n\t"              // Call the stub
+        "call *%%rbx\n\t"              // Call the stub
         "addq $16, %%rsp\n\t"          // Pop nullptr and padding
         ".cfi_adjust_cfa_offset -16\n\t"
         : "=a" (result)
         // Use the result from rax
-        : "D"(arg0), "S"(arg1), "d"(arg2), "a"(code), [referrer] "m"(referrer), [hidden] "m"(hidden)
+        : "D"(arg0), "S"(arg1), "d"(arg2), "b"(code), [referrer] "c"(referrer), [hidden] "a"(hidden)
         // This places arg0 into rdi, arg1 into rsi, arg2 into rdx, and code into rax
-        : "rbx", "rcx", "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
+        : "rbp", "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15",
           "memory");  // clobber all
     // TODO: Should we clobber the other registers?
 #else
@@ -1306,6 +1304,259 @@ TEST_F(StubTest, StringCompareTo) {
 }
 
 
+static void GetSetBooleanStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
+                           mirror::ArtMethod* referrer, StubTest* test)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+  constexpr size_t num_values = 5;
+  uint8_t values[num_values] = { 0, 1, 2, 128, 0xFF };
+
+  for (size_t i = 0; i < num_values; ++i) {
+    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                              static_cast<size_t>(values[i]),
+                              0U,
+                              StubTest::GetEntrypoint(self, kQuickSet8Static),
+                              self,
+                              referrer);
+
+    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                                           0U, 0U,
+                                           StubTest::GetEntrypoint(self, kQuickGetBooleanStatic),
+                                           self,
+                                           referrer);
+    // Boolean currently stores bools as uint8_t, be more zealous about asserting correct writes/gets.
+    EXPECT_EQ(values[i], static_cast<uint8_t>(res)) << "Iteration " << i;
+  }
+#else
+  LOG(INFO) << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+  std::cout << "Skipping set_boolean_static as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+static void GetSetByteStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
+                           mirror::ArtMethod* referrer, StubTest* test)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+  constexpr size_t num_values = 5;
+  int8_t values[num_values] = { -128, -64, 0, 64, 127 };
+
+  for (size_t i = 0; i < num_values; ++i) {
+    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                              static_cast<size_t>(values[i]),
+                              0U,
+                              StubTest::GetEntrypoint(self, kQuickSet8Static),
+                              self,
+                              referrer);
+
+    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                                           0U, 0U,
+                                           StubTest::GetEntrypoint(self, kQuickGetByteStatic),
+                                           self,
+                                           referrer);
+    EXPECT_EQ(values[i], static_cast<int8_t>(res)) << "Iteration " << i;
+  }
+#else
+  LOG(INFO) << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+  std::cout << "Skipping set_byte_static as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+
+
+static void GetSetBooleanInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+  constexpr size_t num_values = 5;
+  uint8_t values[num_values] = { 0, true, 2, 128, 0xFF };
+
+  for (size_t i = 0; i < num_values; ++i) {
+    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                              reinterpret_cast<size_t>(obj->Get()),
+                              static_cast<size_t>(values[i]),
+                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
+                              self,
+                              referrer);
+
+    uint8_t res = f->Get()->GetBoolean(obj->Get());
+    EXPECT_EQ(values[i], res) << "Iteration " << i;
+
+    f->Get()->SetBoolean<false>(obj->Get(), res);
+
+    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                                            reinterpret_cast<size_t>(obj->Get()),
+                                            0U,
+                                            StubTest::GetEntrypoint(self, kQuickGetBooleanInstance),
+                                            self,
+                                            referrer);
+    EXPECT_EQ(res, static_cast<uint8_t>(res2));
+  }
+#else
+  LOG(INFO) << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+  std::cout << "Skipping set_boolean_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+static void GetSetByteInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+  constexpr size_t num_values = 5;
+  int8_t values[num_values] = { -128, -64, 0, 64, 127 };
+
+  for (size_t i = 0; i < num_values; ++i) {
+    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                              reinterpret_cast<size_t>(obj->Get()),
+                              static_cast<size_t>(values[i]),
+                              StubTest::GetEntrypoint(self, kQuickSet8Instance),
+                              self,
+                              referrer);
+
+    int8_t res = f->Get()->GetByte(obj->Get());
+    EXPECT_EQ(res, values[i]) << "Iteration " << i;
+    f->Get()->SetByte<false>(obj->Get(), ++res);
+
+    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                                            reinterpret_cast<size_t>(obj->Get()),
+                                            0U,
+                                            StubTest::GetEntrypoint(self, kQuickGetByteInstance),
+                                            self,
+                                            referrer);
+    EXPECT_EQ(res, static_cast<int8_t>(res2));
+  }
+#else
+  LOG(INFO) << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+  std::cout << "Skipping set_byte_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+
+static void GetSetCharStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
+                           mirror::ArtMethod* referrer, StubTest* test)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+  constexpr size_t num_values = 6;
+  uint16_t values[num_values] = { 0, 1, 2, 255, 32768, 0xFFFF };
+
+  for (size_t i = 0; i < num_values; ++i) {
+    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                              static_cast<size_t>(values[i]),
+                              0U,
+                              StubTest::GetEntrypoint(self, kQuickSet16Static),
+                              self,
+                              referrer);
+
+    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                                           0U, 0U,
+                                           StubTest::GetEntrypoint(self, kQuickGetCharStatic),
+                                           self,
+                                           referrer);
+
+    EXPECT_EQ(values[i], static_cast<uint16_t>(res)) << "Iteration " << i;
+  }
+#else
+  LOG(INFO) << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+  std::cout << "Skipping set_char_static as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+static void GetSetShortStatic(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
+                           mirror::ArtMethod* referrer, StubTest* test)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+  constexpr size_t num_values = 6;
+  int16_t values[num_values] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
+
+  for (size_t i = 0; i < num_values; ++i) {
+    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                              static_cast<size_t>(values[i]),
+                              0U,
+                              StubTest::GetEntrypoint(self, kQuickSet16Static),
+                              self,
+                              referrer);
+
+    size_t res = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                                           0U, 0U,
+                                           StubTest::GetEntrypoint(self, kQuickGetShortStatic),
+                                           self,
+                                           referrer);
+
+    EXPECT_EQ(static_cast<int16_t>(res), values[i]) << "Iteration " << i;
+  }
+#else
+  LOG(INFO) << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+  std::cout << "Skipping set_short_static as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+
+static void GetSetCharInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+  constexpr size_t num_values = 6;
+  uint16_t values[num_values] = { 0, 1, 2, 255, 32768, 0xFFFF };
+
+  for (size_t i = 0; i < num_values; ++i) {
+    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                              reinterpret_cast<size_t>(obj->Get()),
+                              static_cast<size_t>(values[i]),
+                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
+                              self,
+                              referrer);
+
+    uint16_t res = f->Get()->GetChar(obj->Get());
+    EXPECT_EQ(res, values[i]) << "Iteration " << i;
+    f->Get()->SetChar<false>(obj->Get(), ++res);
+
+    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                                            reinterpret_cast<size_t>(obj->Get()),
+                                            0U,
+                                            StubTest::GetEntrypoint(self, kQuickGetCharInstance),
+                                            self,
+                                            referrer);
+    EXPECT_EQ(res, static_cast<uint16_t>(res2));
+  }
+#else
+  LOG(INFO) << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+  std::cout << "Skipping set_char_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+static void GetSetShortInstance(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f,
+                             Thread* self, mirror::ArtMethod* referrer, StubTest* test)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+#if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
+  constexpr size_t num_values = 6;
+  int16_t values[num_values] = { -0x7FFF, -32768, 0, 255, 32767, 0x7FFE };
+
+  for (size_t i = 0; i < num_values; ++i) {
+    test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                              reinterpret_cast<size_t>(obj->Get()),
+                              static_cast<size_t>(values[i]),
+                              StubTest::GetEntrypoint(self, kQuickSet16Instance),
+                              self,
+                              referrer);
+
+    int16_t res = f->Get()->GetShort(obj->Get());
+    EXPECT_EQ(res, values[i]) << "Iteration " << i;
+    f->Get()->SetShort<false>(obj->Get(), ++res);
+
+    size_t res2 = test->Invoke3WithReferrer(static_cast<size_t>((*f)->GetDexFieldIndex()),
+                                            reinterpret_cast<size_t>(obj->Get()),
+                                            0U,
+                                            StubTest::GetEntrypoint(self, kQuickGetShortInstance),
+                                            self,
+                                            referrer);
+    EXPECT_EQ(res, static_cast<int16_t>(res2));
+  }
+#else
+  LOG(INFO) << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA;
+  // Force-print to std::cout so it's also outside the logcat.
+  std::cout << "Skipping set_short_instance as I don't know how to do that on " << kRuntimeISA << std::endl;
+#endif
+}
+
 static void GetSet32Static(Handle<mirror::Object>* obj, Handle<mirror::ArtField>* f, Thread* self,
                            mirror::ArtMethod* referrer, StubTest* test)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -1555,6 +1806,26 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
 
       Primitive::Type type = f->GetTypeAsPrimitiveType();
       switch (type) {
+        case Primitive::Type::kPrimBoolean:
+          if (test_type == type) {
+            GetSetBooleanStatic(&obj, &f, self, m.Get(), test);
+          }
+          break;
+        case Primitive::Type::kPrimByte:
+          if (test_type == type) {
+            GetSetByteStatic(&obj, &f, self, m.Get(), test);
+          }
+          break;
+        case Primitive::Type::kPrimChar:
+          if (test_type == type) {
+            GetSetCharStatic(&obj, &f, self, m.Get(), test);
+          }
+          break;
+        case Primitive::Type::kPrimShort:
+          if (test_type == type) {
+            GetSetShortStatic(&obj, &f, self, m.Get(), test);
+          }
+          break;
         case Primitive::Type::kPrimInt:
           if (test_type == type) {
             GetSet32Static(&obj, &f, self, m.Get(), test);
@@ -1590,6 +1861,26 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
 
       Primitive::Type type = f->GetTypeAsPrimitiveType();
       switch (type) {
+        case Primitive::Type::kPrimBoolean:
+          if (test_type == type) {
+            GetSetBooleanInstance(&obj, &f, self, m.Get(), test);
+          }
+          break;
+        case Primitive::Type::kPrimByte:
+          if (test_type == type) {
+            GetSetByteInstance(&obj, &f, self, m.Get(), test);
+          }
+          break;
+        case Primitive::Type::kPrimChar:
+          if (test_type == type) {
+            GetSetCharInstance(&obj, &f, self, m.Get(), test);
+          }
+          break;
+        case Primitive::Type::kPrimShort:
+          if (test_type == type) {
+            GetSetShortInstance(&obj, &f, self, m.Get(), test);
+          }
+          break;
         case Primitive::Type::kPrimInt:
           if (test_type == type) {
             GetSet32Instance(&obj, &f, self, m.Get(), test);
@@ -1618,6 +1909,33 @@ static void TestFields(Thread* self, StubTest* test, Primitive::Type test_type)
   // TODO: Deallocate things.
 }
 
+TEST_F(StubTest, Fields8) {
+  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+
+  Thread* self = Thread::Current();
+
+  self->TransitionFromSuspendedToRunnable();
+  LoadDex("AllFields");
+  bool started = runtime_->Start();
+  CHECK(started);
+
+  TestFields(self, this, Primitive::Type::kPrimBoolean);
+  TestFields(self, this, Primitive::Type::kPrimByte);
+}
+
+TEST_F(StubTest, Fields16) {
+  TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
+
+  Thread* self = Thread::Current();
+
+  self->TransitionFromSuspendedToRunnable();
+  LoadDex("AllFields");
+  bool started = runtime_->Start();
+  CHECK(started);
+
+  TestFields(self, this, Primitive::Type::kPrimChar);
+  TestFields(self, this, Primitive::Type::kPrimShort);
+}
 
 TEST_F(StubTest, Fields32) {
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
@@ -1658,7 +1976,6 @@ TEST_F(StubTest, Fields64) {
   TestFields(self, this, Primitive::Type::kPrimLong);
 }
 
-
 TEST_F(StubTest, IMT) {
 #if defined(__i386__) || defined(__arm__) || defined(__aarch64__) || (defined(__x86_64__) && !defined(__APPLE__))
   TEST_DISABLED_FOR_HEAP_REFERENCE_POISONING();
@@ -1709,19 +2026,6 @@ TEST_F(StubTest, IMT) {
   jmethodID obj_constructor = env->GetMethodID(obj_jclass, "<init>", "()V");
   ASSERT_NE(nullptr, obj_constructor);
 
-  // Sanity check: check that there is a conflict for List.contains in ArrayList.
-
-  mirror::Class* arraylist_class = soa.Decode<mirror::Class*>(arraylist_jclass);
-  mirror::ArtMethod* m = arraylist_class->GetEmbeddedImTableEntry(
-      inf_contains->GetDexMethodIndex() % mirror::Class::kImtSize);
-
-  if (!m->IsImtConflictMethod()) {
-    LOG(WARNING) << "Test is meaningless, no IMT conflict in setup: " <<
-        PrettyMethod(m, true);
-    LOG(WARNING) << "Please update StubTest.IMT.";
-    return;
-  }
-
   // Create instances.
 
   jobject jarray_list = env->NewObject(arraylist_jclass, arraylist_constructor);
@@ -1732,7 +2036,11 @@ TEST_F(StubTest, IMT) {
   ASSERT_NE(nullptr, jobj);
   Handle<mirror::Object> obj(hs.NewHandle(soa.Decode<mirror::Object*>(jobj)));
 
-  // Invoke.
+  // Invocation tests.
+
+  // 1. imt_conflict
+
+  // Contains.
 
   size_t result =
       Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
@@ -1750,7 +2058,7 @@ TEST_F(StubTest, IMT) {
 
   ASSERT_FALSE(self->IsExceptionPending()) << PrettyTypeOf(self->GetException(nullptr));
 
-  // Invoke again.
+  // Contains.
 
   result = Invoke3WithReferrerAndHidden(0U, reinterpret_cast<size_t>(array_list.Get()),
                                         reinterpret_cast<size_t>(obj.Get()),
@@ -1760,6 +2068,28 @@ TEST_F(StubTest, IMT) {
 
   ASSERT_FALSE(self->IsExceptionPending());
   EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
+
+  // 2. regular interface trampoline
+
+  result = Invoke3WithReferrer(static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()),
+                               reinterpret_cast<size_t>(array_list.Get()),
+                               reinterpret_cast<size_t>(obj.Get()),
+                               StubTest::GetEntrypoint(self,
+                                   kQuickInvokeInterfaceTrampolineWithAccessCheck),
+                               self, contains_amethod.Get());
+
+  ASSERT_FALSE(self->IsExceptionPending());
+  EXPECT_EQ(static_cast<size_t>(JNI_TRUE), result);
+
+  result = Invoke3WithReferrer(static_cast<size_t>(inf_contains.Get()->GetDexMethodIndex()),
+                               reinterpret_cast<size_t>(array_list.Get()),
+                               reinterpret_cast<size_t>(array_list.Get()),
+                               StubTest::GetEntrypoint(self,
+                                   kQuickInvokeInterfaceTrampolineWithAccessCheck),
+                               self, contains_amethod.Get());
+
+  ASSERT_FALSE(self->IsExceptionPending());
+  EXPECT_EQ(static_cast<size_t>(JNI_FALSE), result);
 #else
   LOG(INFO) << "Skipping imt as I don't know how to do that on " << kRuntimeISA;
   // Force-print to std::cout so it's also outside the logcat.
index a578023..efbbfb3 100644 (file)
@@ -181,13 +181,4 @@ MACRO0(SETUP_GOT_NOSAVE)
 #endif
 END_MACRO
 
-MACRO0(SETUP_GOT)
-    PUSH  ebx
-    SETUP_GOT_NOSAVE
-END_MACRO
-
-MACRO0(UNDO_SETUP_GOT)
-    POP  ebx
-END_MACRO
-
 #endif  // ART_RUNTIME_ARCH_X86_ASM_SUPPORT_X86_S_
index a072996..682c502 100644 (file)
@@ -47,12 +47,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
 extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Field entrypoints.
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
+extern "C" int art_quick_set8_static(uint32_t, int8_t);
+extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
+extern "C" int art_quick_set16_static(uint32_t, int16_t);
 extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
 extern "C" int art_quick_set32_static(uint32_t, int32_t);
 extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
 extern "C" int art_quick_set64_static(uint32_t, int64_t);
 extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
 extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_static(uint32_t);
+extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
+extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
+extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
+extern "C" int16_t art_quick_get_short_static(uint32_t);
+extern "C" uint16_t art_quick_get_char_static(uint32_t);
 extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
 extern "C" int32_t art_quick_get32_static(uint32_t);
 extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
@@ -137,15 +149,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
   qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
+  qpoints->pSet8Instance = art_quick_set8_instance;
+  qpoints->pSet8Static = art_quick_set8_static;
+  qpoints->pSet16Instance = art_quick_set16_instance;
+  qpoints->pSet16Static = art_quick_set16_static;
   qpoints->pSet32Instance = art_quick_set32_instance;
   qpoints->pSet32Static = art_quick_set32_static;
   qpoints->pSet64Instance = art_quick_set64_instance;
   qpoints->pSet64Static = art_quick_set64_static;
   qpoints->pSetObjInstance = art_quick_set_obj_instance;
   qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGetByteInstance = art_quick_get_byte_instance;
+  qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
+  qpoints->pGetShortInstance = art_quick_get_short_instance;
+  qpoints->pGetCharInstance = art_quick_get_char_instance;
   qpoints->pGet32Instance = art_quick_get32_instance;
   qpoints->pGet64Instance = art_quick_get64_instance;
   qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGetByteStatic = art_quick_get_byte_static;
+  qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
+  qpoints->pGetShortStatic = art_quick_get_short_static;
+  qpoints->pGetCharStatic = art_quick_get_char_static;
   qpoints->pGet32Static = art_quick_get32_static;
   qpoints->pGet64Static = art_quick_get64_static;
   qpoints->pGetObjStatic = art_quick_get_obj_static;
index fb26f5f..a41c304 100644 (file)
@@ -1,6 +1,6 @@
 /*
- * Copyright (C) 2008 The Android Open Source Project
- *
+* Copyright (C) 2008 The Android Open Source Project
+*
  * Licensed under the Apache License, Version 2.0 (the "License");
  * you may not use this file except in compliance with the License.
  * You may obtain a copy of the License at
index 997a259..5d27e47 100644 (file)
      * Jni dlsym lookup stub.
      */
 DEFINE_FUNCTION art_jni_dlsym_lookup_stub
-    subl LITERAL(4), %esp         // align stack
-    CFI_ADJUST_CFA_OFFSET(4)
-    SETUP_GOT                     // pushes ebx
+    subl LITERAL(8), %esp         // align stack
+    CFI_ADJUST_CFA_OFFSET(8)
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
-    CFI_ADJUST_CFA_OFFSET(4)
-    call PLT_SYMBOL(artFindNativeMethod)  // (Thread*)
-    addl LITERAL(4), %esp         // remove argument
-    CFI_ADJUST_CFA_OFFSET(-4)
-    UNDO_SETUP_GOT                // pop ebx
-    addl LITERAL(4), %esp         // remove padding
-    CFI_ADJUST_CFA_OFFSET(-4)
-    testl %eax, %eax         // check if returned method code is null
+    call SYMBOL(artFindNativeMethod)  // (Thread*)
+    addl LITERAL(12), %esp        // remove argument & padding
+    CFI_ADJUST_CFA_OFFSET(-12)
+    testl %eax, %eax              // check if returned method code is null
     jz .Lno_native_code_found     // if null, jump to return to handle
     jmp *%eax                     // otherwise, tail call to intended method
 .Lno_native_code_found:
index 9365795..f5fe869 100644 (file)
@@ -70,8 +70,7 @@ DEFINE_FUNCTION art_portable_proxy_invoke_handler
     PUSH ebp                        // Set up frame.
     movl %esp, %ebp
     CFI_DEF_CFA_REGISTER(%ebp)
-    subl LITERAL(4), %esp           // Align stack
-    SETUP_GOT                       // pushes ebx
+    subl LITERAL(8), %esp           // Align stack
     leal 8(%ebp), %edx              // %edx = ArtMethod** called_addr
     movl 12(%ebp), %ecx             // %ecx = receiver
     movl 0(%edx), %eax              // %eax = ArtMethod* called
@@ -79,8 +78,7 @@ DEFINE_FUNCTION art_portable_proxy_invoke_handler
     pushl %fs:THREAD_SELF_OFFSET    // Pass thread.
     pushl %ecx                      // Pass receiver.
     pushl %eax                      // Pass called.
-    call PLT_SYMBOL(artPortableProxyInvokeHandler)  // (called, receiver, Thread*, &called)
-    UNDO_SETUP_GOT
+    call SYMBOL(artPortableProxyInvokeHandler)  // (called, receiver, Thread*, &called)
     leave
     CFI_RESTORE(%ebp)
     CFI_DEF_CFA(%esp, 4)
@@ -94,8 +92,7 @@ DEFINE_FUNCTION art_portable_resolution_trampoline
   PUSH ebp                        // Set up frame.
   movl %esp, %ebp
   CFI_DEF_CFA_REGISTER(%ebp)
-  subl LITERAL(4), %esp           // Align stack
-  SETUP_GOT                       // pushes ebx
+  subl LITERAL(8), %esp           // Align stack
   leal 8(%ebp), %edx              // %edx = ArtMethod** called_addr
   movl 12(%ebp), %ecx             // %ecx = receiver
   movl 0(%edx), %eax              // %eax = ArtMethod* called
@@ -103,8 +100,7 @@ DEFINE_FUNCTION art_portable_resolution_trampoline
   pushl %fs:THREAD_SELF_OFFSET    // Pass thread.
   pushl %ecx                      // Pass receiver.
   pushl %eax                      // Pass called.
-  call PLT_SYMBOL(artPortableResolutionTrampoline)  // (called, receiver, Thread*, &called)
-  UNDO_SETUP_GOT
+  call SYMBOL(artPortableResolutionTrampoline)  // (called, receiver, Thread*, &called)
   leave
   CFI_RESTORE(%ebp)
   CFI_DEF_CFA(%esp, 4)
@@ -119,15 +115,13 @@ DEFINE_FUNCTION_NO_HIDE art_portable_to_interpreter_bridge
   PUSH ebp                        // Set up frame.
   movl %esp, %ebp
   CFI_DEF_CFA_REGISTER(%ebp)
-  subl LITERAL(8), %esp           // Align stack
-  SETUP_GOT
+  subl LITERAL(12), %esp           // Align stack
   leal 8(%ebp), %edx              // %edx = ArtMethod** called_addr
   movl 0(%edx), %eax              // %eax = ArtMethod* called
   pushl %edx                      // Pass called_addr.
   pushl %fs:THREAD_SELF_OFFSET    // Pass thread.
   pushl %eax                      // Pass called.
-  call PLT_SYMBOL(artPortableToInterpreterBridge)  // (called, Thread*, &called)
-  UNDO_SETUP_GOT
+  call SYMBOL(artPortableToInterpreterBridge)  // (called, Thread*, &called)
   leave
   CFI_RESTORE(%ebp)
   CFI_DEF_CFA(%esp, 4)
index 75c8646..337e5fe 100644 (file)
@@ -105,7 +105,6 @@ MACRO0(DELIVER_PENDING_EXCEPTION)
     PUSH ecx                                 // pass SP
     pushl %fs:THREAD_SELF_OFFSET             // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
-    SETUP_GOT_NOSAVE                         // clobbers ebx (harmless here)
     call SYMBOL(artDeliverPendingExceptionFromCode)  // artDeliverPendingExceptionFromCode(Thread*, SP)
     int3                                     // unreached
 END_MACRO
@@ -120,7 +119,6 @@ MACRO2(NO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
     PUSH ecx                      // pass SP
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
-    SETUP_GOT_NOSAVE              // clobbers ebx (harmless here)
     call VAR(cxx_name, 1)     // cxx_name(Thread*, SP)
     int3                          // unreached
     END_FUNCTION RAW_VAR(c_name, 0)
@@ -136,7 +134,6 @@ MACRO2(ONE_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH eax                      // pass arg1
-    SETUP_GOT_NOSAVE              // clobbers ebx (harmless here)
     call VAR(cxx_name, 1)     // cxx_name(arg1, Thread*, SP)
     int3                          // unreached
     END_FUNCTION RAW_VAR(c_name, 0)
@@ -152,7 +149,6 @@ MACRO2(TWO_ARG_RUNTIME_EXCEPTION, c_name, cxx_name)
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH ecx                      // pass arg2
     PUSH eax                      // pass arg1
-    SETUP_GOT_NOSAVE              // clobbers ebx (harmless here)
     call VAR(cxx_name, 1)     // cxx_name(arg1, arg2, Thread*, SP)
     int3                          // unreached
     END_FUNCTION RAW_VAR(c_name, 0)
@@ -219,7 +215,6 @@ MACRO2(INVOKE_TRAMPOLINE, c_name, cxx_name)
     PUSH eax    // <-- callee save Method* to go here
     movl %esp, %edx  // remember SP
     // Outgoing argument set up
-    SETUP_GOT_NOSAVE
     subl MACRO_LITERAL(12), %esp  // alignment padding
     CFI_ADJUST_CFA_OFFSET(12)
     PUSH edx                      // pass SP
@@ -318,7 +313,6 @@ MACRO3(NO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
     DEFINE_FUNCTION RAW_VAR(c_name, 0)
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %edx                // remember SP
-    SETUP_GOT_NOSAVE              // clobbers ebx (harmless here)
     // Outgoing argument set up
     subl MACRO_LITERAL(8), %esp   // push padding
     CFI_ADJUST_CFA_OFFSET(8)
@@ -337,7 +331,6 @@ MACRO3(ONE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
     DEFINE_FUNCTION RAW_VAR(c_name, 0)
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %edx                // remember SP
-    SETUP_GOT_NOSAVE              // clobbers EBX
     // Outgoing argument set up
     PUSH eax                      // push padding
     PUSH edx                      // pass SP
@@ -356,7 +349,6 @@ MACRO3(TWO_ARG_DOWNCALL, c_name, cxx_name, return_macro)
     DEFINE_FUNCTION RAW_VAR(c_name, 0)
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %edx                // remember SP
-    SETUP_GOT_NOSAVE              // clobbers EBX
     // Outgoing argument set up
     PUSH edx                      // pass SP
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
@@ -384,7 +376,6 @@ MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
     PUSH edx                      // pass arg3
     PUSH ecx                      // pass arg2
     PUSH eax                      // pass arg1
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call VAR(cxx_name, 1)     // cxx_name(arg1, arg2, arg3, Thread*, SP)
     addl MACRO_LITERAL(32), %esp  // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
@@ -393,6 +384,48 @@ MACRO3(THREE_ARG_DOWNCALL, c_name, cxx_name, return_macro)
     END_FUNCTION RAW_VAR(c_name, 0)
 END_MACRO
 
+MACRO3(ONE_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+    DEFINE_FUNCTION RAW_VAR(c_name, 0)
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
+    mov %esp, %edx                // remember SP
+    mov 32(%esp), %ecx            // get referrer
+    // Outgoing argument set up
+    PUSH edx                      // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    PUSH ecx                      // pass referrer
+    PUSH eax                      // pass arg1
+    call VAR(cxx_name, 1)         // cxx_name(arg1, referrer, Thread*, SP)
+    addl MACRO_LITERAL(16), %esp  // pop arguments
+    CFI_ADJUST_CFA_OFFSET(-16)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
+    CALL_MACRO(return_macro, 2)   // return or deliver exception
+    END_FUNCTION RAW_VAR(c_name, 0)
+END_MACRO
+
+MACRO3(TWO_ARG_REF_DOWNCALL, c_name, cxx_name, return_macro)
+    DEFINE_FUNCTION RAW_VAR(c_name, 0)
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
+    mov %esp, %ebx                // remember SP
+    mov 32(%esp), %edx            // get referrer
+    subl MACRO_LITERAL(12), %esp  // alignment padding
+    CFI_ADJUST_CFA_OFFSET(12)
+    PUSH ebx                      // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    // Outgoing argument set up
+    PUSH edx                      // pass referrer
+    PUSH ecx                      // pass arg2
+    PUSH eax                      // pass arg1
+    call VAR(cxx_name, 1)     // cxx_name(arg1, arg2, referrer, Thread*, SP)
+    addl MACRO_LITERAL(32), %esp  // pop arguments
+    CFI_ADJUST_CFA_OFFSET(-32)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
+    CALL_MACRO(return_macro, 2)   // return or deliver exception
+    END_FUNCTION RAW_VAR(c_name, 0)
+END_MACRO
+
+
 MACRO0(RETURN_IF_RESULT_IS_NON_ZERO)
     testl %eax, %eax               // eax == 0 ?
     jz  1f                         // if eax == 0 goto 1
@@ -559,7 +592,6 @@ DEFINE_FUNCTION art_quick_lock_object
 .Lslow_lock:
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %edx                // remember SP
-    SETUP_GOT_NOSAVE              // clobbers EBX
     // Outgoing argument set up
     PUSH eax                      // push padding
     PUSH edx                      // pass SP
@@ -593,7 +625,6 @@ DEFINE_FUNCTION art_quick_unlock_object
 .Lslow_unlock:
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
     mov %esp, %edx                // remember SP
-    SETUP_GOT_NOSAVE              // clobbers EBX
     // Outgoing argument set up
     PUSH eax                      // push padding
     PUSH edx                      // pass SP
@@ -608,7 +639,6 @@ DEFINE_FUNCTION art_quick_unlock_object
 END_FUNCTION art_quick_unlock_object
 
 DEFINE_FUNCTION art_quick_is_assignable
-    SETUP_GOT_NOSAVE             // clobbers EBX
     PUSH eax                     // alignment padding
     PUSH ecx                     // pass arg2 - obj->klass
     PUSH eax                     // pass arg1 - checked class
@@ -619,7 +649,6 @@ DEFINE_FUNCTION art_quick_is_assignable
 END_FUNCTION art_quick_is_assignable
 
 DEFINE_FUNCTION art_quick_check_cast
-    SETUP_GOT_NOSAVE             // clobbers EBX
     PUSH eax                     // alignment padding
     PUSH ecx                     // pass arg2 - obj->klass
     PUSH eax                     // pass arg1 - checked class
@@ -691,7 +720,6 @@ DEFINE_FUNCTION art_quick_aput_obj
     pushl CLASS_OFFSET(%edx)     // pass arg2 - type of the value to be stored
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH ebx                     // pass arg1 - component type of the array
-    SETUP_GOT_NOSAVE             // clobbers EBX
     call SYMBOL(artIsAssignableFromCode)  // (Class* a, Class* b)
     addl LITERAL(16), %esp       // pop arguments
     CFI_ADJUST_CFA_OFFSET(-16)
@@ -738,7 +766,6 @@ DEFINE_FUNCTION art_quick_d2l
     PUSH eax                      // alignment padding
     PUSH ecx                      // pass arg2 a.hi
     PUSH eax                      // pass arg1 a.lo
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(art_d2l)      // (jdouble a)
     addl LITERAL(12), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-12)
@@ -748,7 +775,6 @@ END_FUNCTION art_quick_d2l
 DEFINE_FUNCTION art_quick_f2l
     subl LITERAL(8), %esp         // alignment padding
     CFI_ADJUST_CFA_OFFSET(8)
-    SETUP_GOT_NOSAVE              // clobbers EBX
     PUSH eax                      // pass arg1 a
     call SYMBOL(art_f2l)      // (jfloat a)
     addl LITERAL(12), %esp        // pop arguments
@@ -763,7 +789,6 @@ DEFINE_FUNCTION art_quick_ldiv
     PUSH edx                     // pass arg3 b.lo
     PUSH ecx                     // pass arg2 a.hi
     PUSH eax                     // pass arg1 a.lo
-    SETUP_GOT_NOSAVE             // clobbers EBX
     call SYMBOL(artLdiv)     // (jlong a, jlong b)
     addl LITERAL(28), %esp       // pop arguments
     CFI_ADJUST_CFA_OFFSET(-28)
@@ -777,7 +802,6 @@ DEFINE_FUNCTION art_quick_lmod
     PUSH edx                     // pass arg3 b.lo
     PUSH ecx                     // pass arg2 a.hi
     PUSH eax                     // pass arg1 a.lo
-    SETUP_GOT_NOSAVE             // clobbers EBX
     call SYMBOL(artLmod)     // (jlong a, jlong b)
     addl LITERAL(28), %esp       // pop arguments
     CFI_ADJUST_CFA_OFFSET(-28)
@@ -832,6 +856,46 @@ DEFINE_FUNCTION art_quick_lushr
     ret
 END_FUNCTION art_quick_lushr
 
+DEFINE_FUNCTION art_quick_set8_instance
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
+    mov %esp, %ebx                // remember SP
+    subl LITERAL(8), %esp         // alignment padding
+    CFI_ADJUST_CFA_OFFSET(8)
+    PUSH ebx                      // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    mov 32(%ebx), %ebx            // get referrer
+    PUSH ebx                      // pass referrer
+    PUSH edx                      // pass new_val
+    PUSH ecx                      // pass object
+    PUSH eax                      // pass field_idx
+    call PLT_SYMBOL(artSet8InstanceFromCode)  // (field_idx, Object*, new_val, referrer, Thread*, SP)
+    addl LITERAL(32), %esp        // pop arguments
+    CFI_ADJUST_CFA_OFFSET(-32)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
+    RETURN_IF_EAX_ZERO            // return or deliver exception
+END_FUNCTION art_quick_set8_instance
+
+DEFINE_FUNCTION art_quick_set16_instance
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
+    mov %esp, %ebx                // remember SP
+    subl LITERAL(8), %esp         // alignment padding
+    CFI_ADJUST_CFA_OFFSET(8)
+    PUSH ebx                      // pass SP
+    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
+    CFI_ADJUST_CFA_OFFSET(4)
+    mov 32(%ebx), %ebx            // get referrer
+    PUSH ebx                      // pass referrer
+    PUSH edx                      // pass new_val
+    PUSH ecx                      // pass object
+    PUSH eax                      // pass field_idx
+    call PLT_SYMBOL(artSet16InstanceFromCode)  // (field_idx, Object*, new_val, referrer, Thread*, SP)
+    addl LITERAL(32), %esp        // pop arguments
+    CFI_ADJUST_CFA_OFFSET(-32)
+    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
+    RETURN_IF_EAX_ZERO            // return or deliver exception
+END_FUNCTION art_quick_set16_instance
+
 DEFINE_FUNCTION art_quick_set32_instance
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
@@ -845,7 +909,6 @@ DEFINE_FUNCTION art_quick_set32_instance
     PUSH edx                      // pass new_val
     PUSH ecx                      // pass object
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artSet32InstanceFromCode)  // (field_idx, Object*, new_val, referrer, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
@@ -865,7 +928,6 @@ DEFINE_FUNCTION art_quick_set64_instance
     PUSH edx                      // pass low half of new_val
     PUSH ecx                      // pass object
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artSet64InstanceFromCode)  // (field_idx, Object*, new_val, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
@@ -886,7 +948,6 @@ DEFINE_FUNCTION art_quick_set_obj_instance
     PUSH edx                      // pass new_val
     PUSH ecx                      // pass object
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artSetObjInstanceFromCode) // (field_idx, Object*, new_val, referrer, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
@@ -894,8 +955,15 @@ DEFINE_FUNCTION art_quick_set_obj_instance
     RETURN_IF_EAX_ZERO            // return or deliver exception
 END_FUNCTION art_quick_set_obj_instance
 
-DEFINE_FUNCTION art_quick_get32_instance
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+
+DEFINE_FUNCTION art_quick_get64_instance
+    SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
     subl LITERAL(12), %esp        // alignment padding
@@ -906,15 +974,14 @@ DEFINE_FUNCTION art_quick_get32_instance
     PUSH edx                      // pass referrer
     PUSH ecx                      // pass object
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
-    call SYMBOL(artGet32InstanceFromCode)  // (field_idx, Object*, referrer, Thread*, SP)
+    call SYMBOL(artGet64InstanceFromCode)  // (field_idx, Object*, referrer, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
     RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get32_instance
+END_FUNCTION art_quick_get64_instance
 
-DEFINE_FUNCTION art_quick_get64_instance
+DEFINE_FUNCTION art_quick_set8_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -924,17 +991,16 @@ DEFINE_FUNCTION art_quick_get64_instance
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH edx                      // pass referrer
-    PUSH ecx                      // pass object
+    PUSH ecx                      // pass new_val
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
-    call SYMBOL(artGet64InstanceFromCode)  // (field_idx, Object*, referrer, Thread*, SP)
+    call SYMBOL(artSet8StaticFromCode)  // (field_idx, new_val, referrer, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
-    RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get64_instance
+    RETURN_IF_EAX_ZERO            // return or deliver exception
+END_FUNCTION art_quick_set8_static
 
-DEFINE_FUNCTION art_quick_get_obj_instance
+DEFINE_FUNCTION art_quick_set16_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
     mov %esp, %ebx                // remember SP
     mov 32(%esp), %edx            // get referrer
@@ -944,15 +1010,14 @@ DEFINE_FUNCTION art_quick_get_obj_instance
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH edx                      // pass referrer
-    PUSH ecx                      // pass object
+    PUSH ecx                      // pass new_val
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
-    call SYMBOL(artGetObjInstanceFromCode) // (field_idx, Object*, referrer, Thread*, SP)
+    call SYMBOL(artSet16StaticFromCode)  // (field_idx, new_val, referrer, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
-    RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get_obj_instance
+    RETURN_IF_EAX_ZERO            // return or deliver exception
+END_FUNCTION art_quick_set16_static
 
 DEFINE_FUNCTION art_quick_set32_static
     SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
@@ -966,7 +1031,6 @@ DEFINE_FUNCTION art_quick_set32_static
     PUSH edx                      // pass referrer
     PUSH ecx                      // pass new_val
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artSet32StaticFromCode)  // (field_idx, new_val, referrer, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
@@ -987,7 +1051,6 @@ DEFINE_FUNCTION art_quick_set64_static
     PUSH ecx                      // pass low half of new_val
     PUSH ebx                      // pass referrer
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artSet64StaticFromCode)  // (field_idx, referrer, new_val, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     CFI_ADJUST_CFA_OFFSET(-32)
@@ -1007,63 +1070,19 @@ DEFINE_FUNCTION art_quick_set_obj_static
     PUSH edx                      // pass referrer
     PUSH ecx                      // pass new_val
     PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artSetObjStaticFromCode)  // (field_idx, new_val, referrer, Thread*, SP)
     addl LITERAL(32), %esp        // pop arguments
     RESTORE_REF_ONLY_CALLEE_SAVE_FRAME  // restore frame up to return address
     RETURN_IF_EAX_ZERO            // return or deliver exception
 END_FUNCTION art_quick_set_obj_static
 
-DEFINE_FUNCTION art_quick_get32_static
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
-    mov %esp, %edx                // remember SP
-    mov 32(%esp), %ecx            // get referrer
-    PUSH edx                      // pass SP
-    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
-    CFI_ADJUST_CFA_OFFSET(4)
-    PUSH ecx                      // pass referrer
-    PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
-    call SYMBOL(artGet32StaticFromCode)    // (field_idx, referrer, Thread*, SP)
-    addl LITERAL(16), %esp        // pop arguments
-    CFI_ADJUST_CFA_OFFSET(-16)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
-    RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get32_static
-
-DEFINE_FUNCTION art_quick_get64_static
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME  // save ref containing registers for GC
-    mov %esp, %edx                // remember SP
-    mov 32(%esp), %ecx            // get referrer
-    PUSH edx                      // pass SP
-    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
-    CFI_ADJUST_CFA_OFFSET(4)
-    PUSH ecx                      // pass referrer
-    PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
-    call SYMBOL(artGet64StaticFromCode)    // (field_idx, referrer, Thread*, SP)
-    addl LITERAL(16), %esp        // pop arguments
-    CFI_ADJUST_CFA_OFFSET(-16)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
-    RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get64_static
-
-DEFINE_FUNCTION art_quick_get_obj_static
-    SETUP_REF_ONLY_CALLEE_SAVE_FRAME       // save ref containing registers for GC
-    mov %esp, %edx                // remember SP
-    mov 32(%esp), %ecx            // get referrer
-    PUSH edx                      // pass SP
-    pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
-    CFI_ADJUST_CFA_OFFSET(4)
-    PUSH ecx                      // pass referrer
-    PUSH eax                      // pass field_idx
-    SETUP_GOT_NOSAVE              // clobbers EBX
-    call SYMBOL(artGetObjStaticFromCode)   // (field_idx, referrer, Thread*, SP)
-    addl LITERAL(16), %esp        // pop arguments
-    CFI_ADJUST_CFA_OFFSET(-16)
-    RESTORE_REF_ONLY_CALLEE_SAVE_FRAME     // restore frame up to return address
-    RETURN_OR_DELIVER_PENDING_EXCEPTION    // return or deliver exception
-END_FUNCTION art_quick_get_obj_static
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
 
 DEFINE_FUNCTION art_quick_proxy_invoke_handler
     SETUP_REF_AND_ARGS_CALLEE_SAVE_FRAME   // save frame and Method*
@@ -1072,7 +1091,6 @@ DEFINE_FUNCTION art_quick_proxy_invoke_handler
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH ecx                      // pass receiver
     PUSH eax                      // pass proxy method
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artQuickProxyInvokeHandler) // (proxy method, receiver, Thread*, SP)
     movd %eax, %xmm0              // place return value also into floating point return value
     movd %edx, %xmm1
@@ -1104,7 +1122,6 @@ DEFINE_FUNCTION art_quick_resolution_trampoline
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH ecx                      // pass receiver
     PUSH eax                      // pass method
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artQuickResolutionTrampoline) // (Method* called, receiver, Thread*, SP)
     movl %eax, %edi               // remember code pointer in EDI
     addl LITERAL(16), %esp        // pop arguments
@@ -1139,7 +1156,6 @@ DEFINE_FUNCTION_NO_HIDE art_quick_generic_jni_trampoline
     subl LITERAL(8), %esp         // Padding for 16B alignment.
     pushl %ebp                    // Pass SP (to ArtMethod).
     pushl %fs:THREAD_SELF_OFFSET  // Pass Thread::Current().
-    SETUP_GOT_NOSAVE              // Clobbers ebx.
     call SYMBOL(artQuickGenericJniTrampoline)  // (Thread*, sp)
 
     // The C call will have registered the complete save-frame on success.
@@ -1212,7 +1228,6 @@ DEFINE_FUNCTION_NO_HIDE art_quick_to_interpreter_bridge
     pushl %fs:THREAD_SELF_OFFSET  // pass Thread::Current()
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH eax                      // pass  method
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artQuickToInterpreterBridge)  // (method, Thread*, SP)
     movd %eax, %xmm0              // place return value also into floating point return value
     movd %edx, %xmm1
@@ -1239,7 +1254,6 @@ DEFINE_FUNCTION art_quick_instrumentation_entry
     CFI_ADJUST_CFA_OFFSET(4)
     PUSH ecx                      // Pass receiver.
     PUSH eax                      // Pass Method*.
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artInstrumentationMethodEntryFromCode) // (Method*, Object*, Thread*, SP, LR)
     addl LITERAL(28), %esp        // Pop arguments upto saved Method*.
     movl 28(%esp), %edi           // Restore edi.
@@ -1274,7 +1288,6 @@ DEFINE_FUNCTION art_quick_instrumentation_exit
     PUSH ecx                      // Pass SP.
     pushl %fs:THREAD_SELF_OFFSET  // Pass Thread::Current.
     CFI_ADJUST_CFA_OFFSET(4)
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artInstrumentationMethodExitFromCode)  // (Thread*, SP, gpr_result, fpr_result)
     mov   %eax, %ecx              // Move returned link register.
     addl LITERAL(32), %esp        // Pop arguments.
@@ -1304,7 +1317,6 @@ DEFINE_FUNCTION art_quick_deoptimize
     PUSH ecx                      // Pass SP.
     pushl %fs:THREAD_SELF_OFFSET  // Pass Thread::Current().
     CFI_ADJUST_CFA_OFFSET(4)
-    SETUP_GOT_NOSAVE              // clobbers EBX
     call SYMBOL(artDeoptimize)  // artDeoptimize(Thread*, SP)
     int3                          // Unreachable.
 END_FUNCTION art_quick_deoptimize
index f7acbdb..4ae61a2 100644 (file)
@@ -119,6 +119,8 @@ MACRO0(ALIGN_FUNCTION_ENTRY)
     .balign 16
 END_MACRO
 
+// TODO: we might need to use SYMBOL() here to add the underscore prefix
+// for mac builds.
 MACRO1(DEFINE_FUNCTION, c_name)
     FUNCTION_TYPE(\c_name, 0)
     ASM_HIDDEN VAR(c_name, 0)
index 35a0cf4..c9028e1 100644 (file)
@@ -48,12 +48,24 @@ extern "C" void* art_quick_initialize_type_and_verify_access(uint32_t, void*);
 extern "C" void* art_quick_resolve_string(void*, uint32_t);
 
 // Field entrypoints.
+extern "C" int art_quick_set8_instance(uint32_t, void*, int8_t);
+extern "C" int art_quick_set8_static(uint32_t, int8_t);
+extern "C" int art_quick_set16_instance(uint32_t, void*, int16_t);
+extern "C" int art_quick_set16_static(uint32_t, int16_t);
 extern "C" int art_quick_set32_instance(uint32_t, void*, int32_t);
 extern "C" int art_quick_set32_static(uint32_t, int32_t);
 extern "C" int art_quick_set64_instance(uint32_t, void*, int64_t);
 extern "C" int art_quick_set64_static(uint32_t, int64_t);
 extern "C" int art_quick_set_obj_instance(uint32_t, void*, void*);
 extern "C" int art_quick_set_obj_static(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_instance(uint32_t, void*);
+extern "C" uint8_t art_quick_get_boolean_instance(uint32_t, void*);
+extern "C" int8_t art_quick_get_byte_static(uint32_t);
+extern "C" uint8_t art_quick_get_boolean_static(uint32_t);
+extern "C" int16_t art_quick_get_short_instance(uint32_t, void*);
+extern "C" uint16_t art_quick_get_char_instance(uint32_t, void*);
+extern "C" int16_t art_quick_get_short_static(uint32_t);
+extern "C" uint16_t art_quick_get_char_static(uint32_t);
 extern "C" int32_t art_quick_get32_instance(uint32_t, void*);
 extern "C" int32_t art_quick_get32_static(uint32_t);
 extern "C" int64_t art_quick_get64_instance(uint32_t, void*);
@@ -141,15 +153,27 @@ void InitEntryPoints(InterpreterEntryPoints* ipoints, JniEntryPoints* jpoints,
   qpoints->pResolveString = art_quick_resolve_string;
 
   // Field
+  qpoints->pSet8Instance = art_quick_set8_instance;
+  qpoints->pSet8Static = art_quick_set8_static;
+  qpoints->pSet16Instance = art_quick_set16_instance;
+  qpoints->pSet16Static = art_quick_set16_static;
   qpoints->pSet32Instance = art_quick_set32_instance;
   qpoints->pSet32Static = art_quick_set32_static;
   qpoints->pSet64Instance = art_quick_set64_instance;
   qpoints->pSet64Static = art_quick_set64_static;
   qpoints->pSetObjInstance = art_quick_set_obj_instance;
   qpoints->pSetObjStatic = art_quick_set_obj_static;
+  qpoints->pGetByteInstance = art_quick_get_byte_instance;
+  qpoints->pGetBooleanInstance = art_quick_get_boolean_instance;
+  qpoints->pGetShortInstance = art_quick_get_short_instance;
+  qpoints->pGetCharInstance = art_quick_get_char_instance;
   qpoints->pGet32Instance = art_quick_get32_instance;
   qpoints->pGet64Instance = art_quick_get64_instance;
   qpoints->pGetObjInstance = art_quick_get_obj_instance;
+  qpoints->pGetByteStatic = art_quick_get_byte_static;
+  qpoints->pGetBooleanStatic = art_quick_get_boolean_static;
+  qpoints->pGetShortStatic = art_quick_get_short_static;
+  qpoints->pGetCharStatic = art_quick_get_char_static;
   qpoints->pGet32Static = art_quick_get32_static;
   qpoints->pGet64Static = art_quick_get64_static;
   qpoints->pGetObjStatic = art_quick_get_obj_static;
index 5798092..e68cfbc 100644 (file)
@@ -1076,17 +1076,29 @@ UNIMPLEMENTED art_quick_lshl
 UNIMPLEMENTED art_quick_lshr
 UNIMPLEMENTED art_quick_lushr
 
+THREE_ARG_REF_DOWNCALL art_quick_set8_instance, artSet8InstanceFromCode, RETURN_IF_EAX_ZERO
+THREE_ARG_REF_DOWNCALL art_quick_set16_instance, artSet16InstanceFromCode, RETURN_IF_EAX_ZERO
 THREE_ARG_REF_DOWNCALL art_quick_set32_instance, artSet32InstanceFromCode, RETURN_IF_EAX_ZERO
 THREE_ARG_DOWNCALL art_quick_set64_instance, artSet64InstanceFromCode, RETURN_IF_EAX_ZERO
 THREE_ARG_REF_DOWNCALL art_quick_set_obj_instance, artSetObjInstanceFromCode, RETURN_IF_EAX_ZERO
 
+TWO_ARG_REF_DOWNCALL art_quick_get_byte_instance, artGetByteInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_boolean_instance, artGetBooleanInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_short_instance, artGetShortInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+TWO_ARG_REF_DOWNCALL art_quick_get_char_instance, artGetCharInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
 TWO_ARG_REF_DOWNCALL art_quick_get32_instance, artGet32InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
 TWO_ARG_REF_DOWNCALL art_quick_get64_instance, artGet64InstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
 TWO_ARG_REF_DOWNCALL art_quick_get_obj_instance, artGetObjInstanceFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
 
+TWO_ARG_REF_DOWNCALL art_quick_set8_static, artSet8StaticFromCode, RETURN_IF_EAX_ZERO
+TWO_ARG_REF_DOWNCALL art_quick_set16_static, artSet16StaticFromCode, RETURN_IF_EAX_ZERO
 TWO_ARG_REF_DOWNCALL art_quick_set32_static, artSet32StaticFromCode, RETURN_IF_EAX_ZERO
 TWO_ARG_REF_DOWNCALL art_quick_set_obj_static, artSetObjStaticFromCode, RETURN_IF_EAX_ZERO
 
+ONE_ARG_REF_DOWNCALL art_quick_get_byte_static, artGetByteStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_boolean_static, artGetBooleanStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_short_static, artGetShortStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
+ONE_ARG_REF_DOWNCALL art_quick_get_char_static, artGetCharStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
 ONE_ARG_REF_DOWNCALL art_quick_get32_static, artGet32StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
 ONE_ARG_REF_DOWNCALL art_quick_get64_static, artGet64StaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
 ONE_ARG_REF_DOWNCALL art_quick_get_obj_static, artGetObjStaticFromCode, RETURN_OR_DELIVER_PENDING_EXCEPTION
index 39d51a5..64cdbbf 100644 (file)
@@ -29,7 +29,7 @@ Atomic<uint64_t> TrackedAllocators::bytes_used_[kAllocatorTagCount];
 Atomic<uint64_t> TrackedAllocators::max_bytes_used_[kAllocatorTagCount];
 Atomic<uint64_t> TrackedAllocators::total_bytes_used_[kAllocatorTagCount];
 
-class MallocAllocator : public Allocator {
+class MallocAllocator FINAL : public Allocator {
  public:
   explicit MallocAllocator() {}
   ~MallocAllocator() {}
@@ -48,7 +48,7 @@ class MallocAllocator : public Allocator {
 
 MallocAllocator g_malloc_allocator;
 
-class NoopAllocator : public Allocator {
+class NoopAllocator FINAL : public Allocator {
  public:
   explicit NoopAllocator() {}
   ~NoopAllocator() {}
diff --git a/runtime/base/bit_vector-inl.h b/runtime/base/bit_vector-inl.h
new file mode 100644 (file)
index 0000000..dc13dd5
--- /dev/null
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2013 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
+#define ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
+
+#include "bit_vector.h"
+#include "logging.h"
+#include "utils.h"
+
+namespace art {
+
+inline bool BitVector::IndexIterator::operator==(const IndexIterator& other) const {
+  DCHECK(bit_storage_ == other.bit_storage_);
+  DCHECK_EQ(storage_size_, other.storage_size_);
+  return bit_index_ == other.bit_index_;
+}
+
+inline int BitVector::IndexIterator::operator*() const {
+  DCHECK_LT(bit_index_, BitSize());
+  return bit_index_;
+}
+
+inline BitVector::IndexIterator& BitVector::IndexIterator::operator++() {
+  DCHECK_LT(bit_index_, BitSize());
+  bit_index_ = FindIndex(bit_index_ + 1u);
+  return *this;
+}
+
+inline BitVector::IndexIterator BitVector::IndexIterator::operator++(int) {
+  IndexIterator result(*this);
+  ++*this;
+  return result;
+}
+
+inline uint32_t BitVector::IndexIterator::FindIndex(uint32_t start_index) const {
+  DCHECK_LE(start_index, BitSize());
+  uint32_t word_index = start_index / kWordBits;
+  if (UNLIKELY(word_index == storage_size_)) {
+    return start_index;
+  }
+  uint32_t word = bit_storage_[word_index];
+  // Mask out any bits in the first word we've already considered.
+  word &= static_cast<uint32_t>(-1) << (start_index & 0x1f);
+  while (word == 0u) {
+    ++word_index;
+    if (UNLIKELY(word_index == storage_size_)) {
+      return BitSize();
+    }
+    word = bit_storage_[word_index];
+  }
+  return word_index * 32u + CTZ(word);
+}
+
+inline void BitVector::ClearAllBits() {
+  memset(storage_, 0, storage_size_ * kWordBytes);
+}
+
+inline bool BitVector::Equal(const BitVector* src) const {
+  return (storage_size_ == src->GetStorageSize()) &&
+    (expandable_ == src->IsExpandable()) &&
+    (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+}
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_BASE_BIT_VECTOR_INL_H_
index 1b9022e..3d2f0de 100644 (file)
 
 #include "bit_vector.h"
 
+#include "allocator.h"
+#include "bit_vector-inl.h"
+
 namespace art {
 
-// TODO: profile to make sure this is still a win relative to just using shifted masks.
-static uint32_t check_masks[32] = {
-  0x00000001, 0x00000002, 0x00000004, 0x00000008, 0x00000010,
-  0x00000020, 0x00000040, 0x00000080, 0x00000100, 0x00000200,
-  0x00000400, 0x00000800, 0x00001000, 0x00002000, 0x00004000,
-  0x00008000, 0x00010000, 0x00020000, 0x00040000, 0x00080000,
-  0x00100000, 0x00200000, 0x00400000, 0x00800000, 0x01000000,
-  0x02000000, 0x04000000, 0x08000000, 0x10000000, 0x20000000,
-  0x40000000, 0x80000000 };
-
-static inline uint32_t BitsToWords(uint32_t bits) {
-  return (bits + 31) >> 5;
+// The number of words necessary to encode bits.
+static constexpr uint32_t BitsToWords(uint32_t bits) {
+  return RoundUp(bits, 32) / 32;
 }
 
 // TODO: replace excessive argument defaulting when we are at gcc 4.7
@@ -40,10 +34,10 @@ BitVector::BitVector(uint32_t start_bits,
                      Allocator* allocator,
                      uint32_t storage_size,
                      uint32_t* storage)
-  : allocator_(allocator),
-    expandable_(expandable),
+  : storage_(storage),
     storage_size_(storage_size),
-    storage_(storage) {
+    allocator_(allocator),
+    expandable_(expandable) {
   COMPILE_ASSERT(sizeof(*storage_) == kWordBytes, check_word_bytes);
   COMPILE_ASSERT(sizeof(*storage_) * 8u == kWordBits, check_word_bits);
   if (storage_ == nullptr) {
@@ -56,59 +50,7 @@ BitVector::~BitVector() {
   allocator_->Free(storage_);
 }
 
-/*
- * Determine whether or not the specified bit is set.
- */
-bool BitVector::IsBitSet(uint32_t num) const {
-  // If the index is over the size:
-  if (num >= storage_size_ * kWordBits) {
-    // Whether it is expandable or not, this bit does not exist: thus it is not set.
-    return false;
-  }
-
-  return IsBitSet(storage_, num);
-}
-
-// Mark all bits bit as "clear".
-void BitVector::ClearAllBits() {
-  memset(storage_, 0, storage_size_ * kWordBytes);
-}
-
-// Mark the specified bit as "set".
-/*
- * TUNING: this could have pathologically bad growth/expand behavior.  Make sure we're
- * not using it badly or change resize mechanism.
- */
-void BitVector::SetBit(uint32_t num) {
-  if (num >= storage_size_ * kWordBits) {
-    DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << num;
-
-    /* Round up to word boundaries for "num+1" bits */
-    uint32_t new_size = BitsToWords(num + 1);
-    DCHECK_GT(new_size, storage_size_);
-    uint32_t *new_storage =
-        static_cast<uint32_t*>(allocator_->Alloc(new_size * kWordBytes));
-    memcpy(new_storage, storage_, storage_size_ * kWordBytes);
-    // Zero out the new storage words.
-    memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
-    // TOTO: collect stats on space wasted because of resize.
-    storage_ = new_storage;
-    storage_size_ = new_size;
-  }
-
-  storage_[num >> 5] |= check_masks[num & 0x1f];
-}
-
-// Mark the specified bit as "unset".
-void BitVector::ClearBit(uint32_t num) {
-  // If the index is over the size, we don't have to do anything, it is cleared.
-  if (num < storage_size_ * kWordBits) {
-    // Otherwise, go ahead and clear it.
-    storage_[num >> 5] &= ~check_masks[num & 0x1f];
-  }
-}
-
-bool BitVector::SameBitsSet(const BitVector *src) {
+bool BitVector::SameBitsSet(const BitVector *src) const {
   int our_highest = GetHighestBitSet();
   int src_highest = src->GetHighestBitSet();
 
@@ -134,7 +76,6 @@ bool BitVector::SameBitsSet(const BitVector *src) {
   return (memcmp(storage_, src->GetRawStorage(), our_highest_index * kWordBytes) == 0);
 }
 
-// Intersect with another bit vector.
 void BitVector::Intersect(const BitVector* src) {
   uint32_t src_storage_size = src->storage_size_;
 
@@ -155,9 +96,6 @@ void BitVector::Intersect(const BitVector* src) {
   }
 }
 
-/*
- * Union with another bit vector.
- */
 bool BitVector::Union(const BitVector* src) {
   // Get the highest bit to determine how much we need to expand.
   int highest_bit = src->GetHighestBitSet();
@@ -175,8 +113,7 @@ bool BitVector::Union(const BitVector* src) {
   if (storage_size_ < src_size) {
     changed = true;
 
-    // Set it to reallocate.
-    SetBit(highest_bit);
+    EnsureSize(highest_bit);
 
     // Paranoid: storage size should be big enough to hold this bit now.
     DCHECK_LT(static_cast<uint32_t> (highest_bit), storage_size_ * kWordBits);
@@ -242,21 +179,20 @@ bool BitVector::UnionIfNotIn(const BitVector* union_with, const BitVector* not_i
 }
 
 void BitVector::Subtract(const BitVector *src) {
-    uint32_t src_size = src->storage_size_;
+  uint32_t src_size = src->storage_size_;
 
-    // We only need to operate on bytes up to the smaller of the sizes of the two operands.
-    unsigned int min_size = (storage_size_ > src_size) ? src_size : storage_size_;
+  // We only need to operate on bytes up to the smaller of the sizes of the two operands.
+  unsigned int min_size = (storage_size_ > src_size) ? src_size : storage_size_;
 
-    // Difference until max, we know both accept it:
-    //   There is no need to do more:
-    //     If we are bigger than src, the upper bits are unchanged.
-    //     If we are smaller than src, the non-existant upper bits are 0 and thus can't get subtracted.
-    for (uint32_t idx = 0; idx < min_size; idx++) {
-        storage_[idx] &= (~(src->GetRawStorageWord(idx)));
-    }
+  // Difference until max, we know both accept it:
+  //   There is no need to do more:
+  //     If we are bigger than src, the upper bits are unchanged.
+  //     If we are smaller than src, the non-existant upper bits are 0 and thus can't get subtracted.
+  for (uint32_t idx = 0; idx < min_size; idx++) {
+    storage_[idx] &= (~(src->GetRawStorageWord(idx)));
+  }
 }
 
-// Count the number of bits that are set.
 uint32_t BitVector::NumSetBits() const {
   uint32_t count = 0;
   for (uint32_t word = 0; word < storage_size_; word++) {
@@ -265,17 +201,11 @@ uint32_t BitVector::NumSetBits() const {
   return count;
 }
 
-// Count the number of bits that are set in range [0, end).
 uint32_t BitVector::NumSetBits(uint32_t end) const {
   DCHECK_LE(end, storage_size_ * kWordBits);
   return NumSetBits(storage_, end);
 }
 
-/*
- * Mark specified number of bits as "set". Cannot set all bits like ClearAll
- * since there might be unused bits - setting those to one will confuse the
- * iterator.
- */
 void BitVector::SetInitialBits(uint32_t num_bits) {
   // If num_bits is 0, clear everything.
   if (num_bits == 0) {
@@ -288,7 +218,7 @@ void BitVector::SetInitialBits(uint32_t num_bits) {
 
   uint32_t idx;
   // We can set every storage element with -1.
-  for (idx = 0; idx < (num_bits >> 5); idx++) {
+  for (idx = 0; idx < WordIndex(num_bits); idx++) {
     storage_[idx] = -1;
   }
 
@@ -312,20 +242,8 @@ int BitVector::GetHighestBitSet() const {
     uint32_t value = storage_[idx];
 
     if (value != 0) {
-      // Shift right for the counting.
-      value /= 2;
-
-      int cnt = 0;
-
-      // Count the bits.
-      while (value > 0) {
-        value /= 2;
-        cnt++;
-      }
-
-      // Return cnt + how many storage units still remain * the number of bits per unit.
-      int res = cnt + (idx * kWordBits);
-      return res;
+      // Return highest bit set in value plus bits from previous storage indexes.
+      return 31 - CLZ(value) + (idx * kWordBits);
     }
   }
 
@@ -333,23 +251,6 @@ int BitVector::GetHighestBitSet() const {
   return -1;
 }
 
-bool BitVector::EnsureSizeAndClear(unsigned int num) {
-  // Check if the bitvector is expandable.
-  if (IsExpandable() == false) {
-    return false;
-  }
-
-  if (num > 0) {
-    // Now try to expand by setting the last bit.
-    SetBit(num - 1);
-  }
-
-  // We must clear all bits as per our specification.
-  ClearAllBits();
-
-  return true;
-}
-
 void BitVector::Copy(const BitVector *src) {
   // Get highest bit set, we only need to copy till then.
   int highest_bit = src->GetHighestBitSet();
@@ -375,13 +276,8 @@ void BitVector::Copy(const BitVector *src) {
   }
 }
 
-bool BitVector::IsBitSet(const uint32_t* storage, uint32_t num) {
-  uint32_t val = storage[num >> 5] & check_masks[num & 0x1f];
-  return (val != 0);
-}
-
 uint32_t BitVector::NumSetBits(const uint32_t* storage, uint32_t end) {
-  uint32_t word_end = end >> 5;
+  uint32_t word_end = WordIndex(end);
   uint32_t partial_word_bits = end & 0x1f;
 
   uint32_t count = 0u;
@@ -400,45 +296,6 @@ void BitVector::Dump(std::ostream& os, const char *prefix) const {
   os << buffer.str() << std::endl;
 }
 
-
-void BitVector::DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const {
-  // Now print it to the file.
-  fprintf(file, "    {%s}", buffer.str().c_str());
-
-  // If it isn't the last entry, add a |.
-  if (last_entry == false) {
-    fprintf(file, "|");
-  }
-
-  // Add the \n.
-  fprintf(file, "\\\n");
-}
-
-void BitVector::DumpDot(FILE* file, const char* prefix, bool last_entry) const {
-  std::ostringstream buffer;
-  DumpHelper(prefix, buffer);
-  DumpDotHelper(last_entry, file, buffer);
-}
-
-void BitVector::DumpIndicesDot(FILE* file, const char* prefix, bool last_entry) const {
-  std::ostringstream buffer;
-  DumpIndicesHelper(prefix, buffer);
-  DumpDotHelper(last_entry, file, buffer);
-}
-
-void BitVector::DumpIndicesHelper(const char* prefix, std::ostringstream& buffer) const {
-  // Initialize it.
-  if (prefix != nullptr) {
-    buffer << prefix;
-  }
-
-  for (size_t i = 0; i < storage_size_ * kWordBits; i++) {
-    if (IsBitSet(i)) {
-      buffer << i << " ";
-    }
-  }
-}
-
 void BitVector::DumpHelper(const char* prefix, std::ostringstream& buffer) const {
   // Initialize it.
   if (prefix != nullptr) {
@@ -452,4 +309,22 @@ void BitVector::DumpHelper(const char* prefix, std::ostringstream& buffer) const
   buffer << ')';
 }
 
+void BitVector::EnsureSize(uint32_t idx) {
+  if (idx >= storage_size_ * kWordBits) {
+    DCHECK(expandable_) << "Attempted to expand a non-expandable bitmap to position " << idx;
+
+    /* Round up to word boundaries for "idx+1" bits */
+    uint32_t new_size = BitsToWords(idx + 1);
+    DCHECK_GT(new_size, storage_size_);
+    uint32_t *new_storage =
+        static_cast<uint32_t*>(allocator_->Alloc(new_size * kWordBytes));
+    memcpy(new_storage, storage_, storage_size_ * kWordBytes);
+    // Zero out the new storage words.
+    memset(&new_storage[storage_size_], 0, (new_size - storage_size_) * kWordBytes);
+    // TOTO: collect stats on space wasted because of resize.
+    storage_ = new_storage;
+    storage_size_ = new_size;
+  }
+}
+
 }  // namespace art
index fb1646f..1e28a27 100644 (file)
 #define ART_RUNTIME_BASE_BIT_VECTOR_H_
 
 #include <stdint.h>
-#include <stddef.h>
-
-#include "allocator.h"
-#include "base/logging.h"
-#include "utils.h"
+#include <iterator>
 
 namespace art {
 
+class Allocator;
+
 /*
  * Expanding bitmap, used for tracking resources.  Bits are numbered starting
  * from zero.  All operations on a BitVector are unsynchronized.
  */
 class BitVector {
-  public:
-    class IndexContainer;
-
-    /**
-     * @brief Convenient iterator across the indexes of the BitVector's set bits.
-     *
-     * @details IndexIterator is a Forward iterator (C++11: 24.2.5) from the lowest
-     * to the highest index of the BitVector's set bits. Instances can be retrieved
-     * only through BitVector::Indexes() which returns an IndexContainer wrapper
-     * object with begin() and end() suitable for range-based loops:
-     *   for (uint32_t idx : bit_vector.Indexes()) {
-     *     // Use idx.
-     *   }
-     */
-    class IndexIterator
-        : std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, uint32_t> {
-      public:
-        bool operator==(const IndexIterator& other) const {
-          DCHECK(bit_storage_ == other.bit_storage_);
-          DCHECK_EQ(storage_size_, other.storage_size_);
-          return bit_index_ == other.bit_index_;
-        }
-
-        bool operator!=(const IndexIterator& other) const {
-          return !(*this == other);
-        }
-
-        int operator*() const {
-          DCHECK_LT(bit_index_, BitSize());
-          return bit_index_;
-        }
-
-        IndexIterator& operator++() {
-          DCHECK_LT(bit_index_, BitSize());
-          bit_index_ = FindIndex(bit_index_ + 1u);
-          return *this;
-        }
-
-        IndexIterator operator++(int) {
-          IndexIterator result(*this);
-          ++*this;
-          return result;
-        }
-
-        // Helper function to check for end without comparing with bit_vector.Indexes().end().
-        bool Done() const {
-          return bit_index_ == BitSize();
-        }
-
-      private:
-        struct begin_tag { };
-        struct end_tag { };
-
-        IndexIterator(const BitVector* bit_vector, begin_tag)
-          : bit_storage_(bit_vector->GetRawStorage()),
-            storage_size_(bit_vector->storage_size_),
-            bit_index_(FindIndex(0u)) { }
-
-        IndexIterator(const BitVector* bit_vector, end_tag)
-          : bit_storage_(bit_vector->GetRawStorage()),
-            storage_size_(bit_vector->storage_size_),
-            bit_index_(BitSize()) { }
-
-        uint32_t BitSize() const {
-          return storage_size_ * kWordBits;
-        }
-
-        uint32_t FindIndex(uint32_t start_index) const {
-          DCHECK_LE(start_index, BitSize());
-          uint32_t word_index = start_index / kWordBits;
-          if (UNLIKELY(word_index == storage_size_)) {
-            return start_index;
-          }
-          uint32_t word = bit_storage_[word_index];
-          // Mask out any bits in the first word we've already considered.
-          word &= static_cast<uint32_t>(-1) << (start_index & 0x1f);
-          while (word == 0u) {
-            ++word_index;
-            if (UNLIKELY(word_index == storage_size_)) {
-              return BitSize();
-            }
-            word = bit_storage_[word_index];
-          }
-          return word_index * 32u + CTZ(word);
-        }
-
-        const uint32_t* const bit_storage_;
-        const uint32_t storage_size_;  // Size of vector in words.
-        uint32_t bit_index_;           // Current index (size in bits).
-
-        friend class BitVector::IndexContainer;
-    };
-
-    /**
-     * @brief BitVector wrapper class for iteration across indexes of set bits.
-     */
-    class IndexContainer {
-     public:
-      explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
-
-      IndexIterator begin() const {
-        return IndexIterator(bit_vector_, IndexIterator::begin_tag());
-      }
-
-      IndexIterator end() const {
-        return IndexIterator(bit_vector_, IndexIterator::end_tag());
-      }
-
-     private:
-      const BitVector* const bit_vector_;
-    };
-
-    BitVector(uint32_t start_bits,
-              bool expandable,
-              Allocator* allocator,
-              uint32_t storage_size = 0,
-              uint32_t* storage = nullptr);
-
-    virtual ~BitVector();
-
-    void SetBit(uint32_t num);
-    void ClearBit(uint32_t num);
-    bool IsBitSet(uint32_t num) const;
-    void ClearAllBits();
-    void SetInitialBits(uint32_t num_bits);
-
-    void Copy(const BitVector* src);
-    void Intersect(const BitVector* src2);
-    bool Union(const BitVector* src);
-
-    // Set bits of union_with that are not in not_in.
-    bool UnionIfNotIn(const BitVector* union_with, const BitVector* not_in);
-
-    void Subtract(const BitVector* src);
-    // Are we equal to another bit vector?  Note: expandability attributes must also match.
-    bool Equal(const BitVector* src) {
-      return (storage_size_ == src->GetStorageSize()) &&
-        (expandable_ == src->IsExpandable()) &&
-        (memcmp(storage_, src->GetRawStorage(), storage_size_ * sizeof(uint32_t)) == 0);
+ public:
+  class IndexContainer;
+
+  /**
+   * @brief Convenient iterator across the indexes of the BitVector's set bits.
+   *
+   * @details IndexIterator is a Forward iterator (C++11: 24.2.5) from the lowest
+   * to the highest index of the BitVector's set bits. Instances can be retrieved
+   * only through BitVector::Indexes() which returns an IndexContainer wrapper
+   * object with begin() and end() suitable for range-based loops:
+   *   for (uint32_t idx : bit_vector.Indexes()) {
+   *     // Use idx.
+   *   }
+   */
+  class IndexIterator :
+      std::iterator<std::forward_iterator_tag, uint32_t, ptrdiff_t, void, uint32_t> {
+   public:
+    bool operator==(const IndexIterator& other) const;
+
+    bool operator!=(const IndexIterator& other) const {
+      return !(*this == other);
     }
 
-    /**
-     * @brief Are all the bits set the same?
-     * @details expandability and size can differ as long as the same bits are set.
-     */
-    bool SameBitsSet(const BitVector *src);
+    int operator*() const;
 
-    uint32_t NumSetBits() const;
+    IndexIterator& operator++();
 
-    // Number of bits set in range [0, end).
-    uint32_t NumSetBits(uint32_t end) const;
+    IndexIterator operator++(int);
 
-    IndexContainer Indexes() const {
-      return IndexContainer(this);
+    // Helper function to check for end without comparing with bit_vector.Indexes().end().
+    bool Done() const {
+      return bit_index_ == BitSize();
     }
 
-    uint32_t GetStorageSize() const { return storage_size_; }
-    bool IsExpandable() const { return expandable_; }
-    uint32_t GetRawStorageWord(size_t idx) const { return storage_[idx]; }
-    uint32_t* GetRawStorage() { return storage_; }
-    const uint32_t* GetRawStorage() const { return storage_; }
-    size_t GetSizeOf() const { return storage_size_ * kWordBytes; }
+   private:
+    struct begin_tag { };
+    struct end_tag { };
 
-    /**
-     * @return the highest bit set, -1 if none are set
-     */
-    int GetHighestBitSet() const;
+    IndexIterator(const BitVector* bit_vector, begin_tag)
+      : bit_storage_(bit_vector->GetRawStorage()),
+        storage_size_(bit_vector->storage_size_),
+        bit_index_(FindIndex(0u)) { }
 
-    // Is bit set in storage. (No range check.)
-    static bool IsBitSet(const uint32_t* storage, uint32_t num);
-    // Number of bits set in range [0, end) in storage. (No range check.)
-    static uint32_t NumSetBits(const uint32_t* storage, uint32_t end);
+    IndexIterator(const BitVector* bit_vector, end_tag)
+      : bit_storage_(bit_vector->GetRawStorage()),
+        storage_size_(bit_vector->storage_size_),
+        bit_index_(BitSize()) { }
 
-    bool EnsureSizeAndClear(unsigned int num);
+    uint32_t BitSize() const {
+      return storage_size_ * kWordBits;
+    }
 
-    void Dump(std::ostream& os, const char* prefix) const;
+    uint32_t FindIndex(uint32_t start_index) const;
+    const uint32_t* const bit_storage_;
+    const uint32_t storage_size_;  // Size of vector in words.
+    uint32_t bit_index_;           // Current index (size in bits).
 
-    /**
-     * @brief last_entry is this the last entry for the dot dumping
-     * @details if not, a "|" is appended to the dump.
-     */
-    void DumpDot(FILE* file, const char* prefix, bool last_entry = false) const;
+    friend class BitVector::IndexContainer;
+  };
 
-    /**
-     * @brief last_entry is this the last entry for the dot dumping
-     * @details if not, a "|" is appended to the dump.
-     */
-    void DumpIndicesDot(FILE* file, const char* prefix, bool last_entry = false) const;
+  /**
+   * @brief BitVector wrapper class for iteration across indexes of set bits.
+   */
+  class IndexContainer {
+   public:
+    explicit IndexContainer(const BitVector* bit_vector) : bit_vector_(bit_vector) { }
 
-  protected:
-    /**
-     * @brief Dump the bitvector into buffer in a 00101..01 format.
-     * @param buffer the ostringstream used to dump the bitvector into.
-     */
-    void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
+    IndexIterator begin() const {
+      return IndexIterator(bit_vector_, IndexIterator::begin_tag());
+    }
 
-    /**
-     * @brief Dump the bitvector in a 1 2 5 8 format, where the numbers are the bit set.
-     * @param buffer the ostringstream used to dump the bitvector into.
-     */
-    void DumpIndicesHelper(const char* prefix, std::ostringstream& buffer) const;
+    IndexIterator end() const {
+      return IndexIterator(bit_vector_, IndexIterator::end_tag());
+    }
+
+   private:
+    const BitVector* const bit_vector_;
+  };
+
+  BitVector(uint32_t start_bits,
+            bool expandable,
+            Allocator* allocator,
+            uint32_t storage_size = 0,
+            uint32_t* storage = nullptr);
 
-    /**
-     * @brief Wrapper to perform the bitvector dumping with the .dot format.
-     * @param buffer the ostringstream used to dump the bitvector into.
+  virtual ~BitVector();
+
+  // Mark the specified bit as "set".
+  void SetBit(uint32_t idx) {
+    /*
+     * TUNING: this could have pathologically bad growth/expand behavior.  Make sure we're
+     * not using it badly or change resize mechanism.
      */
-    void DumpDotHelper(bool last_entry, FILE* file, std::ostringstream& buffer) const;
+    if (idx >= storage_size_ * kWordBits) {
+      EnsureSize(idx);
+    }
+    storage_[WordIndex(idx)] |= BitMask(idx);
+  }
+
+  // Mark the specified bit as "unset".
+  void ClearBit(uint32_t idx) {
+    // If the index is over the size, we don't have to do anything, it is cleared.
+    if (idx < storage_size_ * kWordBits) {
+      // Otherwise, go ahead and clear it.
+      storage_[WordIndex(idx)] &= ~BitMask(idx);
+    }
+  }
+
+  // Determine whether or not the specified bit is set.
+  bool IsBitSet(uint32_t idx) const {
+    // If the index is over the size, whether it is expandable or not, this bit does not exist:
+    // thus it is not set.
+    return (idx < (storage_size_ * kWordBits)) && IsBitSet(storage_, idx);
+  }
+
+  // Mark all bits bit as "clear".
+  void ClearAllBits();
+
+  // Mark specified number of bits as "set". Cannot set all bits like ClearAll since there might
+  // be unused bits - setting those to one will confuse the iterator.
+  void SetInitialBits(uint32_t num_bits);
+
+  void Copy(const BitVector* src);
+
+  // Intersect with another bit vector.
+  void Intersect(const BitVector* src2);
+
+  // Union with another bit vector.
+  bool Union(const BitVector* src);
+
+  // Set bits of union_with that are not in not_in.
+  bool UnionIfNotIn(const BitVector* union_with, const BitVector* not_in);
+
+  void Subtract(const BitVector* src);
+
+  // Are we equal to another bit vector?  Note: expandability attributes must also match.
+  bool Equal(const BitVector* src) const;
+
+  /**
+   * @brief Are all the bits set the same?
+   * @details expandability and size can differ as long as the same bits are set.
+   */
+  bool SameBitsSet(const BitVector *src) const;
+
+  // Count the number of bits that are set.
+  uint32_t NumSetBits() const;
+
+  // Count the number of bits that are set in range [0, end).
+  uint32_t NumSetBits(uint32_t end) const;
+
+  IndexContainer Indexes() const {
+    return IndexContainer(this);
+  }
+
+  uint32_t GetStorageSize() const {
+    return storage_size_;
+  }
+
+  bool IsExpandable() const {
+    return expandable_;
+  }
+
+  uint32_t GetRawStorageWord(size_t idx) const {
+    return storage_[idx];
+  }
+
+  uint32_t* GetRawStorage() {
+    return storage_;
+  }
+
+  const uint32_t* GetRawStorage() const {
+    return storage_;
+  }
+
+  size_t GetSizeOf() const {
+    return storage_size_ * kWordBytes;
+  }
+
+  /**
+   * @return the highest bit set, -1 if none are set
+   */
+  int GetHighestBitSet() const;
+
+  // Is bit set in storage. (No range check.)
+  static bool IsBitSet(const uint32_t* storage, uint32_t idx) {
+    return (storage[WordIndex(idx)] & BitMask(idx)) != 0;
+  }
+
+  // Number of bits set in range [0, end) in storage. (No range check.)
+  static uint32_t NumSetBits(const uint32_t* storage, uint32_t end);
+
+  void Dump(std::ostream& os, const char* prefix) const;
+
+ private:
+  /**
+   * @brief Dump the bitvector into buffer in a 00101..01 format.
+   * @param buffer the ostringstream used to dump the bitvector into.
+   */
+  void DumpHelper(const char* prefix, std::ostringstream& buffer) const;
+
+  // Ensure there is space for a bit at idx.
+  void EnsureSize(uint32_t idx);
+
+  // The index of the word within storage.
+  static constexpr uint32_t WordIndex(uint32_t idx) {
+    return idx >> 5;
+  }
+
+  // A bit mask to extract the bit for the given index.
+  static constexpr uint32_t BitMask(uint32_t idx) {
+    return 1 << (idx & 0x1f);
+  }
 
-  private:
-    static constexpr uint32_t kWordBytes = sizeof(uint32_t);
-    static constexpr uint32_t kWordBits = kWordBytes * 8;
+  static constexpr uint32_t kWordBytes = sizeof(uint32_t);
+  static constexpr uint32_t kWordBits = kWordBytes * 8;
 
-    Allocator* const allocator_;
-    const bool expandable_;         // expand bitmap if we run out?
-    uint32_t   storage_size_;       // current size, in 32-bit words.
-    uint32_t*  storage_;
+  uint32_t*  storage_;            // The storage for the bit vector.
+  uint32_t   storage_size_;       // Current size, in 32-bit words.
+  Allocator* const allocator_;    // Allocator if expandable.
+  const bool expandable_;         // Should the bitmap expand if too small?
 };
 
 
index 1403f50..df5d79d 100644 (file)
@@ -16,7 +16,8 @@
 
 #include <memory>
 
-#include "bit_vector.h"
+#include "allocator.h"
+#include "bit_vector-inl.h"
 #include "gtest/gtest.h"
 
 namespace art {
index 80fb9ea..23caefc 100644 (file)
@@ -35,6 +35,7 @@ Mutex* Locks::allocated_thread_ids_lock_ = nullptr;
 ReaderWriterMutex* Locks::breakpoint_lock_ = nullptr;
 ReaderWriterMutex* Locks::classlinker_classes_lock_ = nullptr;
 ReaderWriterMutex* Locks::heap_bitmap_lock_ = nullptr;
+Mutex* Locks::jni_libraries_lock_ = nullptr;
 Mutex* Locks::logging_lock_ = nullptr;
 Mutex* Locks::mem_maps_lock_ = nullptr;
 Mutex* Locks::modify_ldt_lock_ = nullptr;
@@ -834,6 +835,7 @@ void Locks::Init() {
     DCHECK(breakpoint_lock_ != nullptr);
     DCHECK(classlinker_classes_lock_ != nullptr);
     DCHECK(heap_bitmap_lock_ != nullptr);
+    DCHECK(jni_libraries_lock_ != nullptr);
     DCHECK(logging_lock_ != nullptr);
     DCHECK(mutator_lock_ != nullptr);
     DCHECK(thread_list_lock_ != nullptr);
@@ -878,6 +880,10 @@ void Locks::Init() {
     DCHECK(thread_list_lock_ == nullptr);
     thread_list_lock_ = new Mutex("thread list lock", current_lock_level);
 
+    UPDATE_CURRENT_LOCK_LEVEL(kJniLoadLibraryLock);
+    DCHECK(jni_libraries_lock_ == nullptr);
+    jni_libraries_lock_ = new Mutex("JNI shared libraries map lock", current_lock_level);
+
     UPDATE_CURRENT_LOCK_LEVEL(kBreakpointLock);
     DCHECK(breakpoint_lock_ == nullptr);
     breakpoint_lock_ = new ReaderWriterMutex("breakpoint lock", current_lock_level);
index 8d89f96..2a623fd 100644 (file)
@@ -74,7 +74,6 @@ enum LockLevel {
   kDefaultMutexLevel,
   kMarkSweepLargeObjectLock,
   kPinTableLock,
-  kLoadLibraryLock,
   kJdwpObjectRegistryLock,
   kModifyLdtLock,
   kAllocatedThreadIdsLock,
@@ -83,6 +82,7 @@ enum LockLevel {
   kBreakpointLock,
   kMonitorLock,
   kMonitorListLock,
+  kJniLoadLibraryLock,
   kThreadListLock,
   kBreakpointInvokeLock,
   kDeoptimizationLock,
@@ -561,8 +561,11 @@ class Locks {
   // attaching and detaching.
   static Mutex* thread_list_lock_ ACQUIRED_AFTER(trace_lock_);
 
+  // Guards maintaining loading library data structures.
+  static Mutex* jni_libraries_lock_ ACQUIRED_AFTER(thread_list_lock_);
+
   // Guards breakpoints.
-  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(thread_list_lock_);
+  static ReaderWriterMutex* breakpoint_lock_ ACQUIRED_AFTER(jni_libraries_lock_);
 
   // Guards lists of classes within the class linker.
   static ReaderWriterMutex* classlinker_classes_lock_ ACQUIRED_AFTER(breakpoint_lock_);
index 9036e3d..b0f8e22 100644 (file)
@@ -25,6 +25,7 @@
 #include "dex_file-inl.h"
 #include "field_helper.h"
 #include "gc/space/space.h"
+#include "java_vm_ext.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
 #include "runtime.h"
 #include "scoped_thread_state_change.h"
 #include "thread.h"
+#include "well_known_classes.h"
 
 namespace art {
 
-static void JniAbort(const char* jni_function_name, const char* msg) {
-  Thread* self = Thread::Current();
-  ScopedObjectAccess soa(self);
-  mirror::ArtMethod* current_method = self->GetCurrentMethod(nullptr);
-
-  std::ostringstream os;
-  os << "JNI DETECTED ERROR IN APPLICATION: " << msg;
-
-  if (jni_function_name != nullptr) {
-    os << "\n    in call to " << jni_function_name;
-  }
-  // TODO: is this useful given that we're about to dump the calling thread's stack?
-  if (current_method != nullptr) {
-    os << "\n    from " << PrettyMethod(current_method);
-  }
-  os << "\n";
-  self->Dump(os);
-
-  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
-  if (vm->check_jni_abort_hook != nullptr) {
-    vm->check_jni_abort_hook(vm->check_jni_abort_hook_data, os.str());
-  } else {
-    // Ensure that we get a native stack trace for this thread.
-    self->TransitionFromRunnableToSuspended(kNative);
-    LOG(FATAL) << os.str();
-    self->TransitionFromSuspendedToRunnable();  // Unreachable, keep annotalysis happy.
-  }
-}
-
-static void JniAbortV(const char* jni_function_name, const char* fmt, va_list ap) {
-  std::string msg;
-  StringAppendV(&msg, fmt, ap);
-  JniAbort(jni_function_name, msg.c_str());
-}
-
-void JniAbortF(const char* jni_function_name, const char* fmt, ...) {
-  va_list args;
-  va_start(args, fmt);
-  JniAbortV(jni_function_name, fmt, args);
-  va_end(args);
-}
-
 /*
  * ===========================================================================
  *      JNI function helpers
  * ===========================================================================
  */
 
-static bool IsHandleScopeLocalRef(JNIEnv* env, jobject localRef) {
-  return GetIndirectRefKind(localRef) == kHandleScopeOrInvalid &&
-      reinterpret_cast<JNIEnvExt*>(env)->self->HandleScopeContains(localRef);
-}
-
 // Flags passed into ScopedCheck.
 #define kFlag_Default       0x0000
 
@@ -109,134 +64,88 @@ static bool IsHandleScopeLocalRef(JNIEnv* env, jobject localRef) {
 #define kFlag_Invocation    0x8000      // Part of the invocation interface (JavaVM*).
 
 #define kFlag_ForceTrace    0x80000000  // Add this to a JNI function's flags if you want to trace every call.
-
-static const char* gBuiltInPrefixes[] = {
-  "Landroid/",
-  "Lcom/android/",
-  "Lcom/google/android/",
-  "Ldalvik/",
-  "Ljava/",
-  "Ljavax/",
-  "Llibcore/",
-  "Lorg/apache/harmony/",
-  nullptr
+/*
+ * Java primitive types:
+ * B - jbyte
+ * C - jchar
+ * D - jdouble
+ * F - jfloat
+ * I - jint
+ * J - jlong
+ * S - jshort
+ * Z - jboolean (shown as true and false)
+ * V - void
+ *
+ * Java reference types:
+ * L - jobject
+ * a - jarray
+ * c - jclass
+ * s - jstring
+ * t - jthrowable
+ *
+ * JNI types:
+ * b - jboolean (shown as JNI_TRUE and JNI_FALSE)
+ * f - jfieldID
+ * i - JNI error value (JNI_OK, JNI_ERR, JNI_EDETACHED, JNI_EVERSION)
+ * m - jmethodID
+ * p - void*
+ * r - jint (for release mode arguments)
+ * u - const char* (Modified UTF-8)
+ * z - jsize (for lengths; use i if negative values are okay)
+ * v - JavaVM*
+ * w - jobjectRefType
+ * E - JNIEnv*
+ * . - no argument; just print "..." (used for varargs JNI calls)
+ *
+ */
+union JniValueType {
+  jarray a;
+  jboolean b;
+  jclass c;
+  jfieldID f;
+  jint i;
+  jmethodID m;
+  const void* p;  // Pointer.
+  jint r;  // Release mode.
+  jstring s;
+  jthrowable t;
+  const char* u;  // Modified UTF-8.
+  JavaVM* v;
+  jobjectRefType w;
+  jsize z;
+  jbyte B;
+  jchar C;
+  jdouble D;
+  JNIEnv* E;
+  jfloat F;
+  jint I;
+  jlong J;
+  jobject L;
+  jshort S;
+  const void* V;  // void
+  jboolean Z;
 };
 
-static bool ShouldTrace(JavaVMExt* vm, mirror::ArtMethod* method)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages
-  // when a native method that matches the -Xjnitrace argument calls a JNI function
-  // such as NewByteArray.
-  // If -verbose:third-party-jni is on, we want to log any JNI function calls
-  // made by a third-party native method.
-  std::string class_name(method->GetDeclaringClassDescriptor());
-  if (!vm->trace.empty() && class_name.find(vm->trace) != std::string::npos) {
-    return true;
-  }
-  if (VLOG_IS_ON(third_party_jni)) {
-    // Return true if we're trying to log all third-party JNI activity and 'method' doesn't look
-    // like part of Android.
-    for (size_t i = 0; gBuiltInPrefixes[i] != nullptr; ++i) {
-      if (StartsWith(class_name, gBuiltInPrefixes[i])) {
-        return false;
-      }
-    }
-    return true;
-  }
-  return false;
-}
-
 class ScopedCheck {
  public:
-  // For JNIEnv* functions.
-  explicit ScopedCheck(JNIEnv* env, int flags, const char* functionName)
-      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
-      : soa_(env) {
-    Init(flags, functionName, true);
-    CheckThread(flags);
-  }
-
-  // For JavaVM* functions.
-  // TODO: it's not correct that this is a lock function, but making it so aids annotalysis.
-  explicit ScopedCheck(JavaVM* vm, bool has_method, const char* functionName)
-      SHARED_LOCK_FUNCTION(Locks::mutator_lock_)
-      : soa_(vm) {
-    Init(kFlag_Invocation, functionName, has_method);
-  }
-
-  ~ScopedCheck() UNLOCK_FUNCTION(Locks::mutator_lock_) {}
-
-  const ScopedObjectAccess& soa() {
-    return soa_;
+  explicit ScopedCheck(int flags, const char* functionName, bool has_method = true)
+      : function_name_(functionName), flags_(flags), indent_(0), has_method_(has_method) {
   }
 
-  bool ForceCopy() {
-    return Runtime::Current()->GetJavaVM()->force_copy;
-  }
+  ~ScopedCheck() {}
 
   // Checks that 'class_name' is a valid "fully-qualified" JNI class name, like "java/lang/Thread"
   // or "[Ljava/lang/Object;". A ClassLoader can actually normalize class names a couple of
   // times, so using "java.lang.Thread" instead of "java/lang/Thread" might work in some
   // circumstances, but this is incorrect.
-  void CheckClassName(const char* class_name) {
+  bool CheckClassName(const char* class_name) {
     if ((class_name == nullptr) || !IsValidJniClassName(class_name)) {
-      JniAbortF(function_name_,
-                "illegal class name '%s'\n"
-                "    (should be of the form 'package/Class', [Lpackage/Class;' or '[[B')",
-                class_name);
-    }
-  }
-
-  /*
-   * Verify that the field is of the appropriate type.  If the field has an
-   * object type, "java_object" is the object we're trying to assign into it.
-   *
-   * Works for both static and instance fields.
-   */
-  void CheckFieldType(jvalue value, jfieldID fid, char prim, bool isStatic)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    StackHandleScope<1> hs(Thread::Current());
-    Handle<mirror::ArtField> f(hs.NewHandle(CheckFieldID(fid)));
-    if (f.Get() == nullptr) {
-      return;
-    }
-    mirror::Class* field_type = FieldHelper(f).GetType();
-    if (!field_type->IsPrimitive()) {
-      jobject java_object = value.l;
-      if (java_object != nullptr) {
-        mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
-        // If java_object is a weak global ref whose referent has been cleared,
-        // obj will be NULL.  Otherwise, obj should always be non-NULL
-        // and valid.
-        if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
-          Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
-          JniAbortF(function_name_, "field operation on invalid %s: %p",
-                    ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
-          return;
-        } else {
-          if (!obj->InstanceOf(field_type)) {
-            JniAbortF(function_name_, "attempt to set field %s with value of wrong type: %s",
-                      PrettyField(f.Get()).c_str(), PrettyTypeOf(obj).c_str());
-            return;
-          }
-        }
-      }
-    } else if (field_type != Runtime::Current()->GetClassLinker()->FindPrimitiveClass(prim)) {
-      JniAbortF(function_name_, "attempt to set field %s with value of wrong type: %c",
-                PrettyField(f.Get()).c_str(), prim);
-      return;
-    }
-
-    if (isStatic != f.Get()->IsStatic()) {
-      if (isStatic) {
-        JniAbortF(function_name_, "accessing non-static field %s as static",
-                  PrettyField(f.Get()).c_str());
-      } else {
-        JniAbortF(function_name_, "accessing static field %s as non-static",
-                  PrettyField(f.Get()).c_str());
-      }
-      return;
+      AbortF("illegal class name '%s'\n"
+             "    (should be of the form 'package/Class', [Lpackage/Class;' or '[[B')",
+             class_name);
+      return false;
     }
+    return true;
   }
 
   /*
@@ -244,59 +153,90 @@ class ScopedCheck {
    *
    * Assumes "jobj" has already been validated.
    */
-  void CheckInstanceFieldID(jobject java_object, jfieldID fid)
+  bool CheckInstanceFieldID(ScopedObjectAccess& soa, jobject java_object, jfieldID fid)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
-    if (o == nullptr || !Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
+    mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
+    if (o == nullptr) {
+      AbortF("field operation on NULL object: %p", java_object);
+      return false;
+    }
+    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
-      JniAbortF(function_name_, "field operation on invalid %s: %p",
-                ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
-      return;
+      AbortF("field operation on invalid %s: %p",
+             ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
+             java_object);
+      return false;
     }
 
-    mirror::ArtField* f = CheckFieldID(fid);
+    mirror::ArtField* f = CheckFieldID(soa, fid);
     if (f == nullptr) {
-      return;
+      return false;
     }
     mirror::Class* c = o->GetClass();
     if (c->FindInstanceField(f->GetName(), f->GetTypeDescriptor()) == nullptr) {
-      JniAbortF(function_name_, "jfieldID %s not valid for an object of class %s",
-                PrettyField(f).c_str(), PrettyTypeOf(o).c_str());
+      AbortF("jfieldID %s not valid for an object of class %s",
+             PrettyField(f).c_str(), PrettyTypeOf(o).c_str());
+      return false;
     }
+    return true;
   }
 
   /*
    * Verify that the pointer value is non-NULL.
    */
-  void CheckNonNull(const void* ptr) {
-    if (ptr == nullptr) {
-      JniAbortF(function_name_, "non-nullable argument was NULL");
+  bool CheckNonNull(const void* ptr) {
+    if (UNLIKELY(ptr == nullptr)) {
+      AbortF("non-nullable argument was NULL");
+      return false;
     }
+    return true;
   }
 
   /*
    * Verify that the method's return type matches the type of call.
    * 'expectedType' will be "L" for all objects, including arrays.
    */
-  void CheckSig(jmethodID mid, const char* expectedType, bool isStatic)
+  bool CheckMethodAndSig(ScopedObjectAccess& soa, jobject jobj, jclass jc,
+                         jmethodID mid, Primitive::Type type, InvokeType invoke)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    mirror::ArtMethod* m = CheckMethodID(mid);
+    mirror::ArtMethod* m = CheckMethodID(soa, mid);
     if (m == nullptr) {
-      return;
+      return false;
     }
-    if (*expectedType != m->GetShorty()[0]) {
-      JniAbortF(function_name_, "the return type of %s does not match %s",
-                function_name_, PrettyMethod(m).c_str());
+    if (type != Primitive::GetType(m->GetShorty()[0])) {
+      AbortF("the return type of %s does not match %s", function_name_, PrettyMethod(m).c_str());
+      return false;
     }
-    if (isStatic != m->IsStatic()) {
-      if (isStatic) {
-        JniAbortF(function_name_, "calling non-static method %s with %s",
-                  PrettyMethod(m).c_str(), function_name_);
+    bool is_static = (invoke == kStatic);
+    if (is_static != m->IsStatic()) {
+      if (is_static) {
+        AbortF("calling non-static method %s with %s",
+               PrettyMethod(m).c_str(), function_name_);
       } else {
-        JniAbortF(function_name_, "calling static method %s with %s",
-                  PrettyMethod(m).c_str(), function_name_);
+        AbortF("calling static method %s with %s",
+               PrettyMethod(m).c_str(), function_name_);
+      }
+      return false;
+    }
+    if (invoke != kVirtual) {
+      mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+      if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
+        AbortF("can't call %s %s with class %s", invoke == kStatic ? "static" : "nonvirtual",
+            PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+        return false;
+      }
+    }
+    if (invoke != kStatic) {
+      mirror::Object* o = soa.Decode<mirror::Object*>(jobj);
+      if (o == nullptr) {
+        AbortF("can't call %s on null object", PrettyMethod(m).c_str());
+        return false;
+      } else if (!o->InstanceOf(m->GetDeclaringClass())) {
+        AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+        return false;
       }
     }
+    return true;
   }
 
   /*
@@ -304,17 +244,18 @@ class ScopedCheck {
    *
    * Assumes "java_class" has already been validated.
    */
-  void CheckStaticFieldID(jclass java_class, jfieldID fid)
+  bool CheckStaticFieldID(ScopedObjectAccess& soa, jclass java_class, jfieldID fid)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    mirror::Class* c = soa_.Decode<mirror::Class*>(java_class);
-    mirror::ArtField* f = CheckFieldID(fid);
+    mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
+    mirror::ArtField* f = CheckFieldID(soa, fid);
     if (f == nullptr) {
-      return;
+      return false;
     }
     if (f->GetDeclaringClass() != c) {
-      JniAbortF(function_name_, "static jfieldID %p not valid for class %s",
-                fid, PrettyClass(c).c_str());
+      AbortF("static jfieldID %p not valid for class %s", fid, PrettyClass(c).c_str());
+      return false;
     }
+    return true;
   }
 
   /*
@@ -326,17 +267,18 @@ class ScopedCheck {
    *
    * Instances of "java_class" must be instances of the method's declaring class.
    */
-  void CheckStaticMethod(jclass java_class, jmethodID mid)
+  bool CheckStaticMethod(ScopedObjectAccess& soa, jclass java_class, jmethodID mid)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    mirror::ArtMethod* m = CheckMethodID(mid);
+    mirror::ArtMethod* m = CheckMethodID(soa, mid);
     if (m == nullptr) {
-      return;
+      return false;
     }
-    mirror::Class* c = soa_.Decode<mirror::Class*>(java_class);
+    mirror::Class* c = soa.Decode<mirror::Class*>(java_class);
     if (!m->GetDeclaringClass()->IsAssignableFrom(c)) {
-      JniAbortF(function_name_, "can't call static %s on class %s",
-                PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+      AbortF("can't call static %s on class %s", PrettyMethod(m).c_str(), PrettyClass(c).c_str());
+      return false;
     }
+    return true;
   }
 
   /*
@@ -346,19 +288,21 @@ class ScopedCheck {
    * (Note the mid might point to a declaration in an interface; this
    * will be handled automatically by the instanceof check.)
    */
-  void CheckVirtualMethod(jobject java_object, jmethodID mid)
+  bool CheckVirtualMethod(ScopedObjectAccess& soa, jobject java_object, jmethodID mid)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    mirror::ArtMethod* m = CheckMethodID(mid);
+    mirror::ArtMethod* m = CheckMethodID(soa, mid);
     if (m == nullptr) {
-      return;
+      return false;
     }
-    mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
+    mirror::Object* o = soa.Decode<mirror::Object*>(java_object);
     if (o == nullptr) {
-      JniAbortF(function_name_, "can't call %s on null object", PrettyMethod(m).c_str());
+      AbortF("can't call %s on null object", PrettyMethod(m).c_str());
+      return false;
     } else if (!o->InstanceOf(m->GetDeclaringClass())) {
-      JniAbortF(function_name_, "can't call %s on instance of %s",
-                PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+      AbortF("can't call %s on instance of %s", PrettyMethod(m).c_str(), PrettyTypeOf(o).c_str());
+      return false;
     }
+    return true;
   }
 
   /**
@@ -397,11 +341,10 @@ class ScopedCheck {
    *
    * Use the kFlag_NullableUtf flag where 'u' field(s) are nullable.
    */
-  void Check(bool entry, const char* fmt0, ...) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    va_list ap;
-
+  bool Check(ScopedObjectAccess& soa, bool entry, const char* fmt, JniValueType* args)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     mirror::ArtMethod* traceMethod = nullptr;
-    if (has_method_ && (!soa_.Vm()->trace.empty() || VLOG_IS_ON(third_party_jni))) {
+    if (has_method_ && soa.Vm()->IsTracingEnabled()) {
       // We need to guard some of the invocation interface's calls: a bad caller might
       // use DetachCurrentThread or GetEnv on a thread that's not yet attached.
       Thread* self = Thread::Current();
@@ -411,129 +354,70 @@ class ScopedCheck {
     }
 
     if (((flags_ & kFlag_ForceTrace) != 0) ||
-        (traceMethod != nullptr && ShouldTrace(soa_.Vm(), traceMethod))) {
-      va_start(ap, fmt0);
+        (traceMethod != nullptr && soa.Vm()->ShouldTrace(traceMethod))) {
       std::string msg;
-      for (const char* fmt = fmt0; *fmt;) {
-        char ch = *fmt++;
-        if (ch == 'B') {  // jbyte
-          jbyte b = va_arg(ap, int);
-          if (b >= 0 && b < 10) {
-            StringAppendF(&msg, "%d", b);
-          } else {
-            StringAppendF(&msg, "%#x (%d)", b, b);
-          }
-        } else if (ch == 'C') {  // jchar
-          jchar c = va_arg(ap, int);
-          if (c < 0x7f && c >= ' ') {
-            StringAppendF(&msg, "U+%x ('%c')", c, c);
-          } else {
-            StringAppendF(&msg, "U+%x", c);
-          }
-        } else if (ch == 'F' || ch == 'D') {  // jfloat, jdouble
-          StringAppendF(&msg, "%g", va_arg(ap, double));
-        } else if (ch == 'I' || ch == 'S') {  // jint, jshort
-          StringAppendF(&msg, "%d", va_arg(ap, int));
-        } else if (ch == 'J') {  // jlong
-          StringAppendF(&msg, "%" PRId64, va_arg(ap, jlong));
-        } else if (ch == 'Z') {  // jboolean
-          StringAppendF(&msg, "%s", va_arg(ap, int) ? "true" : "false");
-        } else if (ch == 'V') {  // void
-          msg += "void";
-        } else if (ch == 'v') {  // JavaVM*
-          JavaVM* vm = va_arg(ap, JavaVM*);
-          StringAppendF(&msg, "(JavaVM*)%p", vm);
-        } else if (ch == 'E') {  // JNIEnv*
-          JNIEnv* env = va_arg(ap, JNIEnv*);
-          StringAppendF(&msg, "(JNIEnv*)%p", env);
-        } else if (ch == 'L' || ch == 'a' || ch == 's') {  // jobject, jarray, jstring
-          // For logging purposes, these are identical.
-          jobject o = va_arg(ap, jobject);
-          if (o == nullptr) {
-            msg += "NULL";
-          } else {
-            StringAppendF(&msg, "%p", o);
-          }
-        } else if (ch == 'b') {  // jboolean (JNI-style)
-          jboolean b = va_arg(ap, int);
-          msg += (b ? "JNI_TRUE" : "JNI_FALSE");
-        } else if (ch == 'c') {  // jclass
-          jclass jc = va_arg(ap, jclass);
-          mirror::Class* c = reinterpret_cast<mirror::Class*>(Thread::Current()->DecodeJObject(jc));
-          if (c == nullptr) {
-            msg += "NULL";
-          } else if (c == kInvalidIndirectRefObject ||
-              !Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
-            StringAppendF(&msg, "INVALID POINTER:%p", jc);
-          } else if (!c->IsClass()) {
-            msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
-          } else {
-            msg += PrettyClass(c);
-            if (!entry) {
-              StringAppendF(&msg, " (%p)", jc);
-            }
-          }
-        } else if (ch == 'f') {  // jfieldID
-          jfieldID fid = va_arg(ap, jfieldID);
-          mirror::ArtField* f = reinterpret_cast<mirror::ArtField*>(fid);
-          msg += PrettyField(f);
-          if (!entry) {
-            StringAppendF(&msg, " (%p)", fid);
-          }
-        } else if (ch == 'z') {  // non-negative jsize
-          // You might expect jsize to be size_t, but it's not; it's the same as jint.
-          // We only treat this specially so we can do the non-negative check.
-          // TODO: maybe this wasn't worth it?
-          jint i = va_arg(ap, jint);
-          StringAppendF(&msg, "%d", i);
-        } else if (ch == 'm') {  // jmethodID
-          jmethodID mid = va_arg(ap, jmethodID);
-          mirror::ArtMethod* m = reinterpret_cast<mirror::ArtMethod*>(mid);
-          msg += PrettyMethod(m);
-          if (!entry) {
-            StringAppendF(&msg, " (%p)", mid);
-          }
-        } else if (ch == 'p') {  // void* ("pointer")
-          void* p = va_arg(ap, void*);
-          if (p == nullptr) {
-            msg += "NULL";
-          } else {
-            StringAppendF(&msg, "(void*) %p", p);
-          }
-        } else if (ch == 'r') {  // jint (release mode)
-          jint releaseMode = va_arg(ap, jint);
-          if (releaseMode == 0) {
-            msg += "0";
-          } else if (releaseMode == JNI_ABORT) {
-            msg += "JNI_ABORT";
-          } else if (releaseMode == JNI_COMMIT) {
-            msg += "JNI_COMMIT";
-          } else {
-            StringAppendF(&msg, "invalid release mode %d", releaseMode);
-          }
-        } else if (ch == 'u') {  // const char* (Modified UTF-8)
-          const char* utf = va_arg(ap, const char*);
-          if (utf == nullptr) {
-            msg += "NULL";
-          } else {
-            StringAppendF(&msg, "\"%s\"", utf);
-          }
-        } else if (ch == '.') {
-          msg += "...";
+      for (size_t i = 0; fmt[i] != '\0'; ++i) {
+        TracePossibleHeapValue(soa, entry, fmt[i], args[i], &msg);
+        if (fmt[i + 1] != '\0') {
+          StringAppendF(&msg, ", ");
+        }
+      }
+
+      if ((flags_ & kFlag_ForceTrace) != 0) {
+        LOG(INFO) << "JNI: call to " << function_name_ << "(" << msg << ")";
+      } else if (entry) {
+        if (has_method_) {
+          std::string methodName(PrettyMethod(traceMethod, false));
+          LOG(INFO) << "JNI: " << methodName << " -> " << function_name_ << "(" << msg << ")";
+          indent_ = methodName.size() + 1;
         } else {
-          JniAbortF(function_name_, "unknown trace format specifier: %c", ch);
-          return;
+          LOG(INFO) << "JNI: -> " << function_name_ << "(" << msg << ")";
+          indent_ = 0;
+        }
+      } else {
+        LOG(INFO) << StringPrintf("JNI: %*s<- %s returned %s", indent_, "", function_name_, msg.c_str());
+      }
+    }
+
+    // We always do the thorough checks on entry, and never on exit...
+    if (entry) {
+      for (size_t i = 0; fmt[i] != '\0'; ++i) {
+        if (!CheckPossibleHeapValue(soa, fmt[i], args[i])) {
+          return false;
         }
-        if (*fmt) {
+      }
+    }
+    return true;
+  }
+
+  bool CheckNonHeap(JavaVMExt* vm, bool entry, const char* fmt, JniValueType* args) {
+    bool should_trace = (flags_ & kFlag_ForceTrace) != 0;
+    if (!should_trace && vm->IsTracingEnabled()) {
+      // We need to guard some of the invocation interface's calls: a bad caller might
+      // use DetachCurrentThread or GetEnv on a thread that's not yet attached.
+      Thread* self = Thread::Current();
+      if ((flags_ & kFlag_Invocation) == 0 || self != nullptr) {
+        ScopedObjectAccess soa(self);
+        mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
+        should_trace = (traceMethod != nullptr && vm->ShouldTrace(traceMethod));
+      }
+    }
+    if (should_trace) {
+      std::string msg;
+      for (size_t i = 0; fmt[i] != '\0'; ++i) {
+        TraceNonHeapValue(fmt[i], args[i], &msg);
+        if (fmt[i + 1] != '\0') {
           StringAppendF(&msg, ", ");
         }
       }
-      va_end(ap);
 
       if ((flags_ & kFlag_ForceTrace) != 0) {
         LOG(INFO) << "JNI: call to " << function_name_ << "(" << msg << ")";
       } else if (entry) {
         if (has_method_) {
+          Thread* self = Thread::Current();
+          ScopedObjectAccess soa(self);
+          mirror::ArtMethod* traceMethod = self->GetCurrentMethod(nullptr);
           std::string methodName(PrettyMethod(traceMethod, false));
           LOG(INFO) << "JNI: " << methodName << " -> " << function_name_ << "(" << msg << ")";
           indent_ = methodName.size() + 1;
@@ -548,43 +432,176 @@ class ScopedCheck {
 
     // We always do the thorough checks on entry, and never on exit...
     if (entry) {
-      va_start(ap, fmt0);
-      for (const char* fmt = fmt0; *fmt; ++fmt) {
-        char ch = *fmt;
-        if (ch == 'a') {
-          CheckArray(va_arg(ap, jarray));
-        } else if (ch == 'c') {
-          CheckInstance(kClass, va_arg(ap, jclass));
-        } else if (ch == 'L') {
-          CheckObject(va_arg(ap, jobject));
-        } else if (ch == 'r') {
-          CheckReleaseMode(va_arg(ap, jint));
-        } else if (ch == 's') {
-          CheckInstance(kString, va_arg(ap, jstring));
-        } else if (ch == 'u') {
-          if ((flags_ & kFlag_Release) != 0) {
-            CheckNonNull(va_arg(ap, const char*));
-          } else {
-            bool nullable = ((flags_ & kFlag_NullableUtf) != 0);
-            CheckUtfString(va_arg(ap, const char*), nullable);
-          }
-        } else if (ch == 'z') {
-          CheckLengthPositive(va_arg(ap, jsize));
-        } else if (strchr("BCISZbfmpEv", ch) != nullptr) {
-          va_arg(ap, uint32_t);  // Skip this argument.
-        } else if (ch == 'D' || ch == 'F') {
-          va_arg(ap, double);  // Skip this argument.
-        } else if (ch == 'J') {
-          va_arg(ap, uint64_t);  // Skip this argument.
-        } else if (ch == '.') {
-        } else {
-          LOG(FATAL) << "Unknown check format specifier: " << ch;
+      for (size_t i = 0; fmt[i] != '\0'; ++i) {
+        if (!CheckNonHeapValue(fmt[i], args[i])) {
+          return false;
         }
       }
-      va_end(ap);
     }
+    return true;
+  }
+
+  bool CheckReflectedMethod(ScopedObjectAccess& soa, jobject jmethod)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object* method = soa.Decode<mirror::Object*>(jmethod);
+    if (method == nullptr) {
+      AbortF("expected non-null method");
+      return false;
+    }
+    mirror::Class* c = method->GetClass();
+    if (soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Method) != c &&
+        soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Constructor) != c) {
+      AbortF("expected java.lang.reflect.Method or "
+          "java.lang.reflect.Constructor but got object of type %s: %p",
+          PrettyTypeOf(method).c_str(), jmethod);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckConstructor(ScopedObjectAccess& soa, jmethodID mid)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::ArtMethod* method = soa.DecodeMethod(mid);
+    if (method == nullptr) {
+      AbortF("expected non-null constructor");
+      return false;
+    }
+    if (!method->IsConstructor() || method->IsStatic()) {
+      AbortF("expected a constructor but %s: %p", PrettyTypeOf(method).c_str(), mid);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckReflectedField(ScopedObjectAccess& soa, jobject jfield)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object* field = soa.Decode<mirror::Object*>(jfield);
+    if (field == nullptr) {
+      AbortF("expected non-null java.lang.reflect.Field");
+      return false;
+    }
+    mirror::Class* c = field->GetClass();
+    if (soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_reflect_Field) != c) {
+      AbortF("expected java.lang.reflect.Field but got object of type %s: %p",
+             PrettyTypeOf(field).c_str(), jfield);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckThrowable(ScopedObjectAccess& soa, jthrowable jobj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Object* obj = soa.Decode<mirror::Object*>(jobj);
+    if (!obj->GetClass()->IsThrowableClass()) {
+      AbortF("expected java.lang.Throwable but got object of type "
+             "%s: %p", PrettyTypeOf(obj).c_str(), obj);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckThrowableClass(ScopedObjectAccess& soa, jclass jc)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+    if (!c->IsThrowableClass()) {
+      AbortF("expected java.lang.Throwable class but got object of "
+             "type %s: %p", PrettyDescriptor(c).c_str(), c);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckReferenceKind(IndirectRefKind expected_kind, JavaVMExt* vm, Thread* self, jobject obj) {
+    IndirectRefKind found_kind;
+    if (expected_kind == kLocal) {
+      found_kind = GetIndirectRefKind(obj);
+      if (found_kind == kHandleScopeOrInvalid && self->HandleScopeContains(obj)) {
+        found_kind = kLocal;
+      }
+    } else {
+      found_kind = GetIndirectRefKind(obj);
+    }
+    if (obj != nullptr && found_kind != expected_kind) {
+      AbortF("expected reference of kind %s but found %s: %p",
+             ToStr<IndirectRefKind>(expected_kind).c_str(),
+             ToStr<IndirectRefKind>(GetIndirectRefKind(obj)).c_str(),
+             obj);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckInstantiableNonArray(ScopedObjectAccess& soa, jclass jc)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+    if (!c->IsInstantiableNonArray()) {
+      AbortF("can't make objects of type %s: %p", PrettyDescriptor(c).c_str(), c);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckPrimitiveArrayType(ScopedObjectAccess& soa, jarray array, Primitive::Type type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (!CheckArray(soa, array)) {
+      return false;
+    }
+    mirror::Array* a = soa.Decode<mirror::Array*>(array);
+    if (a->GetClass()->GetComponentType()->GetPrimitiveType() != type) {
+      AbortF("incompatible array type %s expected %s[]: %p",
+             PrettyDescriptor(a->GetClass()).c_str(), PrettyDescriptor(type).c_str(), array);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckFieldAccess(ScopedObjectAccess& soa, jobject obj, jfieldID fid, bool is_static,
+                        Primitive::Type type)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (is_static && !CheckStaticFieldID(soa, down_cast<jclass>(obj), fid)) {
+      return false;
+    }
+    if (!is_static && !CheckInstanceFieldID(soa, obj, fid)) {
+      return false;
+    }
+    mirror::ArtField* field = soa.DecodeField(fid);
+    DCHECK(field != nullptr);  // Already checked by Check.
+    if (is_static != field->IsStatic()) {
+      AbortF("attempt to access %s field %s: %p",
+             field->IsStatic() ? "static" : "non-static", PrettyField(field).c_str(), fid);
+      return false;
+    }
+    if (type != field->GetTypeAsPrimitiveType()) {
+      AbortF("attempt to access field %s of type %s with the wrong type %s: %p",
+             PrettyField(field).c_str(), PrettyDescriptor(field->GetTypeDescriptor()).c_str(),
+             PrettyDescriptor(type).c_str(), fid);
+      return false;
+    }
+    if (is_static) {
+      mirror::Object* o = soa.Decode<mirror::Object*>(obj);
+      if (o == nullptr || !o->IsClass()) {
+        AbortF("attempt to access static field %s with a class argument of type %s: %p",
+               PrettyField(field).c_str(), PrettyTypeOf(o).c_str(), fid);
+        return false;
+      }
+      mirror::Class* c = o->AsClass();
+      if (field->GetDeclaringClass() != c) {
+        AbortF("attempt to access static field %s with an incompatible class argument of %s: %p",
+               PrettyField(field).c_str(), PrettyDescriptor(c).c_str(), fid);
+        return false;
+      }
+    } else {
+      mirror::Object* o = soa.Decode<mirror::Object*>(obj);
+      if (o == nullptr || !field->GetDeclaringClass()->IsAssignableFrom(o->GetClass())) {
+        AbortF("attempt to access field %s from an object argument of type %s: %p",
+               PrettyField(field).c_str(), PrettyTypeOf(o).c_str(), fid);
+        return false;
+      }
+    }
+    return true;
   }
 
+ private:
   enum InstanceKind {
     kClass,
     kDirectByteBuffer,
@@ -600,7 +617,7 @@ class ScopedCheck {
    * Because we're looking at an object on the GC heap, we have to switch
    * to "running" mode before doing the checks.
    */
-  bool CheckInstance(InstanceKind kind, jobject java_object)
+  bool CheckInstance(ScopedObjectAccess& soa, InstanceKind kind, jobject java_object, bool null_ok)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     const char* what = nullptr;
     switch (kind) {
@@ -624,15 +641,20 @@ class ScopedCheck {
     }
 
     if (java_object == nullptr) {
-      JniAbortF(function_name_, "%s received null %s", function_name_, what);
-      return false;
+      if (null_ok) {
+        return true;
+      } else {
+        AbortF("%s received NULL %s", function_name_, what);
+        return false;
+      }
     }
 
-    mirror::Object* obj = soa_.Decode<mirror::Object*>(java_object);
+    mirror::Object* obj = soa.Decode<mirror::Object*>(java_object);
     if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(obj)) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
-      JniAbortF(function_name_, "%s is an invalid %s: %p (%p)",
-                what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object, obj);
+      AbortF("%s is an invalid %s: %p (%p)",
+             what, ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(),
+             java_object, obj);
       return false;
     }
 
@@ -654,114 +676,333 @@ class ScopedCheck {
       break;
     }
     if (!okay) {
-      JniAbortF(function_name_, "%s has wrong type: %s", what, PrettyTypeOf(obj).c_str());
+      AbortF("%s has wrong type: %s", what, PrettyTypeOf(obj).c_str());
       return false;
     }
 
     return true;
   }
 
- private:
-  // Set "has_method" to true if we have a valid thread with a method pointer.
-  // We won't have one before attaching a thread, after detaching a thread, or
-  // when shutting down the runtime.
-  void Init(int flags, const char* functionName, bool has_method) {
-    flags_ = flags;
-    function_name_ = functionName;
-    has_method_ = has_method;
-  }
-
   /*
-   * Verify that "array" is non-NULL and points to an Array object.
-   *
-   * Since we're dealing with objects, switch to "running" mode.
+   * Verify that the "mode" argument passed to a primitive array Release
+   * function is one of the valid values.
    */
-  void CheckArray(jarray java_array) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    if (java_array == nullptr) {
-      JniAbortF(function_name_, "jarray was NULL");
-      return;
-    }
-
-    mirror::Array* a = soa_.Decode<mirror::Array*>(java_array);
-    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(a)) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
-      JniAbortF(function_name_, "jarray is an invalid %s: %p (%p)",
-                ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(), java_array, a);
-    } else if (!a->IsArrayInstance()) {
-      JniAbortF(function_name_, "jarray argument has non-array type: %s", PrettyTypeOf(a).c_str());
-    }
-  }
-
-  void CheckLengthPositive(jsize length) {
-    if (length < 0) {
-      JniAbortF(function_name_, "negative jsize: %d", length);
+  bool CheckReleaseMode(jint mode) {
+    if (mode != 0 && mode != JNI_COMMIT && mode != JNI_ABORT) {
+      AbortF("unknown value for release mode: %d", mode);
+      return false;
     }
+    return true;
   }
 
-  mirror::ArtField* CheckFieldID(jfieldID fid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    if (fid == nullptr) {
-      JniAbortF(function_name_, "jfieldID was NULL");
-      return nullptr;
-    }
-    mirror::ArtField* f = soa_.DecodeField(fid);
-    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
-      JniAbortF(function_name_, "invalid jfieldID: %p", fid);
-      return nullptr;
+  bool CheckPossibleHeapValue(ScopedObjectAccess& soa, char fmt, JniValueType arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    switch (fmt) {
+      case 'a':  // jarray
+        return CheckArray(soa, arg.a);
+      case 'c':  // jclass
+        return CheckInstance(soa, kClass, arg.c, false);
+      case 'f':  // jfieldID
+        return CheckFieldID(soa, arg.f) != nullptr;
+      case 'm':  // jmethodID
+        return CheckMethodID(soa, arg.m) != nullptr;
+      case 'r':  // release int
+        return CheckReleaseMode(arg.r);
+      case 's':  // jstring
+        return CheckInstance(soa, kString, arg.s, false);
+      case 't':  // jthrowable
+        return CheckInstance(soa, kThrowable, arg.t, false);
+      case 'E':  // JNIEnv*
+        return CheckThread(arg.E);
+      case 'L':  // jobject
+        return CheckInstance(soa, kObject, arg.L, true);
+      default:
+        return CheckNonHeapValue(fmt, arg);
+    }
+  }
+
+  bool CheckNonHeapValue(char fmt, JniValueType arg) {
+    switch (fmt) {
+      case '.':  // ...
+      case 'p':  // TODO: pointer - null or readable?
+      case 'v':  // JavaVM*
+      case 'B':  // jbyte
+      case 'C':  // jchar
+      case 'D':  // jdouble
+      case 'F':  // jfloat
+      case 'I':  // jint
+      case 'J':  // jlong
+      case 'S':  // jshort
+        break;  // Ignored.
+      case 'b':  // jboolean, why two? Fall-through.
+      case 'Z':
+        return CheckBoolean(arg.Z);
+      case 'u':  // utf8
+        if ((flags_ & kFlag_Release) != 0) {
+          return CheckNonNull(arg.u);
+        } else {
+          bool nullable = ((flags_ & kFlag_NullableUtf) != 0);
+          return CheckUtfString(arg.u, nullable);
+        }
+      case 'w':  // jobjectRefType
+        switch (arg.w) {
+          case JNIInvalidRefType:
+          case JNILocalRefType:
+          case JNIGlobalRefType:
+          case JNIWeakGlobalRefType:
+            break;
+          default:
+            AbortF("Unknown reference type");
+            return false;
+        }
+        break;
+      case 'z':  // jsize
+        return CheckLengthPositive(arg.z);
+      default:
+        AbortF("unknown format specifier: '%c'", fmt);
+        return false;
     }
-    return f;
+    return true;
   }
 
-  mirror::ArtMethod* CheckMethodID(jmethodID mid) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    if (mid == nullptr) {
-      JniAbortF(function_name_, "jmethodID was NULL");
-      return nullptr;
-    }
-    mirror::ArtMethod* m = soa_.DecodeMethod(mid);
-    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
-      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
-      JniAbortF(function_name_, "invalid jmethodID: %p", mid);
-      return nullptr;
+  void TracePossibleHeapValue(ScopedObjectAccess& soa, bool entry, char fmt, JniValueType arg,
+                              std::string* msg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    switch (fmt) {
+      case 'L':  // jobject fall-through.
+      case 'a':  // jarray fall-through.
+      case 's':  // jstring fall-through.
+      case 't':  // jthrowable fall-through.
+        if (arg.L == nullptr) {
+          *msg += "NULL";
+        } else {
+          StringAppendF(msg, "%p", arg.L);
+        }
+        break;
+      case 'c': {  // jclass
+        jclass jc = arg.c;
+        mirror::Class* c = soa.Decode<mirror::Class*>(jc);
+        if (c == nullptr) {
+          *msg += "NULL";
+        } else if (c == kInvalidIndirectRefObject ||
+            !Runtime::Current()->GetHeap()->IsValidObjectAddress(c)) {
+          StringAppendF(msg, "INVALID POINTER:%p", jc);
+        } else if (!c->IsClass()) {
+          *msg += "INVALID NON-CLASS OBJECT OF TYPE:" + PrettyTypeOf(c);
+        } else {
+          *msg += PrettyClass(c);
+          if (!entry) {
+            StringAppendF(msg, " (%p)", jc);
+          }
+        }
+        break;
+      }
+      case 'f': {  // jfieldID
+        jfieldID fid = arg.f;
+        mirror::ArtField* f = soa.DecodeField(fid);
+        *msg += PrettyField(f);
+        if (!entry) {
+          StringAppendF(msg, " (%p)", fid);
+        }
+        break;
+      }
+      case 'm': {  // jmethodID
+        jmethodID mid = arg.m;
+        mirror::ArtMethod* m = soa.DecodeMethod(mid);
+        *msg += PrettyMethod(m);
+        if (!entry) {
+          StringAppendF(msg, " (%p)", mid);
+        }
+        break;
+      }
+      default:
+        TraceNonHeapValue(fmt, arg, msg);
+        break;
     }
-    return m;
   }
 
+  void TraceNonHeapValue(char fmt, JniValueType arg, std::string* msg) {
+    switch (fmt) {
+      case 'B':  // jbyte
+        if (arg.B >= 0 && arg.B < 10) {
+          StringAppendF(msg, "%d", arg.B);
+        } else {
+          StringAppendF(msg, "%#x (%d)", arg.B, arg.B);
+        }
+        break;
+      case 'C':  // jchar
+        if (arg.C < 0x7f && arg.C >= ' ') {
+          StringAppendF(msg, "U+%x ('%c')", arg.C, arg.C);
+        } else {
+          StringAppendF(msg, "U+%x", arg.C);
+        }
+        break;
+      case 'F':  // jfloat
+        StringAppendF(msg, "%g", arg.F);
+        break;
+      case 'D':  // jdouble
+        StringAppendF(msg, "%g", arg.D);
+        break;
+      case 'S':  // jshort
+        StringAppendF(msg, "%d", arg.S);
+        break;
+      case 'i':  // jint - fall-through.
+      case 'I':  // jint
+        StringAppendF(msg, "%d", arg.I);
+        break;
+      case 'J':  // jlong
+        StringAppendF(msg, "%" PRId64, arg.J);
+        break;
+      case 'Z':  // jboolean
+      case 'b':  // jboolean (JNI-style)
+        *msg += arg.b == JNI_TRUE ? "true" : "false";
+        break;
+      case 'V':  // void
+        DCHECK(arg.V == nullptr);
+        *msg += "void";
+        break;
+      case 'v':  // JavaVM*
+        StringAppendF(msg, "(JavaVM*)%p", arg.v);
+        break;
+      case 'E':
+        StringAppendF(msg, "(JNIEnv*)%p", arg.E);
+        break;
+      case 'z':  // non-negative jsize
+        // You might expect jsize to be size_t, but it's not; it's the same as jint.
+        // We only treat this specially so we can do the non-negative check.
+        // TODO: maybe this wasn't worth it?
+        StringAppendF(msg, "%d", arg.z);
+        break;
+      case 'p':  // void* ("pointer")
+        if (arg.p == nullptr) {
+          *msg += "NULL";
+        } else {
+          StringAppendF(msg, "(void*) %p", arg.p);
+        }
+        break;
+      case 'r': {  // jint (release mode)
+        jint releaseMode = arg.r;
+        if (releaseMode == 0) {
+          *msg += "0";
+        } else if (releaseMode == JNI_ABORT) {
+          *msg += "JNI_ABORT";
+        } else if (releaseMode == JNI_COMMIT) {
+          *msg += "JNI_COMMIT";
+        } else {
+          StringAppendF(msg, "invalid release mode %d", releaseMode);
+        }
+        break;
+      }
+      case 'u':  // const char* (Modified UTF-8)
+        if (arg.u == nullptr) {
+          *msg += "NULL";
+        } else {
+          StringAppendF(msg, "\"%s\"", arg.u);
+        }
+        break;
+      case 'w':  // jobjectRefType
+        switch (arg.w) {
+          case JNIInvalidRefType:
+            *msg += "invalid reference type";
+            break;
+          case JNILocalRefType:
+            *msg += "local ref type";
+            break;
+          case JNIGlobalRefType:
+            *msg += "global ref type";
+            break;
+          case JNIWeakGlobalRefType:
+            *msg += "weak global ref type";
+            break;
+          default:
+            *msg += "unknown ref type";
+            break;
+        }
+        break;
+      case '.':
+        *msg += "...";
+        break;
+      default:
+        LOG(FATAL) << function_name_ << ": unknown trace format specifier: '" << fmt << "'";
+    }
+  }
   /*
-   * Verify that "jobj" is a valid object, and that it's an object that JNI
-   * is allowed to know about.  We allow NULL references.
+   * Verify that "array" is non-NULL and points to an Array object.
    *
-   * Switches to "running" mode before performing checks.
+   * Since we're dealing with objects, switch to "running" mode.
    */
-  void CheckObject(jobject java_object)
+  bool CheckArray(ScopedObjectAccess& soa, jarray java_array)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    if (java_object == nullptr) {
-      return;
+    if (UNLIKELY(java_array == nullptr)) {
+      AbortF("jarray was NULL");
+      return false;
     }
 
-    mirror::Object* o = soa_.Decode<mirror::Object*>(java_object);
-    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(o)) {
+    mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+    if (UNLIKELY(!Runtime::Current()->GetHeap()->IsValidObjectAddress(a))) {
+      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      AbortF("jarray is an invalid %s: %p (%p)",
+             ToStr<IndirectRefKind>(GetIndirectRefKind(java_array)).c_str(),
+             java_array, a);
+      return false;
+    } else if (!a->IsArrayInstance()) {
+      AbortF("jarray argument has non-array type: %s", PrettyTypeOf(a).c_str());
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckBoolean(jboolean z) {
+    if (z != JNI_TRUE && z != JNI_FALSE) {
+      AbortF("unexpected jboolean value: %d", z);
+      return false;
+    }
+    return true;
+  }
+
+  bool CheckLengthPositive(jsize length) {
+    if (length < 0) {
+      AbortF("negative jsize: %d", length);
+      return false;
+    }
+    return true;
+  }
+
+  mirror::ArtField* CheckFieldID(ScopedObjectAccess& soa, jfieldID fid)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (fid == nullptr) {
+      AbortF("jfieldID was NULL");
+      return nullptr;
+    }
+    mirror::ArtField* f = soa.DecodeField(fid);
+    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(f) || !f->IsArtField()) {
       Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
-      // TODO: when we remove work_around_app_jni_bugs, this should be impossible.
-      JniAbortF(function_name_, "native code passing in reference to invalid %s: %p",
-                ToStr<IndirectRefKind>(GetIndirectRefKind(java_object)).c_str(), java_object);
+      AbortF("invalid jfieldID: %p", fid);
+      return nullptr;
     }
+    return f;
   }
 
-  /*
-   * Verify that the "mode" argument passed to a primitive array Release
-   * function is one of the valid values.
-   */
-  void CheckReleaseMode(jint mode) {
-    if (mode != 0 && mode != JNI_COMMIT && mode != JNI_ABORT) {
-      JniAbortF(function_name_, "unknown value for release mode: %d", mode);
+  mirror::ArtMethod* CheckMethodID(ScopedObjectAccess& soa, jmethodID mid)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    if (mid == nullptr) {
+      AbortF("jmethodID was NULL");
+      return nullptr;
     }
+    mirror::ArtMethod* m = soa.DecodeMethod(mid);
+    if (!Runtime::Current()->GetHeap()->IsValidObjectAddress(m) || !m->IsArtMethod()) {
+      Runtime::Current()->GetHeap()->DumpSpaces(LOG(ERROR));
+      AbortF("invalid jmethodID: %p", mid);
+      return nullptr;
+    }
+    return m;
   }
 
-  void CheckThread(int flags) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  bool CheckThread(JNIEnv* env) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     Thread* self = Thread::Current();
     if (self == nullptr) {
-      JniAbortF(function_name_, "a thread (tid %d) is making JNI calls without being attached", GetTid());
-      return;
+      AbortF("a thread (tid %d) is making JNI calls without being attached", GetTid());
+      return false;
     }
 
     // Get the *correct* JNIEnv by going through our TLS pointer.
@@ -769,21 +1010,22 @@ class ScopedCheck {
 
     // Verify that the current thread is (a) attached and (b) associated with
     // this particular instance of JNIEnv.
-    if (soa_.Env() != threadEnv) {
-      JniAbortF(function_name_, "thread %s using JNIEnv* from thread %s",
-                ToStr<Thread>(*self).c_str(), ToStr<Thread>(*soa_.Self()).c_str());
-      return;
+    if (env != threadEnv) {
+      AbortF("thread %s using JNIEnv* from thread %s",
+             ToStr<Thread>(*self).c_str(), ToStr<Thread>(*self).c_str());
+      return false;
     }
 
     // Verify that, if this thread previously made a critical "get" call, we
     // do the corresponding "release" call before we try anything else.
-    switch (flags & kFlag_CritMask) {
+    switch (flags_ & kFlag_CritMask) {
     case kFlag_CritOkay:    // okay to call this method
       break;
     case kFlag_CritBad:     // not okay to call
       if (threadEnv->critical) {
-        JniAbortF(function_name_, "thread %s using JNI after critical get", ToStr<Thread>(*self).c_str());
-        return;
+        AbortF("thread %s using JNI after critical get",
+               ToStr<Thread>(*self).c_str());
+        return false;
       }
       break;
     case kFlag_CritGet:     // this is a "get" call
@@ -793,44 +1035,46 @@ class ScopedCheck {
     case kFlag_CritRelease:  // this is a "release" call
       threadEnv->critical--;
       if (threadEnv->critical < 0) {
-        JniAbortF(function_name_, "thread %s called too many critical releases", ToStr<Thread>(*self).c_str());
-        return;
+        AbortF("thread %s called too many critical releases",
+               ToStr<Thread>(*self).c_str());
+        return false;
       }
       break;
     default:
-      LOG(FATAL) << "Bad flags (internal error): " << flags;
+      LOG(FATAL) << "Bad flags (internal error): " << flags_;
     }
 
     // Verify that, if an exception has been raised, the native code doesn't
     // make any JNI calls other than the Exception* methods.
-    if ((flags & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) {
+    if ((flags_ & kFlag_ExcepOkay) == 0 && self->IsExceptionPending()) {
       ThrowLocation throw_location;
       mirror::Throwable* exception = self->GetException(&throw_location);
       std::string type(PrettyTypeOf(exception));
-      JniAbortF(function_name_, "JNI %s called with pending exception '%s' thrown in %s",
-                function_name_, type.c_str(), throw_location.Dump().c_str());
-      return;
+      AbortF("JNI %s called with pending exception '%s' thrown in %s",
+             function_name_, type.c_str(), throw_location.Dump().c_str());
+      return false;
     }
+    return true;
   }
 
   // Verifies that "bytes" points to valid Modified UTF-8 data.
-  void CheckUtfString(const char* bytes, bool nullable) {
+  bool CheckUtfString(const char* bytes, bool nullable) {
     if (bytes == nullptr) {
       if (!nullable) {
-        JniAbortF(function_name_, "non-nullable const char* was NULL");
-        return;
+        AbortF("non-nullable const char* was NULL");
+        return false;
       }
-      return;
+      return true;
     }
 
     const char* errorKind = nullptr;
     uint8_t utf8 = CheckUtfBytes(bytes, &errorKind);
     if (errorKind != nullptr) {
-      JniAbortF(function_name_,
-                "input is not valid Modified UTF-8: illegal %s byte %#x\n"
-                "    string: '%s'", errorKind, utf8, bytes);
-      return;
+      AbortF("input is not valid Modified UTF-8: illegal %s byte %#x\n"
+          "    string: '%s'", errorKind, utf8, bytes);
+      return false;
     }
+    return true;
   }
 
   static uint8_t CheckUtfBytes(const char* bytes, const char** errorKind) {
@@ -882,92 +1126,120 @@ class ScopedCheck {
     return 0;
   }
 
-  const ScopedObjectAccess soa_;
-  const char* function_name_;
-  int flags_;
-  bool has_method_;
+  void AbortF(const char* fmt, ...) __attribute__((__format__(__printf__, 2, 3))) {
+    va_list args;
+    va_start(args, fmt);
+    Runtime::Current()->GetJavaVM()->JniAbortV(function_name_, fmt, args);
+    va_end(args);
+  }
+
+  // The name of the JNI function being checked.
+  const char* const function_name_;
+
+  const int flags_;
   int indent_;
 
+  const bool has_method_;
+
   DISALLOW_COPY_AND_ASSIGN(ScopedCheck);
 };
 
-#define CHECK_JNI_ENTRY(flags, types, args...) \
-  ScopedCheck sc(env, flags, __FUNCTION__); \
-  sc.Check(true, types, ##args)
-
-#define CHECK_JNI_EXIT(type, exp) ({ \
-    auto _rc = (exp); \
-  sc.Check(false, type, _rc); \
-  _rc; })
-#define CHECK_JNI_EXIT_VOID() \
-  sc.Check(false, "V")
-
 /*
  * ===========================================================================
  *      Guarded arrays
  * ===========================================================================
  */
 
-#define kGuardLen       512         /* must be multiple of 2 */
-#define kGuardPattern   0xd5e3      /* uncommon values; d5e3d5e3 invalid addr */
-#define kGuardMagic     0xffd5aa96
-
 /* this gets tucked in at the start of the buffer; struct size must be even */
-struct GuardedCopy {
-  uint32_t magic;
-  uLong adler;
-  size_t original_length;
-  const void* original_ptr;
-
-  /* find the GuardedCopy given the pointer into the "live" data */
-  static inline const GuardedCopy* FromData(const void* dataBuf) {
-    return reinterpret_cast<const GuardedCopy*>(ActualBuffer(dataBuf));
-  }
-
+class GuardedCopy {
+ public:
   /*
    * Create an over-sized buffer to hold the contents of "buf".  Copy it in,
    * filling in the area around it with guard data.
-   *
-   * We use a 16-bit pattern to make a rogue memset less likely to elude us.
    */
-  static void* Create(const void* buf, size_t len, bool modOkay) {
-    size_t newLen = ActualLength(len);
-    uint8_t* newBuf = DebugAlloc(newLen);
+  static void* Create(const void* original_buf, size_t len, bool mod_okay) {
+    const size_t new_len = LengthIncludingRedZones(len);
+    uint8_t* const new_buf = DebugAlloc(new_len);
 
-    // Fill it in with a pattern.
-    uint16_t* pat = reinterpret_cast<uint16_t*>(newBuf);
-    for (size_t i = 0; i < newLen / 2; i++) {
-      *pat++ = kGuardPattern;
+    // If modification is not expected, grab a checksum.
+    uLong adler = 0;
+    if (!mod_okay) {
+      adler = adler32(adler32(0L, Z_NULL, 0), reinterpret_cast<const Bytef*>(original_buf), len);
+    }
+
+    GuardedCopy* copy = new (new_buf) GuardedCopy(original_buf, len, adler);
+
+    // Fill begin region with canary pattern.
+    const size_t kStartCanaryLength = (GuardedCopy::kRedZoneSize / 2) - sizeof(GuardedCopy);
+    for (size_t i = 0, j = 0; i < kStartCanaryLength; ++i) {
+      const_cast<char*>(copy->StartRedZone())[i] = kCanary[j];
+      if (kCanary[j] == '\0') {
+        j = 0;
+      }
     }
 
     // Copy the data in; note "len" could be zero.
-    memcpy(newBuf + kGuardLen / 2, buf, len);
+    memcpy(const_cast<uint8_t*>(copy->BufferWithinRedZones()), original_buf, len);
 
-    // If modification is not expected, grab a checksum.
-    uLong adler = 0;
-    if (!modOkay) {
-      adler = adler32(0L, Z_NULL, 0);
-      adler = adler32(adler, reinterpret_cast<const Bytef*>(buf), len);
-      *reinterpret_cast<uLong*>(newBuf) = adler;
+    // Fill end region with canary pattern.
+    for (size_t i = 0, j = 0; i < kEndCanaryLength; ++i) {
+      const_cast<char*>(copy->EndRedZone())[i] = kCanary[j];
+      if (kCanary[j] == '\0') {
+        j = 0;
+      }
+    }
+
+    return const_cast<uint8_t*>(copy->BufferWithinRedZones());
+  }
+
+  /*
+   * Create a guarded copy of a primitive array.  Modifications to the copied
+   * data are allowed.  Returns a pointer to the copied data.
+   */
+  static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* is_copy) {
+    ScopedObjectAccess soa(env);
+
+    mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+    size_t component_size = a->GetClass()->GetComponentSize();
+    size_t byte_count = a->GetLength() * component_size;
+    void* result = Create(a->GetRawData(component_size, 0), byte_count, true);
+    if (is_copy != nullptr) {
+      *is_copy = JNI_TRUE;
     }
+    return result;
+  }
 
-    GuardedCopy* pExtra = reinterpret_cast<GuardedCopy*>(newBuf);
-    pExtra->magic = kGuardMagic;
-    pExtra->adler = adler;
-    pExtra->original_ptr = buf;
-    pExtra->original_length = len;
+  /*
+   * Perform the array "release" operation, which may or may not copy data
+   * back into the managed heap, and may or may not release the underlying storage.
+   */
+  static void* ReleaseGuardedPACopy(const char* function_name, JNIEnv* env, jarray java_array,
+                                   void* embedded_buf, int mode) {
+    ScopedObjectAccess soa(env);
+    mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
 
-    return newBuf + kGuardLen / 2;
+    if (!GuardedCopy::Check(function_name, embedded_buf, true)) {
+      return nullptr;
+    }
+    if (mode != JNI_ABORT) {
+      size_t len = FromEmbedded(embedded_buf)->original_length_;
+      memcpy(a->GetRawData(a->GetClass()->GetComponentSize(), 0), embedded_buf, len);
+    }
+    if (mode != JNI_COMMIT) {
+      return Destroy(embedded_buf);
+    }
+    return embedded_buf;
   }
 
+
   /*
    * Free up the guard buffer, scrub it, and return the original pointer.
    */
-  static void* Destroy(void* dataBuf) {
-    const GuardedCopy* pExtra = GuardedCopy::FromData(dataBuf);
-    void* original_ptr = const_cast<void*>(pExtra->original_ptr);
-    size_t len = pExtra->original_length;
-    DebugFree(dataBuf, len);
+  static void* Destroy(void* embedded_buf) {
+    GuardedCopy* copy = FromEmbedded(embedded_buf);
+    void* original_ptr = const_cast<void*>(copy->original_ptr_);
+    size_t len = LengthIncludingRedZones(copy->original_length_);
+    DebugFree(copy, len);
     return original_ptr;
   }
 
@@ -977,137 +1249,144 @@ struct GuardedCopy {
    *
    * The caller has already checked that "dataBuf" is non-NULL.
    */
-  static void Check(const char* functionName, const void* dataBuf, bool modOkay) {
+  static bool Check(const char* function_name, const void* embedded_buf, bool mod_okay) {
+    const GuardedCopy* copy = FromEmbedded(embedded_buf);
+    return copy->CheckHeader(function_name, mod_okay) && copy->CheckRedZones(function_name);
+  }
+
+ private:
+  GuardedCopy(const void* original_buf, size_t len, uLong adler) :
+    magic_(kGuardMagic), adler_(adler), original_ptr_(original_buf), original_length_(len) {
+  }
+
+  static uint8_t* DebugAlloc(size_t len) {
+    void* result = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
+    if (result == MAP_FAILED) {
+      PLOG(FATAL) << "GuardedCopy::create mmap(" << len << ") failed";
+    }
+    return reinterpret_cast<uint8_t*>(result);
+  }
+
+  static void DebugFree(void* buf, size_t len) {
+    if (munmap(buf, len) != 0) {
+      PLOG(FATAL) << "munmap(" << buf << ", " << len << ") failed";
+    }
+  }
+
+  static size_t LengthIncludingRedZones(size_t len) {
+    return len + kRedZoneSize;
+  }
+
+  // Get the GuardedCopy from the interior pointer.
+  static GuardedCopy* FromEmbedded(void* embedded_buf) {
+    return reinterpret_cast<GuardedCopy*>(
+        reinterpret_cast<uint8_t*>(embedded_buf) - (kRedZoneSize / 2));
+  }
+
+  static const GuardedCopy* FromEmbedded(const void* embedded_buf) {
+    return reinterpret_cast<const GuardedCopy*>(
+        reinterpret_cast<const uint8_t*>(embedded_buf) - (kRedZoneSize / 2));
+  }
+
+  static void AbortF(const char* jni_function_name, const char* fmt, ...) {
+    va_list args;
+    va_start(args, fmt);
+    Runtime::Current()->GetJavaVM()->JniAbortV(jni_function_name, fmt, args);
+    va_end(args);
+  }
+
+  bool CheckHeader(const char* function_name, bool mod_okay) const {
     static const uint32_t kMagicCmp = kGuardMagic;
-    const uint8_t* fullBuf = ActualBuffer(dataBuf);
-    const GuardedCopy* pExtra = GuardedCopy::FromData(dataBuf);
 
     // Before we do anything with "pExtra", check the magic number.  We
     // do the check with memcmp rather than "==" in case the pointer is
     // unaligned.  If it points to completely bogus memory we're going
     // to crash, but there's no easy way around that.
-    if (memcmp(&pExtra->magic, &kMagicCmp, 4) != 0) {
+    if (UNLIKELY(memcmp(&magic_, &kMagicCmp, 4) != 0)) {
       uint8_t buf[4];
-      memcpy(buf, &pExtra->magic, 4);
-      JniAbortF(functionName,
-          "guard magic does not match (found 0x%02x%02x%02x%02x) -- incorrect data pointer %p?",
-          buf[3], buf[2], buf[1], buf[0], dataBuf);  // Assumes little-endian.
+      memcpy(buf, &magic_, 4);
+      AbortF(function_name,
+             "guard magic does not match (found 0x%02x%02x%02x%02x) -- incorrect data pointer %p?",
+             buf[3], buf[2], buf[1], buf[0], this);  // Assumes little-endian.
+      return false;
     }
 
-    size_t len = pExtra->original_length;
-
-    // Check bottom half of guard; skip over optional checksum storage.
-    const uint16_t* pat = reinterpret_cast<const uint16_t*>(fullBuf);
-    for (size_t i = sizeof(GuardedCopy) / 2; i < (kGuardLen / 2 - sizeof(GuardedCopy)) / 2; i++) {
-      if (pat[i] != kGuardPattern) {
-        JniAbortF(functionName, "guard pattern(1) disturbed at %p +%zd", fullBuf, i*2);
+    // If modification is not expected, verify checksum. Strictly speaking this is wrong: if we
+    // told the client that we made a copy, there's no reason they can't alter the buffer.
+    if (!mod_okay) {
+      uLong computed_adler =
+          adler32(adler32(0L, Z_NULL, 0), BufferWithinRedZones(), original_length_);
+      if (computed_adler != adler_) {
+        AbortF(function_name, "buffer modified (0x%08lx vs 0x%08lx) at address %p",
+               computed_adler, adler_, this);
+        return false;
       }
     }
+    return true;
+  }
 
-    int offset = kGuardLen / 2 + len;
-    if (offset & 0x01) {
-      // Odd byte; expected value depends on endian.
-      const uint16_t patSample = kGuardPattern;
-      uint8_t expected_byte = reinterpret_cast<const uint8_t*>(&patSample)[1];
-      if (fullBuf[offset] != expected_byte) {
-        JniAbortF(functionName, "guard pattern disturbed in odd byte after %p +%d 0x%02x 0x%02x",
-                  fullBuf, offset, fullBuf[offset], expected_byte);
+  bool CheckRedZones(const char* function_name) const {
+    // Check the begin red zone.
+    const size_t kStartCanaryLength = (GuardedCopy::kRedZoneSize / 2) - sizeof(GuardedCopy);
+    for (size_t i = 0, j = 0; i < kStartCanaryLength; ++i) {
+      if (UNLIKELY(StartRedZone()[i] != kCanary[j])) {
+        AbortF(function_name, "guard pattern before buffer disturbed at %p +%zd", this, i);
+        return false;
       }
-      offset++;
-    }
-
-    // Check top half of guard.
-    pat = reinterpret_cast<const uint16_t*>(fullBuf + offset);
-    for (size_t i = 0; i < kGuardLen / 4; i++) {
-      if (pat[i] != kGuardPattern) {
-        JniAbortF(functionName, "guard pattern(2) disturbed at %p +%zd", fullBuf, offset + i*2);
+      if (kCanary[j] == '\0') {
+        j = 0;
       }
     }
 
-    // If modification is not expected, verify checksum.  Strictly speaking
-    // this is wrong: if we told the client that we made a copy, there's no
-    // reason they can't alter the buffer.
-    if (!modOkay) {
-      uLong adler = adler32(0L, Z_NULL, 0);
-      adler = adler32(adler, (const Bytef*)dataBuf, len);
-      if (pExtra->adler != adler) {
-        JniAbortF(functionName, "buffer modified (0x%08lx vs 0x%08lx) at address %p",
-                  pExtra->adler, adler, dataBuf);
+    // Check end region.
+    for (size_t i = 0, j = 0; i < kEndCanaryLength; ++i) {
+      if (UNLIKELY(EndRedZone()[i] != kCanary[j])) {
+        size_t offset_from_buffer_start =
+            &(EndRedZone()[i]) - &(StartRedZone()[kStartCanaryLength]);
+        AbortF(function_name, "guard pattern after buffer disturbed at %p +%zd", this,
+               offset_from_buffer_start);
+        return false;
+      }
+      if (kCanary[j] == '\0') {
+        j = 0;
       }
     }
+    return true;
   }
 
- private:
-  static uint8_t* DebugAlloc(size_t len) {
-    void* result = mmap(nullptr, len, PROT_READ|PROT_WRITE, MAP_PRIVATE|MAP_ANON, -1, 0);
-    if (result == MAP_FAILED) {
-      PLOG(FATAL) << "GuardedCopy::create mmap(" << len << ") failed";
-    }
-    return reinterpret_cast<uint8_t*>(result);
-  }
-
-  static void DebugFree(void* dataBuf, size_t len) {
-    uint8_t* fullBuf = ActualBuffer(dataBuf);
-    size_t totalByteCount = ActualLength(len);
-    // TODO: we could mprotect instead, and keep the allocation around for a while.
-    // This would be even more expensive, but it might catch more errors.
-    // if (mprotect(fullBuf, totalByteCount, PROT_NONE) != 0) {
-    //     PLOG(WARNING) << "mprotect(PROT_NONE) failed";
-    // }
-    if (munmap(fullBuf, totalByteCount) != 0) {
-      PLOG(FATAL) << "munmap(" << reinterpret_cast<void*>(fullBuf) << ", " << totalByteCount << ") failed";
-    }
-  }
-
-  static const uint8_t* ActualBuffer(const void* dataBuf) {
-    return reinterpret_cast<const uint8_t*>(dataBuf) - kGuardLen / 2;
+  // Location that canary value will be written before the guarded region.
+  const char* StartRedZone() const {
+    const uint8_t* buf = reinterpret_cast<const uint8_t*>(this);
+    return reinterpret_cast<const char*>(buf + sizeof(GuardedCopy));
   }
 
-  static uint8_t* ActualBuffer(void* dataBuf) {
-    return reinterpret_cast<uint8_t*>(dataBuf) - kGuardLen / 2;
+  // Return the interior embedded buffer.
+  const uint8_t* BufferWithinRedZones() const {
+    const uint8_t* embedded_buf = reinterpret_cast<const uint8_t*>(this) + (kRedZoneSize / 2);
+    return embedded_buf;
   }
 
-  // Underlying length of a user allocation of 'length' bytes.
-  static size_t ActualLength(size_t length) {
-    return (length + kGuardLen + 1) & ~0x01;
+  // Location that canary value will be written after the guarded region.
+  const char* EndRedZone() const {
+    const uint8_t* buf = reinterpret_cast<const uint8_t*>(this);
+    size_t buf_len = LengthIncludingRedZones(original_length_);
+    return reinterpret_cast<const char*>(buf + (buf_len - (kRedZoneSize / 2)));
   }
-};
 
-/*
- * Create a guarded copy of a primitive array.  Modifications to the copied
- * data are allowed.  Returns a pointer to the copied data.
- */
-static void* CreateGuardedPACopy(JNIEnv* env, const jarray java_array, jboolean* isCopy) {
-  ScopedObjectAccess soa(env);
-
-  mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
-  size_t component_size = a->GetClass()->GetComponentSize();
-  size_t byte_count = a->GetLength() * component_size;
-  void* result = GuardedCopy::Create(a->GetRawData(component_size, 0), byte_count, true);
-  if (isCopy != nullptr) {
-    *isCopy = JNI_TRUE;
-  }
-  return result;
-}
+  static constexpr size_t kRedZoneSize = 512;
+  static constexpr size_t kEndCanaryLength = kRedZoneSize / 2;
 
-/*
- * Perform the array "release" operation, which may or may not copy data
- * back into the managed heap, and may or may not release the underlying storage.
- */
-static void ReleaseGuardedPACopy(JNIEnv* env, jarray java_array, void* dataBuf, int mode) {
-  ScopedObjectAccess soa(env);
-  mirror::Array* a = soa.Decode<mirror::Array*>(java_array);
+  // Value written before and after the guarded array.
+  static const char* const kCanary;
 
-  GuardedCopy::Check(__FUNCTION__, dataBuf, true);
+  static constexpr uint32_t kGuardMagic = 0xffd5aa96;
 
-  if (mode != JNI_ABORT) {
-    size_t len = GuardedCopy::FromData(dataBuf)->original_length;
-    memcpy(a->GetRawData(a->GetClass()->GetComponentSize(), 0), dataBuf, len);
-  }
-  if (mode != JNI_COMMIT) {
-    GuardedCopy::Destroy(dataBuf);
-  }
-}
+  const uint32_t magic_;
+  const uLong adler_;
+  const void* const original_ptr_;
+  const size_t original_length_;
+};
+const char* const GuardedCopy::kCanary = "JNI BUFFER RED ZONE";
 
 /*
  * ===========================================================================
@@ -1118,667 +1397,1953 @@ static void ReleaseGuardedPACopy(JNIEnv* env, jarray java_array, void* dataBuf,
 class CheckJNI {
  public:
   static jint GetVersion(JNIEnv* env) {
-    CHECK_JNI_ENTRY(kFlag_Default, "E", env);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->GetVersion(env));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[1] = {{.E = env }};
+    if (sc.Check(soa, true, "E", args)) {
+      JniValueType result;
+      result.I = baseEnv(env)->GetVersion(env);
+      if (sc.Check(soa, false, "I", &result)) {
+        return result.I;
+      }
+    }
+    return JNI_ERR;
+  }
+
+  static jint GetJavaVM(JNIEnv *env, JavaVM **vm) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env }, {.p = vm}};
+    if (sc.Check(soa, true, "Ep", args)) {
+      JniValueType result;
+      result.i = baseEnv(env)->GetJavaVM(env, vm);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
+    }
+    return JNI_ERR;
+  }
+
+  static jint RegisterNatives(JNIEnv* env, jclass c, const JNINativeMethod* methods, jint nMethods) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[4] = {{.E = env }, {.c = c}, {.p = methods}, {.I = nMethods}};
+    if (sc.Check(soa, true, "EcpI", args)) {
+      JniValueType result;
+      result.i = baseEnv(env)->RegisterNatives(env, c, methods, nMethods);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
+    }
+    return JNI_ERR;
   }
 
-  static jclass DefineClass(JNIEnv* env, const char* name, jobject loader, const jbyte* buf, jsize bufLen) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EuLpz", env, name, loader, buf, bufLen);
-    sc.CheckClassName(name);
-    return CHECK_JNI_EXIT("c", baseEnv(env)->DefineClass(env, name, loader, buf, bufLen));
+  static jint UnregisterNatives(JNIEnv* env, jclass c) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env }, {.c = c}};
+    if (sc.Check(soa, true, "Ec", args)) {
+      JniValueType result;
+      result.i = baseEnv(env)->UnregisterNatives(env, c);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
+    }
+    return JNI_ERR;
+  }
+
+  static jobjectRefType GetObjectRefType(JNIEnv* env, jobject obj) {
+    // Note: we use "Ep" rather than "EL" because this is the one JNI function that it's okay to
+    // pass an invalid reference to.
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env }, {.p = obj}};
+    if (sc.Check(soa, true, "Ep", args)) {
+      JniValueType result;
+      result.w = baseEnv(env)->GetObjectRefType(env, obj);
+      if (sc.Check(soa, false, "w", &result)) {
+        return result.w;
+      }
+    }
+    return JNIInvalidRefType;
+  }
+
+  static jclass DefineClass(JNIEnv* env, const char* name, jobject loader, const jbyte* buf,
+                            jsize bufLen) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[5] = {{.E = env}, {.u = name}, {.L = loader}, {.p = buf}, {.z = bufLen}};
+    if (sc.Check(soa, true, "EuLpz", args) && sc.CheckClassName(name)) {
+      JniValueType result;
+      result.c = baseEnv(env)->DefineClass(env, name, loader, buf, bufLen);
+      if (sc.Check(soa, false, "c", &result)) {
+        return result.c;
+      }
+    }
+    return nullptr;
   }
 
   static jclass FindClass(JNIEnv* env, const char* name) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Eu", env, name);
-    sc.CheckClassName(name);
-    return CHECK_JNI_EXIT("c", baseEnv(env)->FindClass(env, name));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.u = name}};
+    if (sc.Check(soa, true, "Eu", args) && sc.CheckClassName(name)) {
+      JniValueType result;
+      result.c = baseEnv(env)->FindClass(env, name);
+      if (sc.Check(soa, false, "c", &result)) {
+        return result.c;
+      }
+    }
+    return nullptr;
   }
 
   static jclass GetSuperclass(JNIEnv* env, jclass c) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
-    return CHECK_JNI_EXIT("c", baseEnv(env)->GetSuperclass(env, c));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.c = c}};
+    if (sc.Check(soa, true, "Ec", args)) {
+      JniValueType result;
+      result.c = baseEnv(env)->GetSuperclass(env, c);
+      if (sc.Check(soa, false, "c", &result)) {
+        return result.c;
+      }
+    }
+    return nullptr;
   }
 
   static jboolean IsAssignableFrom(JNIEnv* env, jclass c1, jclass c2) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecc", env, c1, c2);
-    return CHECK_JNI_EXIT("b", baseEnv(env)->IsAssignableFrom(env, c1, c2));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.c = c1}, {.c = c2}};
+    if (sc.Check(soa, true, "Ecc", args)) {
+      JniValueType result;
+      result.b = baseEnv(env)->IsAssignableFrom(env, c1, c2);
+      if (sc.Check(soa, false, "b", &result)) {
+        return result.b;
+      }
+    }
+    return JNI_FALSE;
   }
 
   static jmethodID FromReflectedMethod(JNIEnv* env, jobject method) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, method);
-    // TODO: check that 'field' is a java.lang.reflect.Method.
-    return CHECK_JNI_EXIT("m", baseEnv(env)->FromReflectedMethod(env, method));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.L = method}};
+    if (sc.Check(soa, true, "EL", args) && sc.CheckReflectedMethod(soa, method)) {
+      JniValueType result;
+      result.m = baseEnv(env)->FromReflectedMethod(env, method);
+      if (sc.Check(soa, false, "m", &result)) {
+        return result.m;
+      }
+    }
+    return nullptr;
   }
 
   static jfieldID FromReflectedField(JNIEnv* env, jobject field) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, field);
-    // TODO: check that 'field' is a java.lang.reflect.Field.
-    return CHECK_JNI_EXIT("f", baseEnv(env)->FromReflectedField(env, field));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.L = field}};
+    if (sc.Check(soa, true, "EL", args) && sc.CheckReflectedField(soa, field)) {
+      JniValueType result;
+      result.f = baseEnv(env)->FromReflectedField(env, field);
+      if (sc.Check(soa, false, "f", &result)) {
+        return result.f;
+      }
+    }
+    return nullptr;
   }
 
   static jobject ToReflectedMethod(JNIEnv* env, jclass cls, jmethodID mid, jboolean isStatic) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecmb", env, cls, mid, isStatic);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->ToReflectedMethod(env, cls, mid, isStatic));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[4] = {{.E = env}, {.c = cls}, {.m = mid}, {.b = isStatic}};
+    if (sc.Check(soa, true, "Ecmb", args)) {
+      JniValueType result;
+      result.L = baseEnv(env)->ToReflectedMethod(env, cls, mid, isStatic);
+      if (sc.Check(soa, false, "L", &result) && (result.L != nullptr)) {
+        DCHECK(sc.CheckReflectedMethod(soa, result.L));
+        return result.L;
+      }
+    }
+    return nullptr;
   }
 
   static jobject ToReflectedField(JNIEnv* env, jclass cls, jfieldID fid, jboolean isStatic) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecfb", env, cls, fid, isStatic);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->ToReflectedField(env, cls, fid, isStatic));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[4] = {{.E = env}, {.c = cls}, {.f = fid}, {.b = isStatic}};
+    if (sc.Check(soa, true, "Ecfb", args)) {
+      JniValueType result;
+      result.L = baseEnv(env)->ToReflectedField(env, cls, fid, isStatic);
+      if (sc.Check(soa, false, "L", &result) && (result.L != nullptr)) {
+        DCHECK(sc.CheckReflectedField(soa, result.L));
+        return result.L;
+      }
+    }
+    return nullptr;
   }
 
   static jint Throw(JNIEnv* env, jthrowable obj) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
-    // TODO: check that 'obj' is a java.lang.Throwable.
-    return CHECK_JNI_EXIT("I", baseEnv(env)->Throw(env, obj));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.t = obj}};
+    if (sc.Check(soa, true, "Et", args) && sc.CheckThrowable(soa, obj)) {
+      JniValueType result;
+      result.i = baseEnv(env)->Throw(env, obj);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
+    }
+    return JNI_ERR;
   }
 
   static jint ThrowNew(JNIEnv* env, jclass c, const char* message) {
-    CHECK_JNI_ENTRY(kFlag_NullableUtf, "Ecu", env, c, message);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->ThrowNew(env, c, message));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_NullableUtf, __FUNCTION__);
+    JniValueType args[5] = {{.E = env}, {.c = c}, {.u = message}};
+    if (sc.Check(soa, true, "Ecu", args) && sc.CheckThrowableClass(soa, c)) {
+      JniValueType result;
+      result.i = baseEnv(env)->ThrowNew(env, c, message);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
+    }
+    return JNI_ERR;
   }
 
   static jthrowable ExceptionOccurred(JNIEnv* env) {
-    CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->ExceptionOccurred(env));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+    JniValueType args[1] = {{.E = env}};
+    if (sc.Check(soa, true, "E", args)) {
+      JniValueType result;
+      result.t = baseEnv(env)->ExceptionOccurred(env);
+      if (sc.Check(soa, false, "t", &result)) {
+        return result.t;
+      }
+    }
+    return nullptr;
   }
 
   static void ExceptionDescribe(JNIEnv* env) {
-    CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
-    baseEnv(env)->ExceptionDescribe(env);
-    CHECK_JNI_EXIT_VOID();
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+    JniValueType args[1] = {{.E = env}};
+    if (sc.Check(soa, true, "E", args)) {
+      JniValueType result;
+      baseEnv(env)->ExceptionDescribe(env);
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
+    }
   }
 
   static void ExceptionClear(JNIEnv* env) {
-    CHECK_JNI_ENTRY(kFlag_ExcepOkay, "E", env);
-    baseEnv(env)->ExceptionClear(env);
-    CHECK_JNI_EXIT_VOID();
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+    JniValueType args[1] = {{.E = env}};
+    if (sc.Check(soa, true, "E", args)) {
+      JniValueType result;
+      baseEnv(env)->ExceptionClear(env);
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
+    }
+  }
+
+  static jboolean ExceptionCheck(JNIEnv* env) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_CritOkay | kFlag_ExcepOkay, __FUNCTION__);
+    JniValueType args[1] = {{.E = env}};
+    if (sc.Check(soa, true, "E", args)) {
+      JniValueType result;
+      result.b = baseEnv(env)->ExceptionCheck(env);
+      if (sc.Check(soa, false, "b", &result)) {
+        return result.b;
+      }
+    }
+    return JNI_FALSE;
   }
 
   static void FatalError(JNIEnv* env, const char* msg) {
     // The JNI specification doesn't say it's okay to call FatalError with a pending exception,
     // but you're about to abort anyway, and it's quite likely that you have a pending exception,
     // and it's not unimaginable that you don't know that you do. So we allow it.
-    CHECK_JNI_ENTRY(kFlag_ExcepOkay | kFlag_NullableUtf, "Eu", env, msg);
-    baseEnv(env)->FatalError(env, msg);
-    CHECK_JNI_EXIT_VOID();
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay | kFlag_NullableUtf, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.u = msg}};
+    if (sc.Check(soa, true, "Eu", args)) {
+      JniValueType result;
+      baseEnv(env)->FatalError(env, msg);
+      // Unreachable.
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
+    }
   }
 
   static jint PushLocalFrame(JNIEnv* env, jint capacity) {
-    CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EI", env, capacity);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->PushLocalFrame(env, capacity));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.I = capacity}};
+    if (sc.Check(soa, true, "EI", args)) {
+      JniValueType result;
+      result.i = baseEnv(env)->PushLocalFrame(env, capacity);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
+    }
+    return JNI_ERR;
   }
 
   static jobject PopLocalFrame(JNIEnv* env, jobject res) {
-    CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, res);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->PopLocalFrame(env, res));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.L = res}};
+    if (sc.Check(soa, true, "EL", args)) {
+      JniValueType result;
+      result.L = baseEnv(env)->PopLocalFrame(env, res);
+      sc.Check(soa, false, "L", &result);
+      return result.L;
+    }
+    return nullptr;
   }
 
   static jobject NewGlobalRef(JNIEnv* env, jobject obj) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->NewGlobalRef(env, obj));
+    return NewRef(__FUNCTION__, env, obj, kGlobal);
   }
 
-  static jobject NewLocalRef(JNIEnv* env, jobject ref) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, ref);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->NewLocalRef(env, ref));
+  static jobject NewLocalRef(JNIEnv* env, jobject obj) {
+    return NewRef(__FUNCTION__, env, obj, kLocal);
   }
 
-  static void DeleteGlobalRef(JNIEnv* env, jobject globalRef) {
-    CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, globalRef);
-    if (globalRef != nullptr && GetIndirectRefKind(globalRef) != kGlobal) {
-      JniAbortF(__FUNCTION__, "DeleteGlobalRef on %s: %p",
-                ToStr<IndirectRefKind>(GetIndirectRefKind(globalRef)).c_str(), globalRef);
-    } else {
-      baseEnv(env)->DeleteGlobalRef(env, globalRef);
-      CHECK_JNI_EXIT_VOID();
-    }
+  static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) {
+    return NewRef(__FUNCTION__, env, obj, kWeakGlobal);
   }
 
-  static void DeleteWeakGlobalRef(JNIEnv* env, jweak weakGlobalRef) {
-    CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, weakGlobalRef);
-    if (weakGlobalRef != nullptr && GetIndirectRefKind(weakGlobalRef) != kWeakGlobal) {
-      JniAbortF(__FUNCTION__, "DeleteWeakGlobalRef on %s: %p",
-                ToStr<IndirectRefKind>(GetIndirectRefKind(weakGlobalRef)).c_str(), weakGlobalRef);
-    } else {
-      baseEnv(env)->DeleteWeakGlobalRef(env, weakGlobalRef);
-      CHECK_JNI_EXIT_VOID();
-    }
+  static void DeleteGlobalRef(JNIEnv* env, jobject obj) {
+    DeleteRef(__FUNCTION__, env, obj, kGlobal);
   }
 
-  static void DeleteLocalRef(JNIEnv* env, jobject localRef) {
-    CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, localRef);
-    if (localRef != nullptr && GetIndirectRefKind(localRef) != kLocal && !IsHandleScopeLocalRef(env, localRef)) {
-      JniAbortF(__FUNCTION__, "DeleteLocalRef on %s: %p",
-                ToStr<IndirectRefKind>(GetIndirectRefKind(localRef)).c_str(), localRef);
-    } else {
-      baseEnv(env)->DeleteLocalRef(env, localRef);
-      CHECK_JNI_EXIT_VOID();
-    }
+  static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) {
+    DeleteRef(__FUNCTION__, env, obj, kWeakGlobal);
+  }
+
+  static void DeleteLocalRef(JNIEnv* env, jobject obj) {
+    DeleteRef(__FUNCTION__, env, obj, kLocal);
   }
 
   static jint EnsureLocalCapacity(JNIEnv *env, jint capacity) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EI", env, capacity);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->EnsureLocalCapacity(env, capacity));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.I = capacity}};
+    if (sc.Check(soa, true, "EI", args)) {
+      JniValueType result;
+      result.i = baseEnv(env)->EnsureLocalCapacity(env, capacity);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
+    }
+    return JNI_ERR;
   }
 
   static jboolean IsSameObject(JNIEnv* env, jobject ref1, jobject ref2) {
-    CHECK_JNI_ENTRY(kFlag_Default, "ELL", env, ref1, ref2);
-    return CHECK_JNI_EXIT("b", baseEnv(env)->IsSameObject(env, ref1, ref2));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.L = ref1}, {.L = ref2}};
+    if (sc.Check(soa, true, "ELL", args)) {
+      JniValueType result;
+      result.b = baseEnv(env)->IsSameObject(env, ref1, ref2);
+      if (sc.Check(soa, false, "b", &result)) {
+        return result.b;
+      }
+    }
+    return JNI_FALSE;
   }
 
   static jobject AllocObject(JNIEnv* env, jclass c) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->AllocObject(env, c));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.c = c}};
+    if (sc.Check(soa, true, "Ec", args) && sc.CheckInstantiableNonArray(soa, c)) {
+      JniValueType result;
+      result.L = baseEnv(env)->AllocObject(env, c);
+      if (sc.Check(soa, false, "L", &result)) {
+        return result.L;
+      }
+    }
+    return nullptr;
+  }
+
+  static jobject NewObjectV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+    if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
+        sc.CheckConstructor(soa, mid)) {
+      JniValueType result;
+      result.L = baseEnv(env)->NewObjectV(env, c, mid, vargs);
+      if (sc.Check(soa, false, "L", &result)) {
+        return result.L;
+      }
+    }
+    return nullptr;
   }
 
   static jobject NewObject(JNIEnv* env, jclass c, jmethodID mid, ...) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
     va_list args;
     va_start(args, mid);
-    jobject result = baseEnv(env)->NewObjectV(env, c, mid, args);
+    jobject result = NewObjectV(env, c, mid, args);
     va_end(args);
-    return CHECK_JNI_EXIT("L", result);
-  }
-
-  static jobject NewObjectV(JNIEnv* env, jclass c, jmethodID mid, va_list args) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->NewObjectV(env, c, mid, args));
+    return result;
   }
 
-  static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, jvalue* args) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->NewObjectA(env, c, mid, args));
+  static jobject NewObjectA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+    if (sc.Check(soa, true, "Ecm.", args) && sc.CheckInstantiableNonArray(soa, c) &&
+        sc.CheckConstructor(soa, mid)) {
+      JniValueType result;
+      result.L = baseEnv(env)->NewObjectA(env, c, mid, vargs);
+      if (sc.Check(soa, false, "L", &result)) {
+        return result.L;
+      }
+    }
+    return nullptr;
   }
 
   static jclass GetObjectClass(JNIEnv* env, jobject obj) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
-    return CHECK_JNI_EXIT("c", baseEnv(env)->GetObjectClass(env, obj));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.L = obj}};
+    if (sc.Check(soa, true, "EL", args)) {
+      JniValueType result;
+      result.c = baseEnv(env)->GetObjectClass(env, obj);
+      if (sc.Check(soa, false, "c", &result)) {
+        return result.c;
+      }
+    }
+    return nullptr;
   }
 
   static jboolean IsInstanceOf(JNIEnv* env, jobject obj, jclass c) {
-    CHECK_JNI_ENTRY(kFlag_Default, "ELc", env, obj, c);
-    return CHECK_JNI_EXIT("b", baseEnv(env)->IsInstanceOf(env, obj, c));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.L = obj}, {.c = c}};
+    if (sc.Check(soa, true, "ELc", args)) {
+      JniValueType result;
+      result.b = baseEnv(env)->IsInstanceOf(env, obj, c);
+      if (sc.Check(soa, false, "b", &result)) {
+        return result.b;
+      }
+    }
+    return JNI_FALSE;
   }
 
   static jmethodID GetMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
-    return CHECK_JNI_EXIT("m", baseEnv(env)->GetMethodID(env, c, name, sig));
+    return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, false);
   }
 
-  static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
-    return CHECK_JNI_EXIT("f", baseEnv(env)->GetFieldID(env, c, name, sig));
+  static jmethodID GetStaticMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
+    return GetMethodIDInternal(__FUNCTION__, env, c, name, sig, true);
   }
 
-  static jmethodID GetStaticMethodID(JNIEnv* env, jclass c, const char* name, const char* sig) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
-    return CHECK_JNI_EXIT("m", baseEnv(env)->GetStaticMethodID(env, c, name, sig));
+  static jfieldID GetFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
+    return GetFieldIDInternal(__FUNCTION__, env, c, name, sig, false);
   }
 
   static jfieldID GetStaticFieldID(JNIEnv* env, jclass c, const char* name, const char* sig) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ecuu", env, c, name, sig);
-    return CHECK_JNI_EXIT("f", baseEnv(env)->GetStaticFieldID(env, c, name, sig));
-  }
-
-#define FIELD_ACCESSORS(_ctype, _jname, _jvalue_type, _type) \
-    static _ctype GetStatic##_jname##Field(JNIEnv* env, jclass c, jfieldID fid) { \
-        CHECK_JNI_ENTRY(kFlag_Default, "Ecf", env, c, fid); \
-        sc.CheckStaticFieldID(c, fid); \
-        return CHECK_JNI_EXIT(_type, baseEnv(env)->GetStatic##_jname##Field(env, c, fid)); \
-    } \
-    static _ctype Get##_jname##Field(JNIEnv* env, jobject obj, jfieldID fid) { \
-        CHECK_JNI_ENTRY(kFlag_Default, "ELf", env, obj, fid); \
-        sc.CheckInstanceFieldID(obj, fid); \
-        return CHECK_JNI_EXIT(_type, baseEnv(env)->Get##_jname##Field(env, obj, fid)); \
-    } \
-    static void SetStatic##_jname##Field(JNIEnv* env, jclass c, jfieldID fid, _ctype value) { \
-        CHECK_JNI_ENTRY(kFlag_Default, "Ecf" _type, env, c, fid, value); \
-        sc.CheckStaticFieldID(c, fid); \
-        /* "value" arg only used when type == ref */ \
-        jvalue java_type_value; \
-        java_type_value._jvalue_type = value; \
-        sc.CheckFieldType(java_type_value, fid, _type[0], true); \
-        baseEnv(env)->SetStatic##_jname##Field(env, c, fid, value); \
-        CHECK_JNI_EXIT_VOID(); \
-    } \
-    static void Set##_jname##Field(JNIEnv* env, jobject obj, jfieldID fid, _ctype value) { \
-        CHECK_JNI_ENTRY(kFlag_Default, "ELf" _type, env, obj, fid, value); \
-        sc.CheckInstanceFieldID(obj, fid); \
-        /* "value" arg only used when type == ref */ \
-        jvalue java_type_value; \
-        java_type_value._jvalue_type = value; \
-        sc.CheckFieldType(java_type_value, fid, _type[0], false); \
-        baseEnv(env)->Set##_jname##Field(env, obj, fid, value); \
-        CHECK_JNI_EXIT_VOID(); \
-    }
-
-FIELD_ACCESSORS(jobject, Object, l, "L");
-FIELD_ACCESSORS(jboolean, Boolean, z, "Z");
-FIELD_ACCESSORS(jbyte, Byte, b, "B");
-FIELD_ACCESSORS(jchar, Char, c, "C");
-FIELD_ACCESSORS(jshort, Short, s, "S");
-FIELD_ACCESSORS(jint, Int, i, "I");
-FIELD_ACCESSORS(jlong, Long, j, "J");
-FIELD_ACCESSORS(jfloat, Float, f, "F");
-FIELD_ACCESSORS(jdouble, Double, d, "D");
-
-#define CALL(_ctype, _jname, _retdecl, _retasgn, _retok, _retsig) \
-    /* Virtual... */ \
-    static _ctype Call##_jname##Method(JNIEnv* env, jobject obj, \
-        jmethodID mid, ...) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, false); \
-        sc.CheckVirtualMethod(obj, mid); \
-        _retdecl; \
-        va_list args; \
-        va_start(args, mid); \
-        _retasgn(baseEnv(env)->Call##_jname##MethodV(env, obj, mid, args)); \
-        va_end(args); \
-        _retok; \
-    } \
-    static _ctype Call##_jname##MethodV(JNIEnv* env, jobject obj, \
-        jmethodID mid, va_list args) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, false); \
-        sc.CheckVirtualMethod(obj, mid); \
-        _retdecl; \
-        _retasgn(baseEnv(env)->Call##_jname##MethodV(env, obj, mid, args)); \
-        _retok; \
-    } \
-    static _ctype Call##_jname##MethodA(JNIEnv* env, jobject obj, \
-        jmethodID mid, jvalue* args) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "ELm.", env, obj, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, false); \
-        sc.CheckVirtualMethod(obj, mid); \
-        _retdecl; \
-        _retasgn(baseEnv(env)->Call##_jname##MethodA(env, obj, mid, args)); \
-        _retok; \
-    } \
-    /* Non-virtual... */ \
-    static _ctype CallNonvirtual##_jname##Method(JNIEnv* env, \
-        jobject obj, jclass c, jmethodID mid, ...) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, false); \
-        sc.CheckVirtualMethod(obj, mid); \
-        _retdecl; \
-        va_list args; \
-        va_start(args, mid); \
-        _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodV(env, obj, c, mid, args)); \
-        va_end(args); \
-        _retok; \
-    } \
-    static _ctype CallNonvirtual##_jname##MethodV(JNIEnv* env, \
-        jobject obj, jclass c, jmethodID mid, va_list args) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, false); \
-        sc.CheckVirtualMethod(obj, mid); \
-        _retdecl; \
-        _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodV(env, obj, c, mid, args)); \
-        _retok; \
-    } \
-    static _ctype CallNonvirtual##_jname##MethodA(JNIEnv* env, \
-        jobject obj, jclass c, jmethodID mid, jvalue* args) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "ELcm.", env, obj, c, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, false); \
-        sc.CheckVirtualMethod(obj, mid); \
-        _retdecl; \
-        _retasgn(baseEnv(env)->CallNonvirtual##_jname##MethodA(env, obj, c, mid, args)); \
-        _retok; \
-    } \
-    /* Static... */ \
-    static _ctype CallStatic##_jname##Method(JNIEnv* env, jclass c, jmethodID mid, ...) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, true); \
-        sc.CheckStaticMethod(c, mid); \
-        _retdecl; \
-        va_list args; \
-        va_start(args, mid); \
-        _retasgn(baseEnv(env)->CallStatic##_jname##MethodV(env, c, mid, args)); \
-        va_end(args); \
-        _retok; \
-    } \
-    static _ctype CallStatic##_jname##MethodV(JNIEnv* env, jclass c, jmethodID mid, va_list args) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, true); \
-        sc.CheckStaticMethod(c, mid); \
-        _retdecl; \
-         _retasgn(baseEnv(env)->CallStatic##_jname##MethodV(env, c, mid, args)); \
-        _retok; \
-    } \
-    static _ctype CallStatic##_jname##MethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* args) \
-    { \
-        CHECK_JNI_ENTRY(kFlag_Default, "Ecm.", env, c, mid); /* TODO: args! */ \
-        sc.CheckSig(mid, _retsig, true); \
-        sc.CheckStaticMethod(c, mid); \
-        _retdecl; \
-        _retasgn(baseEnv(env)->CallStatic##_jname##MethodA(env, c, mid, args)); \
-        _retok; \
-    }
-
-#define NON_VOID_RETURN(_retsig, _ctype) return CHECK_JNI_EXIT(_retsig, (_ctype) result)
-#define VOID_RETURN CHECK_JNI_EXIT_VOID()
-
-CALL(jobject, Object, mirror::Object* result, result = reinterpret_cast<mirror::Object*>, NON_VOID_RETURN("L", jobject), "L");
-CALL(jboolean, Boolean, jboolean result, result =, NON_VOID_RETURN("Z", jboolean), "Z");
-CALL(jbyte, Byte, jbyte result, result =, NON_VOID_RETURN("B", jbyte), "B");
-CALL(jchar, Char, jchar result, result =, NON_VOID_RETURN("C", jchar), "C");
-CALL(jshort, Short, jshort result, result =, NON_VOID_RETURN("S", jshort), "S");
-CALL(jint, Int, jint result, result =, NON_VOID_RETURN("I", jint), "I");
-CALL(jlong, Long, jlong result, result =, NON_VOID_RETURN("J", jlong), "J");
-CALL(jfloat, Float, jfloat result, result =, NON_VOID_RETURN("F", jfloat), "F");
-CALL(jdouble, Double, jdouble result, result =, NON_VOID_RETURN("D", jdouble), "D");
-CALL(void, Void, , , VOID_RETURN, "V");
-
-  static jstring NewString(JNIEnv* env, const jchar* unicodeChars, jsize len) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Epz", env, unicodeChars, len);
-    return CHECK_JNI_EXIT("s", baseEnv(env)->NewString(env, unicodeChars, len));
+    return GetFieldIDInternal(__FUNCTION__, env, c, name, sig, true);
+  }
+
+#define FIELD_ACCESSORS(jtype, name, ptype, shorty) \
+  static jtype GetStatic##name##Field(JNIEnv* env, jclass c, jfieldID fid) { \
+    return GetField(__FUNCTION__, env, c, fid, true, ptype).shorty; \
+  } \
+  \
+  static jtype Get##name##Field(JNIEnv* env, jobject obj, jfieldID fid) { \
+    return GetField(__FUNCTION__, env, obj, fid, false, ptype).shorty; \
+  } \
+  \
+  static void SetStatic##name##Field(JNIEnv* env, jclass c, jfieldID fid, jtype v) { \
+    JniValueType value; \
+    value.shorty = v; \
+    SetField(__FUNCTION__, env, c, fid, true, ptype, value); \
+  } \
+  \
+  static void Set##name##Field(JNIEnv* env, jobject obj, jfieldID fid, jtype v) { \
+    JniValueType value; \
+    value.shorty = v; \
+    SetField(__FUNCTION__, env, obj, fid, false, ptype, value); \
+  }
+
+  FIELD_ACCESSORS(jobject, Object, Primitive::kPrimNot, L)
+  FIELD_ACCESSORS(jboolean, Boolean, Primitive::kPrimBoolean, Z)
+  FIELD_ACCESSORS(jbyte, Byte, Primitive::kPrimByte, B)
+  FIELD_ACCESSORS(jchar, Char, Primitive::kPrimChar, C)
+  FIELD_ACCESSORS(jshort, Short, Primitive::kPrimShort, S)
+  FIELD_ACCESSORS(jint, Int, Primitive::kPrimInt, I)
+  FIELD_ACCESSORS(jlong, Long, Primitive::kPrimLong, J)
+  FIELD_ACCESSORS(jfloat, Float, Primitive::kPrimFloat, F)
+  FIELD_ACCESSORS(jdouble, Double, Primitive::kPrimDouble, D)
+#undef FIELD_ACCESSORS
+
+  static void CallVoidMethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* vargs) {
+    CallMethodA(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+  }
+
+  static void CallNonvirtualVoidMethodA(JNIEnv* env, jobject obj, jclass c, jmethodID mid,
+                                        jvalue* vargs) {
+    CallMethodA(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+  }
+
+  static void CallStaticVoidMethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) {
+    CallMethodA(__FUNCTION__, env, c, nullptr, mid, vargs, Primitive::kPrimVoid, kStatic);
+  }
+
+  static void CallVoidMethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list vargs) {
+    CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+  }
+
+  static void CallNonvirtualVoidMethodV(JNIEnv* env, jobject obj, jclass c, jmethodID mid,
+                                        va_list vargs) {
+    CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+  }
+
+  static void CallStaticVoidMethodV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) {
+    CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, Primitive::kPrimVoid, kStatic);
+  }
+
+  static void CallVoidMethod(JNIEnv* env, jobject obj, jmethodID mid, ...) {
+    va_list vargs;
+    va_start(vargs, mid);
+    CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, Primitive::kPrimVoid, kVirtual);
+    va_end(vargs);
+  }
+
+  static void CallNonvirtualVoidMethod(JNIEnv* env, jobject obj, jclass c, jmethodID mid, ...) {
+    va_list vargs;
+    va_start(vargs, mid);
+    CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, Primitive::kPrimVoid, kDirect);
+    va_end(vargs);
+  }
+
+  static void CallStaticVoidMethod(JNIEnv* env, jclass c, jmethodID mid, ...) {
+    va_list vargs;
+    va_start(vargs, mid);
+    CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, Primitive::kPrimVoid, kStatic);
+    va_end(vargs);
+  }
+
+#define CALL(rtype, name, ptype, shorty) \
+  static rtype Call##name##MethodA(JNIEnv* env, jobject obj, jmethodID mid, jvalue* vargs) { \
+    return CallMethodA(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+  } \
+  \
+  static rtype CallNonvirtual##name##MethodA(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+                                             jvalue* vargs) { \
+    return CallMethodA(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+  } \
+  \
+  static rtype CallStatic##name##MethodA(JNIEnv* env, jclass c, jmethodID mid, jvalue* vargs) { \
+    return CallMethodA(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+  } \
+  \
+  static rtype Call##name##MethodV(JNIEnv* env, jobject obj, jmethodID mid, va_list vargs) { \
+    return CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+  } \
+  \
+  static rtype CallNonvirtual##name##MethodV(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+                                             va_list vargs) { \
+    return CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+  } \
+  \
+  static rtype CallStatic##name##MethodV(JNIEnv* env, jclass c, jmethodID mid, va_list vargs) { \
+    return CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+  } \
+  \
+  static rtype Call##name##Method(JNIEnv* env, jobject obj, jmethodID mid, ...) { \
+    va_list vargs; \
+    va_start(vargs, mid); \
+    rtype result = \
+        CallMethodV(__FUNCTION__, env, obj, nullptr, mid, vargs, ptype, kVirtual).shorty; \
+    va_end(vargs); \
+    return result; \
+  } \
+  \
+  static rtype CallNonvirtual##name##Method(JNIEnv* env, jobject obj, jclass c, jmethodID mid, \
+                                            ...) { \
+    va_list vargs; \
+    va_start(vargs, mid); \
+    rtype result = \
+        CallMethodV(__FUNCTION__, env, obj, c, mid, vargs, ptype, kDirect).shorty; \
+    va_end(vargs); \
+    return result; \
+  } \
+  \
+  static rtype CallStatic##name##Method(JNIEnv* env, jclass c, jmethodID mid, ...) { \
+    va_list vargs; \
+    va_start(vargs, mid); \
+    rtype result = \
+        CallMethodV(__FUNCTION__, env, nullptr, c, mid, vargs, ptype, kStatic).shorty; \
+    va_end(vargs); \
+    return result; \
+  }
+
+  CALL(jobject, Object, Primitive::kPrimNot, L)
+  CALL(jboolean, Boolean, Primitive::kPrimBoolean, Z)
+  CALL(jbyte, Byte, Primitive::kPrimByte, B)
+  CALL(jchar, Char, Primitive::kPrimChar, C)
+  CALL(jshort, Short, Primitive::kPrimShort, S)
+  CALL(jint, Int, Primitive::kPrimInt, I)
+  CALL(jlong, Long, Primitive::kPrimLong, J)
+  CALL(jfloat, Float, Primitive::kPrimFloat, F)
+  CALL(jdouble, Double, Primitive::kPrimDouble, D)
+#undef CALL
+
+  static jstring NewString(JNIEnv* env, const jchar* unicode_chars, jsize len) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.p = unicode_chars}, {.z = len}};
+    if (sc.Check(soa, true, "Epz", args)) {
+      JniValueType result;
+      result.s = baseEnv(env)->NewString(env, unicode_chars, len);
+      if (sc.Check(soa, false, "s", &result)) {
+        return result.s;
+      }
+    }
+    return nullptr;
   }
 
-  static jsize GetStringLength(JNIEnv* env, jstring string) {
-    CHECK_JNI_ENTRY(kFlag_CritOkay, "Es", env, string);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->GetStringLength(env, string));
+  static jstring NewStringUTF(JNIEnv* env, const char* chars) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_NullableUtf, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.u = chars}};
+    if (sc.Check(soa, true, "Eu", args)) {
+      JniValueType result;
+      // TODO: stale? show pointer and truncate string.
+      result.s = baseEnv(env)->NewStringUTF(env, chars);
+      if (sc.Check(soa, false, "s", &result)) {
+        return result.s;
+      }
+    }
+    return nullptr;
   }
 
-  static const jchar* GetStringChars(JNIEnv* env, jstring java_string, jboolean* isCopy) {
-    CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, java_string, isCopy);
-    const jchar* result = baseEnv(env)->GetStringChars(env, java_string, isCopy);
-    if (sc.ForceCopy() && result != nullptr) {
-      mirror::String* s = sc.soa().Decode<mirror::String*>(java_string);
-      int byteCount = s->GetLength() * 2;
-      result = (const jchar*) GuardedCopy::Create(result, byteCount, false);
-      if (isCopy != nullptr) {
-        *isCopy = JNI_TRUE;
+  static jsize GetStringLength(JNIEnv* env, jstring string) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.s = string}};
+    if (sc.Check(soa, true, "Es", args)) {
+      JniValueType result;
+      result.z = baseEnv(env)->GetStringLength(env, string);
+      if (sc.Check(soa, false, "z", &result)) {
+        return result.z;
       }
     }
-    return CHECK_JNI_EXIT("p", result);
+    return JNI_ERR;
   }
 
-  static void ReleaseStringChars(JNIEnv* env, jstring string, const jchar* chars) {
-    CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "Esp", env, string, chars);
-    sc.CheckNonNull(chars);
-    if (sc.ForceCopy()) {
-      GuardedCopy::Check(__FUNCTION__, chars, false);
-      chars = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<jchar*>(chars)));
+  static jsize GetStringUTFLength(JNIEnv* env, jstring string) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.s = string}};
+    if (sc.Check(soa, true, "Es", args)) {
+      JniValueType result;
+      result.z = baseEnv(env)->GetStringUTFLength(env, string);
+      if (sc.Check(soa, false, "z", &result)) {
+        return result.z;
+      }
     }
-    baseEnv(env)->ReleaseStringChars(env, string, chars);
-    CHECK_JNI_EXIT_VOID();
+    return JNI_ERR;
   }
 
-  static jstring NewStringUTF(JNIEnv* env, const char* bytes) {
-    CHECK_JNI_ENTRY(kFlag_NullableUtf, "Eu", env, bytes);  // TODO: show pointer and truncate string.
-    return CHECK_JNI_EXIT("s", baseEnv(env)->NewStringUTF(env, bytes));
+  static const jchar* GetStringChars(JNIEnv* env, jstring string, jboolean* is_copy) {
+    return reinterpret_cast<const jchar*>(GetStringCharsInternal(__FUNCTION__, env, string,
+                                                                 is_copy, false, false));
   }
 
-  static jsize GetStringUTFLength(JNIEnv* env, jstring string) {
-    CHECK_JNI_ENTRY(kFlag_CritOkay, "Es", env, string);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->GetStringUTFLength(env, string));
+  static const char* GetStringUTFChars(JNIEnv* env, jstring string, jboolean* is_copy) {
+    return reinterpret_cast<const char*>(GetStringCharsInternal(__FUNCTION__, env, string,
+                                                                is_copy, true, false));
   }
 
-  static const char* GetStringUTFChars(JNIEnv* env, jstring string, jboolean* isCopy) {
-    CHECK_JNI_ENTRY(kFlag_CritOkay, "Esp", env, string, isCopy);
-    const char* result = baseEnv(env)->GetStringUTFChars(env, string, isCopy);
-    if (sc.ForceCopy() && result != nullptr) {
-      result = (const char*) GuardedCopy::Create(result, strlen(result) + 1, false);
-      if (isCopy != nullptr) {
-        *isCopy = JNI_TRUE;
-      }
-    }
-    return CHECK_JNI_EXIT("u", result);  // TODO: show pointer and truncate string.
+  static const jchar* GetStringCritical(JNIEnv* env, jstring string, jboolean* is_copy) {
+    return reinterpret_cast<const jchar*>(GetStringCharsInternal(__FUNCTION__, env, string,
+                                                                 is_copy, false, true));
+  }
+
+  static void ReleaseStringChars(JNIEnv* env, jstring string, const jchar* chars) {
+    ReleaseStringCharsInternal(__FUNCTION__, env, string, chars, false, false);
   }
 
   static void ReleaseStringUTFChars(JNIEnv* env, jstring string, const char* utf) {
-    CHECK_JNI_ENTRY(kFlag_ExcepOkay | kFlag_Release, "Esu", env, string, utf);  // TODO: show pointer and truncate string.
-    if (sc.ForceCopy()) {
-      GuardedCopy::Check(__FUNCTION__, utf, false);
-      utf = reinterpret_cast<const char*>(GuardedCopy::Destroy(const_cast<char*>(utf)));
+    ReleaseStringCharsInternal(__FUNCTION__, env, string, utf, true, false);
+  }
+
+  static void ReleaseStringCritical(JNIEnv* env, jstring string, const jchar* chars) {
+    ReleaseStringCharsInternal(__FUNCTION__, env, string, chars, false, true);
+  }
+
+  static void GetStringRegion(JNIEnv* env, jstring string, jsize start, jsize len, jchar* buf) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+    JniValueType args[5] = {{.E = env}, {.s = string}, {.z = start}, {.z = len}, {.p = buf}};
+    // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+    // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+    if (sc.Check(soa, true, "EsIIp", args)) {
+      baseEnv(env)->GetStringRegion(env, string, start, len, buf);
+      JniValueType result;
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
     }
-    baseEnv(env)->ReleaseStringUTFChars(env, string, utf);
-    CHECK_JNI_EXIT_VOID();
   }
 
-  static jsize GetArrayLength(JNIEnv* env, jarray array) {
-    CHECK_JNI_ENTRY(kFlag_CritOkay, "Ea", env, array);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->GetArrayLength(env, array));
+  static void GetStringUTFRegion(JNIEnv* env, jstring string, jsize start, jsize len, char* buf) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+    JniValueType args[5] = {{.E = env}, {.s = string}, {.z = start}, {.z = len}, {.p = buf}};
+    // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+    // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+    if (sc.Check(soa, true, "EsIIp", args)) {
+      baseEnv(env)->GetStringUTFRegion(env, string, start, len, buf);
+      JniValueType result;
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
+    }
   }
 
-  static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass elementClass, jobject initialElement) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EzcL", env, length, elementClass, initialElement);
-    return CHECK_JNI_EXIT("a", baseEnv(env)->NewObjectArray(env, length, elementClass, initialElement));
+  static jsize GetArrayLength(JNIEnv* env, jarray array) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_CritOkay, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.a = array}};
+    if (sc.Check(soa, true, "Ea", args)) {
+      JniValueType result;
+      result.z = baseEnv(env)->GetArrayLength(env, array);
+      if (sc.Check(soa, false, "z", &result)) {
+        return result.z;
+      }
+    }
+    return JNI_ERR;
+  }
+
+  static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass element_class,
+                                     jobject initial_element) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[4] =
+        {{.E = env}, {.z = length}, {.c = element_class}, {.L = initial_element}};
+    if (sc.Check(soa, true, "EzcL", args)) {
+      JniValueType result;
+      // Note: assignability tests of initial_element are done in the base implementation.
+      result.a = baseEnv(env)->NewObjectArray(env, length, element_class, initial_element);
+      if (sc.Check(soa, false, "a", &result)) {
+        return down_cast<jobjectArray>(result.a);
+      }
+    }
+    return nullptr;
   }
 
   static jobject GetObjectArrayElement(JNIEnv* env, jobjectArray array, jsize index) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EaI", env, array, index);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->GetObjectArrayElement(env, array, index));
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.a = array}, {.z = index}};
+    if (sc.Check(soa, true, "Eaz", args)) {
+      JniValueType result;
+      result.L = baseEnv(env)->GetObjectArrayElement(env, array, index);
+      if (sc.Check(soa, false, "L", &result)) {
+        return result.L;
+      }
+    }
+    return nullptr;
   }
 
   static void SetObjectArrayElement(JNIEnv* env, jobjectArray array, jsize index, jobject value) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EaIL", env, array, index, value);
-    baseEnv(env)->SetObjectArrayElement(env, array, index, value);
-    CHECK_JNI_EXIT_VOID();
-  }
-
-#define NEW_PRIMITIVE_ARRAY(_artype, _jname) \
-    static _artype New##_jname##Array(JNIEnv* env, jsize length) { \
-        CHECK_JNI_ENTRY(kFlag_Default, "Ez", env, length); \
-        return CHECK_JNI_EXIT("a", baseEnv(env)->New##_jname##Array(env, length)); \
-    }
-NEW_PRIMITIVE_ARRAY(jbooleanArray, Boolean);
-NEW_PRIMITIVE_ARRAY(jbyteArray, Byte);
-NEW_PRIMITIVE_ARRAY(jcharArray, Char);
-NEW_PRIMITIVE_ARRAY(jshortArray, Short);
-NEW_PRIMITIVE_ARRAY(jintArray, Int);
-NEW_PRIMITIVE_ARRAY(jlongArray, Long);
-NEW_PRIMITIVE_ARRAY(jfloatArray, Float);
-NEW_PRIMITIVE_ARRAY(jdoubleArray, Double);
-
-struct ForceCopyGetChecker {
- public:
-  ForceCopyGetChecker(ScopedCheck& sc, jboolean* isCopy) {
-    force_copy = sc.ForceCopy();
-    no_copy = 0;
-    if (force_copy && isCopy != nullptr) {
-      // Capture this before the base call tramples on it.
-      no_copy = *reinterpret_cast<uint32_t*>(isCopy);
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[4] = {{.E = env}, {.a = array}, {.z = index}, {.L = value}};
+    // Note: the index arguments is checked as 'I' rather than 'z' as invalid indices result in
+    // ArrayIndexOutOfBoundsExceptions in the base implementation. Similarly invalid stores result
+    // in ArrayStoreExceptions.
+    if (sc.Check(soa, true, "EaIL", args)) {
+      baseEnv(env)->SetObjectArrayElement(env, array, index, value);
+      JniValueType result;
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
     }
   }
 
-  template<typename ResultT>
-  ResultT Check(JNIEnv* env, jarray array, jboolean* isCopy, ResultT result) {
-    if (force_copy && result != nullptr) {
-      result = reinterpret_cast<ResultT>(CreateGuardedPACopy(env, array, isCopy));
-    }
-    return result;
+  static jbooleanArray NewBooleanArray(JNIEnv* env, jsize length) {
+    return down_cast<jbooleanArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+                                                      Primitive::kPrimBoolean));
   }
 
-  uint32_t no_copy;
-  bool force_copy;
-};
+  static jbyteArray NewByteArray(JNIEnv* env, jsize length) {
+    return down_cast<jbyteArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+                                                   Primitive::kPrimByte));
+  }
+
+  static jcharArray NewCharArray(JNIEnv* env, jsize length) {
+    return down_cast<jcharArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+                                                   Primitive::kPrimChar));
+  }
 
-#define GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname) \
-  static _ctype* Get##_jname##ArrayElements(JNIEnv* env, _ctype##Array array, jboolean* isCopy) { \
-    CHECK_JNI_ENTRY(kFlag_Default, "Eap", env, array, isCopy); \
-    _ctype* result = ForceCopyGetChecker(sc, isCopy).Check(env, array, isCopy, baseEnv(env)->Get##_jname##ArrayElements(env, array, isCopy)); \
-    return CHECK_JNI_EXIT("p", result); \
-  }
-
-#define RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname) \
-  static void Release##_jname##ArrayElements(JNIEnv* env, _ctype##Array array, _ctype* elems, jint mode) { \
-    CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "Eapr", env, array, elems, mode); \
-    sc.CheckNonNull(elems); \
-    if (sc.ForceCopy()) { \
-      ReleaseGuardedPACopy(env, array, elems, mode); \
-    } \
-    baseEnv(env)->Release##_jname##ArrayElements(env, array, elems, mode); \
-    CHECK_JNI_EXIT_VOID(); \
-  }
-
-#define GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname) \
-    static void Get##_jname##ArrayRegion(JNIEnv* env, _ctype##Array array, jsize start, jsize len, _ctype* buf) { \
-        CHECK_JNI_ENTRY(kFlag_Default, "EaIIp", env, array, start, len, buf); \
-        baseEnv(env)->Get##_jname##ArrayRegion(env, array, start, len, buf); \
-        CHECK_JNI_EXIT_VOID(); \
-    }
-
-#define SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname) \
-    static void Set##_jname##ArrayRegion(JNIEnv* env, _ctype##Array array, jsize start, jsize len, const _ctype* buf) { \
-        CHECK_JNI_ENTRY(kFlag_Default, "EaIIp", env, array, start, len, buf); \
-        baseEnv(env)->Set##_jname##ArrayRegion(env, array, start, len, buf); \
-        CHECK_JNI_EXIT_VOID(); \
-    }
-
-#define PRIMITIVE_ARRAY_FUNCTIONS(_ctype, _jname, _typechar) \
-    GET_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname); \
-    RELEASE_PRIMITIVE_ARRAY_ELEMENTS(_ctype, _jname); \
-    GET_PRIMITIVE_ARRAY_REGION(_ctype, _jname); \
-    SET_PRIMITIVE_ARRAY_REGION(_ctype, _jname);
-
-// TODO: verify primitive array type matches call type.
-PRIMITIVE_ARRAY_FUNCTIONS(jboolean, Boolean, 'Z');
-PRIMITIVE_ARRAY_FUNCTIONS(jbyte, Byte, 'B');
-PRIMITIVE_ARRAY_FUNCTIONS(jchar, Char, 'C');
-PRIMITIVE_ARRAY_FUNCTIONS(jshort, Short, 'S');
-PRIMITIVE_ARRAY_FUNCTIONS(jint, Int, 'I');
-PRIMITIVE_ARRAY_FUNCTIONS(jlong, Long, 'J');
-PRIMITIVE_ARRAY_FUNCTIONS(jfloat, Float, 'F');
-PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, 'D');
+  static jshortArray NewShortArray(JNIEnv* env, jsize length) {
+    return down_cast<jshortArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+                                                    Primitive::kPrimShort));
+  }
 
-  static jint RegisterNatives(JNIEnv* env, jclass c, const JNINativeMethod* methods, jint nMethods) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EcpI", env, c, methods, nMethods);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->RegisterNatives(env, c, methods, nMethods));
+  static jintArray NewIntArray(JNIEnv* env, jsize length) {
+    return down_cast<jintArray>(NewPrimitiveArray(__FUNCTION__, env, length, Primitive::kPrimInt));
   }
 
-  static jint UnregisterNatives(JNIEnv* env, jclass c) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ec", env, c);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->UnregisterNatives(env, c));
+  static jlongArray NewLongArray(JNIEnv* env, jsize length) {
+    return down_cast<jlongArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+                                                   Primitive::kPrimLong));
+  }
+
+  static jfloatArray NewFloatArray(JNIEnv* env, jsize length) {
+    return down_cast<jfloatArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+                                                    Primitive::kPrimFloat));
+  }
+
+  static jdoubleArray NewDoubleArray(JNIEnv* env, jsize length) {
+    return down_cast<jdoubleArray>(NewPrimitiveArray(__FUNCTION__, env, length,
+                                                     Primitive::kPrimDouble));
   }
 
+#define PRIMITIVE_ARRAY_FUNCTIONS(ctype, name, ptype) \
+  static ctype* Get##name##ArrayElements(JNIEnv* env, ctype##Array array, jboolean* is_copy) { \
+    return reinterpret_cast<ctype*>( \
+        GetPrimitiveArrayElements(__FUNCTION__, ptype, env, array, is_copy)); \
+  } \
+  \
+  static void Release##name##ArrayElements(JNIEnv* env, ctype##Array array, ctype* elems, \
+                                           jint mode) { \
+    ReleasePrimitiveArrayElements(__FUNCTION__, ptype, env, array, elems, mode); \
+  } \
+  \
+  static void Get##name##ArrayRegion(JNIEnv* env, ctype##Array array, jsize start, jsize len, \
+                                     ctype* buf) { \
+    GetPrimitiveArrayRegion(__FUNCTION__, ptype, env, array, start, len, buf); \
+  } \
+  \
+  static void Set##name##ArrayRegion(JNIEnv* env, ctype##Array array, jsize start, jsize len, \
+                                     const ctype* buf) { \
+    SetPrimitiveArrayRegion(__FUNCTION__, ptype, env, array, start, len, buf); \
+  }
+
+  PRIMITIVE_ARRAY_FUNCTIONS(jboolean, Boolean, Primitive::kPrimBoolean)
+  PRIMITIVE_ARRAY_FUNCTIONS(jbyte, Byte, Primitive::kPrimByte)
+  PRIMITIVE_ARRAY_FUNCTIONS(jchar, Char, Primitive::kPrimChar)
+  PRIMITIVE_ARRAY_FUNCTIONS(jshort, Short, Primitive::kPrimShort)
+  PRIMITIVE_ARRAY_FUNCTIONS(jint, Int, Primitive::kPrimInt)
+  PRIMITIVE_ARRAY_FUNCTIONS(jlong, Long, Primitive::kPrimLong)
+  PRIMITIVE_ARRAY_FUNCTIONS(jfloat, Float, Primitive::kPrimFloat)
+  PRIMITIVE_ARRAY_FUNCTIONS(jdouble, Double, Primitive::kPrimDouble)
+#undef PRIMITIVE_ARRAY_FUNCTIONS
+
   static jint MonitorEnter(JNIEnv* env, jobject obj) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
-    if (!sc.CheckInstance(ScopedCheck::kObject, obj)) {
-      return JNI_ERR;  // Only for jni_internal_test. Real code will have aborted already.
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.L = obj}};
+    if (sc.Check(soa, true, "EL", args)) {
+      JniValueType result;
+      result.i = baseEnv(env)->MonitorEnter(env, obj);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
     }
-    return CHECK_JNI_EXIT("I", baseEnv(env)->MonitorEnter(env, obj));
+    return JNI_ERR;
   }
 
   static jint MonitorExit(JNIEnv* env, jobject obj) {
-    CHECK_JNI_ENTRY(kFlag_Default | kFlag_ExcepOkay, "EL", env, obj);
-    if (!sc.CheckInstance(ScopedCheck::kObject, obj)) {
-      return JNI_ERR;  // Only for jni_internal_test. Real code will have aborted already.
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.L = obj}};
+    if (sc.Check(soa, true, "EL", args)) {
+      JniValueType result;
+      result.i = baseEnv(env)->MonitorExit(env, obj);
+      if (sc.Check(soa, false, "i", &result)) {
+        return result.i;
+      }
     }
-    return CHECK_JNI_EXIT("I", baseEnv(env)->MonitorExit(env, obj));
+    return JNI_ERR;
   }
 
-  static jint GetJavaVM(JNIEnv *env, JavaVM **vm) {
-    CHECK_JNI_ENTRY(kFlag_Default, "Ep", env, vm);
-    return CHECK_JNI_EXIT("I", baseEnv(env)->GetJavaVM(env, vm));
+  static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray array, jboolean* is_copy) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_CritGet, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.a = array}, {.p = is_copy}};
+    if (sc.Check(soa, true, "Eap", args)) {
+      JniValueType result;
+      result.p = baseEnv(env)->GetPrimitiveArrayCritical(env, array, is_copy);
+      if (result.p != nullptr && soa.ForceCopy()) {
+        result.p = GuardedCopy::CreateGuardedPACopy(env, array, is_copy);
+      }
+      if (sc.Check(soa, false, "p", &result)) {
+        return const_cast<void*>(result.p);
+      }
+    }
+    return nullptr;
   }
 
-  static void GetStringRegion(JNIEnv* env, jstring str, jsize start, jsize len, jchar* buf) {
-    CHECK_JNI_ENTRY(kFlag_CritOkay, "EsIIp", env, str, start, len, buf);
-    baseEnv(env)->GetStringRegion(env, str, start, len, buf);
-    CHECK_JNI_EXIT_VOID();
+  static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* carray, jint mode) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_CritRelease | kFlag_ExcepOkay, __FUNCTION__);
+    sc.CheckNonNull(carray);
+    JniValueType args[4] = {{.E = env}, {.a = array}, {.p = carray}, {.r = mode}};
+    if (sc.Check(soa, true, "Eapr", args)) {
+      if (soa.ForceCopy()) {
+        GuardedCopy::ReleaseGuardedPACopy(__FUNCTION__, env, array, carray, mode);
+      }
+      baseEnv(env)->ReleasePrimitiveArrayCritical(env, array, carray, mode);
+      JniValueType result;
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
+    }
   }
 
-  static void GetStringUTFRegion(JNIEnv* env, jstring str, jsize start, jsize len, char* buf) {
-    CHECK_JNI_ENTRY(kFlag_CritOkay, "EsIIp", env, str, start, len, buf);
-    baseEnv(env)->GetStringUTFRegion(env, str, start, len, buf);
-    CHECK_JNI_EXIT_VOID();
+  static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[3] = {{.E = env}, {.p = address}, {.J = capacity}};
+    if (sc.Check(soa, true, "EpJ", args)) {
+      JniValueType result;
+      // Note: the validity of address and capacity are checked in the base implementation.
+      result.L = baseEnv(env)->NewDirectByteBuffer(env, address, capacity);
+      if (sc.Check(soa, false, "L", &result)) {
+        return result.L;
+      }
+    }
+    return nullptr;
   }
 
-  static void* GetPrimitiveArrayCritical(JNIEnv* env, jarray array, jboolean* isCopy) {
-    CHECK_JNI_ENTRY(kFlag_CritGet, "Eap", env, array, isCopy);
-    void* result = baseEnv(env)->GetPrimitiveArrayCritical(env, array, isCopy);
-    if (sc.ForceCopy() && result != nullptr) {
-      result = CreateGuardedPACopy(env, array, isCopy);
+  static void* GetDirectBufferAddress(JNIEnv* env, jobject buf) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.L = buf}};
+    if (sc.Check(soa, true, "EL", args)) {
+      JniValueType result;
+      // Note: this is implemented in the base environment by a GetLongField which will sanity
+      // check the type of buf in GetLongField above.
+      result.p = baseEnv(env)->GetDirectBufferAddress(env, buf);
+      if (sc.Check(soa, false, "p", &result)) {
+        return const_cast<void*>(result.p);
+      }
     }
-    return CHECK_JNI_EXIT("p", result);
+    return nullptr;
   }
 
-  static void ReleasePrimitiveArrayCritical(JNIEnv* env, jarray array, void* carray, jint mode) {
-    CHECK_JNI_ENTRY(kFlag_CritRelease | kFlag_ExcepOkay, "Eapr", env, array, carray, mode);
-    sc.CheckNonNull(carray);
-    if (sc.ForceCopy()) {
-      ReleaseGuardedPACopy(env, array, carray, mode);
+  static jlong GetDirectBufferCapacity(JNIEnv* env, jobject buf) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, __FUNCTION__);
+    JniValueType args[2] = {{.E = env}, {.L = buf}};
+    if (sc.Check(soa, true, "EL", args)) {
+      JniValueType result;
+      // Note: this is implemented in the base environment by a GetIntField which will sanity
+      // check the type of buf in GetIntField above.
+      result.J = baseEnv(env)->GetDirectBufferCapacity(env, buf);
+      if (sc.Check(soa, false, "J", &result)) {
+        return result.J;
+      }
     }
-    baseEnv(env)->ReleasePrimitiveArrayCritical(env, array, carray, mode);
-    CHECK_JNI_EXIT_VOID();
+    return JNI_ERR;
   }
 
-  static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* isCopy) {
-    CHECK_JNI_ENTRY(kFlag_CritGet, "Esp", env, java_string, isCopy);
-    const jchar* result = baseEnv(env)->GetStringCritical(env, java_string, isCopy);
-    if (sc.ForceCopy() && result != nullptr) {
-      mirror::String* s = sc.soa().Decode<mirror::String*>(java_string);
-      int byteCount = s->GetLength() * 2;
-      result = (const jchar*) GuardedCopy::Create(result, byteCount, false);
-      if (isCopy != nullptr) {
-        *isCopy = JNI_TRUE;
+ private:
+  static JavaVMExt* GetJavaVMExt(JNIEnv* env) {
+    return reinterpret_cast<JNIEnvExt*>(env)->vm;
+  }
+
+  static const JNINativeInterface* baseEnv(JNIEnv* env) {
+    return reinterpret_cast<JNIEnvExt*>(env)->unchecked_functions;
+  }
+
+  static jobject NewRef(const char* function_name, JNIEnv* env, jobject obj, IndirectRefKind kind) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[2] = {{.E = env}, {.L = obj}};
+    if (sc.Check(soa, true, "EL", args)) {
+      JniValueType result;
+      switch (kind) {
+        case kGlobal:
+          result.L = baseEnv(env)->NewGlobalRef(env, obj);
+          break;
+        case kLocal:
+          result.L = baseEnv(env)->NewLocalRef(env, obj);
+          break;
+        case kWeakGlobal:
+          result.L = baseEnv(env)->NewWeakGlobalRef(env, obj);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected reference kind: " << kind;
       }
+      if (sc.Check(soa, false, "L", &result)) {
+        DCHECK_EQ(IsSameObject(env, obj, result.L), JNI_TRUE);
+        DCHECK(sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), result.L));
+        return result.L;
+      }
+    }
+    return nullptr;
+  }
+
+  static void DeleteRef(const char* function_name, JNIEnv* env, jobject obj, IndirectRefKind kind) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay, function_name);
+    JniValueType args[2] = {{.E = env}, {.L = obj}};
+    sc.Check(soa, true, "EL", args);
+    if (sc.CheckReferenceKind(kind, soa.Vm(), soa.Self(), obj)) {
+      JniValueType result;
+      switch (kind) {
+        case kGlobal:
+          baseEnv(env)->DeleteGlobalRef(env, obj);
+          break;
+        case kLocal:
+          baseEnv(env)->DeleteLocalRef(env, obj);
+          break;
+        case kWeakGlobal:
+          baseEnv(env)->DeleteWeakGlobalRef(env, obj);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected reference kind: " << kind;
+      }
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
     }
-    return CHECK_JNI_EXIT("p", result);
   }
 
-  static void ReleaseStringCritical(JNIEnv* env, jstring string, const jchar* carray) {
-    CHECK_JNI_ENTRY(kFlag_CritRelease | kFlag_ExcepOkay, "Esp", env, string, carray);
-    sc.CheckNonNull(carray);
-    if (sc.ForceCopy()) {
-      GuardedCopy::Check(__FUNCTION__, carray, false);
-      carray = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<jchar*>(carray)));
+  static jmethodID GetMethodIDInternal(const char* function_name, JNIEnv* env, jclass c,
+                                       const char* name, const char* sig, bool is_static) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[4] = {{.E = env}, {.c = c}, {.u = name}, {.u = sig}};
+    if (sc.Check(soa, true, "Ecuu", args)) {
+      JniValueType result;
+      if (is_static) {
+        result.m = baseEnv(env)->GetStaticMethodID(env, c, name, sig);
+      } else {
+        result.m = baseEnv(env)->GetMethodID(env, c, name, sig);
+      }
+      if (sc.Check(soa, false, "m", &result)) {
+        return result.m;
+      }
     }
-    baseEnv(env)->ReleaseStringCritical(env, string, carray);
-    CHECK_JNI_EXIT_VOID();
+    return nullptr;
   }
 
-  static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, obj);
-    return CHECK_JNI_EXIT("L", baseEnv(env)->NewWeakGlobalRef(env, obj));
+  static jfieldID GetFieldIDInternal(const char* function_name, JNIEnv* env, jclass c,
+                                     const char* name, const char* sig, bool is_static) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[4] = {{.E = env}, {.c = c}, {.u = name}, {.u = sig}};
+    if (sc.Check(soa, true, "Ecuu", args)) {
+      JniValueType result;
+      if (is_static) {
+        result.f = baseEnv(env)->GetStaticFieldID(env, c, name, sig);
+      } else {
+        result.f = baseEnv(env)->GetFieldID(env, c, name, sig);
+      }
+      if (sc.Check(soa, false, "f", &result)) {
+        return result.f;
+      }
+    }
+    return nullptr;
+  }
+
+  static JniValueType GetField(const char* function_name, JNIEnv* env, jobject obj, jfieldID fid,
+                               bool is_static, Primitive::Type type) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[3] = {{.E = env}, {.L = obj}, {.f = fid}};
+    JniValueType result;
+    if (sc.Check(soa, true, is_static ? "Ecf" : "ELf", args) &&
+        sc.CheckFieldAccess(soa, obj, fid, is_static, type)) {
+      const char* result_check = nullptr;
+      switch (type) {
+        case Primitive::kPrimNot:
+          if (is_static) {
+            result.L = baseEnv(env)->GetStaticObjectField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.L = baseEnv(env)->GetObjectField(env, obj, fid);
+          }
+          result_check = "L";
+          break;
+        case Primitive::kPrimBoolean:
+          if (is_static) {
+            result.Z = baseEnv(env)->GetStaticBooleanField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.Z = baseEnv(env)->GetBooleanField(env, obj, fid);
+          }
+          result_check = "Z";
+          break;
+        case Primitive::kPrimByte:
+          if (is_static) {
+            result.B = baseEnv(env)->GetStaticByteField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.B = baseEnv(env)->GetByteField(env, obj, fid);
+          }
+          result_check = "B";
+          break;
+        case Primitive::kPrimChar:
+          if (is_static) {
+            result.C = baseEnv(env)->GetStaticCharField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.C = baseEnv(env)->GetCharField(env, obj, fid);
+          }
+          result_check = "C";
+          break;
+        case Primitive::kPrimShort:
+          if (is_static) {
+            result.S = baseEnv(env)->GetStaticShortField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.S = baseEnv(env)->GetShortField(env, obj, fid);
+          }
+          result_check = "S";
+          break;
+        case Primitive::kPrimInt:
+          if (is_static) {
+            result.I = baseEnv(env)->GetStaticIntField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.I = baseEnv(env)->GetIntField(env, obj, fid);
+          }
+          result_check = "I";
+          break;
+        case Primitive::kPrimLong:
+          if (is_static) {
+            result.J = baseEnv(env)->GetStaticLongField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.J = baseEnv(env)->GetLongField(env, obj, fid);
+          }
+          result_check = "J";
+          break;
+        case Primitive::kPrimFloat:
+          if (is_static) {
+            result.F = baseEnv(env)->GetStaticFloatField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.F = baseEnv(env)->GetFloatField(env, obj, fid);
+          }
+          result_check = "F";
+          break;
+        case Primitive::kPrimDouble:
+          if (is_static) {
+            result.D = baseEnv(env)->GetStaticDoubleField(env, down_cast<jclass>(obj), fid);
+          } else {
+            result.D = baseEnv(env)->GetDoubleField(env, obj, fid);
+          }
+          result_check = "D";
+          break;
+        case Primitive::kPrimVoid:
+          LOG(FATAL) << "Unexpected type: " << type;
+          break;
+      }
+      if (sc.Check(soa, false, result_check, &result)) {
+        return result;
+      }
+    }
+    result.J = 0;
+    return result;
   }
 
-  static jboolean ExceptionCheck(JNIEnv* env) {
-    CHECK_JNI_ENTRY(kFlag_CritOkay | kFlag_ExcepOkay, "E", env);
-    return CHECK_JNI_EXIT("b", baseEnv(env)->ExceptionCheck(env));
+  static void SetField(const char* function_name, JNIEnv* env, jobject obj, jfieldID fid,
+                       bool is_static, Primitive::Type type, JniValueType value) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[4] = {{.E = env}, {.L = obj}, {.f = fid}, value};
+    char sig[5] = { 'E', is_static ? 'c' : 'L', 'f',
+        type == Primitive::kPrimNot ? 'L' : Primitive::Descriptor(type)[0], '\0'};
+    if (sc.Check(soa, true, sig, args) &&
+        sc.CheckFieldAccess(soa, obj, fid, is_static, type)) {
+      switch (type) {
+        case Primitive::kPrimNot:
+          if (is_static) {
+            baseEnv(env)->SetStaticObjectField(env, down_cast<jclass>(obj), fid, value.L);
+          } else {
+            baseEnv(env)->SetObjectField(env, obj, fid, value.L);
+          }
+          break;
+        case Primitive::kPrimBoolean:
+          if (is_static) {
+            baseEnv(env)->SetStaticBooleanField(env, down_cast<jclass>(obj), fid, value.Z);
+          } else {
+            baseEnv(env)->SetBooleanField(env, obj, fid, value.Z);
+          }
+          break;
+        case Primitive::kPrimByte:
+          if (is_static) {
+            baseEnv(env)->SetStaticByteField(env, down_cast<jclass>(obj), fid, value.B);
+          } else {
+            baseEnv(env)->SetByteField(env, obj, fid, value.B);
+          }
+          break;
+        case Primitive::kPrimChar:
+          if (is_static) {
+            baseEnv(env)->SetStaticCharField(env, down_cast<jclass>(obj), fid, value.C);
+          } else {
+            baseEnv(env)->SetCharField(env, obj, fid, value.C);
+          }
+          break;
+        case Primitive::kPrimShort:
+          if (is_static) {
+            baseEnv(env)->SetStaticShortField(env, down_cast<jclass>(obj), fid, value.S);
+          } else {
+            baseEnv(env)->SetShortField(env, obj, fid, value.S);
+          }
+          break;
+        case Primitive::kPrimInt:
+          if (is_static) {
+            baseEnv(env)->SetStaticIntField(env, down_cast<jclass>(obj), fid, value.I);
+          } else {
+            baseEnv(env)->SetIntField(env, obj, fid, value.I);
+          }
+          break;
+        case Primitive::kPrimLong:
+          if (is_static) {
+            baseEnv(env)->SetStaticLongField(env, down_cast<jclass>(obj), fid, value.J);
+          } else {
+            baseEnv(env)->SetLongField(env, obj, fid, value.J);
+          }
+          break;
+        case Primitive::kPrimFloat:
+          if (is_static) {
+            baseEnv(env)->SetStaticFloatField(env, down_cast<jclass>(obj), fid, value.F);
+          } else {
+            baseEnv(env)->SetFloatField(env, obj, fid, value.F);
+          }
+          break;
+        case Primitive::kPrimDouble:
+          if (is_static) {
+            baseEnv(env)->SetStaticDoubleField(env, down_cast<jclass>(obj), fid, value.D);
+          } else {
+            baseEnv(env)->SetDoubleField(env, obj, fid, value.D);
+          }
+          break;
+        case Primitive::kPrimVoid:
+          LOG(FATAL) << "Unexpected type: " << type;
+          break;
+      }
+      JniValueType result;
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
+    }
   }
 
-  static jobjectRefType GetObjectRefType(JNIEnv* env, jobject obj) {
-    // Note: we use "Ep" rather than "EL" because this is the one JNI function
-    // that it's okay to pass an invalid reference to.
-    CHECK_JNI_ENTRY(kFlag_Default, "Ep", env, obj);
-    // TODO: proper decoding of jobjectRefType!
-    return CHECK_JNI_EXIT("I", baseEnv(env)->GetObjectRefType(env, obj));
+  static bool CheckCallArgs(ScopedObjectAccess& soa, ScopedCheck& sc, JNIEnv* env, jobject obj,
+                            jclass c, jmethodID mid, InvokeType invoke)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    bool checked;
+    switch (invoke) {
+      case kVirtual: {
+        DCHECK(c == nullptr);
+        JniValueType args[3] = {{.E = env}, {.L = obj}, {.m = mid}};
+        checked = sc.Check(soa, true, "ELm.", args);
+        break;
+      }
+      case kDirect: {
+        JniValueType args[4] = {{.E = env}, {.L = obj}, {.c = c}, {.m = mid}};
+        checked = sc.Check(soa, true, "ELcm.", args);
+        break;
+      }
+      case kStatic: {
+        DCHECK(obj == nullptr);
+        JniValueType args[3] = {{.E = env}, {.c = c}, {.m = mid}};
+        checked = sc.Check(soa, true, "Ecm.", args);
+        break;
+      }
+      default:
+        LOG(FATAL) << "Unexpected invoke: " << invoke;
+        checked = false;
+        break;
+    }
+    return checked;
+  }
+
+  static JniValueType CallMethodA(const char* function_name, JNIEnv* env, jobject obj, jclass c,
+                                  jmethodID mid, jvalue* vargs, Primitive::Type type,
+                                  InvokeType invoke) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType result;
+    if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke) &&
+        sc.CheckMethodAndSig(soa, obj, c, mid, type, invoke)) {
+      const char* result_check;
+      switch (type) {
+        case Primitive::kPrimNot:
+          result_check = "L";
+          switch (invoke) {
+            case kVirtual:
+              result.L = baseEnv(env)->CallObjectMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.L = baseEnv(env)->CallNonvirtualObjectMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.L = baseEnv(env)->CallStaticObjectMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimBoolean:
+          result_check = "Z";
+          switch (invoke) {
+            case kVirtual:
+              result.Z = baseEnv(env)->CallBooleanMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.Z = baseEnv(env)->CallNonvirtualBooleanMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.Z = baseEnv(env)->CallStaticBooleanMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimByte:
+          result_check = "B";
+          switch (invoke) {
+            case kVirtual:
+              result.B = baseEnv(env)->CallByteMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.B = baseEnv(env)->CallNonvirtualByteMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.B = baseEnv(env)->CallStaticByteMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimChar:
+          result_check = "C";
+          switch (invoke) {
+            case kVirtual:
+              result.C = baseEnv(env)->CallCharMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.C = baseEnv(env)->CallNonvirtualCharMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.C = baseEnv(env)->CallStaticCharMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimShort:
+          result_check = "S";
+          switch (invoke) {
+            case kVirtual:
+              result.S = baseEnv(env)->CallShortMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.S = baseEnv(env)->CallNonvirtualShortMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.S = baseEnv(env)->CallStaticShortMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimInt:
+          result_check = "I";
+          switch (invoke) {
+            case kVirtual:
+              result.I = baseEnv(env)->CallIntMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.I = baseEnv(env)->CallNonvirtualIntMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.I = baseEnv(env)->CallStaticIntMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimLong:
+          result_check = "J";
+          switch (invoke) {
+            case kVirtual:
+              result.J = baseEnv(env)->CallLongMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.J = baseEnv(env)->CallNonvirtualLongMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.J = baseEnv(env)->CallStaticLongMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimFloat:
+          result_check = "F";
+          switch (invoke) {
+            case kVirtual:
+              result.F = baseEnv(env)->CallFloatMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.F = baseEnv(env)->CallNonvirtualFloatMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.F = baseEnv(env)->CallStaticFloatMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimDouble:
+          result_check = "D";
+          switch (invoke) {
+            case kVirtual:
+              result.D = baseEnv(env)->CallDoubleMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.D = baseEnv(env)->CallNonvirtualDoubleMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.D = baseEnv(env)->CallStaticDoubleMethodA(env, c, mid, vargs);
+              break;
+            default:
+              break;
+          }
+          break;
+        case Primitive::kPrimVoid:
+          result_check = "V";
+          result.V = nullptr;
+          switch (invoke) {
+            case kVirtual:
+              baseEnv(env)->CallVoidMethodA(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              baseEnv(env)->CallNonvirtualVoidMethodA(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              baseEnv(env)->CallStaticVoidMethodA(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        default:
+          LOG(FATAL) << "Unexpected return type: " << type;
+          result_check = nullptr;
+      }
+      if (sc.Check(soa, false, result_check, &result)) {
+        return result;
+      }
+    }
+    result.J = 0;
+    return result;
   }
 
-  static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EpJ", env, address, capacity);
-    if (address == nullptr) {
-      JniAbortF(__FUNCTION__, "non-nullable address is NULL");
-      return nullptr;
+  static JniValueType CallMethodV(const char* function_name, JNIEnv* env, jobject obj, jclass c,
+                                  jmethodID mid, va_list vargs, Primitive::Type type,
+                                  InvokeType invoke) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType result;
+    if (CheckCallArgs(soa, sc, env, obj, c, mid, invoke) &&
+        sc.CheckMethodAndSig(soa, obj, c, mid, type, invoke)) {
+      const char* result_check;
+      switch (type) {
+        case Primitive::kPrimNot:
+          result_check = "L";
+          switch (invoke) {
+            case kVirtual:
+              result.L = baseEnv(env)->CallObjectMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.L = baseEnv(env)->CallNonvirtualObjectMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.L = baseEnv(env)->CallStaticObjectMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimBoolean:
+          result_check = "Z";
+          switch (invoke) {
+            case kVirtual:
+              result.Z = baseEnv(env)->CallBooleanMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.Z = baseEnv(env)->CallNonvirtualBooleanMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.Z = baseEnv(env)->CallStaticBooleanMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimByte:
+          result_check = "B";
+          switch (invoke) {
+            case kVirtual:
+              result.B = baseEnv(env)->CallByteMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.B = baseEnv(env)->CallNonvirtualByteMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.B = baseEnv(env)->CallStaticByteMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimChar:
+          result_check = "C";
+          switch (invoke) {
+            case kVirtual:
+              result.C = baseEnv(env)->CallCharMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.C = baseEnv(env)->CallNonvirtualCharMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.C = baseEnv(env)->CallStaticCharMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimShort:
+          result_check = "S";
+          switch (invoke) {
+            case kVirtual:
+              result.S = baseEnv(env)->CallShortMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.S = baseEnv(env)->CallNonvirtualShortMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.S = baseEnv(env)->CallStaticShortMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimInt:
+          result_check = "I";
+          switch (invoke) {
+            case kVirtual:
+              result.I = baseEnv(env)->CallIntMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.I = baseEnv(env)->CallNonvirtualIntMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.I = baseEnv(env)->CallStaticIntMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimLong:
+          result_check = "J";
+          switch (invoke) {
+            case kVirtual:
+              result.J = baseEnv(env)->CallLongMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.J = baseEnv(env)->CallNonvirtualLongMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.J = baseEnv(env)->CallStaticLongMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimFloat:
+          result_check = "F";
+          switch (invoke) {
+            case kVirtual:
+              result.F = baseEnv(env)->CallFloatMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.F = baseEnv(env)->CallNonvirtualFloatMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.F = baseEnv(env)->CallStaticFloatMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimDouble:
+          result_check = "D";
+          switch (invoke) {
+            case kVirtual:
+              result.D = baseEnv(env)->CallDoubleMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              result.D = baseEnv(env)->CallNonvirtualDoubleMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              result.D = baseEnv(env)->CallStaticDoubleMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        case Primitive::kPrimVoid:
+          result_check = "V";
+          result.V = nullptr;
+          switch (invoke) {
+            case kVirtual:
+              baseEnv(env)->CallVoidMethodV(env, obj, mid, vargs);
+              break;
+            case kDirect:
+              baseEnv(env)->CallNonvirtualVoidMethodV(env, obj, c, mid, vargs);
+              break;
+            case kStatic:
+              baseEnv(env)->CallStaticVoidMethodV(env, c, mid, vargs);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected invoke: " << invoke;
+          }
+          break;
+        default:
+          LOG(FATAL) << "Unexpected return type: " << type;
+          result_check = nullptr;
+      }
+      if (sc.Check(soa, false, result_check, &result)) {
+        return result;
+      }
     }
-    return CHECK_JNI_EXIT("L", baseEnv(env)->NewDirectByteBuffer(env, address, capacity));
+    result.J = 0;
+    return result;
   }
 
-  static void* GetDirectBufferAddress(JNIEnv* env, jobject buf) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, buf);
-    // TODO: check that 'buf' is a java.nio.Buffer.
-    return CHECK_JNI_EXIT("p", baseEnv(env)->GetDirectBufferAddress(env, buf));
+  static const void* GetStringCharsInternal(const char* function_name, JNIEnv* env, jstring string,
+                                            jboolean* is_copy, bool utf, bool critical) {
+    ScopedObjectAccess soa(env);
+    int flags = critical ? kFlag_CritGet : kFlag_CritOkay;
+    ScopedCheck sc(flags, function_name);
+    JniValueType args[3] = {{.E = env}, {.s = string}, {.p = is_copy}};
+    if (sc.Check(soa, true, "Esp", args)) {
+      JniValueType result;
+      if (utf) {
+        CHECK(!critical);
+        result.u = baseEnv(env)->GetStringUTFChars(env, string, is_copy);
+      } else {
+        if (critical) {
+          result.p = baseEnv(env)->GetStringCritical(env, string, is_copy);
+        } else {
+          result.p = baseEnv(env)->GetStringChars(env, string, is_copy);
+        }
+      }
+      // TODO: could we be smarter about not copying when local_is_copy?
+      if (result.p != nullptr && soa.ForceCopy()) {
+        if (utf) {
+          size_t length_in_bytes = strlen(result.u) + 1;
+          result.u =
+              reinterpret_cast<const char*>(GuardedCopy::Create(result.u, length_in_bytes, false));
+        } else {
+          size_t length_in_bytes = baseEnv(env)->GetStringLength(env, string) * 2;
+          result.p =
+              reinterpret_cast<const jchar*>(GuardedCopy::Create(result.p, length_in_bytes, false));
+        }
+        if (is_copy != nullptr) {
+          *is_copy = JNI_TRUE;
+        }
+      }
+      if (sc.Check(soa, false, utf ? "u" : "p", &result)) {
+        return utf ? result.u : result.p;
+      }
+    }
+    return nullptr;
   }
 
-  static jlong GetDirectBufferCapacity(JNIEnv* env, jobject buf) {
-    CHECK_JNI_ENTRY(kFlag_Default, "EL", env, buf);
-    // TODO: check that 'buf' is a java.nio.Buffer.
-    return CHECK_JNI_EXIT("J", baseEnv(env)->GetDirectBufferCapacity(env, buf));
+  static void ReleaseStringCharsInternal(const char* function_name, JNIEnv* env, jstring string,
+                                         const void* chars, bool utf, bool critical) {
+    ScopedObjectAccess soa(env);
+    int flags = kFlag_ExcepOkay | kFlag_Release;
+    if (critical) {
+      flags |= kFlag_CritRelease;
+    }
+    ScopedCheck sc(flags, function_name);
+    sc.CheckNonNull(chars);
+    bool force_copy_ok = !soa.ForceCopy() || GuardedCopy::Check(function_name, chars, false);
+    if (force_copy_ok && soa.ForceCopy()) {
+      chars = reinterpret_cast<const jchar*>(GuardedCopy::Destroy(const_cast<void*>(chars)));
+    }
+    if (force_copy_ok) {
+      JniValueType args[3] = {{.E = env}, {.s = string}, {.p = chars}};
+      if (sc.Check(soa, true, utf ? "Esu" : "Esp", args)) {
+        if (utf) {
+          CHECK(!critical);
+          baseEnv(env)->ReleaseStringUTFChars(env, string, reinterpret_cast<const char*>(chars));
+        } else {
+          if (critical) {
+            baseEnv(env)->ReleaseStringCritical(env, string, reinterpret_cast<const jchar*>(chars));
+          } else {
+            baseEnv(env)->ReleaseStringChars(env, string, reinterpret_cast<const jchar*>(chars));
+          }
+        }
+        JniValueType result;
+        sc.Check(soa, false, "V", &result);
+      }
+    }
   }
 
- private:
-  static inline const JNINativeInterface* baseEnv(JNIEnv* env) {
-    return reinterpret_cast<JNIEnvExt*>(env)->unchecked_functions;
+  static jarray NewPrimitiveArray(const char* function_name, JNIEnv* env, jsize length,
+                                  Primitive::Type type) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[2] = {{.E = env}, {.z = length}};
+    if (sc.Check(soa, true, "Ez", args)) {
+      JniValueType result;
+      switch (type) {
+        case Primitive::kPrimBoolean:
+          result.a = baseEnv(env)->NewBooleanArray(env, length);
+          break;
+        case Primitive::kPrimByte:
+          result.a = baseEnv(env)->NewByteArray(env, length);
+          break;
+        case Primitive::kPrimChar:
+          result.a = baseEnv(env)->NewCharArray(env, length);
+          break;
+        case Primitive::kPrimShort:
+          result.a = baseEnv(env)->NewShortArray(env, length);
+          break;
+        case Primitive::kPrimInt:
+          result.a = baseEnv(env)->NewIntArray(env, length);
+          break;
+        case Primitive::kPrimLong:
+          result.a = baseEnv(env)->NewLongArray(env, length);
+          break;
+        case Primitive::kPrimFloat:
+          result.a = baseEnv(env)->NewFloatArray(env, length);
+          break;
+        case Primitive::kPrimDouble:
+          result.a = baseEnv(env)->NewDoubleArray(env, length);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected primitive type: " << type;
+      }
+      if (sc.Check(soa, false, "a", &result)) {
+        return result.a;
+      }
+    }
+    return nullptr;
+  }
+
+  static void* GetPrimitiveArrayElements(const char* function_name, Primitive::Type type,
+                                         JNIEnv* env, jarray array, jboolean* is_copy) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[3] = {{.E = env}, {.a = array}, {.p = is_copy}};
+    if (sc.Check(soa, true, "Eap", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+      JniValueType result;
+      switch (type) {
+        case Primitive::kPrimBoolean:
+          result.p = baseEnv(env)->GetBooleanArrayElements(env, down_cast<jbooleanArray>(array),
+                                                           is_copy);
+          break;
+        case Primitive::kPrimByte:
+          result.p = baseEnv(env)->GetByteArrayElements(env, down_cast<jbyteArray>(array),
+                                                        is_copy);
+          break;
+        case Primitive::kPrimChar:
+          result.p = baseEnv(env)->GetCharArrayElements(env, down_cast<jcharArray>(array),
+                                                        is_copy);
+          break;
+        case Primitive::kPrimShort:
+          result.p = baseEnv(env)->GetShortArrayElements(env, down_cast<jshortArray>(array),
+                                                         is_copy);
+          break;
+        case Primitive::kPrimInt:
+          result.p = baseEnv(env)->GetIntArrayElements(env, down_cast<jintArray>(array), is_copy);
+          break;
+        case Primitive::kPrimLong:
+          result.p = baseEnv(env)->GetLongArrayElements(env, down_cast<jlongArray>(array),
+                                                        is_copy);
+          break;
+        case Primitive::kPrimFloat:
+          result.p = baseEnv(env)->GetFloatArrayElements(env, down_cast<jfloatArray>(array),
+                                                         is_copy);
+          break;
+        case Primitive::kPrimDouble:
+          result.p = baseEnv(env)->GetDoubleArrayElements(env, down_cast<jdoubleArray>(array),
+                                                          is_copy);
+          break;
+        default:
+          LOG(FATAL) << "Unexpected primitive type: " << type;
+      }
+      if (result.p != nullptr && soa.ForceCopy()) {
+        result.p = GuardedCopy::CreateGuardedPACopy(env, array, is_copy);
+        if (is_copy != nullptr) {
+          *is_copy = JNI_TRUE;
+        }
+      }
+      if (sc.Check(soa, false, "p", &result)) {
+        return const_cast<void*>(result.p);
+      }
+    }
+    return nullptr;
+  }
+
+  static void ReleasePrimitiveArrayElements(const char* function_name, Primitive::Type type,
+                                            JNIEnv* env, jarray array, void* elems, jint mode) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_ExcepOkay, function_name);
+    if (sc.CheckNonNull(elems) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+      if (soa.ForceCopy()) {
+        elems = GuardedCopy::ReleaseGuardedPACopy(function_name, env, array, elems, mode);
+      }
+      if (!soa.ForceCopy() || elems != nullptr) {
+        JniValueType args[4] = {{.E = env}, {.a = array}, {.p = elems}, {.r = mode}};
+        if (sc.Check(soa, true, "Eapr", args)) {
+          switch (type) {
+            case Primitive::kPrimBoolean:
+              baseEnv(env)->ReleaseBooleanArrayElements(env, down_cast<jbooleanArray>(array),
+                                                        reinterpret_cast<jboolean*>(elems), mode);
+              break;
+            case Primitive::kPrimByte:
+              baseEnv(env)->ReleaseByteArrayElements(env, down_cast<jbyteArray>(array),
+                                                     reinterpret_cast<jbyte*>(elems), mode);
+              break;
+            case Primitive::kPrimChar:
+              baseEnv(env)->ReleaseCharArrayElements(env, down_cast<jcharArray>(array),
+                                                     reinterpret_cast<jchar*>(elems), mode);
+              break;
+            case Primitive::kPrimShort:
+              baseEnv(env)->ReleaseShortArrayElements(env, down_cast<jshortArray>(array),
+                                                      reinterpret_cast<jshort*>(elems), mode);
+              break;
+            case Primitive::kPrimInt:
+              baseEnv(env)->ReleaseIntArrayElements(env, down_cast<jintArray>(array),
+                                                    reinterpret_cast<jint*>(elems), mode);
+              break;
+            case Primitive::kPrimLong:
+              baseEnv(env)->ReleaseLongArrayElements(env, down_cast<jlongArray>(array),
+                                                     reinterpret_cast<jlong*>(elems), mode);
+              break;
+            case Primitive::kPrimFloat:
+              baseEnv(env)->ReleaseFloatArrayElements(env, down_cast<jfloatArray>(array),
+                                                      reinterpret_cast<jfloat*>(elems), mode);
+              break;
+            case Primitive::kPrimDouble:
+              baseEnv(env)->ReleaseDoubleArrayElements(env, down_cast<jdoubleArray>(array),
+                                                       reinterpret_cast<jdouble*>(elems), mode);
+              break;
+            default:
+              LOG(FATAL) << "Unexpected primitive type: " << type;
+          }
+          JniValueType result;
+          result.V = nullptr;
+          sc.Check(soa, false, "V", &result);
+        }
+      }
+    }
+  }
+
+  static void GetPrimitiveArrayRegion(const char* function_name, Primitive::Type type, JNIEnv* env,
+                                      jarray array, jsize start, jsize len, void* buf) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[5] = {{.E = env}, {.a = array}, {.z = start}, {.z = len}, {.p = buf}};
+    // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+    // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+    if (sc.Check(soa, true, "EaIIp", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+      switch (type) {
+        case Primitive::kPrimBoolean:
+          baseEnv(env)->GetBooleanArrayRegion(env, down_cast<jbooleanArray>(array), start, len,
+                                              reinterpret_cast<jboolean*>(buf));
+          break;
+        case Primitive::kPrimByte:
+          baseEnv(env)->GetByteArrayRegion(env, down_cast<jbyteArray>(array), start, len,
+                                           reinterpret_cast<jbyte*>(buf));
+          break;
+        case Primitive::kPrimChar:
+          baseEnv(env)->GetCharArrayRegion(env, down_cast<jcharArray>(array), start, len,
+                                           reinterpret_cast<jchar*>(buf));
+          break;
+        case Primitive::kPrimShort:
+          baseEnv(env)->GetShortArrayRegion(env, down_cast<jshortArray>(array), start, len,
+                                            reinterpret_cast<jshort*>(buf));
+          break;
+        case Primitive::kPrimInt:
+          baseEnv(env)->GetIntArrayRegion(env, down_cast<jintArray>(array), start, len,
+                                          reinterpret_cast<jint*>(buf));
+          break;
+        case Primitive::kPrimLong:
+          baseEnv(env)->GetLongArrayRegion(env, down_cast<jlongArray>(array), start, len,
+                                           reinterpret_cast<jlong*>(buf));
+          break;
+        case Primitive::kPrimFloat:
+          baseEnv(env)->GetFloatArrayRegion(env, down_cast<jfloatArray>(array), start, len,
+                                            reinterpret_cast<jfloat*>(buf));
+          break;
+        case Primitive::kPrimDouble:
+          baseEnv(env)->GetDoubleArrayRegion(env, down_cast<jdoubleArray>(array), start, len,
+                                             reinterpret_cast<jdouble*>(buf));
+          break;
+        default:
+          LOG(FATAL) << "Unexpected primitive type: " << type;
+      }
+      JniValueType result;
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
+    }
+  }
+
+  static void SetPrimitiveArrayRegion(const char* function_name, Primitive::Type type, JNIEnv* env,
+                                      jarray array, jsize start, jsize len, const void* buf) {
+    ScopedObjectAccess soa(env);
+    ScopedCheck sc(kFlag_Default, function_name);
+    JniValueType args[5] = {{.E = env}, {.a = array}, {.z = start}, {.z = len}, {.p = buf}};
+    // Note: the start and len arguments are checked as 'I' rather than 'z' as invalid indices
+    // result in ArrayIndexOutOfBoundsExceptions in the base implementation.
+    if (sc.Check(soa, true, "EaIIp", args) && sc.CheckPrimitiveArrayType(soa, array, type)) {
+      switch (type) {
+        case Primitive::kPrimBoolean:
+          baseEnv(env)->SetBooleanArrayRegion(env, down_cast<jbooleanArray>(array), start, len,
+                                              reinterpret_cast<const jboolean*>(buf));
+          break;
+        case Primitive::kPrimByte:
+          baseEnv(env)->SetByteArrayRegion(env, down_cast<jbyteArray>(array), start, len,
+                                           reinterpret_cast<const jbyte*>(buf));
+          break;
+        case Primitive::kPrimChar:
+          baseEnv(env)->SetCharArrayRegion(env, down_cast<jcharArray>(array), start, len,
+                                           reinterpret_cast<const jchar*>(buf));
+          break;
+        case Primitive::kPrimShort:
+          baseEnv(env)->SetShortArrayRegion(env, down_cast<jshortArray>(array), start, len,
+                                            reinterpret_cast<const jshort*>(buf));
+          break;
+        case Primitive::kPrimInt:
+          baseEnv(env)->SetIntArrayRegion(env, down_cast<jintArray>(array), start, len,
+                                          reinterpret_cast<const jint*>(buf));
+          break;
+        case Primitive::kPrimLong:
+          baseEnv(env)->SetLongArrayRegion(env, down_cast<jlongArray>(array), start, len,
+                                           reinterpret_cast<const jlong*>(buf));
+          break;
+        case Primitive::kPrimFloat:
+          baseEnv(env)->SetFloatArrayRegion(env, down_cast<jfloatArray>(array), start, len,
+                                            reinterpret_cast<const jfloat*>(buf));
+          break;
+        case Primitive::kPrimDouble:
+          baseEnv(env)->SetDoubleArrayRegion(env, down_cast<jdoubleArray>(array), start, len,
+                                             reinterpret_cast<const jdouble*>(buf));
+          break;
+        default:
+          LOG(FATAL) << "Unexpected primitive type: " << type;
+      }
+      JniValueType result;
+      result.V = nullptr;
+      sc.Check(soa, false, "V", &result);
+    }
   }
 };
 
@@ -2025,38 +3590,58 @@ const JNINativeInterface* GetCheckJniNativeInterface() {
 class CheckJII {
  public:
   static jint DestroyJavaVM(JavaVM* vm) {
-    ScopedCheck sc(vm, false, __FUNCTION__);
-    sc.Check(true, "v", vm);
-    return CHECK_JNI_EXIT("I", BaseVm(vm)->DestroyJavaVM(vm));
+    ScopedCheck sc(kFlag_Invocation, __FUNCTION__, false);
+    JniValueType args[1] = {{.v = vm}};
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "v", args);
+    JniValueType result;
+    result.i = BaseVm(vm)->DestroyJavaVM(vm);
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+    return result.i;
   }
 
   static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
-    ScopedCheck sc(vm, false, __FUNCTION__);
-    sc.Check(true, "vpp", vm, p_env, thr_args);
-    return CHECK_JNI_EXIT("I", BaseVm(vm)->AttachCurrentThread(vm, p_env, thr_args));
+    ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+    JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.p = thr_args}};
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpp", args);
+    JniValueType result;
+    result.i = BaseVm(vm)->AttachCurrentThread(vm, p_env, thr_args);
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+    return result.i;
   }
 
   static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
-    ScopedCheck sc(vm, false, __FUNCTION__);
-    sc.Check(true, "vpp", vm, p_env, thr_args);
-    return CHECK_JNI_EXIT("I", BaseVm(vm)->AttachCurrentThreadAsDaemon(vm, p_env, thr_args));
+    ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+    JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.p = thr_args}};
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpp", args);
+    JniValueType result;
+    result.i = BaseVm(vm)->AttachCurrentThreadAsDaemon(vm, p_env, thr_args);
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+    return result.i;
   }
 
   static jint DetachCurrentThread(JavaVM* vm) {
-    ScopedCheck sc(vm, true, __FUNCTION__);
-    sc.Check(true, "v", vm);
-    return CHECK_JNI_EXIT("I", BaseVm(vm)->DetachCurrentThread(vm));
+    ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+    JniValueType args[1] = {{.v = vm}};
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "v", args);
+    JniValueType result;
+    result.i = BaseVm(vm)->DetachCurrentThread(vm);
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+    return result.i;
   }
 
-  static jint GetEnv(JavaVM* vm, void** env, jint version) {
-    ScopedCheck sc(vm, true, __FUNCTION__);
-    sc.Check(true, "vpI", vm);
-    return CHECK_JNI_EXIT("I", BaseVm(vm)->GetEnv(vm, env, version));
+  static jint GetEnv(JavaVM* vm, void** p_env, jint version) {
+    ScopedCheck sc(kFlag_Invocation, __FUNCTION__);
+    JniValueType args[3] = {{.v = vm}, {.p = p_env}, {.I = version}};
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), true, "vpI", args);
+    JniValueType result;
+    result.i = BaseVm(vm)->GetEnv(vm, p_env, version);
+    sc.CheckNonHeap(reinterpret_cast<JavaVMExt*>(vm), false, "i", &result);
+    return result.i;
   }
 
  private:
-  static inline const JNIInvokeInterface* BaseVm(JavaVM* vm) {
-    return reinterpret_cast<JavaVMExt*>(vm)->unchecked_functions;
+  static const JNIInvokeInterface* BaseVm(JavaVM* vm) {
+    return reinterpret_cast<JavaVMExt*>(vm)->GetUncheckedFunctions();
   }
 };
 
diff --git a/runtime/check_jni.h b/runtime/check_jni.h
new file mode 100644 (file)
index 0000000..f41abf8
--- /dev/null
@@ -0,0 +1,29 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_CHECK_JNI_H_
+#define ART_RUNTIME_CHECK_JNI_H_
+
+#include <jni.h>
+
+namespace art {
+
+const JNINativeInterface* GetCheckJniNativeInterface();
+const JNIInvokeInterface* GetCheckJniInvokeInterface();
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_CHECK_JNI_H_
index 27483e7..0746e0c 100644 (file)
 
 #include "class_linker.h"
 
-#include <fcntl.h>
-#include <sys/file.h>
-#include <sys/stat.h>
 #include <deque>
 #include <memory>
+#include <queue>
 #include <string>
 #include <utility>
 #include <vector>
@@ -136,6 +134,88 @@ static size_t Hash(const char* s) {
   return hash;
 }
 
+// Gap between two fields in object layout.
+struct FieldGap {
+  uint32_t start_offset;  // The offset from the start of the object.
+  uint32_t size;  // The gap size of 1, 2, or 4 bytes.
+};
+struct FieldGapsComparator {
+  FieldGapsComparator() {
+  }
+  bool operator() (const FieldGap& lhs, const FieldGap& rhs)
+      NO_THREAD_SAFETY_ANALYSIS {
+    // Sort by gap size, largest first.
+    return lhs.size > rhs.size;
+  }
+};
+typedef std::priority_queue<FieldGap, std::vector<FieldGap>, FieldGapsComparator> FieldGaps;
+
+// Adds largest aligned gaps to queue of gaps.
+void AddFieldGap(uint32_t gap_start, uint32_t gap_end, FieldGaps* gaps) {
+  DCHECK(gaps != nullptr);
+
+  uint32_t current_offset = gap_start;
+  while (current_offset != gap_end) {
+    size_t remaining = gap_end - current_offset;
+    if (remaining >= sizeof(uint32_t) && IsAligned<4>(current_offset)) {
+      gaps->push(FieldGap {current_offset, sizeof(uint32_t)});
+      current_offset += sizeof(uint32_t);
+    } else if (remaining >= sizeof(uint16_t) && IsAligned<2>(current_offset)) {
+      gaps->push(FieldGap {current_offset, sizeof(uint16_t)});
+      current_offset += sizeof(uint16_t);
+    } else {
+      gaps->push(FieldGap {current_offset, sizeof(uint8_t)});
+      current_offset += sizeof(uint8_t);
+    }
+    DCHECK_LE(current_offset, gap_end) << "Overran gap";
+  }
+}
+// Shuffle fields forward, making use of gaps whenever possible.
+template<int n>
+static void ShuffleForward(const size_t num_fields, size_t* current_field_idx,
+                           MemberOffset* field_offset,
+                           mirror::ObjectArray<mirror::ArtField>* fields,
+                           std::deque<mirror::ArtField*>* grouped_and_sorted_fields,
+                           FieldGaps* gaps)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  DCHECK(current_field_idx != nullptr);
+  DCHECK(grouped_and_sorted_fields != nullptr);
+  DCHECK(fields != nullptr || (num_fields == 0 && grouped_and_sorted_fields->empty()));
+  DCHECK(gaps != nullptr);
+  DCHECK(field_offset != nullptr);
+
+  DCHECK(IsPowerOfTwo(n));
+  while (!grouped_and_sorted_fields->empty()) {
+    mirror::ArtField* field = grouped_and_sorted_fields->front();
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    if (Primitive::ComponentSize(type) < n) {
+      break;
+    }
+    if (!IsAligned<n>(field_offset->Uint32Value())) {
+      MemberOffset old_offset = *field_offset;
+      *field_offset = MemberOffset(RoundUp(field_offset->Uint32Value(), n));
+      AddFieldGap(old_offset.Uint32Value(), field_offset->Uint32Value(), gaps);
+    }
+    CHECK(type != Primitive::kPrimNot) << PrettyField(field);  // should be primitive types
+    grouped_and_sorted_fields->pop_front();
+    fields->Set<false>(*current_field_idx, field);
+    if (!gaps->empty() && gaps->top().size >= n) {
+      FieldGap gap = gaps->top();
+      gaps->pop();
+      DCHECK(IsAligned<n>(gap.start_offset));
+      field->SetOffset(MemberOffset(gap.start_offset));
+      if (gap.size > n) {
+        AddFieldGap(gap.start_offset + n, gap.start_offset + gap.size, gaps);
+      }
+    } else {
+      DCHECK(IsAligned<n>(field_offset->Uint32Value()));
+      field->SetOffset(*field_offset);
+      *field_offset = MemberOffset(field_offset->Uint32Value() + n);
+    }
+    ++(*current_field_idx);
+  }
+}
+
 const char* ClassLinker::class_roots_descriptors_[] = {
   "Ljava/lang/Class;",
   "Ljava/lang/Object;",
@@ -1917,7 +1997,7 @@ ClassPathEntry FindInClassPath(const char* descriptor,
 }
 
 mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
-                                      Handle<mirror::ClassLoader> class_loader) {
+                                      ConstHandle<mirror::ClassLoader> class_loader) {
   DCHECK_NE(*descriptor, '\0') << "descriptor is empty string";
   DCHECK(self != nullptr);
   self->AssertNoPendingException();
@@ -2013,7 +2093,7 @@ mirror::Class* ClassLinker::FindClass(Thread* self, const char* descriptor,
 }
 
 mirror::Class* ClassLinker::DefineClass(const char* descriptor,
-                                        Handle<mirror::ClassLoader> class_loader,
+                                        ConstHandle<mirror::ClassLoader> class_loader,
                                         const DexFile& dex_file,
                                         const DexFile::ClassDef& dex_class_def) {
   Thread* self = Thread::Current();
@@ -2126,6 +2206,8 @@ uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
                                                        const DexFile::ClassDef& dex_class_def) {
   const byte* class_data = dex_file.GetClassData(dex_class_def);
   size_t num_ref = 0;
+  size_t num_8 = 0;
+  size_t num_16 = 0;
   size_t num_32 = 0;
   size_t num_64 = 0;
   if (class_data != NULL) {
@@ -2133,33 +2215,49 @@ uint32_t ClassLinker::SizeOfClassWithoutEmbeddedTables(const DexFile& dex_file,
       const DexFile::FieldId& field_id = dex_file.GetFieldId(it.GetMemberIndex());
       const char* descriptor = dex_file.GetFieldTypeDescriptor(field_id);
       char c = descriptor[0];
-      if (c == 'L' || c == '[') {
-        num_ref++;
-      } else if (c == 'J' || c == 'D') {
-        num_64++;
-      } else {
-        num_32++;
+      switch (c) {
+        case 'L':
+        case '[':
+          num_ref++;
+          break;
+        case 'J':
+        case 'D':
+          num_64++;
+          break;
+        case 'I':
+        case 'F':
+          num_32++;
+          break;
+        case 'S':
+        case 'C':
+          num_16++;
+          break;
+        case 'B':
+        case 'Z':
+          num_8++;
+          break;
+        default:
+          LOG(FATAL) << "Unknown descriptor: " << c;
       }
     }
   }
-  return mirror::Class::ComputeClassSize(false, 0, num_32, num_64, num_ref);
+  return mirror::Class::ComputeClassSize(false, 0, num_8, num_16, num_32, num_64, num_ref);
 }
 
-bool ClassLinker::FindOatClass(const DexFile& dex_file,
-                               uint16_t class_def_idx,
-                               OatFile::OatClass* oat_class) {
-  DCHECK(oat_class != nullptr);
+OatFile::OatClass ClassLinker::FindOatClass(const DexFile& dex_file, uint16_t class_def_idx,
+                                            bool* found) {
   DCHECK_NE(class_def_idx, DexFile::kDexNoIndex16);
   const OatFile* oat_file = FindOpenedOatFileForDexFile(dex_file);
   if (oat_file == nullptr) {
-    return false;
+    *found = false;
+    return OatFile::OatClass::Invalid();
   }
   uint dex_location_checksum = dex_file.GetLocationChecksum();
   const OatFile::OatDexFile* oat_dex_file = oat_file->GetOatDexFile(dex_file.GetLocation().c_str(),
                                                                     &dex_location_checksum);
   CHECK(oat_dex_file != NULL) << dex_file.GetLocation();
-  *oat_class = oat_dex_file->GetOatClass(class_def_idx);
-  return true;
+  *found = true;
+  return oat_dex_file->GetOatClass(class_def_idx);
 }
 
 static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16_t class_def_idx,
@@ -2196,8 +2294,7 @@ static uint32_t GetOatMethodIndexFromMethodIndex(const DexFile& dex_file, uint16
   return 0;
 }
 
-bool ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method) {
-  DCHECK(oat_method != nullptr);
+const OatFile::OatMethod ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, bool* found) {
   // Although we overwrite the trampoline of non-static methods, we may get here via the resolution
   // method for direct methods (or virtual methods made direct).
   mirror::Class* declaring_class = method->GetDeclaringClass();
@@ -2224,15 +2321,14 @@ bool ClassLinker::FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod
             GetOatMethodIndexFromMethodIndex(*declaring_class->GetDexCache()->GetDexFile(),
                                              method->GetDeclaringClass()->GetDexClassDefIndex(),
                                              method->GetDexMethodIndex()));
-  OatFile::OatClass oat_class;
-  if (!FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
-                    declaring_class->GetDexClassDefIndex(),
-                    &oat_class)) {
-    return false;
+  OatFile::OatClass oat_class = FindOatClass(*declaring_class->GetDexCache()->GetDexFile(),
+                                             declaring_class->GetDexClassDefIndex(),
+                                             found);
+  if (!*found) {
+    return OatFile::OatMethod::Invalid();
   }
-
-  *oat_method = oat_class.GetOatMethod(oat_method_index);
-  return true;
+  *found = true;
+  return oat_class.GetOatMethod(oat_method_index);
 }
 
 // Special case to get oat code without overwriting a trampoline.
@@ -2241,9 +2337,10 @@ const void* ClassLinker::GetQuickOatCodeFor(mirror::ArtMethod* method) {
   if (method->IsProxyMethod()) {
     return GetQuickProxyInvokeHandler();
   }
-  OatFile::OatMethod oat_method;
+  bool found;
+  OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
   const void* result = nullptr;
-  if (FindOatMethodFor(method, &oat_method)) {
+  if (found) {
     result = oat_method.GetQuickCode();
   }
 
@@ -2269,10 +2366,11 @@ const void* ClassLinker::GetPortableOatCodeFor(mirror::ArtMethod* method,
   if (method->IsProxyMethod()) {
     return GetPortableProxyInvokeHandler();
   }
-  OatFile::OatMethod oat_method;
+  bool found;
+  OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
   const void* result = nullptr;
   const void* quick_code = nullptr;
-  if (FindOatMethodFor(method, &oat_method)) {
+  if (found) {
     result = oat_method.GetPortableCode();
     quick_code = oat_method.GetQuickCode();
   }
@@ -2291,10 +2389,29 @@ const void* ClassLinker::GetPortableOatCodeFor(mirror::ArtMethod* method,
   return result;
 }
 
+const void* ClassLinker::GetOatMethodQuickCodeFor(mirror::ArtMethod* method) {
+  if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
+    return nullptr;
+  }
+  bool found;
+  OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+  return found ? oat_method.GetQuickCode() : nullptr;
+}
+
+const void* ClassLinker::GetOatMethodPortableCodeFor(mirror::ArtMethod* method) {
+  if (method->IsNative() || method->IsAbstract() || method->IsProxyMethod()) {
+    return nullptr;
+  }
+  bool found;
+  OatFile::OatMethod oat_method = FindOatMethodFor(method, &found);
+  return found ? oat_method.GetPortableCode() : nullptr;
+}
+
 const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
                                             uint32_t method_idx) {
-  OatFile::OatClass oat_class;
-  if (!FindOatClass(dex_file, class_def_idx, &oat_class)) {
+  bool found;
+  OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
+  if (!found) {
     return nullptr;
   }
   uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
@@ -2303,8 +2420,9 @@ const void* ClassLinker::GetQuickOatCodeFor(const DexFile& dex_file, uint16_t cl
 
 const void* ClassLinker::GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx,
                                                uint32_t method_idx) {
-  OatFile::OatClass oat_class;
-  if (!FindOatClass(dex_file, class_def_idx, &oat_class)) {
+  bool found;
+  OatFile::OatClass oat_class = FindOatClass(dex_file, class_def_idx, &found);
+  if (!found) {
     return nullptr;
   }
   uint32_t oat_method_idx = GetOatMethodIndexFromMethodIndex(dex_file, class_def_idx, method_idx);
@@ -2360,8 +2478,9 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
   while (it.HasNextInstanceField()) {
     it.Next();
   }
-  OatFile::OatClass oat_class;
-  bool has_oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(), &oat_class);
+  bool has_oat_class;
+  OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
+                                             &has_oat_class);
   // Link the code of methods skipped by LinkCode.
   for (size_t method_index = 0; it.HasNextDirectMethod(); ++method_index, it.Next()) {
     mirror::ArtMethod* method = klass->GetDirectMethod(method_index);
@@ -2404,7 +2523,8 @@ void ClassLinker::FixupStaticTrampolines(mirror::Class* klass) {
   // Ignore virtual methods on the iterator.
 }
 
-void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
+void ClassLinker::LinkCode(ConstHandle<mirror::ArtMethod> method,
+                           const OatFile::OatClass* oat_class,
                            const DexFile& dex_file, uint32_t dex_method_index,
                            uint32_t method_index) {
   if (Runtime::Current()->IsCompiler()) {
@@ -2482,9 +2602,11 @@ void ClassLinker::LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatC
                                                    have_portable_code);
 }
 
+
+
 void ClassLinker::LoadClass(const DexFile& dex_file,
                             const DexFile::ClassDef& dex_class_def,
-                            Handle<mirror::Class> klass,
+                            ConstHandle<mirror::Class> klass,
                             mirror::ClassLoader* class_loader) {
   CHECK(klass.Get() != NULL);
   CHECK(klass->GetDexCache() != NULL);
@@ -2512,19 +2634,23 @@ void ClassLinker::LoadClass(const DexFile& dex_file,
     return;  // no fields or methods - for example a marker interface
   }
 
-  OatFile::OatClass oat_class;
-  if (Runtime::Current()->IsStarted()
-      && !Runtime::Current()->UseCompileTimeClassPath()
-      && FindOatClass(dex_file, klass->GetDexClassDefIndex(), &oat_class)) {
-    LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
-  } else {
+
+  bool has_oat_class = false;
+  if (Runtime::Current()->IsStarted() && !Runtime::Current()->UseCompileTimeClassPath()) {
+    OatFile::OatClass oat_class = FindOatClass(dex_file, klass->GetDexClassDefIndex(),
+                                               &has_oat_class);
+    if (has_oat_class) {
+      LoadClassMembers(dex_file, class_data, klass, class_loader, &oat_class);
+    }
+  }
+  if (!has_oat_class) {
     LoadClassMembers(dex_file, class_data, klass, class_loader, nullptr);
   }
 }
 
 void ClassLinker::LoadClassMembers(const DexFile& dex_file,
                                    const byte* class_data,
-                                   Handle<mirror::Class> klass,
+                                   ConstHandle<mirror::Class> klass,
                                    mirror::ClassLoader* class_loader,
                                    const OatFile::OatClass* oat_class) {
   // Load fields.
@@ -2618,7 +2744,8 @@ void ClassLinker::LoadClassMembers(const DexFile& dex_file,
 }
 
 void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIterator& it,
-                            Handle<mirror::Class> klass, Handle<mirror::ArtField> dst) {
+                            ConstHandle<mirror::Class> klass,
+                            ConstHandle<mirror::ArtField> dst) {
   uint32_t field_idx = it.GetMemberIndex();
   dst->SetDexFieldIndex(field_idx);
   dst->SetDeclaringClass(klass.Get());
@@ -2627,7 +2754,7 @@ void ClassLinker::LoadField(const DexFile& /*dex_file*/, const ClassDataItemIter
 
 mirror::ArtMethod* ClassLinker::LoadMethod(Thread* self, const DexFile& dex_file,
                                            const ClassDataItemIterator& it,
-                                           Handle<mirror::Class> klass) {
+                                           ConstHandle<mirror::Class> klass) {
   uint32_t dex_method_idx = it.GetMemberIndex();
   const DexFile::MethodId& method_id = dex_file.GetMethodId(dex_method_idx);
   const char* method_name = dex_file.StringDataByIdx(method_id.name_idx_);
@@ -2699,7 +2826,7 @@ void ClassLinker::AppendToBootClassPath(const DexFile& dex_file) {
 }
 
 void ClassLinker::AppendToBootClassPath(const DexFile& dex_file,
-                                        Handle<mirror::DexCache> dex_cache) {
+                                        ConstHandle<mirror::DexCache> dex_cache) {
   CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
   boot_class_path_.push_back(&dex_file);
   RegisterDexFile(dex_file, dex_cache);
@@ -2722,7 +2849,7 @@ bool ClassLinker::IsDexFileRegistered(const DexFile& dex_file) {
 }
 
 void ClassLinker::RegisterDexFileLocked(const DexFile& dex_file,
-                                        Handle<mirror::DexCache> dex_cache) {
+                                        ConstHandle<mirror::DexCache> dex_cache) {
   dex_lock_.AssertExclusiveHeld(Thread::Current());
   CHECK(dex_cache.Get() != NULL) << dex_file.GetLocation();
   CHECK(dex_cache->GetLocation()->Equals(dex_file.GetLocation()))
@@ -2759,7 +2886,7 @@ void ClassLinker::RegisterDexFile(const DexFile& dex_file) {
 }
 
 void ClassLinker::RegisterDexFile(const DexFile& dex_file,
-                                  Handle<mirror::DexCache> dex_cache) {
+                                  ConstHandle<mirror::DexCache> dex_cache) {
   WriterMutexLock mu(Thread::Current(), dex_lock_);
   RegisterDexFileLocked(dex_file, dex_cache);
 }
@@ -2837,7 +2964,7 @@ mirror::Class* ClassLinker::InitializePrimitiveClass(mirror::Class* primitive_cl
 //
 // Returns NULL with an exception raised on failure.
 mirror::Class* ClassLinker::CreateArrayClass(Thread* self, const char* descriptor,
-                                             Handle<mirror::ClassLoader> class_loader) {
+                                             ConstHandle<mirror::ClassLoader> class_loader) {
   // Identify the underlying component type
   CHECK_EQ('[', descriptor[0]);
   StackHandleScope<2> hs(self);
@@ -3234,7 +3361,7 @@ void ClassLinker::LookupClasses(const char* descriptor, std::vector<mirror::Clas
   }
 }
 
-void ClassLinker::VerifyClass(Handle<mirror::Class> klass) {
+void ClassLinker::VerifyClass(ConstHandle<mirror::Class> klass) {
   // TODO: assert that the monitor on the Class is held
   Thread* self = Thread::Current();
   ObjectLock<mirror::Class> lock(self, klass);
@@ -3448,7 +3575,7 @@ bool ClassLinker::VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class
 }
 
 void ClassLinker::ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
-                                                    Handle<mirror::Class> klass) {
+                                                    ConstHandle<mirror::Class> klass) {
   for (size_t i = 0; i < klass->NumDirectMethods(); i++) {
     ResolveMethodExceptionHandlerTypes(dex_file, klass->GetDirectMethod(i));
   }
@@ -3488,8 +3615,8 @@ void ClassLinker::ResolveMethodExceptionHandlerTypes(const DexFile& dex_file,
 }
 
 static void CheckProxyConstructor(mirror::ArtMethod* constructor);
-static void CheckProxyMethod(Handle<mirror::ArtMethod> method,
-                             Handle<mirror::ArtMethod> prototype);
+static void CheckProxyMethod(ConstHandle<mirror::ArtMethod> method,
+                             ConstHandle<mirror::ArtMethod> prototype);
 
 mirror::Class* ClassLinker::CreateProxyClass(ScopedObjectAccessAlreadyRunnable& soa, jstring name,
                                              jobjectArray interfaces, jobject loader,
@@ -3680,7 +3807,7 @@ mirror::ArtMethod* ClassLinker::FindMethodForProxy(mirror::Class* proxy_class,
 
 
 mirror::ArtMethod* ClassLinker::CreateProxyConstructor(Thread* self,
-                                                       Handle<mirror::Class> klass,
+                                                       ConstHandle<mirror::Class> klass,
                                                        mirror::Class* proxy_class) {
   // Create constructor for Proxy that must initialize h
   mirror::ObjectArray<mirror::ArtMethod>* proxy_direct_methods =
@@ -3714,8 +3841,8 @@ static void CheckProxyConstructor(mirror::ArtMethod* constructor)
 }
 
 mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
-                                                  Handle<mirror::Class> klass,
-                                                  Handle<mirror::ArtMethod> prototype) {
+                                                  ConstHandle<mirror::Class> klass,
+                                                  ConstHandle<mirror::ArtMethod> prototype) {
   // Ensure prototype is in dex cache so that we can use the dex cache to look up the overridden
   // prototype method
   prototype->GetDeclaringClass()->GetDexCache()->SetResolvedMethod(prototype->GetDexMethodIndex(),
@@ -3742,7 +3869,8 @@ mirror::ArtMethod* ClassLinker::CreateProxyMethod(Thread* self,
   return method;
 }
 
-static void CheckProxyMethod(Handle<mirror::ArtMethod> method, Handle<mirror::ArtMethod> prototype)
+static void CheckProxyMethod(ConstHandle<mirror::ArtMethod> method,
+                             ConstHandle<mirror::ArtMethod> prototype)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   // Basic sanity
   CHECK(!prototype->IsFinal());
@@ -3756,8 +3884,9 @@ static void CheckProxyMethod(Handle<mirror::ArtMethod> method, Handle<mirror::Ar
   CHECK(prototype->HasSameDexCacheResolvedTypes(method.Get()));
   CHECK_EQ(prototype->GetDexMethodIndex(), method->GetDexMethodIndex());
 
-  MethodHelper mh(method);
-  MethodHelper mh2(prototype);
+  StackHandleScope<2> hs(Thread::Current());
+  MethodHelper mh(hs.NewHandle(method.Get()));
+  MethodHelper mh2(hs.NewHandle(prototype.Get()));
   CHECK_STREQ(method->GetName(), prototype->GetName());
   CHECK_STREQ(method->GetShorty(), prototype->GetShorty());
   // More complex sanity - via dex cache
@@ -3802,7 +3931,7 @@ bool ClassLinker::IsInitialized() const {
   return init_done_;
 }
 
-bool ClassLinker::InitializeClass(Handle<mirror::Class> klass, bool can_init_statics,
+bool ClassLinker::InitializeClass(ConstHandle<mirror::Class> klass, bool can_init_statics,
                                   bool can_init_parents) {
   // see JLS 3rd edition, 12.4.2 "Detailed Initialization Procedure" for the locking protocol
 
@@ -3917,23 +4046,26 @@ bool ClassLinker::InitializeClass(Handle<mirror::Class> klass, bool can_init_sta
     const DexFile::ClassDef* dex_class_def = klass->GetClassDef();
     CHECK(dex_class_def != NULL);
     const DexFile& dex_file = klass->GetDexFile();
-    StackHandleScope<2> hs(self);
+    StackHandleScope<3> hs(self);
     Handle<mirror::ClassLoader> class_loader(hs.NewHandle(klass->GetClassLoader()));
     Handle<mirror::DexCache> dex_cache(hs.NewHandle(klass->GetDexCache()));
-    EncodedStaticFieldValueIterator it(dex_file, &dex_cache, &class_loader,
-                                       this, *dex_class_def);
-    if (it.HasNext()) {
+    EncodedStaticFieldValueIterator value_it(dex_file, &dex_cache, &class_loader,
+                                             this, *dex_class_def);
+    const byte* class_data = dex_file.GetClassData(*dex_class_def);
+    ClassDataItemIterator field_it(dex_file, class_data);
+    if (value_it.HasNext()) {
+      DCHECK(field_it.HasNextStaticField());
       CHECK(can_init_statics);
-      // We reordered the fields, so we need to be able to map the
-      // field indexes to the right fields.
-      SafeMap<uint32_t, mirror::ArtField*> field_map;
-      ConstructFieldMap(dex_file, *dex_class_def, klass.Get(), field_map);
-      for (size_t i = 0; it.HasNext(); i++, it.Next()) {
+      for ( ; value_it.HasNext(); value_it.Next(), field_it.Next()) {
+        StackHandleScope<1> hs(self);
+        Handle<mirror::ArtField> field(hs.NewHandle(
+            ResolveField(dex_file, field_it.GetMemberIndex(), dex_cache, class_loader, true)));
         if (Runtime::Current()->IsActiveTransaction()) {
-          it.ReadValueToField<true>(field_map.Get(i));
+          value_it.ReadValueToField<true>(field);
         } else {
-          it.ReadValueToField<false>(field_map.Get(i));
+          value_it.ReadValueToField<false>(field);
         }
+        DCHECK(!value_it.HasNext() || field_it.HasNextStaticField());
       }
     }
   }
@@ -3976,7 +4108,7 @@ bool ClassLinker::InitializeClass(Handle<mirror::Class> klass, bool can_init_sta
   return success;
 }
 
-bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
+bool ClassLinker::WaitForInitializeClass(ConstHandle<mirror::Class> klass, Thread* self,
                                          ObjectLock<mirror::Class>& lock)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   while (true) {
@@ -4016,7 +4148,7 @@ bool ClassLinker::WaitForInitializeClass(Handle<mirror::Class> klass, Thread* se
   LOG(FATAL) << "Not Reached" << PrettyClass(klass.Get());
 }
 
-bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) {
+bool ClassLinker::ValidateSuperClassDescriptors(ConstHandle<mirror::Class> klass) {
   if (klass->IsInterface()) {
     return true;
   }
@@ -4061,7 +4193,7 @@ bool ClassLinker::ValidateSuperClassDescriptors(Handle<mirror::Class> klass) {
   return true;
 }
 
-bool ClassLinker::EnsureInitialized(Handle<mirror::Class> c, bool can_init_fields,
+bool ClassLinker::EnsureInitialized(ConstHandle<mirror::Class> c, bool can_init_fields,
                                     bool can_init_parents) {
   DCHECK(c.Get() != nullptr);
   if (c->IsInitialized()) {
@@ -4079,20 +4211,6 @@ bool ClassLinker::EnsureInitialized(Handle<mirror::Class> c, bool can_init_field
   return success;
 }
 
-void ClassLinker::ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
-                                    mirror::Class* c,
-                                    SafeMap<uint32_t, mirror::ArtField*>& field_map) {
-  const byte* class_data = dex_file.GetClassData(dex_class_def);
-  ClassDataItemIterator it(dex_file, class_data);
-  StackHandleScope<2> hs(Thread::Current());
-  Handle<mirror::DexCache> dex_cache(hs.NewHandle(c->GetDexCache()));
-  Handle<mirror::ClassLoader> class_loader(hs.NewHandle(c->GetClassLoader()));
-  CHECK(!kMovingFields);
-  for (size_t i = 0; it.HasNextStaticField(); i++, it.Next()) {
-    field_map.Put(i, ResolveField(dex_file, it.GetMemberIndex(), dex_cache, class_loader, true));
-  }
-}
-
 void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror::Class* new_class) {
   mirror::ObjectArray<mirror::ArtField>* fields = new_class->GetIFields();
   if (fields != nullptr) {
@@ -4131,8 +4249,8 @@ void ClassLinker::FixupTemporaryDeclaringClass(mirror::Class* temp_class, mirror
   }
 }
 
-bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
-                            Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+bool ClassLinker::LinkClass(Thread* self, const char* descriptor, ConstHandle<mirror::Class> klass,
+                            ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces,
                             mirror::Class** new_class) {
   CHECK_EQ(mirror::Class::kStatusLoaded, klass->GetStatus());
 
@@ -4198,7 +4316,7 @@ bool ClassLinker::LinkClass(Thread* self, const char* descriptor, Handle<mirror:
   return true;
 }
 
-bool ClassLinker::LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file) {
+bool ClassLinker::LoadSuperAndInterfaces(ConstHandle<mirror::Class> klass, const DexFile& dex_file) {
   CHECK_EQ(mirror::Class::kStatusIdx, klass->GetStatus());
   const DexFile::ClassDef& class_def = dex_file.GetClassDef(klass->GetDexClassDefIndex());
   uint16_t super_class_idx = class_def.superclass_idx_;
@@ -4242,7 +4360,7 @@ bool ClassLinker::LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexF
   return true;
 }
 
-bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
+bool ClassLinker::LinkSuperClass(ConstHandle<mirror::Class> klass) {
   CHECK(!klass->IsPrimitive());
   mirror::Class* super = klass->GetSuperClass();
   if (klass.Get() == GetClassRoot(kJavaLangObject)) {
@@ -4302,8 +4420,8 @@ bool ClassLinker::LinkSuperClass(Handle<mirror::Class> klass) {
 }
 
 // Populate the class vtable and itable. Compute return type indices.
-bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
-                              Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
+bool ClassLinker::LinkMethods(Thread* self, ConstHandle<mirror::Class> klass,
+                              ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces) {
   if (klass->IsInterface()) {
     // No vtable.
     size_t count = klass->NumVirtualMethods();
@@ -4323,7 +4441,7 @@ bool ClassLinker::LinkMethods(Thread* self, Handle<mirror::Class> klass,
   return true;
 }
 
-bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass) {
+bool ClassLinker::LinkVirtualMethods(Thread* self, ConstHandle<mirror::Class> klass) {
   if (klass->HasSuperClass()) {
     uint32_t max_count = klass->NumVirtualMethods() +
         klass->GetSuperClass()->GetVTableLength();
@@ -4425,8 +4543,8 @@ bool ClassLinker::LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
   return true;
 }
 
-bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
-                                       Handle<mirror::ObjectArray<mirror::Class>> interfaces) {
+bool ClassLinker::LinkInterfaceMethods(ConstHandle<mirror::Class> klass,
+                                       ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces) {
   Thread* const self = Thread::Current();
   Runtime* const runtime = Runtime::Current();
   // Set the imt table to be all conflicts by default.
@@ -4468,7 +4586,7 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
       return true;
     }
   }
-  StackHandleScope<4> hs(self);
+  StackHandleScope<5> hs(self);
   Handle<mirror::IfTable> iftable(hs.NewHandle(AllocIfTable(self, ifcount)));
   if (UNLIKELY(iftable.Get() == NULL)) {
     CHECK(self->IsExceptionPending());  // OOME.
@@ -4551,7 +4669,13 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
   }
   MethodHelper interface_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
   MethodHelper vtable_mh(hs.NewHandle<mirror::ArtMethod>(nullptr));
-  std::vector<mirror::ArtMethod*> miranda_list;
+  size_t max_miranda_methods = 0;  // The max size of miranda_list.
+  for (size_t i = 0; i < ifcount; ++i) {
+    max_miranda_methods += iftable->GetInterface(i)->NumVirtualMethods();
+  }
+  Handle<mirror::ObjectArray<mirror::ArtMethod>>
+      miranda_list(hs.NewHandle(AllocArtMethodArray(self, max_miranda_methods)));
+  size_t miranda_list_size = 0;  // The current size of miranda_list.
   for (size_t i = 0; i < ifcount; ++i) {
     size_t num_methods = iftable->GetInterface(i)->NumVirtualMethods();
     if (num_methods > 0) {
@@ -4566,8 +4690,7 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
       Handle<mirror::ObjectArray<mirror::ArtMethod>> vtable(
           hs.NewHandle(klass->GetVTableDuringLinking()));
       for (size_t j = 0; j < num_methods; ++j) {
-        mirror::ArtMethod* interface_method = iftable->GetInterface(i)->GetVirtualMethod(j);
-        interface_mh.ChangeMethod(interface_method);
+        interface_mh.ChangeMethod(iftable->GetInterface(i)->GetVirtualMethod(j));
         int32_t k;
         // For each method listed in the interface's method list, find the
         // matching method in our class's method list.  We want to favor the
@@ -4578,22 +4701,21 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
         // those don't end up in the virtual method table, so it shouldn't
         // matter which direction we go.  We walk it backward anyway.)
         for (k = vtable->GetLength() - 1; k >= 0; --k) {
-          mirror::ArtMethod* vtable_method = vtable->Get(k);
-          vtable_mh.ChangeMethod(vtable_method);
+          vtable_mh.ChangeMethod(vtable->Get(k));
           if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
-            if (!vtable_method->IsAbstract() && !vtable_method->IsPublic()) {
+            if (!vtable_mh.Get()->IsAbstract() && !vtable_mh.Get()->IsPublic()) {
               ThrowIllegalAccessError(
                   klass.Get(),
                   "Method '%s' implementing interface method '%s' is not public",
-                  PrettyMethod(vtable_method).c_str(),
-                  PrettyMethod(interface_method).c_str());
+                  PrettyMethod(vtable_mh.Get()).c_str(),
+                  PrettyMethod(interface_mh.Get()).c_str());
               return false;
             }
-            method_array->Set<false>(j, vtable_method);
+            method_array->Set<false>(j, vtable_mh.Get());
             // Place method in imt if entry is empty, place conflict otherwise.
-            uint32_t imt_index = interface_method->GetDexMethodIndex() % mirror::Class::kImtSize;
+            uint32_t imt_index = interface_mh.Get()->GetDexMethodIndex() % mirror::Class::kImtSize;
             if (imtable->Get(imt_index) == NULL) {
-              imtable->Set<false>(imt_index, vtable_method);
+              imtable->Set<false>(imt_index, vtable_mh.Get());
               imtable_changed = true;
             } else {
               imtable->Set<false>(imt_index, runtime->GetImtConflictMethod());
@@ -4604,7 +4726,9 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
         if (k < 0) {
           StackHandleScope<1> hs(self);
           auto miranda_method = hs.NewHandle<mirror::ArtMethod>(nullptr);
-          for (mirror::ArtMethod* mir_method : miranda_list) {
+          for (size_t l = 0; l < miranda_list_size; ++l) {
+            mirror::ArtMethod* mir_method = miranda_list->Get(l);
+            DCHECK(mir_method != nullptr);
             vtable_mh.ChangeMethod(mir_method);
             if (interface_mh.HasSameNameAndSignature(&vtable_mh)) {
               miranda_method.Assign(mir_method);
@@ -4613,13 +4737,13 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
           }
           if (miranda_method.Get() == NULL) {
             // Point the interface table at a phantom slot.
-            miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_method->Clone(self)));
+            miranda_method.Assign(down_cast<mirror::ArtMethod*>(interface_mh.Get()->Clone(self)));
             if (UNLIKELY(miranda_method.Get() == NULL)) {
               CHECK(self->IsExceptionPending());  // OOME.
               return false;
             }
-            // TODO: If a methods move then the miranda_list may hold stale references.
-            miranda_list.push_back(miranda_method.Get());
+            DCHECK_LT(miranda_list_size, max_miranda_methods);
+            miranda_list->Set<false>(miranda_list_size++, miranda_method.Get());
           }
           method_array->Set<false>(j, miranda_method.Get());
         }
@@ -4636,9 +4760,9 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
     }
     klass->SetImTable(imtable.Get());
   }
-  if (!miranda_list.empty()) {
+  if (miranda_list_size > 0) {
     int old_method_count = klass->NumVirtualMethods();
-    int new_method_count = old_method_count + miranda_list.size();
+    int new_method_count = old_method_count + miranda_list_size;
     mirror::ObjectArray<mirror::ArtMethod>* virtuals;
     if (old_method_count == 0) {
       virtuals = AllocArtMethodArray(self, new_method_count);
@@ -4656,14 +4780,14 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
         hs.NewHandle(klass->GetVTableDuringLinking()));
     CHECK(vtable.Get() != NULL);
     int old_vtable_count = vtable->GetLength();
-    int new_vtable_count = old_vtable_count + miranda_list.size();
+    int new_vtable_count = old_vtable_count + miranda_list_size;
     vtable.Assign(vtable->CopyOf(self, new_vtable_count));
     if (UNLIKELY(vtable.Get() == NULL)) {
       CHECK(self->IsExceptionPending());  // OOME.
       return false;
     }
-    for (size_t i = 0; i < miranda_list.size(); ++i) {
-      mirror::ArtMethod* method = miranda_list[i];
+    for (size_t i = 0; i < miranda_list_size; ++i) {
+      mirror::ArtMethod* method = miranda_list->Get(i);
       // Leave the declaring class alone as type indices are relative to it
       method->SetAccessFlags(method->GetAccessFlags() | kAccMiranda);
       method->SetMethodIndex(0xFFFF & (old_vtable_count + i));
@@ -4684,12 +4808,12 @@ bool ClassLinker::LinkInterfaceMethods(Handle<mirror::Class> klass,
   return true;
 }
 
-bool ClassLinker::LinkInstanceFields(Handle<mirror::Class> klass) {
+bool ClassLinker::LinkInstanceFields(ConstHandle<mirror::Class> klass) {
   CHECK(klass.Get() != NULL);
   return LinkFields(klass, false, nullptr);
 }
 
-bool ClassLinker::LinkStaticFields(Handle<mirror::Class> klass, size_t* class_size) {
+bool ClassLinker::LinkStaticFields(ConstHandle<mirror::Class> klass, size_t* class_size) {
   CHECK(klass.Get() != NULL);
   return LinkFields(klass, true, class_size);
 }
@@ -4700,20 +4824,20 @@ struct LinkFieldsComparator {
   // No thread safety analysis as will be called from STL. Checked lock held in constructor.
   bool operator()(mirror::ArtField* field1, mirror::ArtField* field2)
       NO_THREAD_SAFETY_ANALYSIS {
-    // First come reference fields, then 64-bit, and finally 32-bit
+    // First come reference fields, then 64-bit, then 32-bit, and then 16-bit, then finally 8-bit.
     Primitive::Type type1 = field1->GetTypeAsPrimitiveType();
     Primitive::Type type2 = field2->GetTypeAsPrimitiveType();
     if (type1 != type2) {
       bool is_primitive1 = type1 != Primitive::kPrimNot;
       bool is_primitive2 = type2 != Primitive::kPrimNot;
-      bool is64bit1 = is_primitive1 && (type1 == Primitive::kPrimLong ||
-                                        type1 == Primitive::kPrimDouble);
-      bool is64bit2 = is_primitive2 && (type2 == Primitive::kPrimLong ||
-                                        type2 == Primitive::kPrimDouble);
-      int order1 = !is_primitive1 ? 0 : (is64bit1 ? 1 : 2);
-      int order2 = !is_primitive2 ? 0 : (is64bit2 ? 1 : 2);
-      if (order1 != order2) {
-        return order1 < order2;
+      if (type1 != type2) {
+        if (is_primitive1 && is_primitive2) {
+          // Larger primitive types go first.
+          return Primitive::ComponentSize(type1) > Primitive::ComponentSize(type2);
+        } else {
+          // Reference always goes first.
+          return !is_primitive1;
+        }
       }
     }
     // same basic group? then sort by string.
@@ -4721,7 +4845,7 @@ struct LinkFieldsComparator {
   }
 };
 
-bool ClassLinker::LinkFields(Handle<mirror::Class> klass, bool is_static, size_t* class_size) {
+bool ClassLinker::LinkFields(ConstHandle<mirror::Class> klass, bool is_static, size_t* class_size) {
   size_t num_fields =
       is_static ? klass->NumStaticFields() : klass->NumInstanceFields();
 
@@ -4735,7 +4859,7 @@ bool ClassLinker::LinkFields(Handle<mirror::Class> klass, bool is_static, size_t
     if (klass->ShouldHaveEmbeddedImtAndVTable()) {
       // Static fields come after the embedded tables.
       base = mirror::Class::ComputeClassSize(true, klass->GetVTableDuringLinking()->GetLength(),
-                                             0, 0, 0);
+                                             0, 0, 0, 0, 0);
     }
     field_offset = MemberOffset(base);
   } else {
@@ -4752,6 +4876,8 @@ bool ClassLinker::LinkFields(Handle<mirror::Class> klass, bool is_static, size_t
   // we want a relatively stable order so that adding new fields
   // minimizes disruption of C++ version such as Class and Method.
   std::deque<mirror::ArtField*> grouped_and_sorted_fields;
+  const char* old_no_suspend_cause = Thread::Current()->StartAssertNoThreadSuspension(
+      "Naked ArtField references in deque");
   for (size_t i = 0; i < num_fields; i++) {
     mirror::ArtField* f = fields->Get(i);
     CHECK(f != NULL) << PrettyClass(klass.Get());
@@ -4763,6 +4889,8 @@ bool ClassLinker::LinkFields(Handle<mirror::Class> klass, bool is_static, size_t
   // References should be at the front.
   size_t current_field = 0;
   size_t num_reference_fields = 0;
+  FieldGaps gaps;
+
   for (; current_field < num_fields; current_field++) {
     mirror::ArtField* field = grouped_and_sorted_fields.front();
     Primitive::Type type = field->GetTypeAsPrimitiveType();
@@ -4770,51 +4898,32 @@ bool ClassLinker::LinkFields(Handle<mirror::Class> klass, bool is_static, size_t
     if (isPrimitive) {
       break;  // past last reference, move on to the next phase
     }
+    if (UNLIKELY(!IsAligned<4>(field_offset.Uint32Value()))) {
+      MemberOffset old_offset = field_offset;
+      field_offset = MemberOffset(RoundUp(field_offset.Uint32Value(), 4));
+      AddFieldGap(old_offset.Uint32Value(), field_offset.Uint32Value(), &gaps);
+    }
+    DCHECK(IsAligned<4>(field_offset.Uint32Value()));
     grouped_and_sorted_fields.pop_front();
     num_reference_fields++;
     fields->Set<false>(current_field, field);
     field->SetOffset(field_offset);
     field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(uint32_t));
   }
-
-  // Now we want to pack all of the double-wide fields together.  If
-  // we're not aligned, though, we want to shuffle one 32-bit field
-  // into place.  If we can't find one, we'll have to pad it.
-  if (current_field != num_fields && !IsAligned<8>(field_offset.Uint32Value())) {
-    for (size_t i = 0; i < grouped_and_sorted_fields.size(); i++) {
-      mirror::ArtField* field = grouped_and_sorted_fields[i];
-      Primitive::Type type = field->GetTypeAsPrimitiveType();
-      CHECK(type != Primitive::kPrimNot) << PrettyField(field);  // should be primitive types
-      if (type == Primitive::kPrimLong || type == Primitive::kPrimDouble) {
-        continue;
-      }
-      fields->Set<false>(current_field++, field);
-      field->SetOffset(field_offset);
-      // drop the consumed field
-      grouped_and_sorted_fields.erase(grouped_and_sorted_fields.begin() + i);
-      break;
-    }
-    // whether we found a 32-bit field for padding or not, we advance
-    field_offset = MemberOffset(field_offset.Uint32Value() + sizeof(uint32_t));
-  }
-
-  // Alignment is good, shuffle any double-wide fields forward, and
-  // finish assigning field offsets to all fields.
-  DCHECK(current_field == num_fields || IsAligned<8>(field_offset.Uint32Value()))
-      << PrettyClass(klass.Get());
-  while (!grouped_and_sorted_fields.empty()) {
-    mirror::ArtField* field = grouped_and_sorted_fields.front();
-    grouped_and_sorted_fields.pop_front();
-    Primitive::Type type = field->GetTypeAsPrimitiveType();
-    CHECK(type != Primitive::kPrimNot) << PrettyField(field);  // should be primitive types
-    fields->Set<false>(current_field, field);
-    field->SetOffset(field_offset);
-    field_offset = MemberOffset(field_offset.Uint32Value() +
-                                ((type == Primitive::kPrimLong || type == Primitive::kPrimDouble)
-                                 ? sizeof(uint64_t)
-                                 : sizeof(uint32_t)));
-    current_field++;
-  }
+  // Gaps are stored as a max heap which means that we must shuffle from largest to smallest
+  // otherwise we could end up with suboptimal gap fills.
+  ShuffleForward<8>(num_fields, &current_field, &field_offset,
+                    fields, &grouped_and_sorted_fields, &gaps);
+  ShuffleForward<4>(num_fields, &current_field, &field_offset,
+                    fields, &grouped_and_sorted_fields, &gaps);
+  ShuffleForward<2>(num_fields, &current_field, &field_offset,
+                    fields, &grouped_and_sorted_fields, &gaps);
+  ShuffleForward<1>(num_fields, &current_field, &field_offset,
+                    fields, &grouped_and_sorted_fields, &gaps);
+  CHECK(grouped_and_sorted_fields.empty()) << "Missed " << grouped_and_sorted_fields.size() <<
+      " fields.";
+
+  Thread::Current()->EndAssertNoThreadSuspension(old_no_suspend_cause);
 
   // We lie to the GC about the java.lang.ref.Reference.referent field, so it doesn't scan it.
   if (!is_static && klass->DescriptorEquals("Ljava/lang/ref/Reference;")) {
@@ -4881,7 +4990,7 @@ bool ClassLinker::LinkFields(Handle<mirror::Class> klass, bool is_static, size_t
 
 //  Set the bitmap of reference offsets, refOffsets, from the ifields
 //  list.
-void ClassLinker::CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) {
+void ClassLinker::CreateReferenceInstanceOffsets(ConstHandle<mirror::Class> klass) {
   uint32_t reference_offsets = 0;
   mirror::Class* super_class = klass->GetSuperClass();
   if (super_class != NULL) {
@@ -4895,11 +5004,11 @@ void ClassLinker::CreateReferenceInstanceOffsets(Handle<mirror::Class> klass) {
   CreateReferenceOffsets(klass, false, reference_offsets);
 }
 
-void ClassLinker::CreateReferenceStaticOffsets(Handle<mirror::Class> klass) {
+void ClassLinker::CreateReferenceStaticOffsets(ConstHandle<mirror::Class> klass) {
   CreateReferenceOffsets(klass, true, 0);
 }
 
-void ClassLinker::CreateReferenceOffsets(Handle<mirror::Class> klass, bool is_static,
+void ClassLinker::CreateReferenceOffsets(ConstHandle<mirror::Class> klass, bool is_static,
                                          uint32_t reference_offsets) {
   size_t num_reference_fields =
       is_static ? klass->NumReferenceStaticFieldsDuringLinking()
@@ -4932,7 +5041,7 @@ void ClassLinker::CreateReferenceOffsets(Handle<mirror::Class> klass, bool is_st
 }
 
 mirror::String* ClassLinker::ResolveString(const DexFile& dex_file, uint32_t string_idx,
-                                           Handle<mirror::DexCache> dex_cache) {
+                                           ConstHandle<mirror::DexCache> dex_cache) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::String* resolved = dex_cache->GetResolvedString(string_idx);
   if (resolved != NULL) {
@@ -4954,8 +5063,8 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i
 }
 
 mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_idx,
-                                        Handle<mirror::DexCache> dex_cache,
-                                        Handle<mirror::ClassLoader> class_loader) {
+                                        ConstHandle<mirror::DexCache> dex_cache,
+                                        ConstHandle<mirror::ClassLoader> class_loader) {
   DCHECK(dex_cache.Get() != NULL);
   mirror::Class* resolved = dex_cache->GetResolvedType(type_idx);
   if (resolved == NULL) {
@@ -4987,9 +5096,9 @@ mirror::Class* ClassLinker::ResolveType(const DexFile& dex_file, uint16_t type_i
 }
 
 mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t method_idx,
-                                              Handle<mirror::DexCache> dex_cache,
-                                              Handle<mirror::ClassLoader> class_loader,
-                                              Handle<mirror::ArtMethod> referrer,
+                                              ConstHandle<mirror::DexCache> dex_cache,
+                                              ConstHandle<mirror::ClassLoader> class_loader,
+                                              ConstHandle<mirror::ArtMethod> referrer,
                                               InvokeType type) {
   DCHECK(dex_cache.Get() != nullptr);
   // Check for hit in the dex cache.
@@ -5140,8 +5249,8 @@ mirror::ArtMethod* ClassLinker::ResolveMethod(const DexFile& dex_file, uint32_t
 }
 
 mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t field_idx,
-                                            Handle<mirror::DexCache> dex_cache,
-                                            Handle<mirror::ClassLoader> class_loader,
+                                            ConstHandle<mirror::DexCache> dex_cache,
+                                            ConstHandle<mirror::ClassLoader> class_loader,
                                             bool is_static) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
@@ -5183,8 +5292,8 @@ mirror::ArtField* ClassLinker::ResolveField(const DexFile& dex_file, uint32_t fi
 
 mirror::ArtField* ClassLinker::ResolveFieldJLS(const DexFile& dex_file,
                                                uint32_t field_idx,
-                                               Handle<mirror::DexCache> dex_cache,
-                                               Handle<mirror::ClassLoader> class_loader) {
+                                               ConstHandle<mirror::DexCache> dex_cache,
+                                               ConstHandle<mirror::ClassLoader> class_loader) {
   DCHECK(dex_cache.Get() != nullptr);
   mirror::ArtField* resolved = dex_cache->GetResolvedField(field_idx);
   if (resolved != nullptr) {
index b68b23e..064a85d 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_CLASS_LINKER_H_
 #define ART_RUNTIME_CLASS_LINKER_H_
 
+#include <deque>
 #include <string>
 #include <utility>
 #include <vector>
@@ -47,6 +48,7 @@ namespace mirror {
   class StackTraceElement;
 }  // namespace mirror
 
+template<class T> class ConstHandle;
 class InternTable;
 template<class T> class ObjectLock;
 class ScopedObjectAccessAlreadyRunnable;
@@ -71,7 +73,7 @@ class ClassLinker {
   // Finds a class by its descriptor, loading it if necessary.
   // If class_loader is null, searches boot_class_path_.
   mirror::Class* FindClass(Thread* self, const char* descriptor,
-                           Handle<mirror::ClassLoader> class_loader)
+                           ConstHandle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Finds a class by its descriptor using the "system" class loader, ie by searching the
@@ -88,7 +90,7 @@ class ClassLinker {
 
   // Define a new a class based on a ClassDef from a DexFile
   mirror::Class* DefineClass(const char* descriptor,
-                             Handle<mirror::ClassLoader> class_loader,
+                             ConstHandle<mirror::ClassLoader> class_loader,
                              const DexFile& dex_file, const DexFile::ClassDef& dex_class_def)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -132,7 +134,7 @@ class ClassLinker {
   // Resolve a String with the given index from the DexFile, storing the
   // result in the DexCache.
   mirror::String* ResolveString(const DexFile& dex_file, uint32_t string_idx,
-                                Handle<mirror::DexCache> dex_cache)
+                                ConstHandle<mirror::DexCache> dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolve a Type with the given index from the DexFile, storing the
@@ -155,8 +157,8 @@ class ClassLinker {
   // type, since it may be referenced from but not contained within
   // the given DexFile.
   mirror::Class* ResolveType(const DexFile& dex_file, uint16_t type_idx,
-                             Handle<mirror::DexCache> dex_cache,
-                             Handle<mirror::ClassLoader> class_loader)
+                             ConstHandle<mirror::DexCache> dex_cache,
+                             ConstHandle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Resolve a method with a given ID from the DexFile, storing the
@@ -166,9 +168,9 @@ class ClassLinker {
   // virtual method.
   mirror::ArtMethod* ResolveMethod(const DexFile& dex_file,
                                    uint32_t method_idx,
-                                   Handle<mirror::DexCache> dex_cache,
-                                   Handle<mirror::ClassLoader> class_loader,
-                                   Handle<mirror::ArtMethod> referrer,
+                                   ConstHandle<mirror::DexCache> dex_cache,
+                                   ConstHandle<mirror::ClassLoader> class_loader,
+                                   ConstHandle<mirror::ArtMethod> referrer,
                                    InvokeType type)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -192,8 +194,8 @@ class ClassLinker {
   // field.
   mirror::ArtField* ResolveField(const DexFile& dex_file,
                                  uint32_t field_idx,
-                                 Handle<mirror::DexCache> dex_cache,
-                                 Handle<mirror::ClassLoader> class_loader,
+                                 ConstHandle<mirror::DexCache> dex_cache,
+                                 ConstHandle<mirror::ClassLoader> class_loader,
                                  bool is_static)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -202,8 +204,8 @@ class ClassLinker {
   // in ResolveType. No is_static argument is provided so that Java
   // field resolution semantics are followed.
   mirror::ArtField* ResolveFieldJLS(const DexFile& dex_file, uint32_t field_idx,
-                                    Handle<mirror::DexCache> dex_cache,
-                                    Handle<mirror::ClassLoader> class_loader)
+                                    ConstHandle<mirror::DexCache> dex_cache,
+                                    ConstHandle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Get shorty from method index without resolution. Used to do handlerization.
@@ -213,7 +215,7 @@ class ClassLinker {
   // Returns true on success, false if there's an exception pending.
   // can_run_clinit=false allows the compiler to attempt to init a class,
   // given the restriction that no <clinit> execution is possible.
-  bool EnsureInitialized(Handle<mirror::Class> c, bool can_init_fields, bool can_init_parents)
+  bool EnsureInitialized(ConstHandle<mirror::Class> c, bool can_init_fields, bool can_init_parents)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Initializes classes that have instances in the image but that have
@@ -223,7 +225,7 @@ class ClassLinker {
   void RegisterDexFile(const DexFile& dex_file)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void RegisterDexFile(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
+  void RegisterDexFile(const DexFile& dex_file, ConstHandle<mirror::DexCache> dex_cache)
       LOCKS_EXCLUDED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -310,12 +312,12 @@ class ClassLinker {
                                                                               size_t length)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void VerifyClass(Handle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void VerifyClass(ConstHandle<mirror::Class> klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool VerifyClassUsingOatFile(const DexFile& dex_file, mirror::Class* klass,
                                mirror::Class::Status& oat_file_class_status)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ResolveClassExceptionHandlerTypes(const DexFile& dex_file,
-                                         Handle<mirror::Class> klass)
+                                         ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void ResolveMethodExceptionHandlerTypes(const DexFile& dex_file, mirror::ArtMethod* klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -343,6 +345,14 @@ class ClassLinker {
   const void* GetPortableOatCodeFor(const DexFile& dex_file, uint16_t class_def_idx, uint32_t method_idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Get compiled code for a method, return null if no code
+  // exists. This is unlike Get..OatCodeFor which will return a bridge
+  // or interpreter entrypoint.
+  const void* GetOatMethodQuickCodeFor(mirror::ArtMethod* method)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  const void* GetOatMethodPortableCodeFor(mirror::ArtMethod* method)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   pid_t GetClassesLockOwner();  // For SignalCatcher.
   pid_t GetDexLockOwner();  // For SignalCatcher.
 
@@ -391,7 +401,7 @@ class ClassLinker {
   }
 
  private:
-  bool FindOatMethodFor(mirror::ArtMethod* method, OatFile::OatMethod* oat_method)
+  const OatFile::OatMethod FindOatMethodFor(mirror::ArtMethod* method, bool* found)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   OatFile& GetImageOatFile(gc::space::ImageSpace* space)
@@ -420,16 +430,12 @@ class ClassLinker {
 
 
   mirror::Class* CreateArrayClass(Thread* self, const char* descriptor,
-                                  Handle<mirror::ClassLoader> class_loader)
+                                  ConstHandle<mirror::ClassLoader> class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void AppendToBootClassPath(const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AppendToBootClassPath(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  void ConstructFieldMap(const DexFile& dex_file, const DexFile::ClassDef& dex_class_def,
-                         mirror::Class* c, SafeMap<uint32_t, mirror::ArtField*>& field_map)
+  void AppendToBootClassPath(const DexFile& dex_file, ConstHandle<mirror::DexCache> dex_cache)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // Precomputes size needed for Class, in the case of a non-temporary class this size must be
@@ -439,49 +445,49 @@ class ClassLinker {
 
   void LoadClass(const DexFile& dex_file,
                  const DexFile::ClassDef& dex_class_def,
-                 Handle<mirror::Class> klass,
+                 ConstHandle<mirror::Class> klass,
                  mirror::ClassLoader* class_loader)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void LoadClassMembers(const DexFile& dex_file,
                         const byte* class_data,
-                        Handle<mirror::Class> klass,
+                        ConstHandle<mirror::Class> klass,
                         mirror::ClassLoader* class_loader,
                         const OatFile::OatClass* oat_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void LoadField(const DexFile& dex_file, const ClassDataItemIterator& it,
-                 Handle<mirror::Class> klass, Handle<mirror::ArtField> dst)
+                 ConstHandle<mirror::Class> klass, ConstHandle<mirror::ArtField> dst)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   mirror::ArtMethod* LoadMethod(Thread* self, const DexFile& dex_file,
                                 const ClassDataItemIterator& dex_method,
-                                Handle<mirror::Class> klass)
+                                ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   void FixupStaticTrampolines(mirror::Class* klass) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  // Finds the associated oat class for a dex_file and descriptor. Returns whether the class
-  // was found, and sets the data in oat_class.
-  bool FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, OatFile::OatClass* oat_class)
+  // Finds the associated oat class for a dex_file and descriptor. Returns an invalid OatClass on
+  // error and sets found to false.
+  OatFile::OatClass FindOatClass(const DexFile& dex_file, uint16_t class_def_idx, bool* found)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  void RegisterDexFileLocked(const DexFile& dex_file, Handle<mirror::DexCache> dex_cache)
+  void RegisterDexFileLocked(const DexFile& dex_file, ConstHandle<mirror::DexCache> dex_cache)
       EXCLUSIVE_LOCKS_REQUIRED(dex_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   bool IsDexFileRegisteredLocked(const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(dex_lock_, Locks::mutator_lock_);
 
-  bool InitializeClass(Handle<mirror::Class> klass, bool can_run_clinit,
+  bool InitializeClass(ConstHandle<mirror::Class> klass, bool can_run_clinit,
                        bool can_init_parents)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool WaitForInitializeClass(Handle<mirror::Class> klass, Thread* self,
+  bool WaitForInitializeClass(ConstHandle<mirror::Class> klass, Thread* self,
                               ObjectLock<mirror::Class>& lock);
-  bool ValidateSuperClassDescriptors(Handle<mirror::Class> klass)
+  bool ValidateSuperClassDescriptors(ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsSameDescriptorInDifferentClassContexts(Thread* self, const char* descriptor,
-                                                Handle<mirror::ClassLoader> class_loader1,
-                                                Handle<mirror::ClassLoader> class_loader2)
+                                                ConstHandle<mirror::ClassLoader> class_loader1,
+                                                ConstHandle<mirror::ClassLoader> class_loader2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool IsSameMethodSignatureInDifferentClassContexts(Thread* self, mirror::ArtMethod* method,
@@ -489,43 +495,42 @@ class ClassLinker {
                                                      mirror::Class* klass2)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkClass(Thread* self, const char* descriptor, Handle<mirror::Class> klass,
-                 Handle<mirror::ObjectArray<mirror::Class>> interfaces,
+  bool LinkClass(Thread* self, const char* descriptor, ConstHandle<mirror::Class> klass,
+                 ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces,
                  mirror::Class** new_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkSuperClass(Handle<mirror::Class> klass)
+  bool LinkSuperClass(ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LoadSuperAndInterfaces(Handle<mirror::Class> klass, const DexFile& dex_file)
+  bool LoadSuperAndInterfaces(ConstHandle<mirror::Class> klass, const DexFile& dex_file)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkMethods(Thread* self, Handle<mirror::Class> klass,
-                   Handle<mirror::ObjectArray<mirror::Class>> interfaces)
+  bool LinkMethods(Thread* self, ConstHandle<mirror::Class> klass,
+                   ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkVirtualMethods(Thread* self, Handle<mirror::Class> klass)
+  bool LinkVirtualMethods(Thread* self, ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkInterfaceMethods(Handle<mirror::Class> klass,
-                            Handle<mirror::ObjectArray<mirror::Class>> interfaces)
+  bool LinkInterfaceMethods(ConstHandle<mirror::Class> klass,
+                            ConstHandle<mirror::ObjectArray<mirror::Class>> interfaces)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  bool LinkStaticFields(Handle<mirror::Class> klass, size_t* class_size)
+  bool LinkStaticFields(ConstHandle<mirror::Class> klass, size_t* class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool LinkInstanceFields(Handle<mirror::Class> klass)
+  bool LinkInstanceFields(ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  bool LinkFields(Handle<mirror::Class> klass, bool is_static, size_t* class_size)
+  bool LinkFields(ConstHandle<mirror::Class> klass, bool is_static, size_t* class_size)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void LinkCode(Handle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
+  void LinkCode(ConstHandle<mirror::ArtMethod> method, const OatFile::OatClass* oat_class,
                 const DexFile& dex_file, uint32_t dex_method_index, uint32_t method_index)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  void CreateReferenceInstanceOffsets(Handle<mirror::Class> klass)
+  void CreateReferenceInstanceOffsets(ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CreateReferenceStaticOffsets(Handle<mirror::Class> klass)
+  void CreateReferenceStaticOffsets(ConstHandle<mirror::Class> klass)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void CreateReferenceOffsets(Handle<mirror::Class> klass, bool is_static,
+  void CreateReferenceOffsets(ConstHandle<mirror::Class> klass, bool is_static,
                               uint32_t reference_offsets)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -608,11 +613,11 @@ class ClassLinker {
   bool VerifyOatWithDexFile(const OatFile* oat_file, const char* dex_location,
                             std::string* error_msg);
 
-  mirror::ArtMethod* CreateProxyConstructor(Thread* self, Handle<mirror::Class> klass,
+  mirror::ArtMethod* CreateProxyConstructor(Thread* self, ConstHandle<mirror::Class> klass,
                                             mirror::Class* proxy_class)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::ArtMethod* CreateProxyMethod(Thread* self, Handle<mirror::Class> klass,
-                                       Handle<mirror::ArtMethod> prototype)
+  mirror::ArtMethod* CreateProxyMethod(Thread* self, ConstHandle<mirror::Class> klass,
+                                       ConstHandle<mirror::ArtMethod> prototype)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   std::vector<const DexFile*> boot_class_path_;
index 989384e..eed6f71 100644 (file)
@@ -341,23 +341,22 @@ jobject CommonRuntimeTest::LoadDex(const char* dex_name) {
   for (const DexFile* dex_file : dex_files) {
     class_linker_->RegisterDexFile(*dex_file);
   }
-  ScopedObjectAccessUnchecked soa(Thread::Current());
-  ScopedLocalRef<jobject> class_loader_local(soa.Env(),
-      soa.Env()->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
-  jobject class_loader = soa.Env()->NewGlobalRef(class_loader_local.get());
-  soa.Self()->SetClassLoaderOverride(soa.Decode<mirror::ClassLoader*>(class_loader_local.get()));
+  Thread* self = Thread::Current();
+  JNIEnvExt* env = self->GetJniEnv();
+  ScopedLocalRef<jobject> class_loader_local(env,
+      env->AllocObject(WellKnownClasses::dalvik_system_PathClassLoader));
+  jobject class_loader = env->NewGlobalRef(class_loader_local.get());
+  self->SetClassLoaderOverride(class_loader_local.get());
   Runtime::Current()->SetCompileTimeClassPath(class_loader, dex_files);
   return class_loader;
 }
 
 CheckJniAbortCatcher::CheckJniAbortCatcher() : vm_(Runtime::Current()->GetJavaVM()) {
-  vm_->check_jni_abort_hook = Hook;
-  vm_->check_jni_abort_hook_data = &actual_;
+  vm_->SetCheckJniAbortHook(Hook, &actual_);
 }
 
 CheckJniAbortCatcher::~CheckJniAbortCatcher() {
-  vm_->check_jni_abort_hook = nullptr;
-  vm_->check_jni_abort_hook_data = nullptr;
+  vm_->SetCheckJniAbortHook(nullptr, nullptr);
   EXPECT_TRUE(actual_.empty()) << actual_;
 }
 
index 5b014b3..1ca6eb3 100644 (file)
@@ -132,7 +132,7 @@ class CheckJniAbortCatcher {
  private:
   static void Hook(void* data, const std::string& reason);
 
-  JavaVMExt* vm_;
+  JavaVMExt* const vm_;
   std::string actual_;
 
   DISALLOW_COPY_AND_ASSIGN(CheckJniAbortCatcher);
index bb48be3..846216c 100644 (file)
@@ -449,6 +449,10 @@ void ThrowNullPointerExceptionFromDexPC(const ThrowLocation& throw_location) {
       break;
     }
     case Instruction::IPUT_QUICK:
+    case Instruction::IPUT_BOOLEAN_QUICK:
+    case Instruction::IPUT_BYTE_QUICK:
+    case Instruction::IPUT_CHAR_QUICK:
+    case Instruction::IPUT_SHORT_QUICK:
     case Instruction::IPUT_WIDE_QUICK:
     case Instruction::IPUT_OBJECT_QUICK: {
       // Since we replaced the field index, we ask the verifier to tell us which
index 0092160..eb90696 100644 (file)
@@ -3032,12 +3032,13 @@ static bool IsMethodPossiblyInlined(Thread* self, mirror::ArtMethod* m)
     // should never be null. We could just check we never encounter this case.
     return false;
   }
-  StackHandleScope<2> hs(self);
+  StackHandleScope<3> hs(self);
   mirror::Class* declaring_class = m->GetDeclaringClass();
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
-  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), &dex_cache, &class_loader,
-                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
+  Handle<mirror::ArtMethod> method(hs.NewHandle(m));
+  verifier::MethodVerifier verifier(dex_cache->GetDexFile(), dex_cache, class_loader,
+                                    &m->GetClassDef(), code_item, m->GetDexMethodIndex(), method,
                                     m->GetAccessFlags(), false, true, false);
   // Note: we don't need to verify the method.
   return InlineMethodAnalyser::AnalyseMethodCode(&verifier, nullptr);
index 38ea754..ed3592c 100644 (file)
@@ -1216,7 +1216,7 @@ void EncodedStaticFieldValueIterator::Next() {
 }
 
 template<bool kTransactionActive>
-void EncodedStaticFieldValueIterator::ReadValueToField(mirror::ArtField* field) const {
+void EncodedStaticFieldValueIterator::ReadValueToField(Handle<mirror::ArtField> field) const {
   switch (type_) {
     case kBoolean: field->SetBoolean<kTransactionActive>(field->GetDeclaringClass(), jval_.z); break;
     case kByte:    field->SetByte<kTransactionActive>(field->GetDeclaringClass(), jval_.b); break;
@@ -1243,8 +1243,8 @@ void EncodedStaticFieldValueIterator::ReadValueToField(mirror::ArtField* field)
     default: UNIMPLEMENTED(FATAL) << ": type " << type_;
   }
 }
-template void EncodedStaticFieldValueIterator::ReadValueToField<true>(mirror::ArtField* field) const;
-template void EncodedStaticFieldValueIterator::ReadValueToField<false>(mirror::ArtField* field) const;
+template void EncodedStaticFieldValueIterator::ReadValueToField<true>(Handle<mirror::ArtField> field) const;
+template void EncodedStaticFieldValueIterator::ReadValueToField<false>(Handle<mirror::ArtField> field) const;
 
 CatchHandlerIterator::CatchHandlerIterator(const DexFile::CodeItem& code_item, uint32_t address) {
   handler_.address_ = -1;
index 1da1f81..118bd80 100644 (file)
@@ -1220,7 +1220,7 @@ class EncodedStaticFieldValueIterator {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   template<bool kTransactionActive>
-  void ReadValueToField(mirror::ArtField* field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void ReadValueToField(Handle<mirror::ArtField> field) const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   bool HasNext() { return pos_ < array_size_; }
 
index fa13290..330d045 100644 (file)
@@ -355,9 +355,10 @@ TEST_F(DexFileTest, GetMultiDexClassesDexName) {
 
 TEST_F(DexFileTest, GetDexCanonicalLocation) {
   ScratchFile file;
-  std::string dex_location = file.GetFilename();
+  char* dex_location_real = realpath(file.GetFilename().c_str(), nullptr);
+  std::string dex_location(dex_location_real);
 
-  ASSERT_EQ(file.GetFilename(), DexFile::GetDexCanonicalLocation(dex_location.c_str()));
+  ASSERT_EQ(dex_location, DexFile::GetDexCanonicalLocation(dex_location.c_str()));
   std::string multidex_location = DexFile::GetMultiDexClassesDexName(1, dex_location.c_str());
   ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location.c_str()));
 
@@ -370,6 +371,8 @@ TEST_F(DexFileTest, GetDexCanonicalLocation) {
   ASSERT_EQ(multidex_location, DexFile::GetDexCanonicalLocation(multidex_location_sym.c_str()));
 
   ASSERT_EQ(0, unlink(dex_location_sym.c_str()));
+
+  free(dex_location_real);
 }
 
 }  // namespace art
index 103b0d7..64c9185 100644 (file)
   V(0xE8, IPUT_OBJECT_QUICK, "iput-object-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegA | kVerifyRegB | kVerifyRuntimeOnly) \
   V(0xE9, INVOKE_VIRTUAL_QUICK, "invoke-virtual-quick", k35c, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgNonZero | kVerifyRuntimeOnly) \
   V(0xEA, INVOKE_VIRTUAL_RANGE_QUICK, "invoke-virtual/range-quick", k3rc, false, kMethodRef, kContinue | kThrow | kInvoke, kVerifyVarArgRangeNonZero | kVerifyRuntimeOnly) \
-  V(0xEB, UNUSED_EB, "unused-eb", k10x, false, kUnknown, 0, kVerifyError) \
-  V(0xEC, UNUSED_EC, "unused-ec", k10x, false, kUnknown, 0, kVerifyError) \
-  V(0xED, UNUSED_ED, "unused-ed", k10x, false, kUnknown, 0, kVerifyError) \
-  V(0xEE, UNUSED_EE, "unused-ee", k10x, false, kUnknown, 0, kVerifyError) \
+  V(0xEB, IPUT_BOOLEAN_QUICK, "iput-boolean-quick", k22c, false, kUnknown, 0, kVerifyError) \
+  V(0xEC, IPUT_BYTE_QUICK, "iput-byte-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
+  V(0xED, IPUT_CHAR_QUICK, "iput-char-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
+  V(0xEE, IPUT_SHORT_QUICK, "iput-short-quick", k22c, false, kFieldRef, kContinue | kThrow | kStore | kRegCFieldOrConstant, kVerifyRegAWide | kVerifyRegB | kVerifyRuntimeOnly) \
   V(0xEF, UNUSED_EF, "unused-ef", k10x, false, kUnknown, 0, kVerifyError) \
   V(0xF0, UNUSED_F0, "unused-f0", k10x, false, kUnknown, 0, kVerifyError) \
   V(0xF1, UNUSED_F1, "unused-f1", k10x, false, kUnknown, 0, kVerifyError) \
index 6179b5e..566ce03 100644 (file)
@@ -1020,7 +1020,7 @@ Elf32_Shdr* ElfFile::FindSectionByName(const std::string& name) const {
   return nullptr;
 }
 
-struct PACKED(1) FDE {
+struct PACKED(1) FDE32 {
   uint32_t raw_length_;
   uint32_t GetLength() {
     return raw_length_ + sizeof(raw_length_);
@@ -1031,25 +1031,186 @@ struct PACKED(1) FDE {
   uint8_t instructions[0];
 };
 
-static FDE* NextFDE(FDE* frame) {
+static FDE32* NextFDE(FDE32* frame) {
   byte* fde_bytes = reinterpret_cast<byte*>(frame);
   fde_bytes += frame->GetLength();
-  return reinterpret_cast<FDE*>(fde_bytes);
+  return reinterpret_cast<FDE32*>(fde_bytes);
 }
 
-static bool IsFDE(FDE* frame) {
+static bool IsFDE(FDE32* frame) {
   return frame->CIE_pointer != 0;
 }
 
-// TODO This only works for 32-bit Elf Files.
-static bool FixupEHFrame(uintptr_t text_start, byte* eh_frame, size_t eh_frame_size) {
-  FDE* last_frame = reinterpret_cast<FDE*>(eh_frame + eh_frame_size);
-  FDE* frame = NextFDE(reinterpret_cast<FDE*>(eh_frame));
-  for (; frame < last_frame; frame = NextFDE(frame)) {
-    if (!IsFDE(frame)) {
+struct PACKED(1) FDE64 {
+  uint32_t raw_length_;
+  uint64_t extended_length_;
+  uint64_t GetLength() {
+    return extended_length_ + sizeof(raw_length_) + sizeof(extended_length_);
+  }
+  uint64_t CIE_pointer;
+  uint64_t initial_location;
+  uint64_t address_range;
+  uint8_t instructions[0];
+};
+
+static FDE64* NextFDE(FDE64* frame) {
+  byte* fde_bytes = reinterpret_cast<byte*>(frame);
+  fde_bytes += frame->GetLength();
+  return reinterpret_cast<FDE64*>(fde_bytes);
+}
+
+static bool IsFDE(FDE64* frame) {
+  return frame->CIE_pointer != 0;
+}
+
+static bool FixupEHFrame(off_t base_address_delta,
+                           byte* eh_frame, size_t eh_frame_size) {
+  if (*(reinterpret_cast<uint32_t*>(eh_frame)) == 0xffffffff) {
+    FDE64* last_frame = reinterpret_cast<FDE64*>(eh_frame + eh_frame_size);
+    FDE64* frame = NextFDE(reinterpret_cast<FDE64*>(eh_frame));
+    for (; frame < last_frame; frame = NextFDE(frame)) {
+      if (!IsFDE(frame)) {
+        return false;
+      }
+      frame->initial_location += base_address_delta;
+    }
+    return true;
+  } else {
+    FDE32* last_frame = reinterpret_cast<FDE32*>(eh_frame + eh_frame_size);
+    FDE32* frame = NextFDE(reinterpret_cast<FDE32*>(eh_frame));
+    for (; frame < last_frame; frame = NextFDE(frame)) {
+      if (!IsFDE(frame)) {
+        return false;
+      }
+      frame->initial_location += base_address_delta;
+    }
+    return true;
+  }
+}
+
+static uint8_t* NextLeb128(uint8_t* current) {
+  DecodeUnsignedLeb128(const_cast<const uint8_t**>(&current));
+  return current;
+}
+
+struct PACKED(1) DebugLineHeader {
+  uint32_t unit_length_;  // TODO 32-bit specific size
+  uint16_t version_;
+  uint32_t header_length_;  // TODO 32-bit specific size
+  uint8_t minimum_instruction_lenght_;
+  uint8_t maximum_operations_per_instruction_;
+  uint8_t default_is_stmt_;
+  int8_t line_base_;
+  uint8_t line_range_;
+  uint8_t opcode_base_;
+  uint8_t remaining_[0];
+
+  bool IsStandardOpcode(const uint8_t* op) const {
+    return *op != 0 && *op < opcode_base_;
+  }
+
+  bool IsExtendedOpcode(const uint8_t* op) const {
+    return *op == 0;
+  }
+
+  const uint8_t* GetStandardOpcodeLengths() const {
+    return remaining_;
+  }
+
+  uint8_t* GetNextOpcode(uint8_t* op) const {
+    if (IsExtendedOpcode(op)) {
+      uint8_t* length_field = op + 1;
+      uint32_t length = DecodeUnsignedLeb128(const_cast<const uint8_t**>(&length_field));
+      return length_field + length;
+    } else if (!IsStandardOpcode(op)) {
+      return op + 1;
+    } else if (*op == DW_LNS_fixed_advance_pc) {
+      return op + 1 + sizeof(uint16_t);
+    } else {
+      uint8_t num_args = GetStandardOpcodeLengths()[*op - 1];
+      op += 1;
+      for (int i = 0; i < num_args; i++) {
+        op = NextLeb128(op);
+      }
+      return op;
+    }
+  }
+
+  uint8_t* GetDebugLineData() const {
+    const uint8_t* hdr_start =
+        reinterpret_cast<const uint8_t*>(&header_length_) + sizeof(header_length_);
+    return const_cast<uint8_t*>(hdr_start + header_length_);
+  }
+};
+
+class DebugLineInstructionIterator {
+ public:
+  static DebugLineInstructionIterator* Create(DebugLineHeader* header, size_t section_size) {
+    std::unique_ptr<DebugLineInstructionIterator> line_iter(
+        new DebugLineInstructionIterator(header, section_size));
+    if (line_iter.get() == nullptr) {
+      return nullptr;
+    } else {
+      return line_iter.release();
+    }
+  }
+
+  ~DebugLineInstructionIterator() {}
+
+  bool Next() {
+    if (current_instruction_ == nullptr) {
       return false;
     }
-    frame->initial_location += text_start;
+    current_instruction_ = header_->GetNextOpcode(current_instruction_);
+    if (current_instruction_ >= last_instruction_) {
+      current_instruction_ = nullptr;
+      return false;
+    } else {
+      return true;
+    }
+  }
+
+  uint8_t* GetInstruction() {
+    return current_instruction_;
+  }
+
+  bool IsExtendedOpcode() {
+    return header_->IsExtendedOpcode(current_instruction_);
+  }
+
+  uint8_t GetOpcode() {
+    if (!IsExtendedOpcode()) {
+      return *current_instruction_;
+    } else {
+      uint8_t* len_ptr = current_instruction_ + 1;
+      return *NextLeb128(len_ptr);
+    }
+  }
+
+  uint8_t* GetArguments() {
+    if (!IsExtendedOpcode()) {
+      return current_instruction_ + 1;
+    } else {
+      uint8_t* len_ptr = current_instruction_ + 1;
+      return NextLeb128(len_ptr) + 1;
+    }
+  }
+
+ private:
+  DebugLineInstructionIterator(DebugLineHeader* header, size_t size)
+    : header_(header), last_instruction_(reinterpret_cast<uint8_t*>(header) + size),
+      current_instruction_(header->GetDebugLineData()) {}
+
+  DebugLineHeader* header_;
+  uint8_t* last_instruction_;
+  uint8_t* current_instruction_;
+};
+
+static bool FixupDebugLine(off_t base_offset_delta, DebugLineInstructionIterator* iter) {
+  while (iter->Next()) {
+    if (iter->IsExtendedOpcode() && iter->GetOpcode() == DW_LNE_set_address) {
+      *reinterpret_cast<uint32_t*>(iter->GetArguments()) += base_offset_delta;
+    }
   }
   return true;
 }
@@ -1189,18 +1350,27 @@ class DebugAbbrev {
  public:
   ~DebugAbbrev() {}
   static DebugAbbrev* Create(const byte* dbg_abbrev, size_t dbg_abbrev_size) {
-    std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev);
-    const byte* last = dbg_abbrev + dbg_abbrev_size;
-    while (dbg_abbrev < last) {
+    std::unique_ptr<DebugAbbrev> abbrev(new DebugAbbrev(dbg_abbrev, dbg_abbrev + dbg_abbrev_size));
+    if (!abbrev->ReadAtOffset(0)) {
+      return nullptr;
+    }
+    return abbrev.release();
+  }
+
+  bool ReadAtOffset(uint32_t abbrev_offset) {
+    tags_.clear();
+    tag_list_.clear();
+    const byte* dbg_abbrev = begin_ + abbrev_offset;
+    while (dbg_abbrev < end_ && *dbg_abbrev != 0) {
       std::unique_ptr<DebugTag> tag(DebugTag::Create(&dbg_abbrev));
       if (tag.get() == nullptr) {
-        return nullptr;
+        return false;
       } else {
-        abbrev->tags_.insert(std::pair<uint32_t, uint32_t>(tag->index_, abbrev->tag_list_.size()));
-        abbrev->tag_list_.push_back(std::move(tag));
+        tags_.insert(std::pair<uint32_t, uint32_t>(tag->index_, tag_list_.size()));
+        tag_list_.push_back(std::move(tag));
       }
     }
-    return abbrev.release();
+    return true;
   }
 
   DebugTag* ReadTag(const byte* entry) {
@@ -1215,7 +1385,9 @@ class DebugAbbrev {
   }
 
  private:
-  DebugAbbrev() {}
+  DebugAbbrev(const byte* begin, const byte* end) : begin_(begin), end_(end) {}
+  const byte* begin_;
+  const byte* end_;
   std::map<uint32_t, uint32_t> tags_;
   std::vector<std::unique_ptr<DebugTag>> tag_list_;
 };
@@ -1239,11 +1411,21 @@ class DebugInfoIterator {
     if (current_entry_ == nullptr || current_tag_ == nullptr) {
       return false;
     }
+    bool reread_abbrev = false;
     current_entry_ += current_tag_->GetSize();
+    if (reinterpret_cast<DebugInfoHeader*>(current_entry_) >= next_cu_) {
+      current_cu_ = next_cu_;
+      next_cu_ = GetNextCu(current_cu_);
+      current_entry_ = reinterpret_cast<byte*>(current_cu_) + sizeof(DebugInfoHeader);
+      reread_abbrev = true;
+    }
     if (current_entry_ >= last_entry_) {
       current_entry_ = nullptr;
       return false;
     }
+    if (reread_abbrev) {
+      abbrev_->ReadAtOffset(current_cu_->debug_abbrev_offset);
+    }
     current_tag_ = abbrev_->ReadTag(current_entry_);
     if (current_tag_ == nullptr) {
       current_entry_ = nullptr;
@@ -1271,49 +1453,91 @@ class DebugInfoIterator {
   }
 
  private:
+  static DebugInfoHeader* GetNextCu(DebugInfoHeader* hdr) {
+    byte* hdr_byte = reinterpret_cast<byte*>(hdr);
+    return reinterpret_cast<DebugInfoHeader*>(hdr_byte + sizeof(uint32_t) + hdr->unit_length);
+  }
+
   DebugInfoIterator(DebugInfoHeader* header, size_t frame_size, DebugAbbrev* abbrev)
       : abbrev_(abbrev),
+        current_cu_(header),
+        next_cu_(GetNextCu(header)),
         last_entry_(reinterpret_cast<byte*>(header) + frame_size),
         current_entry_(reinterpret_cast<byte*>(header) + sizeof(DebugInfoHeader)),
         current_tag_(abbrev_->ReadTag(current_entry_)) {}
   DebugAbbrev* abbrev_;
+  DebugInfoHeader* current_cu_;
+  DebugInfoHeader* next_cu_;
   byte* last_entry_;
   byte* current_entry_;
   DebugTag* current_tag_;
 };
 
-static bool FixupDebugInfo(uint32_t text_start, DebugInfoIterator* iter) {
+static bool FixupDebugInfo(off_t base_address_delta, DebugInfoIterator* iter) {
   do {
     if (iter->GetCurrentTag()->GetAttrSize(DW_AT_low_pc) != sizeof(int32_t) ||
         iter->GetCurrentTag()->GetAttrSize(DW_AT_high_pc) != sizeof(int32_t)) {
+      LOG(ERROR) << "DWARF information with 64 bit pointers is not supported yet.";
       return false;
     }
     uint32_t* PC_low = reinterpret_cast<uint32_t*>(iter->GetPointerToField(DW_AT_low_pc));
     uint32_t* PC_high = reinterpret_cast<uint32_t*>(iter->GetPointerToField(DW_AT_high_pc));
     if (PC_low != nullptr && PC_high != nullptr) {
-      *PC_low  += text_start;
-      *PC_high += text_start;
+      *PC_low  += base_address_delta;
+      *PC_high += base_address_delta;
     }
   } while (iter->next());
   return true;
 }
 
-static bool FixupDebugSections(const byte* dbg_abbrev, size_t dbg_abbrev_size,
-                               uintptr_t text_start,
-                               byte* dbg_info, size_t dbg_info_size,
-                               byte* eh_frame, size_t eh_frame_size) {
-  std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(dbg_abbrev, dbg_abbrev_size));
+bool ElfFile::FixupDebugSections(off_t base_address_delta) {
+  const Elf32_Shdr* debug_info = FindSectionByName(".debug_info");
+  const Elf32_Shdr* debug_abbrev = FindSectionByName(".debug_abbrev");
+  const Elf32_Shdr* eh_frame = FindSectionByName(".eh_frame");
+  const Elf32_Shdr* debug_str = FindSectionByName(".debug_str");
+  const Elf32_Shdr* debug_line = FindSectionByName(".debug_line");
+  const Elf32_Shdr* strtab_sec = FindSectionByName(".strtab");
+  const Elf32_Shdr* symtab_sec = FindSectionByName(".symtab");
+
+  if (debug_info == nullptr || debug_abbrev == nullptr ||
+      debug_str == nullptr || strtab_sec == nullptr || symtab_sec == nullptr) {
+    // Release version of ART does not generate debug info.
+    return true;
+  }
+  if (base_address_delta == 0) {
+    return true;
+  }
+  if (eh_frame != nullptr &&
+      !FixupEHFrame(base_address_delta, Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
+    return false;
+  }
+
+  std::unique_ptr<DebugAbbrev> abbrev(DebugAbbrev::Create(Begin() + debug_abbrev->sh_offset,
+                                                          debug_abbrev->sh_size));
   if (abbrev.get() == nullptr) {
     return false;
   }
-  std::unique_ptr<DebugInfoIterator> iter(
-      DebugInfoIterator::Create(reinterpret_cast<DebugInfoHeader*>(dbg_info),
-                                dbg_info_size, abbrev.get()));
-  if (iter.get() == nullptr) {
+  DebugInfoHeader* info_header =
+      reinterpret_cast<DebugInfoHeader*>(Begin() + debug_info->sh_offset);
+  std::unique_ptr<DebugInfoIterator> info_iter(DebugInfoIterator::Create(info_header,
+                                                                         debug_info->sh_size,
+                                                                         abbrev.get()));
+  if (info_iter.get() == nullptr) {
     return false;
   }
-  return FixupDebugInfo(text_start, iter.get())
-      && FixupEHFrame(text_start, eh_frame, eh_frame_size);
+  if (debug_line != nullptr) {
+    DebugLineHeader* line_header =
+        reinterpret_cast<DebugLineHeader*>(Begin() + debug_line->sh_offset);
+    std::unique_ptr<DebugLineInstructionIterator> line_iter(
+        DebugLineInstructionIterator::Create(line_header, debug_line->sh_size));
+    if (line_iter.get() == nullptr) {
+      return false;
+    }
+    if (!FixupDebugLine(base_address_delta, line_iter.get())) {
+      return false;
+    }
+  }
+  return FixupDebugInfo(base_address_delta, info_iter.get());
 }
 
 void ElfFile::GdbJITSupport() {
@@ -1331,19 +1555,13 @@ void ElfFile::GdbJITSupport() {
   }
   ElfFile& all = *all_ptr;
 
-  // Do we have interesting sections?
-  const Elf32_Shdr* debug_info = all.FindSectionByName(".debug_info");
-  const Elf32_Shdr* debug_abbrev = all.FindSectionByName(".debug_abbrev");
+  // We need the eh_frame for gdb but debug info might be present without it.
   const Elf32_Shdr* eh_frame = all.FindSectionByName(".eh_frame");
-  const Elf32_Shdr* debug_str = all.FindSectionByName(".debug_str");
-  const Elf32_Shdr* strtab_sec = all.FindSectionByName(".strtab");
-  const Elf32_Shdr* symtab_sec = all.FindSectionByName(".symtab");
-  Elf32_Shdr* text_sec = all.FindSectionByName(".text");
-  if (debug_info == nullptr || debug_abbrev == nullptr || eh_frame == nullptr ||
-      debug_str == nullptr || text_sec == nullptr || strtab_sec == nullptr ||
-      symtab_sec == nullptr) {
+  if (eh_frame == nullptr) {
     return;
   }
+
+  // Do we have interesting sections?
   // We need to add in a strtab and symtab to the image.
   // all is MAP_PRIVATE so it can be written to freely.
   // We also already have strtab and symtab so we are fine there.
@@ -1354,13 +1572,9 @@ void ElfFile::GdbJITSupport() {
   elf_hdr.e_phentsize = 0;
   elf_hdr.e_type = ET_EXEC;
 
-  text_sec->sh_type = SHT_NOBITS;
-  text_sec->sh_offset = 0;
-
-  if (!FixupDebugSections(
-        all.Begin() + debug_abbrev->sh_offset, debug_abbrev->sh_size, text_sec->sh_addr,
-        all.Begin() + debug_info->sh_offset, debug_info->sh_size,
-        all.Begin() + eh_frame->sh_offset, eh_frame->sh_size)) {
+  // base_address_ is 0 when we are loaded at a known fixed address (i.e. this is boot.oat),
+  // and for regular files it is the actual load offset, so it is the correct delta to apply.
+  if (!all.FixupDebugSections(reinterpret_cast<intptr_t>(base_address_))) {
     LOG(ERROR) << "Failed to load GDB data";
     return;
   }
index a966bd9..1922911 100644 (file)
@@ -128,6 +128,8 @@ class ElfFile {
   // executable is true at run time, false at compile time.
   bool Load(bool executable, std::string* error_msg);
 
+  bool FixupDebugSections(off_t base_address_delta);
+
  private:
   ElfFile(File* file, bool writable, bool program_header_only);
 
index ccbedc0..38842cb 100644 (file)
@@ -37,6 +37,7 @@ namespace art {
 
 // TODO: Fix no thread safety analysis when GCC can handle template specialization.
 template <const bool kAccessCheck>
+ALWAYS_INLINE
 static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
                                               mirror::ArtMethod* method,
                                               Thread* self, bool* slow_path) {
@@ -90,6 +91,7 @@ static inline mirror::Class* CheckObjectAlloc(uint32_t type_idx,
 }
 
 // TODO: Fix no thread safety analysis when annotalysis is smarter.
+ALWAYS_INLINE
 static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class* klass,
                                                                  Thread* self,
                                                                  bool* slow_path) {
@@ -120,6 +122,7 @@ static inline mirror::Class* CheckClassInitializedForObjectAlloc(mirror::Class*
 // check.
 // TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
 template <bool kAccessCheck, bool kInstrumented>
+ALWAYS_INLINE
 static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
                                                   mirror::ArtMethod* method,
                                                   Thread* self,
@@ -139,6 +142,7 @@ static inline mirror::Object* AllocObjectFromCode(uint32_t type_idx,
 // Given the context of a calling Method and a resolved class, create an instance.
 // TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
 template <bool kInstrumented>
+ALWAYS_INLINE
 static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
                                                           mirror::ArtMethod* method,
                                                           Thread* self,
@@ -161,6 +165,7 @@ static inline mirror::Object* AllocObjectFromCodeResolved(mirror::Class* klass,
 // Given the context of a calling Method and an initialized class, create an instance.
 // TODO: Fix NO_THREAD_SAFETY_ANALYSIS when GCC is smarter.
 template <bool kInstrumented>
+ALWAYS_INLINE
 static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klass,
                                                              mirror::ArtMethod* method,
                                                              Thread* self,
@@ -173,6 +178,7 @@ static inline mirror::Object* AllocObjectFromCodeInitialized(mirror::Class* klas
 
 // TODO: Fix no thread safety analysis when GCC can handle template specialization.
 template <bool kAccessCheck>
+ALWAYS_INLINE
 static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
                                              mirror::ArtMethod* method,
                                              int32_t component_count,
@@ -209,6 +215,7 @@ static inline mirror::Class* CheckArrayAlloc(uint32_t type_idx,
 // check.
 // TODO: Fix no thread safety analysis when GCC can handle template specialization.
 template <bool kAccessCheck, bool kInstrumented>
+ALWAYS_INLINE
 static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
                                                 mirror::ArtMethod* method,
                                                 int32_t component_count,
@@ -231,6 +238,7 @@ static inline mirror::Array* AllocArrayFromCode(uint32_t type_idx,
 }
 
 template <bool kAccessCheck, bool kInstrumented>
+ALWAYS_INLINE
 static inline mirror::Array* AllocArrayFromCodeResolved(mirror::Class* klass,
                                                         mirror::ArtMethod* method,
                                                         int32_t component_count,
index 34f92b5..cf89850 100644 (file)
@@ -225,7 +225,8 @@ void CheckReferenceResult(mirror::Object* o, Thread* self) {
   }
   mirror::ArtMethod* m = self->GetCurrentMethod(NULL);
   if (o == kInvalidIndirectRefObject) {
-    JniAbortF(NULL, "invalid reference returned from %s", PrettyMethod(m).c_str());
+    Runtime::Current()->GetJavaVM()->JniAbortF(NULL, "invalid reference returned from %s",
+                                               PrettyMethod(m).c_str());
   }
   // Make sure that the result is an instance of the type this method was expected to return.
   StackHandleScope<1> hs(self);
@@ -233,8 +234,9 @@ void CheckReferenceResult(mirror::Object* o, Thread* self) {
   mirror::Class* return_type = MethodHelper(h_m).GetReturnType();
 
   if (!o->InstanceOf(return_type)) {
-    JniAbortF(NULL, "attempt to return an instance of %s from %s", PrettyTypeOf(o).c_str(),
-              PrettyMethod(h_m.Get()).c_str());
+    Runtime::Current()->GetJavaVM()->JniAbortF(NULL, "attempt to return an instance of %s from %s",
+                                               PrettyTypeOf(o).c_str(),
+                                               PrettyMethod(h_m.Get()).c_str());
   }
 }
 
index 1f2713a..9d850c5 100644 (file)
 
 namespace art {
 
+static constexpr bool kUseTlabFastPath = true;
+
 #define GENERATE_ENTRYPOINTS_FOR_ALLOCATOR_INST(suffix, suffix2, instrumented_bool, allocator_type) \
 extern "C" mirror::Object* artAllocObjectFromCode ##suffix##suffix2( \
     uint32_t type_idx, mirror::ArtMethod* method, Thread* self, \
     StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+    mirror::Class* klass = method->GetDexCacheResolvedType<false>(type_idx); \
+    if (LIKELY(klass != nullptr && klass->IsInitialized() && !klass->IsFinalizable())) { \
+      size_t byte_count = klass->GetObjectSize(); \
+      byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+      mirror::Object* obj; \
+      if (LIKELY(byte_count < self->TlabSize())) { \
+        obj = self->AllocTlab(byte_count); \
+        DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+        obj->SetClass(klass); \
+        if (kUseBakerOrBrooksReadBarrier) { \
+          if (kUseBrooksReadBarrier) { \
+            obj->SetReadBarrierPointer(obj); \
+          } \
+          obj->AssertReadBarrierPointer(); \
+        } \
+        QuasiAtomic::ThreadFenceForConstructor(); \
+        return obj; \
+      } \
+    } \
+  } \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocObjectFromCode<false, instrumented_bool>(type_idx, method, self, allocator_type); \
 } \
@@ -37,6 +60,26 @@ extern "C" mirror::Object* artAllocObjectFromCodeResolved##suffix##suffix2( \
     mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
     StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+    if (LIKELY(klass->IsInitialized())) { \
+      size_t byte_count = klass->GetObjectSize(); \
+      byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+      mirror::Object* obj; \
+      if (LIKELY(byte_count < self->TlabSize())) { \
+        obj = self->AllocTlab(byte_count); \
+        DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+        obj->SetClass(klass); \
+        if (kUseBakerOrBrooksReadBarrier) { \
+          if (kUseBrooksReadBarrier) { \
+            obj->SetReadBarrierPointer(obj); \
+          } \
+          obj->AssertReadBarrierPointer(); \
+        } \
+        QuasiAtomic::ThreadFenceForConstructor(); \
+        return obj; \
+      } \
+    } \
+  } \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocObjectFromCodeResolved<instrumented_bool>(klass, method, self, allocator_type); \
 } \
@@ -44,6 +87,24 @@ extern "C" mirror::Object* artAllocObjectFromCodeInitialized##suffix##suffix2( \
     mirror::Class* klass, mirror::ArtMethod* method, Thread* self, \
     StackReference<mirror::ArtMethod>* sp) \
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) { \
+  if (kUseTlabFastPath && !instrumented_bool && allocator_type == gc::kAllocatorTypeTLAB) { \
+    size_t byte_count = klass->GetObjectSize(); \
+    byte_count = RoundUp(byte_count, gc::space::BumpPointerSpace::kAlignment); \
+    mirror::Object* obj; \
+    if (LIKELY(byte_count < self->TlabSize())) { \
+      obj = self->AllocTlab(byte_count); \
+      DCHECK(obj != nullptr) << "AllocTlab can't fail"; \
+      obj->SetClass(klass); \
+      if (kUseBakerOrBrooksReadBarrier) { \
+        if (kUseBrooksReadBarrier) { \
+          obj->SetReadBarrierPointer(obj); \
+        } \
+        obj->AssertReadBarrierPointer(); \
+      } \
+      QuasiAtomic::ThreadFenceForConstructor(); \
+      return obj; \
+    } \
+  } \
   FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly); \
   return AllocObjectFromCodeInitialized<instrumented_bool>(klass, method, self, allocator_type); \
 } \
index f858743..fbc7913 100644 (file)
   V(InitializeType, void*, uint32_t, void*) \
   V(ResolveString, void*, void*, uint32_t) \
 \
+  V(Set8Instance, int, uint32_t, void*, int8_t) \
+  V(Set8Static, int, uint32_t, int8_t) \
+  V(Set16Instance, int, uint32_t, void*, int16_t) \
+  V(Set16Static, int, uint32_t, int16_t) \
   V(Set32Instance, int, uint32_t, void*, int32_t) \
   V(Set32Static, int, uint32_t, int32_t) \
   V(Set64Instance, int, uint32_t, void*, int64_t) \
   V(Set64Static, int, uint32_t, int64_t) \
   V(SetObjInstance, int, uint32_t, void*, void*) \
   V(SetObjStatic, int, uint32_t, void*) \
+  V(GetByteInstance, int8_t, uint32_t, void*) \
+  V(GetBooleanInstance, uint8_t, uint32_t, void*) \
+  V(GetByteStatic, int8_t, uint32_t) \
+  V(GetBooleanStatic, uint8_t, uint32_t) \
+  V(GetShortInstance, int16_t, uint32_t, void*) \
+  V(GetCharInstance, uint16_t, uint32_t, void*) \
+  V(GetShortStatic, int16_t, uint32_t) \
+  V(GetCharStatic, uint16_t, uint32_t) \
   V(Get32Instance, int32_t, uint32_t, void*) \
   V(Get32Static, int32_t, uint32_t) \
   V(Get64Instance, int64_t, uint32_t, void*) \
index cd1e247..b89c015 100644 (file)
 
 namespace art {
 
+extern "C" int8_t artGetByteStaticFromCode(uint32_t field_idx,
+                                           mirror::ArtMethod* referrer,
+                                           Thread* self, StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
+                                          sizeof(int8_t));
+  if (LIKELY(field != NULL)) {
+    return field->GetByte(field->GetDeclaringClass());
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t));
+  if (LIKELY(field != NULL)) {
+    return field->GetByte(field->GetDeclaringClass());
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" uint8_t artGetBooleanStaticFromCode(uint32_t field_idx,
+                                               mirror::ArtMethod* referrer,
+                                               Thread* self, StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
+                                          sizeof(int8_t));
+  if (LIKELY(field != NULL)) {
+    return field->GetBoolean(field->GetDeclaringClass());
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int8_t));
+  if (LIKELY(field != NULL)) {
+    return field->GetBoolean(field->GetDeclaringClass());
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" int16_t artGetShortStaticFromCode(uint32_t field_idx,
+                                             mirror::ArtMethod* referrer,
+                                             Thread* self, StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
+                                          sizeof(int16_t));
+  if (LIKELY(field != NULL)) {
+    return field->GetShort(field->GetDeclaringClass());
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t));
+  if (LIKELY(field != NULL)) {
+    return field->GetShort(field->GetDeclaringClass());
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" uint16_t artGetCharStaticFromCode(uint32_t field_idx,
+                                             mirror::ArtMethod* referrer,
+                                             Thread* self, StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveRead,
+                                          sizeof(int16_t));
+  if (LIKELY(field != NULL)) {
+    return field->GetChar(field->GetDeclaringClass());
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<StaticPrimitiveRead, true>(field_idx, referrer, self, sizeof(int16_t));
+  if (LIKELY(field != NULL)) {
+    return field->GetChar(field->GetDeclaringClass());
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
 extern "C" uint32_t artGet32StaticFromCode(uint32_t field_idx,
                                            mirror::ArtMethod* referrer,
                                            Thread* self, StackReference<mirror::ArtMethod>* sp)
@@ -78,6 +146,97 @@ extern "C" mirror::Object* artGetObjStaticFromCode(uint32_t field_idx,
   return NULL;  // Will throw exception by checking with Thread::Current
 }
 
+extern "C" int8_t artGetByteInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+                                             mirror::ArtMethod* referrer, Thread* self,
+                                             StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
+                                          sizeof(int8_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    return field->GetByte(obj);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
+                                                         sizeof(int8_t));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+      ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+    } else {
+      return field->GetByte(obj);
+    }
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" uint8_t artGetBooleanInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+                                                 mirror::ArtMethod* referrer, Thread* self,
+                                                 StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
+                                          sizeof(int8_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    return field->GetBoolean(obj);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
+                                                         sizeof(int8_t));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+      ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+    } else {
+      return field->GetBoolean(obj);
+    }
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+extern "C" int16_t artGetShortInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+                                               mirror::ArtMethod* referrer, Thread* self,
+                                               StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
+                                          sizeof(int16_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    return field->GetShort(obj);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
+                                                         sizeof(int16_t));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+      ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+    } else {
+      return field->GetShort(obj);
+    }
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
+extern "C" uint16_t artGetCharInstanceFromCode(uint32_t field_idx, mirror::Object* obj,
+                                               mirror::ArtMethod* referrer, Thread* self,
+                                               StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveRead,
+                                          sizeof(int16_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    return field->GetChar(obj);
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<InstancePrimitiveRead, true>(field_idx, referrer, self,
+                                                         sizeof(int16_t));
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+      ThrowNullPointerExceptionForFieldAccess(throw_location, field, true);
+    } else {
+      return field->GetChar(obj);
+    }
+  }
+  return 0;  // Will throw exception by checking with Thread::Current
+}
+
 extern "C" uint32_t artGet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj,
                                              mirror::ArtMethod* referrer, Thread* self,
                                              StackReference<mirror::ArtMethod>* sp)
@@ -148,6 +307,72 @@ extern "C" mirror::Object* artGetObjInstanceFromCode(uint32_t field_idx, mirror:
   return NULL;  // Will throw exception by checking with Thread::Current
 }
 
+extern "C" int artSet8StaticFromCode(uint32_t field_idx, uint32_t new_value,
+                                     mirror::ArtMethod* referrer, Thread* self,
+                                     StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
+                                          sizeof(int8_t));
+  if (LIKELY(field != NULL)) {
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    // Compiled code can't use transactional mode.
+    if (type == Primitive::kPrimBoolean) {
+      field->SetBoolean<false>(field->GetDeclaringClass(), new_value);
+    } else {
+      DCHECK_EQ(Primitive::kPrimByte, type);
+      field->SetByte<false>(field->GetDeclaringClass(), new_value);
+    }
+    return 0;  // success
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int8_t));
+  if (LIKELY(field != NULL)) {
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    // Compiled code can't use transactional mode.
+    if (type == Primitive::kPrimBoolean) {
+      field->SetBoolean<false>(field->GetDeclaringClass(), new_value);
+    } else {
+      DCHECK_EQ(Primitive::kPrimByte, type);
+      field->SetByte<false>(field->GetDeclaringClass(), new_value);
+    }
+    return 0;  // success
+  }
+  return -1;  // failure
+}
+
+extern "C" int artSet16StaticFromCode(uint32_t field_idx, uint16_t new_value,
+                                      mirror::ArtMethod* referrer, Thread* self,
+                                      StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, StaticPrimitiveWrite,
+                                          sizeof(int16_t));
+  if (LIKELY(field != NULL)) {
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    // Compiled code can't use transactional mode.
+    if (type == Primitive::kPrimChar) {
+      field->SetChar<false>(field->GetDeclaringClass(), new_value);
+    } else {
+      DCHECK_EQ(Primitive::kPrimShort, type);
+      field->SetShort<false>(field->GetDeclaringClass(), new_value);
+    }
+    return 0;  // success
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  field = FindFieldFromCode<StaticPrimitiveWrite, true>(field_idx, referrer, self, sizeof(int16_t));
+  if (LIKELY(field != NULL)) {
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    // Compiled code can't use transactional mode.
+    if (type == Primitive::kPrimChar) {
+      field->SetChar<false>(field->GetDeclaringClass(), new_value);
+    } else {
+      DCHECK_EQ(Primitive::kPrimShort, type);
+      field->SetShort<false>(field->GetDeclaringClass(), new_value);
+    }
+    return 0;  // success
+  }
+  return -1;  // failure
+}
+
 extern "C" int artSet32StaticFromCode(uint32_t field_idx, uint32_t new_value,
                                       mirror::ArtMethod* referrer, Thread* self,
                                       StackReference<mirror::ArtMethod>* sp)
@@ -214,6 +439,91 @@ extern "C" int artSetObjStaticFromCode(uint32_t field_idx, mirror::Object* new_v
   return -1;  // failure
 }
 
+extern "C" int artSet8InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint8_t new_value,
+                                       mirror::ArtMethod* referrer, Thread* self,
+                                       StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+                                          sizeof(int8_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    // Compiled code can't use transactional mode.
+    if (type == Primitive::kPrimBoolean) {
+      field->SetBoolean<false>(obj, new_value);
+    } else {
+      DCHECK_EQ(Primitive::kPrimByte, type);
+      field->SetByte<false>(obj, new_value);
+    }
+    return 0;  // success
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  {
+    StackHandleScope<1> hs(self);
+    HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
+    field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
+                                                            sizeof(int8_t));
+  }
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+      ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+    } else {
+      Primitive::Type type = field->GetTypeAsPrimitiveType();
+      // Compiled code can't use transactional mode.
+      if (type == Primitive::kPrimBoolean) {
+        field->SetBoolean<false>(obj, new_value);
+      } else {
+        field->SetByte<false>(obj, new_value);
+      }
+      return 0;  // success
+    }
+  }
+  return -1;  // failure
+}
+
+extern "C" int artSet16InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint16_t new_value,
+                                        mirror::ArtMethod* referrer, Thread* self,
+                                        StackReference<mirror::ArtMethod>* sp)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  mirror::ArtField* field = FindFieldFast(field_idx, referrer, InstancePrimitiveWrite,
+                                          sizeof(int16_t));
+  if (LIKELY(field != NULL && obj != NULL)) {
+    Primitive::Type type = field->GetTypeAsPrimitiveType();
+    // Compiled code can't use transactional mode.
+    if (type == Primitive::kPrimChar) {
+      field->SetChar<false>(obj, new_value);
+    } else {
+      DCHECK_EQ(Primitive::kPrimShort, type);
+      field->SetShort<false>(obj, new_value);
+    }
+    return 0;  // success
+  }
+  FinishCalleeSaveFrameSetup(self, sp, Runtime::kRefsOnly);
+  {
+    StackHandleScope<1> hs(self);
+    HandleWrapper<mirror::Object> h_obj(hs.NewHandleWrapper(&obj));
+    field = FindFieldFromCode<InstancePrimitiveWrite, true>(field_idx, referrer, self,
+                                                            sizeof(int16_t));
+  }
+  if (LIKELY(field != NULL)) {
+    if (UNLIKELY(obj == NULL)) {
+      ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+      ThrowNullPointerExceptionForFieldAccess(throw_location, field, false);
+    } else {
+      Primitive::Type type = field->GetTypeAsPrimitiveType();
+      // Compiled code can't use transactional mode.
+      if (type == Primitive::kPrimChar) {
+        field->SetChar<false>(obj, new_value);
+      } else {
+        DCHECK_EQ(Primitive::kPrimShort, type);
+        field->SetShort<false>(obj, new_value);
+      }
+      return 0;  // success
+    }
+  }
+  return -1;  // failure
+}
+
 extern "C" int artSet32InstanceFromCode(uint32_t field_idx, mirror::Object* obj, uint32_t new_value,
                                         mirror::ArtMethod* referrer, Thread* self,
                                         StackReference<mirror::ArtMethod>* sp)
index d205e2a..305e5a2 100644 (file)
@@ -186,13 +186,25 @@ class EntrypointsOrderTest : public CommonRuntimeTest {
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeTypeAndVerifyAccess, pInitializeType,
                          kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pInitializeType, pResolveString, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet32Instance, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pResolveString, pSet8Instance, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Instance, pSet8Static, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet8Static, pSet16Instance, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Instance, pSet16Static, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet16Static, pSet32Instance, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Instance, pSet32Static, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet32Static, pSet64Instance, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Instance, pSet64Static, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSet64Static, pSetObjInstance, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjInstance, pSetObjStatic, kPointerSize);
-    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjStatic, pGet32Instance, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pSetObjStatic, pGetByteInstance, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteInstance, pGetBooleanInstance, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanInstance, pGetByteStatic, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetByteStatic, pGetBooleanStatic, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetBooleanStatic, pGetShortInstance, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortInstance, pGetCharInstance, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharInstance, pGetShortStatic, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetShortStatic, pGetCharStatic, kPointerSize);
+    EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGetCharStatic, pGet32Instance, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Instance, pGet32Static, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet32Static, pGet64Instance, kPointerSize);
     EXPECT_OFFSET_DIFFNP(QuickEntryPoints, pGet64Instance, pGet64Static, kPointerSize);
index a88b2c9..433855a 100644 (file)
@@ -33,14 +33,16 @@ namespace mirror {
   class Object;
 }  // namespace mirror
 
+namespace gc {
+namespace accounting {
+
 class CardTableTest : public CommonRuntimeTest {
  public:
-  std::unique_ptr<gc::accounting::CardTable> card_table_;
-  static constexpr size_t kCardSize = gc::accounting::CardTable::kCardSize;
+  std::unique_ptr<CardTable> card_table_;
 
   void CommonSetup() {
     if (card_table_.get() == nullptr) {
-      card_table_.reset(gc::accounting::CardTable::Create(heap_begin_, heap_size_));
+      card_table_.reset(CardTable::Create(heap_begin_, heap_size_));
       EXPECT_TRUE(card_table_.get() != nullptr);
     } else {
       ClearCardTable();
@@ -58,15 +60,16 @@ class CardTableTest : public CommonRuntimeTest {
   byte* HeapLimit() const {
     return HeapBegin() + heap_size_;
   }
-  byte PRandCard(const byte* addr) const {
-    size_t offset = RoundDown(addr - heap_begin_, kCardSize);
+  // Return a pseudo random card for an address.
+  byte PseudoRandomCard(const byte* addr) const {
+    size_t offset = RoundDown(addr - heap_begin_, CardTable::kCardSize);
     return 1 + offset % 254;
   }
   void FillRandom() {
-    for (const byte* addr = HeapBegin(); addr != HeapLimit(); addr += kCardSize) {
+    for (const byte* addr = HeapBegin(); addr != HeapLimit(); addr += CardTable::kCardSize) {
       EXPECT_TRUE(card_table_->AddrIsInCardTable(addr));
       byte* card = card_table_->CardFromAddr(addr);
-      *card = PRandCard(addr);
+      *card = PseudoRandomCard(addr);
     }
   }
 
@@ -79,15 +82,15 @@ TEST_F(CardTableTest, TestMarkCard) {
   CommonSetup();
   for (const byte* addr = HeapBegin(); addr < HeapLimit(); addr += kObjectAlignment) {
     auto obj = reinterpret_cast<const mirror::Object*>(addr);
-    EXPECT_EQ(card_table_->GetCard(obj), gc::accounting::CardTable::kCardClean);
+    EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardClean);
     EXPECT_TRUE(!card_table_->IsDirty(obj));
     card_table_->MarkCard(addr);
     EXPECT_TRUE(card_table_->IsDirty(obj));
-    EXPECT_EQ(card_table_->GetCard(obj), gc::accounting::CardTable::kCardDirty);
+    EXPECT_EQ(card_table_->GetCard(obj), CardTable::kCardDirty);
     byte* card_addr = card_table_->CardFromAddr(addr);
-    EXPECT_EQ(*card_addr, gc::accounting::CardTable::kCardDirty);
-    *card_addr = gc::accounting::CardTable::kCardClean;
-    EXPECT_EQ(*card_addr, gc::accounting::CardTable::kCardClean);
+    EXPECT_EQ(*card_addr, CardTable::kCardDirty);
+    *card_addr = CardTable::kCardClean;
+    EXPECT_EQ(*card_addr, CardTable::kCardClean);
   }
 }
 
@@ -103,33 +106,36 @@ class UpdateVisitor {
 TEST_F(CardTableTest, TestModifyCardsAtomic) {
   CommonSetup();
   FillRandom();
-  const size_t delta = std::min(static_cast<size_t>(HeapLimit() - HeapBegin()), 8U * kCardSize);
+  const size_t delta = std::min(static_cast<size_t>(HeapLimit() - HeapBegin()),
+                                8U * CardTable::kCardSize);
   UpdateVisitor visitor;
   size_t start_offset = 0;
-  for (byte* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += kCardSize) {
-    start_offset = (start_offset + kObjectAlignment) % kCardSize;
+  for (byte* cstart = HeapBegin(); cstart < HeapBegin() + delta; cstart += CardTable::kCardSize) {
+    start_offset = (start_offset + kObjectAlignment) % CardTable::kCardSize;
     size_t end_offset = 0;
-    for (byte* cend = HeapLimit() - delta; cend < HeapLimit(); cend += kCardSize) {
+    for (byte* cend = HeapLimit() - delta; cend < HeapLimit(); cend += CardTable::kCardSize) {
       // Don't always start at a card boundary.
       byte* start = cstart + start_offset;
       byte* end = cend - end_offset;
-      end_offset = (end_offset + kObjectAlignment) % kCardSize;
+      end_offset = (end_offset + kObjectAlignment) % CardTable::kCardSize;
       // Modify cards.
       card_table_->ModifyCardsAtomic(start, end, visitor, visitor);
       // Check adjacent cards not modified.
-      for (byte* cur = start - kCardSize; cur >= HeapBegin(); cur -= kCardSize) {
-        EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)), PRandCard(cur));
+      for (byte* cur = start - CardTable::kCardSize; cur >= HeapBegin();
+          cur -= CardTable::kCardSize) {
+        EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
+                  PseudoRandomCard(cur));
       }
-      for (byte* cur = end + kCardSize; cur < HeapLimit(); cur += kCardSize) {
-        EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)), PRandCard(cur));
+      for (byte* cur = end + CardTable::kCardSize; cur < HeapLimit();
+          cur += CardTable::kCardSize) {
+        EXPECT_EQ(card_table_->GetCard(reinterpret_cast<mirror::Object*>(cur)),
+                  PseudoRandomCard(cur));
       }
       // Verify Range.
-      for (byte* cur = start; cur < AlignUp(end, kCardSize); cur += kCardSize) {
+      for (byte* cur = start; cur < AlignUp(end, CardTable::kCardSize);
+          cur += CardTable::kCardSize) {
         byte* card = card_table_->CardFromAddr(cur);
-        byte value = PRandCard(cur);
-        if (visitor(value) != *card) {
-          LOG(ERROR) << reinterpret_cast<void*>(start) << " " << reinterpret_cast<void*>(cur) << " " << reinterpret_cast<void*>(end);
-        }
+        byte value = PseudoRandomCard(cur);
         EXPECT_EQ(visitor(value), *card);
         // Restore for next iteration.
         *card = value;
@@ -139,5 +145,6 @@ TEST_F(CardTableTest, TestModifyCardsAtomic) {
 }
 
 // TODO: Add test for CardTable::Scan.
-
+}  // namespace accounting
+}  // namespace gc
 }  // namespace art
index 2686af0..3acf80d 100644 (file)
@@ -72,9 +72,11 @@ class ModUnionClearCardVisitor {
 
 class ModUnionUpdateObjectReferencesVisitor {
  public:
-  ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg)
-    : callback_(callback),
-      arg_(arg) {
+  ModUnionUpdateObjectReferencesVisitor(MarkHeapReferenceCallback* callback, void* arg,
+                                        space::ContinuousSpace* from_space,
+                                        bool* contains_reference_to_other_space)
+    : callback_(callback), arg_(arg), from_space_(from_space),
+      contains_reference_to_other_space_(contains_reference_to_other_space) {
   }
 
   // Extra parameters are required since we use this same visitor signature for checking objects.
@@ -82,7 +84,9 @@ class ModUnionUpdateObjectReferencesVisitor {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // Only add the reference if it is non null and fits our criteria.
     mirror::HeapReference<Object>* obj_ptr = obj->GetFieldObjectReferenceAddr(offset);
-    if (obj_ptr->AsMirrorPtr() != nullptr) {
+    mirror::Object* ref = obj_ptr->AsMirrorPtr();
+    if (ref != nullptr && !from_space_->HasAddress(ref)) {
+      *contains_reference_to_other_space_ = true;
       callback_(obj_ptr, arg_);
     }
   }
@@ -90,24 +94,36 @@ class ModUnionUpdateObjectReferencesVisitor {
  private:
   MarkHeapReferenceCallback* const callback_;
   void* arg_;
+  // Space which we are scanning
+  space::ContinuousSpace* const from_space_;
+  // Set if we have any references to another space.
+  bool* const contains_reference_to_other_space_;
 };
 
 class ModUnionScanImageRootVisitor {
  public:
-  ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg)
-      : callback_(callback), arg_(arg) {}
+  ModUnionScanImageRootVisitor(MarkHeapReferenceCallback* callback, void* arg,
+                               space::ContinuousSpace* from_space,
+                               bool* contains_reference_to_other_space)
+      : callback_(callback), arg_(arg), from_space_(from_space),
+        contains_reference_to_other_space_(contains_reference_to_other_space) {}
 
   void operator()(Object* root) const
       EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DCHECK(root != NULL);
-    ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_);
+    ModUnionUpdateObjectReferencesVisitor ref_visitor(callback_, arg_, from_space_,
+                                                      contains_reference_to_other_space_);
     root->VisitReferences<kMovingClasses>(ref_visitor, VoidFunctor());
   }
 
  private:
   MarkHeapReferenceCallback* const callback_;
   void* const arg_;
+  // Space which we are scanning
+  space::ContinuousSpace* const from_space_;
+  // Set if we have any references to another space.
+  bool* const contains_reference_to_other_space_;
 };
 
 void ModUnionTableReferenceCache::ClearCards() {
@@ -313,12 +329,20 @@ void ModUnionTableCardCache::ClearCards() {
 void ModUnionTableCardCache::UpdateAndMarkReferences(MarkHeapReferenceCallback* callback,
                                                      void* arg) {
   CardTable* card_table = heap_->GetCardTable();
-  ModUnionScanImageRootVisitor scan_visitor(callback, arg);
   ContinuousSpaceBitmap* bitmap = space_->GetLiveBitmap();
-  for (const byte* card_addr : cleared_cards_) {
-    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(card_addr));
+  bool reference_to_other_space = false;
+  ModUnionScanImageRootVisitor scan_visitor(callback, arg, space_, &reference_to_other_space);
+  for (auto it = cleared_cards_.begin(), end = cleared_cards_.end(); it != end; ) {
+    uintptr_t start = reinterpret_cast<uintptr_t>(card_table->AddrFromCard(*it));
     DCHECK(space_->HasAddress(reinterpret_cast<Object*>(start)));
+    reference_to_other_space = false;
     bitmap->VisitMarkedRange(start, start + CardTable::kCardSize, scan_visitor);
+    if (!reference_to_other_space) {
+      // No non null reference to another space, remove the card.
+      it = cleared_cards_.erase(it);
+    } else {
+      ++it;
+    }
   }
 }
 
@@ -333,6 +357,17 @@ void ModUnionTableCardCache::Dump(std::ostream& os) {
   os << "]";
 }
 
+void ModUnionTableCardCache::SetCards() {
+  CardTable* card_table = heap_->GetCardTable();
+  for (byte* addr = space_->Begin(); addr < AlignUp(space_->End(), CardTable::kCardSize);
+       addr += CardTable::kCardSize) {
+    cleared_cards_.insert(card_table->CardFromAddr(addr));
+  }
+}
+
+void ModUnionTableReferenceCache::SetCards() {
+}
+
 }  // namespace accounting
 }  // namespace gc
 }  // namespace art
index f9e8261..d0e11e0 100644 (file)
@@ -66,6 +66,9 @@ class ModUnionTable {
   // determining references to track.
   virtual void ClearCards() = 0;
 
+  // Set all the cards.
+  virtual void SetCards() = 0;
+
   // Update the mod-union table using data stored by ClearCards. There may be multiple ClearCards
   // before a call to update, for example, back-to-back sticky GCs. Also mark references to other
   // spaces which are stored in the mod-union table.
@@ -121,6 +124,8 @@ class ModUnionTableReferenceCache : public ModUnionTable {
 
   void Dump(std::ostream& os) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  void SetCards() OVERRIDE;
+
  protected:
   // Cleared card array, used to update the mod-union table.
   ModUnionTable::CardSet cleared_cards_;
@@ -150,6 +155,8 @@ class ModUnionTableCardCache : public ModUnionTable {
 
   void Dump(std::ostream& os);
 
+  void SetCards() OVERRIDE;
+
  protected:
   // Cleared card array, used to update the mod-union table.
   CardSet cleared_cards_;
index 46d79bf..07b61e6 100644 (file)
@@ -55,7 +55,8 @@ GarbageCollector::GarbageCollector(Heap* heap, const std::string& name)
     : heap_(heap),
       name_(name),
       pause_histogram_((name_ + " paused").c_str(), kPauseBucketSize, kPauseBucketCount),
-      cumulative_timings_(name) {
+      cumulative_timings_(name),
+      pause_histogram_lock_("pause histogram lock", kDefaultMutexLevel, true) {
   ResetCumulativeStatistics();
 }
 
@@ -65,10 +66,11 @@ void GarbageCollector::RegisterPause(uint64_t nano_length) {
 
 void GarbageCollector::ResetCumulativeStatistics() {
   cumulative_timings_.Reset();
-  pause_histogram_.Reset();
   total_time_ns_ = 0;
   total_freed_objects_ = 0;
   total_freed_bytes_ = 0;
+  MutexLock mu(Thread::Current(), pause_histogram_lock_);
+  pause_histogram_.Reset();
 }
 
 void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
@@ -95,6 +97,7 @@ void GarbageCollector::Run(GcCause gc_cause, bool clear_soft_references) {
   }
   total_time_ns_ += current_iteration->GetDurationNs();
   for (uint64_t pause_time : current_iteration->GetPauseTimes()) {
+    MutexLock mu(self, pause_histogram_lock_);
     pause_histogram_.AddValue(pause_time / 1000);
   }
   ATRACE_END();
@@ -137,8 +140,11 @@ uint64_t GarbageCollector::GetEstimatedMeanThroughput() const {
 }
 
 void GarbageCollector::ResetMeasurements() {
+  {
+    MutexLock mu(Thread::Current(), pause_histogram_lock_);
+    pause_histogram_.Reset();
+  }
   cumulative_timings_.Reset();
-  pause_histogram_.Reset();
   total_time_ns_ = 0;
   total_freed_objects_ = 0;
   total_freed_bytes_ = 0;
@@ -171,6 +177,38 @@ void GarbageCollector::RecordFreeLOS(const ObjectBytePair& freed) {
   heap_->RecordFree(freed.objects, freed.bytes);
 }
 
+uint64_t GarbageCollector::GetTotalPausedTimeNs() {
+  MutexLock mu(Thread::Current(), pause_histogram_lock_);
+  return pause_histogram_.AdjustedSum();
+}
+
+void GarbageCollector::DumpPerformanceInfo(std::ostream& os) {
+  const CumulativeLogger& logger = GetCumulativeTimings();
+  const size_t iterations = logger.GetIterations();
+  if (iterations == 0) {
+    return;
+  }
+  os << ConstDumpable<CumulativeLogger>(logger);
+  const uint64_t total_ns = logger.GetTotalNs();
+  double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
+  const uint64_t freed_bytes = GetTotalFreedBytes();
+  const uint64_t freed_objects = GetTotalFreedObjects();
+  {
+    MutexLock mu(Thread::Current(), pause_histogram_lock_);
+    if (pause_histogram_.SampleSize() > 0) {
+      Histogram<uint64_t>::CumulativeData cumulative_data;
+      pause_histogram_.CreateHistogram(&cumulative_data);
+      pause_histogram_.PrintConfidenceIntervals(os, 0.99, cumulative_data);
+    }
+  }
+  os << GetName() << " total time: " << PrettyDuration(total_ns)
+     << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
+     << GetName() << " freed: " << freed_objects
+     << " objects with total size " << PrettySize(freed_bytes) << "\n"
+     << GetName() << " throughput: " << freed_objects / seconds << "/s / "
+     << PrettySize(freed_bytes / seconds) << "/s\n";
+}
+
 }  // namespace collector
 }  // namespace gc
 }  // namespace art
index 885569e..b809469 100644 (file)
@@ -119,18 +119,13 @@ class GarbageCollector {
 
   GarbageCollector(Heap* heap, const std::string& name);
   virtual ~GarbageCollector() { }
-
   const char* GetName() const {
     return name_.c_str();
   }
-
   virtual GcType GetGcType() const = 0;
-
   virtual CollectorType GetCollectorType() const = 0;
-
   // Run the garbage collector.
   void Run(GcCause gc_cause, bool clear_soft_references);
-
   Heap* GetHeap() const {
     return heap_;
   }
@@ -138,24 +133,17 @@ class GarbageCollector {
   const CumulativeLogger& GetCumulativeTimings() const {
     return cumulative_timings_;
   }
-
   void ResetCumulativeStatistics();
-
   // Swap the live and mark bitmaps of spaces that are active for the collector. For partial GC,
   // this is the allocation space, for full GC then we swap the zygote bitmaps too.
   void SwapBitmaps() EXCLUSIVE_LOCKS_REQUIRED(Locks::heap_bitmap_lock_);
-  uint64_t GetTotalPausedTimeNs() const {
-    return pause_histogram_.AdjustedSum();
-  }
+  uint64_t GetTotalPausedTimeNs() LOCKS_EXCLUDED(pause_histogram_lock_);
   int64_t GetTotalFreedBytes() const {
     return total_freed_bytes_;
   }
   uint64_t GetTotalFreedObjects() const {
     return total_freed_objects_;
   }
-  const Histogram<uint64_t>& GetPauseHistogram() const {
-    return pause_histogram_;
-  }
   // Reset the cumulative timings and pause histogram.
   void ResetMeasurements();
   // Returns the estimated throughput in bytes / second.
@@ -174,11 +162,11 @@ class GarbageCollector {
   void RecordFree(const ObjectBytePair& freed);
   // Record a free of large objects.
   void RecordFreeLOS(const ObjectBytePair& freed);
+  void DumpPerformanceInfo(std::ostream& os) LOCKS_EXCLUDED(pause_histogram_lock_);
 
  protected:
   // Run all of the GC phases.
   virtual void RunPhases() = 0;
-
   // Revoke all the thread-local buffers.
   virtual void RevokeAllThreadLocalBuffers() = 0;
 
@@ -188,11 +176,12 @@ class GarbageCollector {
   Heap* const heap_;
   std::string name_;
   // Cumulative statistics.
-  Histogram<uint64_t> pause_histogram_;
+  Histogram<uint64_t> pause_histogram_ GUARDED_BY(pause_histogram_lock_);
   uint64_t total_time_ns_;
   uint64_t total_freed_objects_;
   int64_t total_freed_bytes_;
   CumulativeLogger cumulative_timings_;
+  mutable Mutex pause_histogram_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
 };
 
 }  // namespace collector
index ce914e5..a1495f5 100644 (file)
@@ -129,8 +129,8 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
       long_gc_log_threshold_(long_gc_log_threshold),
       ignore_max_footprint_(ignore_max_footprint),
       zygote_creation_lock_("zygote creation lock", kZygoteCreationLock),
-      have_zygote_space_(false),
-      large_object_threshold_(std::numeric_limits<size_t>::max()),  // Starts out disabled.
+      zygote_space_(nullptr),
+      large_object_threshold_(kDefaultLargeObjectThreshold),  // Starts out disabled.
       collector_type_running_(kCollectorTypeNone),
       last_gc_type_(collector::kGcTypeNone),
       next_gc_type_(collector::kGcTypePartial),
@@ -190,7 +190,6 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
   // entrypoints.
   const bool is_zygote = Runtime::Current()->IsZygote();
   if (!is_zygote) {
-    large_object_threshold_ = kDefaultLargeObjectThreshold;
     // Background compaction is currently not supported for command line runs.
     if (background_collector_type_ != foreground_collector_type_) {
       VLOG(heap) << "Disabling background compaction for non zygote";
@@ -407,9 +406,11 @@ Heap::Heap(size_t initial_size, size_t growth_limit, size_t min_free, size_t max
     mark_compact_collector_ = new collector::MarkCompact(this);
     garbage_collectors_.push_back(mark_compact_collector_);
   }
-  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr) {
+  if (GetImageSpace() != nullptr && non_moving_space_ != nullptr &&
+      (is_zygote || separate_non_moving_space || foreground_collector_type_ == kCollectorTypeGSS)) {
     // Check that there's no gap between the image space and the non moving space so that the
-    // immune region won't break (eg. due to a large object allocated in the gap).
+    // immune region won't break (eg. due to a large object allocated in the gap). This is only
+    // required when we're the zygote or using GSS.
     bool no_gap = MemMap::CheckNoGaps(GetImageSpace()->GetMemMap(),
                                       non_moving_space_->GetMemMap());
     if (!no_gap) {
@@ -477,7 +478,7 @@ void Heap::CreateMainMallocSpace(MemMap* mem_map, size_t initial_size, size_t gr
     // After the zygote we want this to be false if we don't have background compaction enabled so
     // that getting primitive array elements is faster.
     // We never have homogeneous compaction with GSS and don't need a space with movable objects.
-    can_move_objects = !have_zygote_space_ && foreground_collector_type_ != kCollectorTypeGSS;
+    can_move_objects = !HasZygoteSpace() && foreground_collector_type_ != kCollectorTypeGSS;
   }
   if (collector::SemiSpace::kUseRememberedSet && main_space_ != nullptr) {
     RemoveRememberedSet(main_space_);
@@ -795,28 +796,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
   // Dump cumulative loggers for each GC type.
   uint64_t total_paused_time = 0;
   for (auto& collector : garbage_collectors_) {
-    const CumulativeLogger& logger = collector->GetCumulativeTimings();
-    const size_t iterations = logger.GetIterations();
-    const Histogram<uint64_t>& pause_histogram = collector->GetPauseHistogram();
-    if (iterations != 0 && pause_histogram.SampleSize() != 0) {
-      os << ConstDumpable<CumulativeLogger>(logger);
-      const uint64_t total_ns = logger.GetTotalNs();
-      const uint64_t total_pause_ns = collector->GetTotalPausedTimeNs();
-      double seconds = NsToMs(logger.GetTotalNs()) / 1000.0;
-      const uint64_t freed_bytes = collector->GetTotalFreedBytes();
-      const uint64_t freed_objects = collector->GetTotalFreedObjects();
-      Histogram<uint64_t>::CumulativeData cumulative_data;
-      pause_histogram.CreateHistogram(&cumulative_data);
-      pause_histogram.PrintConfidenceIntervals(os, 0.99, cumulative_data);
-      os << collector->GetName() << " total time: " << PrettyDuration(total_ns)
-         << " mean time: " << PrettyDuration(total_ns / iterations) << "\n"
-         << collector->GetName() << " freed: " << freed_objects
-         << " objects with total size " << PrettySize(freed_bytes) << "\n"
-         << collector->GetName() << " throughput: " << freed_objects / seconds << "/s / "
-         << PrettySize(freed_bytes / seconds) << "/s\n";
-      total_duration += total_ns;
-      total_paused_time += total_pause_ns;
-    }
+    total_duration += collector->GetCumulativeTimings().GetTotalNs();
+    total_paused_time += collector->GetTotalPausedTimeNs();
+    collector->DumpPerformanceInfo(os);
     collector->ResetMeasurements();
   }
   uint64_t allocation_time =
@@ -843,6 +825,9 @@ void Heap::DumpGcPerformanceInfo(std::ostream& os) {
     os << "Mean allocation time: " << PrettyDuration(allocation_time / total_objects_allocated)
        << "\n";
   }
+  if (HasZygoteSpace()) {
+    os << "Zygote space size " << PrettySize(zygote_space_->Size()) << "\n";
+  }
   os << "Total mutator paused time: " << PrettyDuration(total_paused_time) << "\n";
   os << "Total time waiting for GC to complete: " << PrettyDuration(total_wait_time_) << "\n";
   BaseMutex::DumpAll(os);
@@ -1898,7 +1883,8 @@ void Heap::PreZygoteFork() {
   Thread* self = Thread::Current();
   MutexLock mu(self, zygote_creation_lock_);
   // Try to see if we have any Zygote spaces.
-  if (have_zygote_space_) {
+  if (HasZygoteSpace()) {
+    LOG(WARNING) << __FUNCTION__ << " called when we already have a zygote space.";
     return;
   }
   VLOG(heap) << "Starting PreZygoteFork";
@@ -1973,26 +1959,26 @@ void Heap::PreZygoteFork() {
     // from this point on.
     RemoveRememberedSet(old_alloc_space);
   }
-  space::ZygoteSpace* zygote_space = old_alloc_space->CreateZygoteSpace("alloc space",
-                                                                        low_memory_mode_,
-                                                                        &non_moving_space_);
+  zygote_space_ = old_alloc_space->CreateZygoteSpace("alloc space", low_memory_mode_,
+                                                     &non_moving_space_);
   CHECK(!non_moving_space_->CanMoveObjects());
   if (same_space) {
     main_space_ = non_moving_space_;
     SetSpaceAsDefault(main_space_);
   }
   delete old_alloc_space;
-  CHECK(zygote_space != nullptr) << "Failed creating zygote space";
-  AddSpace(zygote_space);
+  CHECK(HasZygoteSpace()) << "Failed creating zygote space";
+  AddSpace(zygote_space_);
   non_moving_space_->SetFootprintLimit(non_moving_space_->Capacity());
   AddSpace(non_moving_space_);
-  have_zygote_space_ = true;
-  // Enable large object space allocations.
-  large_object_threshold_ = kDefaultLargeObjectThreshold;
   // Create the zygote space mod union table.
   accounting::ModUnionTable* mod_union_table =
-      new accounting::ModUnionTableCardCache("zygote space mod-union table", this, zygote_space);
+      new accounting::ModUnionTableCardCache("zygote space mod-union table", this,
+                                             zygote_space_);
   CHECK(mod_union_table != nullptr) << "Failed to create zygote space mod-union table";
+  // Set all the cards in the mod-union table since we don't know which objects contain references
+  // to large objects.
+  mod_union_table->SetCards();
   AddModUnionTable(mod_union_table);
   if (collector::SemiSpace::kUseRememberedSet) {
     // Add a new remembered set for the post-zygote non-moving space.
@@ -2062,7 +2048,7 @@ collector::GcType Heap::CollectGarbageInternal(collector::GcType gc_type, GcCaus
   // If the heap can't run the GC, silently fail and return that no GC was run.
   switch (gc_type) {
     case collector::kGcTypePartial: {
-      if (!have_zygote_space_) {
+      if (!HasZygoteSpace()) {
         return collector::kGcTypeNone;
       }
       break;
@@ -2594,6 +2580,17 @@ void Heap::RevokeAllThreadLocalAllocationStacks(Thread* self) {
   }
 }
 
+void Heap::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
+  if (kIsDebugBuild) {
+    if (rosalloc_space_ != nullptr) {
+      rosalloc_space_->AssertThreadLocalBuffersAreRevoked(thread);
+    }
+    if (bump_pointer_space_ != nullptr) {
+      bump_pointer_space_->AssertThreadLocalBuffersAreRevoked(thread);
+    }
+  }
+}
+
 void Heap::AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked() {
   if (kIsDebugBuild) {
     if (bump_pointer_space_ != nullptr) {
@@ -2875,7 +2872,7 @@ void Heap::GrowForUtilization(collector::GarbageCollector* collector_ran) {
     next_gc_type_ = collector::kGcTypeSticky;
   } else {
     collector::GcType non_sticky_gc_type =
-        have_zygote_space_ ? collector::kGcTypePartial : collector::kGcTypeFull;
+        HasZygoteSpace() ? collector::kGcTypePartial : collector::kGcTypeFull;
     // Find what the next non sticky collector will be.
     collector::GarbageCollector* non_sticky_collector = FindCollectorByGcType(non_sticky_gc_type);
     // If the throughput of the current sticky GC >= throughput of the non sticky collector, then
@@ -3098,7 +3095,7 @@ void Heap::RegisterNativeAllocation(JNIEnv* env, int bytes) {
   size_t new_native_bytes_allocated = native_bytes_allocated_.FetchAndAddSequentiallyConsistent(bytes);
   new_native_bytes_allocated += bytes;
   if (new_native_bytes_allocated > native_footprint_gc_watermark_) {
-    collector::GcType gc_type = have_zygote_space_ ? collector::kGcTypePartial :
+    collector::GcType gc_type = HasZygoteSpace() ? collector::kGcTypePartial :
         collector::kGcTypeFull;
 
     // The second watermark is higher than the gc watermark. If you hit this it means you are
index a0fcf53..a230f44 100644 (file)
@@ -79,6 +79,7 @@ namespace allocator {
 namespace space {
   class AllocSpace;
   class BumpPointerSpace;
+  class ContinuousMemMapAllocSpace;
   class DiscontinuousSpace;
   class DlMallocSpace;
   class ImageSpace;
@@ -87,7 +88,7 @@ namespace space {
   class RosAllocSpace;
   class Space;
   class SpaceTest;
-  class ContinuousMemMapAllocSpace;
+  class ZygoteSpace;
 }  // namespace space
 
 class AgeCardVisitor {
@@ -470,6 +471,7 @@ class Heap {
   void RevokeThreadLocalBuffers(Thread* thread);
   void RevokeRosAllocThreadLocalBuffers(Thread* thread);
   void RevokeAllThreadLocalBuffers();
+  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
   void AssertAllBumpPointerSpaceThreadLocalBuffersAreRevoked();
   void RosAllocVerification(TimingLogger* timings, const char* name)
       EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -600,6 +602,10 @@ class Heap {
     return &reference_processor_;
   }
 
+  bool HasZygoteSpace() const {
+    return zygote_space_ != nullptr;
+  }
+
  private:
   // Compact source space to target space.
   void Compact(space::ContinuousMemMapAllocSpace* target_space,
@@ -677,7 +683,7 @@ class Heap {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   template <bool kGrow>
-  bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
+  ALWAYS_INLINE bool IsOutOfMemoryOnAllocation(AllocatorType allocator_type, size_t alloc_size);
 
   // Returns true if the address passed in is within the address range of a continuous space.
   bool IsValidContinuousSpaceObjectAddress(const mirror::Object* obj) const
@@ -852,8 +858,9 @@ class Heap {
   // Lock which guards zygote space creation.
   Mutex zygote_creation_lock_;
 
-  // If we have a zygote space.
-  bool have_zygote_space_;
+  // Non-null iff we have a zygote space. Doesn't contain the large objects allocated before
+  // zygote space creation.
+  space::ZygoteSpace* zygote_space_;
 
   // Minimum allocation size of large object.
   size_t large_object_threshold_;
index 92c6f53..3f39c77 100644 (file)
@@ -338,6 +338,12 @@ void RosAllocSpace::RevokeAllThreadLocalBuffers() {
   rosalloc_->RevokeAllThreadLocalRuns();
 }
 
+void RosAllocSpace::AssertThreadLocalBuffersAreRevoked(Thread* thread) {
+  if (kIsDebugBuild) {
+    rosalloc_->AssertThreadLocalRunsAreRevoked(thread);
+  }
+}
+
 void RosAllocSpace::AssertAllThreadLocalBuffersAreRevoked() {
   if (kIsDebugBuild) {
     rosalloc_->AssertAllThreadLocalRunsAreRevoked();
index f505305..f1ce115 100644 (file)
@@ -101,6 +101,7 @@ class RosAllocSpace : public MallocSpace {
 
   void RevokeThreadLocalBuffers(Thread* thread);
   void RevokeAllThreadLocalBuffers();
+  void AssertThreadLocalBuffersAreRevoked(Thread* thread);
   void AssertAllThreadLocalBuffersAreRevoked();
 
   // Returns the class of a recently freed object.
index f70faf4..06938e5 100644 (file)
@@ -146,9 +146,9 @@ class Handle : public ConstHandle<T> {
 
 // A special case of Handle that only holds references to null.
 template<class T>
-class NullHandle : public Handle<T> {
+class NullHandle : public ConstHandle<T> {
  public:
-  NullHandle() : Handle<T>(&null_ref_) {
+  NullHandle() : ConstHandle<T>(&null_ref_) {
   }
 
  private:
index c826716..00f7b06 100644 (file)
@@ -79,7 +79,7 @@ inline mirror::Object* IndirectReferenceTable::Get(IndirectRef iref) const {
   mirror::Object* obj = table_[idx].Read<kWithoutReadBarrier>();
   if (LIKELY(obj != kClearedJniWeakGlobal)) {
     // The read barrier or VerifyObject won't handle kClearedJniWeakGlobal.
-    obj = table_[idx].Read();
+    obj = table_[idx].Read<kReadBarrierOption>();
     VerifyObject(obj);
   }
   return obj;
index 9b2b82e..1ba2291 100644 (file)
@@ -56,7 +56,8 @@ std::ostream& operator<<(std::ostream& os, const MutatorLockedDumpable<T>& rhs)
 
 void IndirectReferenceTable::AbortIfNoCheckJNI() {
   // If -Xcheck:jni is on, it'll give a more detailed error before aborting.
-  if (!Runtime::Current()->GetJavaVM()->check_jni) {
+  JavaVMExt* vm = Runtime::Current()->GetJavaVM();
+  if (!vm->IsCheckJniEnabled()) {
     // Otherwise, we want to abort rather than hand back a bad reference.
     LOG(FATAL) << "JNI ERROR (app bug): see above.";
   }
index fb910e2..d25bc42 100644 (file)
 #include "base/logging.h"
 #include "base/mutex.h"
 #include "gc_root.h"
-#include "mem_map.h"
 #include "object_callbacks.h"
 #include "offsets.h"
 #include "read_barrier_option.h"
 
 namespace art {
+
 namespace mirror {
 class Object;
 }  // namespace mirror
 
+class MemMap;
+
 /*
  * Maintain a table of indirect references.  Used for local/global JNI
  * references.
index c66f99e..f6e6661 100644 (file)
@@ -103,7 +103,7 @@ mirror::String* InternTable::Lookup(Table* table, mirror::String* s) {
   Locks::intern_table_lock_->AssertHeld(Thread::Current());
   auto it = table->find(GcRoot<mirror::String>(s));
   if (LIKELY(it != table->end())) {
-    return const_cast<GcRoot<mirror::String>&>(*it).Read<kWithReadBarrier>();
+    return const_cast<GcRoot<mirror::String>&>(*it).Read();
   }
   return nullptr;
 }
@@ -299,7 +299,7 @@ void InternTable::SweepInternTableWeaks(IsMarkedCallback* callback, void* arg) {
     if (new_object == nullptr) {
       it = weak_interns_.erase(it);
     } else {
-      root.Assign(down_cast<mirror::String*>(new_object));
+      root = GcRoot<mirror::String>(down_cast<mirror::String*>(new_object));
       ++it;
     }
   }
@@ -309,8 +309,7 @@ std::size_t InternTable::StringHashEquals::operator()(const GcRoot<mirror::Strin
   if (kIsDebugBuild) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  return static_cast<size_t>(
-      const_cast<GcRoot<mirror::String>&>(root).Read<kWithoutReadBarrier>()->GetHashCode());
+  return static_cast<size_t>(const_cast<GcRoot<mirror::String>&>(root).Read()->GetHashCode());
 }
 
 bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
@@ -318,8 +317,8 @@ bool InternTable::StringHashEquals::operator()(const GcRoot<mirror::String>& a,
   if (kIsDebugBuild) {
     Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
   }
-  return const_cast<GcRoot<mirror::String>&>(a).Read<kWithoutReadBarrier>()->Equals(
-      const_cast<GcRoot<mirror::String>&>(b).Read<kWithoutReadBarrier>());
+  return const_cast<GcRoot<mirror::String>&>(a).Read()->Equals(
+      const_cast<GcRoot<mirror::String>&>(b).Read());
 }
 
 }  // namespace art
index 6705695..5724e35 100644 (file)
@@ -32,7 +32,7 @@ bool DoFieldGet(Thread* self, ShadowFrame& shadow_frame, const Instruction* inst
   const bool is_static = (find_type == StaticObjectRead) || (find_type == StaticPrimitiveRead);
   const uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
   ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
-                                                              Primitive::FieldSize(field_type));
+                                                              Primitive::ComponentSize(field_type));
   if (UNLIKELY(f == nullptr)) {
     CHECK(self->IsExceptionPending());
     return false;
@@ -208,7 +208,7 @@ bool DoFieldPut(Thread* self, const ShadowFrame& shadow_frame, const Instruction
   bool is_static = (find_type == StaticObjectWrite) || (find_type == StaticPrimitiveWrite);
   uint32_t field_idx = is_static ? inst->VRegB_21c() : inst->VRegC_22c();
   ArtField* f = FindFieldFromCode<find_type, do_access_check>(field_idx, shadow_frame.GetMethod(), self,
-                                                              Primitive::FieldSize(field_type));
+                                                              Primitive::ComponentSize(field_type));
   if (UNLIKELY(f == nullptr)) {
     CHECK(self->IsExceptionPending());
     return false;
@@ -346,6 +346,18 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1
   }
   // Note: iput-x-quick instructions are only for non-volatile fields.
   switch (field_type) {
+    case Primitive::kPrimBoolean:
+      obj->SetFieldBoolean<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
+    case Primitive::kPrimByte:
+      obj->SetFieldByte<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
+    case Primitive::kPrimChar:
+      obj->SetFieldChar<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
+    case Primitive::kPrimShort:
+      obj->SetFieldShort<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
+      break;
     case Primitive::kPrimInt:
       obj->SetField32<transaction_active>(field_offset, shadow_frame.GetVReg(vregA));
       break;
@@ -371,9 +383,13 @@ bool DoIPutQuick(const ShadowFrame& shadow_frame, const Instruction* inst, uint1
   EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type, false);     \
   EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL(_field_type, true);
 
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimInt);    // iget-quick.
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimLong);   // iget-wide-quick.
-EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot);    // iget-object-quick.
+EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimInt);  // iput-quick.
+EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimBoolean);  // iput-boolean-quick.
+EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimByte);  // iput-byte-quick.
+EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimChar);  // iput-char-quick.
+EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimShort);  // iput-short-quick.
+EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimLong);  // iput-wide-quick.
+EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL(Primitive::kPrimNot);  // iput-object-quick.
 #undef EXPLICIT_DO_IPUT_QUICK_ALL_TEMPLATE_DECL
 #undef EXPLICIT_DO_IPUT_QUICK_TEMPLATE_DECL
 
@@ -764,8 +780,8 @@ void RecordArrayElementsInTransaction(mirror::Array* array, int32_t count)
 }
 
 // Helper function to deal with class loading in an unstarted runtime.
-static void UnstartedRuntimeFindClass(Thread* self, Handle<mirror::String> className,
-                                      Handle<mirror::ClassLoader> class_loader, JValue* result,
+static void UnstartedRuntimeFindClass(Thread* self, ConstHandle<mirror::String> className,
+                                      ConstHandle<mirror::ClassLoader> class_loader, JValue* result,
                                       const std::string& method_name, bool initialize_class,
                                       bool abort_if_not_found)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
index e098ac8..755e1ed 100644 (file)
@@ -1369,6 +1369,30 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
   }
   HANDLE_INSTRUCTION_END();
 
+  HANDLE_INSTRUCTION_START(IPUT_BOOLEAN_QUICK) {
+    bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+  }
+  HANDLE_INSTRUCTION_END();
+
+  HANDLE_INSTRUCTION_START(IPUT_BYTE_QUICK) {
+    bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+  }
+  HANDLE_INSTRUCTION_END();
+
+  HANDLE_INSTRUCTION_START(IPUT_CHAR_QUICK) {
+    bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+  }
+  HANDLE_INSTRUCTION_END();
+
+  HANDLE_INSTRUCTION_START(IPUT_SHORT_QUICK) {
+    bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+    POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
+  }
+  HANDLE_INSTRUCTION_END();
+
   HANDLE_INSTRUCTION_START(IPUT_WIDE_QUICK) {
     bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
     POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, 2);
@@ -2304,22 +2328,6 @@ JValue ExecuteGotoImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem*
     UnexpectedOpcode(inst, mh);
   HANDLE_INSTRUCTION_END();
 
-  HANDLE_INSTRUCTION_START(UNUSED_EB)
-    UnexpectedOpcode(inst, mh);
-  HANDLE_INSTRUCTION_END();
-
-  HANDLE_INSTRUCTION_START(UNUSED_EC)
-    UnexpectedOpcode(inst, mh);
-  HANDLE_INSTRUCTION_END();
-
-  HANDLE_INSTRUCTION_START(UNUSED_ED)
-    UnexpectedOpcode(inst, mh);
-  HANDLE_INSTRUCTION_END();
-
-  HANDLE_INSTRUCTION_START(UNUSED_EE)
-    UnexpectedOpcode(inst, mh);
-  HANDLE_INSTRUCTION_END();
-
   HANDLE_INSTRUCTION_START(UNUSED_EF)
     UnexpectedOpcode(inst, mh);
   HANDLE_INSTRUCTION_END();
index 5401495..6054a25 100644 (file)
@@ -1266,6 +1266,30 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
         POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
         break;
       }
+      case Instruction::IPUT_BOOLEAN_QUICK: {
+        PREAMBLE();
+        bool success = DoIPutQuick<Primitive::kPrimBoolean, transaction_active>(shadow_frame, inst, inst_data);
+        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+        break;
+      }
+      case Instruction::IPUT_BYTE_QUICK: {
+        PREAMBLE();
+        bool success = DoIPutQuick<Primitive::kPrimByte, transaction_active>(shadow_frame, inst, inst_data);
+        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+        break;
+      }
+      case Instruction::IPUT_CHAR_QUICK: {
+        PREAMBLE();
+        bool success = DoIPutQuick<Primitive::kPrimChar, transaction_active>(shadow_frame, inst, inst_data);
+        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+        break;
+      }
+      case Instruction::IPUT_SHORT_QUICK: {
+        PREAMBLE();
+        bool success = DoIPutQuick<Primitive::kPrimShort, transaction_active>(shadow_frame, inst, inst_data);
+        POSSIBLY_HANDLE_PENDING_EXCEPTION(!success, Next_2xx);
+        break;
+      }
       case Instruction::IPUT_WIDE_QUICK: {
         PREAMBLE();
         bool success = DoIPutQuick<Primitive::kPrimLong, transaction_active>(shadow_frame, inst, inst_data);
@@ -2164,7 +2188,7 @@ JValue ExecuteSwitchImpl(Thread* self, MethodHelper& mh, const DexFile::CodeItem
         inst = inst->Next_2xx();
         break;
       case Instruction::UNUSED_3E ... Instruction::UNUSED_43:
-      case Instruction::UNUSED_EB ... Instruction::UNUSED_FF:
+      case Instruction::UNUSED_EF ... Instruction::UNUSED_FF:
       case Instruction::UNUSED_79:
       case Instruction::UNUSED_7A:
         UnexpectedOpcode(inst, mh);
diff --git a/runtime/java_vm_ext.cc b/runtime/java_vm_ext.cc
new file mode 100644 (file)
index 0000000..dd3b7cf
--- /dev/null
@@ -0,0 +1,829 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_internal.h"
+
+#include <dlfcn.h>
+
+#include "base/mutex.h"
+#include "base/stl_util.h"
+#include "check_jni.h"
+#include "indirect_reference_table-inl.h"
+#include "mirror/art_method.h"
+#include "mirror/class-inl.h"
+#include "mirror/class_loader.h"
+#include "nativebridge/native_bridge.h"
+#include "java_vm_ext.h"
+#include "parsed_options.h"
+#include "ScopedLocalRef.h"
+#include "scoped_thread_state_change.h"
+#include "thread-inl.h"
+#include "thread_list.h"
+
+namespace art {
+
+static const size_t kPinTableInitial = 16;  // Arbitrary.
+static const size_t kPinTableMax = 1024;  // Arbitrary sanity check.
+
+static size_t gGlobalsInitial = 512;  // Arbitrary.
+static size_t gGlobalsMax = 51200;  // Arbitrary sanity check. (Must fit in 16 bits.)
+
+static const size_t kWeakGlobalsInitial = 16;  // Arbitrary.
+static const size_t kWeakGlobalsMax = 51200;  // Arbitrary sanity check. (Must fit in 16 bits.)
+
+static bool IsBadJniVersion(int version) {
+  // We don't support JNI_VERSION_1_1. These are the only other valid versions.
+  return version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 && version != JNI_VERSION_1_6;
+}
+
+class SharedLibrary {
+ public:
+  SharedLibrary(JNIEnv* env, Thread* self, const std::string& path, void* handle,
+                jobject class_loader)
+      : path_(path),
+        handle_(handle),
+        needs_native_bridge_(false),
+        class_loader_(env->NewGlobalRef(class_loader)),
+        jni_on_load_lock_("JNI_OnLoad lock"),
+        jni_on_load_cond_("JNI_OnLoad condition variable", jni_on_load_lock_),
+        jni_on_load_thread_id_(self->GetThreadId()),
+        jni_on_load_result_(kPending) {
+  }
+
+  ~SharedLibrary() {
+    Thread* self = Thread::Current();
+    if (self != nullptr) {
+      self->GetJniEnv()->DeleteGlobalRef(class_loader_);
+    }
+  }
+
+  jobject GetClassLoader() const {
+    return class_loader_;
+  }
+
+  const std::string& GetPath() const {
+    return path_;
+  }
+
+  /*
+   * Check the result of an earlier call to JNI_OnLoad on this library.
+   * If the call has not yet finished in another thread, wait for it.
+   */
+  bool CheckOnLoadResult()
+      LOCKS_EXCLUDED(jni_on_load_lock_) {
+    Thread* self = Thread::Current();
+    bool okay;
+    {
+      MutexLock mu(self, jni_on_load_lock_);
+
+      if (jni_on_load_thread_id_ == self->GetThreadId()) {
+        // Check this so we don't end up waiting for ourselves.  We need to return "true" so the
+        // caller can continue.
+        LOG(INFO) << *self << " recursive attempt to load library " << "\"" << path_ << "\"";
+        okay = true;
+      } else {
+        while (jni_on_load_result_ == kPending) {
+          VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" " << "JNI_OnLoad...]";
+          jni_on_load_cond_.Wait(self);
+        }
+
+        okay = (jni_on_load_result_ == kOkay);
+        VLOG(jni) << "[Earlier JNI_OnLoad for \"" << path_ << "\" "
+            << (okay ? "succeeded" : "failed") << "]";
+      }
+    }
+    return okay;
+  }
+
+  void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) {
+    Thread* self = Thread::Current();
+    MutexLock mu(self, jni_on_load_lock_);
+
+    jni_on_load_result_ = result ? kOkay : kFailed;
+    jni_on_load_thread_id_ = 0;
+
+    // Broadcast a wakeup to anybody sleeping on the condition variable.
+    jni_on_load_cond_.Broadcast(self);
+  }
+
+  void SetNeedsNativeBridge() {
+    needs_native_bridge_ = true;
+  }
+
+  bool NeedsNativeBridge() const {
+    return needs_native_bridge_;
+  }
+
+  void* FindSymbol(const std::string& symbol_name) {
+    return dlsym(handle_, symbol_name.c_str());
+  }
+
+  void* FindSymbolWithNativeBridge(const std::string& symbol_name, const char* shorty) {
+    CHECK(NeedsNativeBridge());
+
+    uint32_t len = 0;
+    return android::NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
+  }
+
+ private:
+  enum JNI_OnLoadState {
+    kPending,
+    kFailed,
+    kOkay,
+  };
+
+  // Path to library "/system/lib/libjni.so".
+  const std::string path_;
+
+  // The void* returned by dlopen(3).
+  void* const handle_;
+
+  // True if a native bridge is required.
+  bool needs_native_bridge_;
+
+  // The ClassLoader this library is associated with, a global JNI reference that is
+  // created/deleted with the scope of the library.
+  const jobject class_loader_;
+
+  // Guards remaining items.
+  Mutex jni_on_load_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Wait for JNI_OnLoad in other thread.
+  ConditionVariable jni_on_load_cond_ GUARDED_BY(jni_on_load_lock_);
+  // Recursive invocation guard.
+  uint32_t jni_on_load_thread_id_ GUARDED_BY(jni_on_load_lock_);
+  // Result of earlier JNI_OnLoad call.
+  JNI_OnLoadState jni_on_load_result_ GUARDED_BY(jni_on_load_lock_);
+};
+
+// This exists mainly to keep implementation details out of the header file.
+class Libraries {
+ public:
+  Libraries() {
+  }
+
+  ~Libraries() {
+    STLDeleteValues(&libraries_);
+  }
+
+  void Dump(std::ostream& os) const {
+    bool first = true;
+    for (const auto& library : libraries_) {
+      if (!first) {
+        os << ' ';
+      }
+      first = false;
+      os << library.first;
+    }
+  }
+
+  size_t size() const {
+    return libraries_.size();
+  }
+
+  SharedLibrary* Get(const std::string& path) {
+    auto it = libraries_.find(path);
+    return (it == libraries_.end()) ? nullptr : it->second;
+  }
+
+  void Put(const std::string& path, SharedLibrary* library) {
+    libraries_.Put(path, library);
+  }
+
+  // See section 11.3 "Linking Native Methods" of the JNI spec.
+  void* FindNativeMethod(mirror::ArtMethod* m, std::string& detail)
+      EXCLUSIVE_LOCKS_REQUIRED(Locks::jni_libraries_lock_)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    std::string jni_short_name(JniShortName(m));
+    std::string jni_long_name(JniLongName(m));
+    const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
+    ScopedObjectAccessUnchecked soa(Thread::Current());
+    for (const auto& lib : libraries_) {
+      SharedLibrary* library = lib.second;
+      if (soa.Decode<mirror::ClassLoader*>(library->GetClassLoader()) != declaring_class_loader) {
+        // We only search libraries loaded by the appropriate ClassLoader.
+        continue;
+      }
+      // Try the short name then the long name...
+      void* fn;
+      if (library->NeedsNativeBridge()) {
+        const char* shorty = m->GetShorty();
+        fn = library->FindSymbolWithNativeBridge(jni_short_name, shorty);
+        if (fn == nullptr) {
+          fn = library->FindSymbolWithNativeBridge(jni_long_name, shorty);
+        }
+      } else {
+        fn = library->FindSymbol(jni_short_name);
+        if (fn == nullptr) {
+          fn = library->FindSymbol(jni_long_name);
+        }
+      }
+      if (fn == nullptr) {
+        fn = library->FindSymbol(jni_long_name);
+      }
+      if (fn != nullptr) {
+        VLOG(jni) << "[Found native code for " << PrettyMethod(m)
+                  << " in \"" << library->GetPath() << "\"]";
+        return fn;
+      }
+    }
+    detail += "No implementation found for ";
+    detail += PrettyMethod(m);
+    detail += " (tried " + jni_short_name + " and " + jni_long_name + ")";
+    LOG(ERROR) << detail;
+    return nullptr;
+  }
+
+ private:
+  AllocationTrackingSafeMap<std::string, SharedLibrary*, kAllocatorTagJNILibrarires> libraries_;
+};
+
+
+class JII {
+ public:
+  static jint DestroyJavaVM(JavaVM* vm) {
+    if (vm == nullptr) {
+      return JNI_ERR;
+    }
+    JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
+    delete raw_vm->GetRuntime();
+    return JNI_OK;
+  }
+
+  static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
+    return AttachCurrentThreadInternal(vm, p_env, thr_args, false);
+  }
+
+  static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
+    return AttachCurrentThreadInternal(vm, p_env, thr_args, true);
+  }
+
+  static jint DetachCurrentThread(JavaVM* vm) {
+    if (vm == nullptr || Thread::Current() == nullptr) {
+      return JNI_ERR;
+    }
+    JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
+    Runtime* runtime = raw_vm->GetRuntime();
+    runtime->DetachCurrentThread();
+    return JNI_OK;
+  }
+
+  static jint GetEnv(JavaVM* vm, void** env, jint version) {
+    // GetEnv always returns a JNIEnv* for the most current supported JNI version,
+    // and unlike other calls that take a JNI version doesn't care if you supply
+    // JNI_VERSION_1_1, which we don't otherwise support.
+    if (IsBadJniVersion(version) && version != JNI_VERSION_1_1) {
+      LOG(ERROR) << "Bad JNI version passed to GetEnv: " << version;
+      return JNI_EVERSION;
+    }
+    if (vm == nullptr || env == nullptr) {
+      return JNI_ERR;
+    }
+    Thread* thread = Thread::Current();
+    if (thread == nullptr) {
+      *env = nullptr;
+      return JNI_EDETACHED;
+    }
+    *env = thread->GetJniEnv();
+    return JNI_OK;
+  }
+
+ private:
+  static jint AttachCurrentThreadInternal(JavaVM* vm, JNIEnv** p_env, void* raw_args, bool as_daemon) {
+    if (vm == nullptr || p_env == nullptr) {
+      return JNI_ERR;
+    }
+
+    // Return immediately if we're already attached.
+    Thread* self = Thread::Current();
+    if (self != nullptr) {
+      *p_env = self->GetJniEnv();
+      return JNI_OK;
+    }
+
+    Runtime* runtime = reinterpret_cast<JavaVMExt*>(vm)->GetRuntime();
+
+    // No threads allowed in zygote mode.
+    if (runtime->IsZygote()) {
+      LOG(ERROR) << "Attempt to attach a thread in the zygote";
+      return JNI_ERR;
+    }
+
+    JavaVMAttachArgs* args = static_cast<JavaVMAttachArgs*>(raw_args);
+    const char* thread_name = nullptr;
+    jobject thread_group = nullptr;
+    if (args != nullptr) {
+      if (IsBadJniVersion(args->version)) {
+        LOG(ERROR) << "Bad JNI version passed to "
+                   << (as_daemon ? "AttachCurrentThreadAsDaemon" : "AttachCurrentThread") << ": "
+                   << args->version;
+        return JNI_EVERSION;
+      }
+      thread_name = args->name;
+      thread_group = args->group;
+    }
+
+    if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
+      *p_env = nullptr;
+      return JNI_ERR;
+    } else {
+      *p_env = Thread::Current()->GetJniEnv();
+      return JNI_OK;
+    }
+  }
+};
+
+const JNIInvokeInterface gJniInvokeInterface = {
+  nullptr,  // reserved0
+  nullptr,  // reserved1
+  nullptr,  // reserved2
+  JII::DestroyJavaVM,
+  JII::AttachCurrentThread,
+  JII::DetachCurrentThread,
+  JII::GetEnv,
+  JII::AttachCurrentThreadAsDaemon
+};
+
+JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
+    : runtime_(runtime),
+      check_jni_abort_hook_(nullptr),
+      check_jni_abort_hook_data_(nullptr),
+      check_jni_(false),  // Initialized properly in the constructor body below.
+      force_copy_(options->force_copy_),
+      tracing_enabled_(!options->jni_trace_.empty() || VLOG_IS_ON(third_party_jni)),
+      trace_(options->jni_trace_),
+      pins_lock_("JNI pin table lock", kPinTableLock),
+      pin_table_("pin table", kPinTableInitial, kPinTableMax),
+      globals_lock_("JNI global reference table lock"),
+      globals_(gGlobalsInitial, gGlobalsMax, kGlobal),
+      libraries_(new Libraries),
+      unchecked_functions_(&gJniInvokeInterface),
+      weak_globals_lock_("JNI weak global reference table lock"),
+      weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
+      allow_new_weak_globals_(true),
+      weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
+  functions = unchecked_functions_;
+  if (options->check_jni_) {
+    SetCheckJniEnabled(true);
+  }
+}
+
+JavaVMExt::~JavaVMExt() {
+}
+
+void JavaVMExt::JniAbort(const char* jni_function_name, const char* msg) {
+  Thread* self = Thread::Current();
+  ScopedObjectAccess soa(self);
+  mirror::ArtMethod* current_method = self->GetCurrentMethod(nullptr);
+
+  std::ostringstream os;
+  os << "JNI DETECTED ERROR IN APPLICATION: " << msg;
+
+  if (jni_function_name != nullptr) {
+    os << "\n    in call to " << jni_function_name;
+  }
+  // TODO: is this useful given that we're about to dump the calling thread's stack?
+  if (current_method != nullptr) {
+    os << "\n    from " << PrettyMethod(current_method);
+  }
+  os << "\n";
+  self->Dump(os);
+
+  if (check_jni_abort_hook_ != nullptr) {
+    check_jni_abort_hook_(check_jni_abort_hook_data_, os.str());
+  } else {
+    // Ensure that we get a native stack trace for this thread.
+    self->TransitionFromRunnableToSuspended(kNative);
+    LOG(FATAL) << os.str();
+    self->TransitionFromSuspendedToRunnable();  // Unreachable, keep annotalysis happy.
+  }
+}
+
+void JavaVMExt::JniAbortV(const char* jni_function_name, const char* fmt, va_list ap) {
+  std::string msg;
+  StringAppendV(&msg, fmt, ap);
+  JniAbort(jni_function_name, msg.c_str());
+}
+
+void JavaVMExt::JniAbortF(const char* jni_function_name, const char* fmt, ...) {
+  va_list args;
+  va_start(args, fmt);
+  JniAbortV(jni_function_name, fmt, args);
+  va_end(args);
+}
+
+bool JavaVMExt::ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  // Fast where no tracing is enabled.
+  if (trace_.empty() && !VLOG_IS_ON(third_party_jni)) {
+    return false;
+  }
+  // Perform checks based on class name.
+  StringPiece class_name(method->GetDeclaringClassDescriptor());
+  if (!trace_.empty() && class_name.find(trace_) != std::string::npos) {
+    return true;
+  }
+  if (!VLOG_IS_ON(third_party_jni)) {
+    return false;
+  }
+  // Return true if we're trying to log all third-party JNI activity and 'method' doesn't look
+  // like part of Android.
+  static const char* gBuiltInPrefixes[] = {
+      "Landroid/",
+      "Lcom/android/",
+      "Lcom/google/android/",
+      "Ldalvik/",
+      "Ljava/",
+      "Ljavax/",
+      "Llibcore/",
+      "Lorg/apache/harmony/",
+  };
+  for (size_t i = 0; i < arraysize(gBuiltInPrefixes); ++i) {
+    if (class_name.starts_with(gBuiltInPrefixes[i])) {
+      return false;
+    }
+  }
+  return true;
+}
+
+jobject JavaVMExt::AddGlobalRef(Thread* self, mirror::Object* obj) {
+  // Check for null after decoding the object to handle cleared weak globals.
+  if (obj == nullptr) {
+    return nullptr;
+  }
+  WriterMutexLock mu(self, globals_lock_);
+  IndirectRef ref = globals_.Add(IRT_FIRST_SEGMENT, obj);
+  return reinterpret_cast<jobject>(ref);
+}
+
+jweak JavaVMExt::AddWeakGlobalRef(Thread* self, mirror::Object* obj) {
+  if (obj == nullptr) {
+    return nullptr;
+  }
+  MutexLock mu(self, weak_globals_lock_);
+  while (UNLIKELY(!allow_new_weak_globals_)) {
+    weak_globals_add_condition_.WaitHoldingLocks(self);
+  }
+  IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
+  return reinterpret_cast<jweak>(ref);
+}
+
+void JavaVMExt::DeleteGlobalRef(Thread* self, jobject obj) {
+  if (obj == nullptr) {
+    return;
+  }
+  WriterMutexLock mu(self, globals_lock_);
+  if (!globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+    LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
+                 << "failed to find entry";
+  }
+}
+
+void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
+  if (obj == nullptr) {
+    return;
+  }
+  MutexLock mu(self, weak_globals_lock_);
+  if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
+    LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
+                 << "failed to find entry";
+  }
+}
+
+static void ThreadEnableCheckJni(Thread* thread, void* arg) {
+  bool* check_jni = reinterpret_cast<bool*>(arg);
+  thread->GetJniEnv()->SetCheckJniEnabled(*check_jni);
+}
+
+bool JavaVMExt::SetCheckJniEnabled(bool enabled) {
+  bool old_check_jni = check_jni_;
+  check_jni_ = enabled;
+  functions = enabled ? GetCheckJniInvokeInterface() : unchecked_functions_;
+  MutexLock mu(Thread::Current(), *Locks::thread_list_lock_);
+  runtime_->GetThreadList()->ForEach(ThreadEnableCheckJni, &check_jni_);
+  return old_check_jni;
+}
+
+void JavaVMExt::DumpForSigQuit(std::ostream& os) {
+  os << "JNI: CheckJNI is " << (check_jni_ ? "on" : "off");
+  if (force_copy_) {
+    os << " (with forcecopy)";
+  }
+  Thread* self = Thread::Current();
+  {
+    MutexLock mu(self, pins_lock_);
+    os << "; pins=" << pin_table_.Size();
+  }
+  {
+    ReaderMutexLock mu(self, globals_lock_);
+    os << "; globals=" << globals_.Capacity();
+  }
+  {
+    MutexLock mu(self, weak_globals_lock_);
+    if (weak_globals_.Capacity() > 0) {
+      os << " (plus " << weak_globals_.Capacity() << " weak)";
+    }
+  }
+  os << '\n';
+
+  {
+    MutexLock mu(self, *Locks::jni_libraries_lock_);
+    os << "Libraries: " << Dumpable<Libraries>(*libraries_) << " (" << libraries_->size() << ")\n";
+  }
+}
+
+void JavaVMExt::DisallowNewWeakGlobals() {
+  MutexLock mu(Thread::Current(), weak_globals_lock_);
+  allow_new_weak_globals_ = false;
+}
+
+void JavaVMExt::AllowNewWeakGlobals() {
+  Thread* self = Thread::Current();
+  MutexLock mu(self, weak_globals_lock_);
+  allow_new_weak_globals_ = true;
+  weak_globals_add_condition_.Broadcast(self);
+}
+
+mirror::Object* JavaVMExt::DecodeGlobal(Thread* self, IndirectRef ref) {
+  return globals_.SynchronizedGet(self, &globals_lock_, ref);
+}
+
+mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
+  MutexLock mu(self, weak_globals_lock_);
+  while (UNLIKELY(!allow_new_weak_globals_)) {
+    weak_globals_add_condition_.WaitHoldingLocks(self);
+  }
+  return weak_globals_.Get(ref);
+}
+
+void JavaVMExt::PinPrimitiveArray(Thread* self, mirror::Array* array) {
+  MutexLock mu(self, pins_lock_);
+  pin_table_.Add(array);
+}
+
+void JavaVMExt::UnpinPrimitiveArray(Thread* self, mirror::Array* array) {
+  MutexLock mu(self, pins_lock_);
+  pin_table_.Remove(array);
+}
+
+void JavaVMExt::DumpReferenceTables(std::ostream& os) {
+  Thread* self = Thread::Current();
+  {
+    ReaderMutexLock mu(self, globals_lock_);
+    globals_.Dump(os);
+  }
+  {
+    MutexLock mu(self, weak_globals_lock_);
+    weak_globals_.Dump(os);
+  }
+  {
+    MutexLock mu(self, pins_lock_);
+    pin_table_.Dump(os);
+  }
+}
+
+bool JavaVMExt::LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject class_loader,
+                                  std::string* error_msg) {
+  error_msg->clear();
+
+  // See if we've already loaded this library.  If we have, and the class loader
+  // matches, return successfully without doing anything.
+  // TODO: for better results we should canonicalize the pathname (or even compare
+  // inodes). This implementation is fine if everybody is using System.loadLibrary.
+  SharedLibrary* library;
+  Thread* self = Thread::Current();
+  {
+    // TODO: move the locking (and more of this logic) into Libraries.
+    MutexLock mu(self, *Locks::jni_libraries_lock_);
+    library = libraries_->Get(path);
+  }
+  if (library != nullptr) {
+    if (env->IsSameObject(library->GetClassLoader(), class_loader) == JNI_FALSE) {
+      // The library will be associated with class_loader. The JNI
+      // spec says we can't load the same library into more than one
+      // class loader.
+      StringAppendF(error_msg, "Shared library \"%s\" already opened by "
+          "ClassLoader %p; can't open in ClassLoader %p",
+          path.c_str(), library->GetClassLoader(), class_loader);
+      LOG(WARNING) << error_msg;
+      return false;
+    }
+    VLOG(jni) << "[Shared library \"" << path << "\" already loaded in "
+              << " ClassLoader " << class_loader << "]";
+    if (!library->CheckOnLoadResult()) {
+      StringAppendF(error_msg, "JNI_OnLoad failed on a previous attempt "
+          "to load \"%s\"", path.c_str());
+      return false;
+    }
+    return true;
+  }
+
+  // Open the shared library.  Because we're using a full path, the system
+  // doesn't have to search through LD_LIBRARY_PATH.  (It may do so to
+  // resolve this library's dependencies though.)
+
+  // Failures here are expected when java.library.path has several entries
+  // and we have to hunt for the lib.
+
+  // Below we dlopen but there is no paired dlclose, this would be necessary if we supported
+  // class unloading. Libraries will only be unloaded when the reference count (incremented by
+  // dlopen) becomes zero from dlclose.
+
+  Locks::mutator_lock_->AssertNotHeld(self);
+  const char* path_str = path.empty() ? nullptr : path.c_str();
+  void* handle = dlopen(path_str, RTLD_LAZY);
+  bool needs_native_bridge = false;
+  if (handle == nullptr) {
+    if (android::NativeBridgeIsSupported(path_str)) {
+      handle = android::NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
+      needs_native_bridge = true;
+    }
+  }
+
+  VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_LAZY) returned " << handle << "]";
+
+  if (handle == nullptr) {
+    *error_msg = dlerror();
+    LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << *error_msg;
+    return false;
+  }
+
+  if (env->ExceptionCheck() == JNI_TRUE) {
+    LOG(ERROR) << "Unexpected exception:";
+    env->ExceptionDescribe();
+    env->ExceptionClear();
+  }
+  // Create a new entry.
+  // TODO: move the locking (and more of this logic) into Libraries.
+  bool created_library = false;
+  {
+    // Create SharedLibrary ahead of taking the libraries lock to maintain lock ordering.
+    std::unique_ptr<SharedLibrary> new_library(
+        new SharedLibrary(env, self, path, handle, class_loader));
+    MutexLock mu(self, *Locks::jni_libraries_lock_);
+    library = libraries_->Get(path);
+    if (library == nullptr) {  // We won race to get libraries_lock.
+      library = new_library.release();
+      libraries_->Put(path, library);
+      created_library = true;
+    }
+  }
+  if (!created_library) {
+    LOG(INFO) << "WOW: we lost a race to add shared library: "
+        << "\"" << path << "\" ClassLoader=" << class_loader;
+    return library->CheckOnLoadResult();
+  }
+  VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader << "]";
+
+  bool was_successful = false;
+  void* sym;
+  if (needs_native_bridge) {
+    library->SetNeedsNativeBridge();
+    sym = library->FindSymbolWithNativeBridge("JNI_OnLoad", nullptr);
+  } else {
+    sym = dlsym(handle, "JNI_OnLoad");
+  }
+  if (sym == nullptr) {
+    VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
+    was_successful = true;
+  } else {
+    // Call JNI_OnLoad.  We have to override the current class
+    // loader, which will always be "null" since the stuff at the
+    // top of the stack is around Runtime.loadLibrary().  (See
+    // the comments in the JNI FindClass function.)
+    ScopedLocalRef<jobject> old_class_loader(env, env->NewLocalRef(self->GetClassLoaderOverride()));
+    self->SetClassLoaderOverride(class_loader);
+
+    VLOG(jni) << "[Calling JNI_OnLoad in \"" << path << "\"]";
+    typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
+    JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
+    int version = (*jni_on_load)(this, nullptr);
+
+    self->SetClassLoaderOverride(old_class_loader.get());
+
+    if (version == JNI_ERR) {
+      StringAppendF(error_msg, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
+    } else if (IsBadJniVersion(version)) {
+      StringAppendF(error_msg, "Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
+                    path.c_str(), version);
+      // It's unwise to call dlclose() here, but we can mark it
+      // as bad and ensure that future load attempts will fail.
+      // We don't know how far JNI_OnLoad got, so there could
+      // be some partially-initialized stuff accessible through
+      // newly-registered native method calls.  We could try to
+      // unregister them, but that doesn't seem worthwhile.
+    } else {
+      was_successful = true;
+    }
+    VLOG(jni) << "[Returned " << (was_successful ? "successfully" : "failure")
+              << " from JNI_OnLoad in \"" << path << "\"]";
+  }
+
+  library->SetResult(was_successful);
+  return was_successful;
+}
+
+void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
+  CHECK(m->IsNative());
+  mirror::Class* c = m->GetDeclaringClass();
+  // If this is a static method, it could be called before the class has been initialized.
+  CHECK(c->IsInitializing()) << c->GetStatus() << " " << PrettyMethod(m);
+  std::string detail;
+  void* native_method;
+  Thread* self = Thread::Current();
+  {
+    MutexLock mu(self, *Locks::jni_libraries_lock_);
+    native_method = libraries_->FindNativeMethod(m, detail);
+  }
+  // Throwing can cause libraries_lock to be reacquired.
+  if (native_method == nullptr) {
+    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
+    self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
+  }
+  return native_method;
+}
+
+void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
+  MutexLock mu(Thread::Current(), weak_globals_lock_);
+  for (mirror::Object** entry : weak_globals_) {
+    // Since this is called by the GC, we don't need a read barrier.
+    mirror::Object* obj = *entry;
+    mirror::Object* new_obj = callback(obj, arg);
+    if (new_obj == nullptr) {
+      new_obj = kClearedJniWeakGlobal;
+    }
+    *entry = new_obj;
+  }
+}
+
+void JavaVMExt::VisitRoots(RootCallback* callback, void* arg) {
+  Thread* self = Thread::Current();
+  {
+    ReaderMutexLock mu(self, globals_lock_);
+    globals_.VisitRoots(callback, arg, 0, kRootJNIGlobal);
+  }
+  {
+    MutexLock mu(self, pins_lock_);
+    pin_table_.VisitRoots(callback, arg, 0, kRootVMInternal);
+  }
+  // The weak_globals table is visited by the GC itself (because it mutates the table).
+}
+
+// JNI Invocation interface.
+
+extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
+  const JavaVMInitArgs* args = static_cast<JavaVMInitArgs*>(vm_args);
+  if (IsBadJniVersion(args->version)) {
+    LOG(ERROR) << "Bad JNI version passed to CreateJavaVM: " << args->version;
+    return JNI_EVERSION;
+  }
+  RuntimeOptions options;
+  for (int i = 0; i < args->nOptions; ++i) {
+    JavaVMOption* option = &args->options[i];
+    options.push_back(std::make_pair(std::string(option->optionString), option->extraInfo));
+  }
+  bool ignore_unrecognized = args->ignoreUnrecognized;
+  if (!Runtime::Create(options, ignore_unrecognized)) {
+    return JNI_ERR;
+  }
+  Runtime* runtime = Runtime::Current();
+  bool started = runtime->Start();
+  if (!started) {
+    delete Thread::Current()->GetJniEnv();
+    delete runtime->GetJavaVM();
+    LOG(WARNING) << "CreateJavaVM failed";
+    return JNI_ERR;
+  }
+  *p_env = Thread::Current()->GetJniEnv();
+  *p_vm = runtime->GetJavaVM();
+  return JNI_OK;
+}
+
+extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
+  Runtime* runtime = Runtime::Current();
+  if (runtime == nullptr) {
+    *vm_count = 0;
+  } else {
+    *vm_count = 1;
+    vms[0] = runtime->GetJavaVM();
+  }
+  return JNI_OK;
+}
+
+// Historically unsupported.
+extern "C" jint JNI_GetDefaultJavaVMInitArgs(void* /*vm_args*/) {
+  return JNI_ERR;
+}
+
+}  // namespace art
diff --git a/runtime/java_vm_ext.h b/runtime/java_vm_ext.h
new file mode 100644 (file)
index 0000000..da0b8e3
--- /dev/null
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JAVA_VM_EXT_H_
+#define ART_RUNTIME_JAVA_VM_EXT_H_
+
+#include "jni.h"
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "indirect_reference_table.h"
+#include "reference_table.h"
+
+namespace art {
+
+namespace mirror {
+  class ArtMethod;
+  class Array;
+}  // namespace mirror
+
+class Libraries;
+class ParsedOptions;
+class Runtime;
+
+class JavaVMExt : public JavaVM {
+ public:
+  JavaVMExt(Runtime* runtime, ParsedOptions* options);
+  ~JavaVMExt();
+
+  bool ForceCopy() const {
+    return force_copy_;
+  }
+
+  bool IsCheckJniEnabled() const {
+    return check_jni_;
+  }
+
+  bool IsTracingEnabled() const {
+    return tracing_enabled_;
+  }
+
+  Runtime* GetRuntime() const {
+    return runtime_;
+  }
+
+  void SetCheckJniAbortHook(void (*hook)(void*, const std::string&), void* data) {
+    check_jni_abort_hook_ = hook;
+    check_jni_abort_hook_data_ = data;
+  }
+
+  // Aborts execution unless there is an abort handler installed in which case it will return. Its
+  // therefore important that callers return after aborting as otherwise code following the abort
+  // will be executed in the abort handler case.
+  void JniAbort(const char* jni_function_name, const char* msg);
+
+  void JniAbortV(const char* jni_function_name, const char* fmt, va_list ap);
+
+  void JniAbortF(const char* jni_function_name, const char* fmt, ...)
+      __attribute__((__format__(__printf__, 3, 4)));
+
+  // If both "-Xcheck:jni" and "-Xjnitrace:" are enabled, we print trace messages
+  // when a native method that matches the -Xjnitrace argument calls a JNI function
+  // such as NewByteArray.
+  // If -verbose:third-party-jni is on, we want to log any JNI function calls
+  // made by a third-party native method.
+  bool ShouldTrace(mirror::ArtMethod* method) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  /**
+   * Loads the given shared library. 'path' is an absolute pathname.
+   *
+   * Returns 'true' on success. On failure, sets 'detail' to a
+   * human-readable description of the error.
+   */
+  bool LoadNativeLibrary(JNIEnv* env, const std::string& path, jobject javaLoader,
+                         std::string* error_msg);
+
+  /**
+   * Returns a pointer to the code for the native method 'm', found
+   * using dlsym(3) on every native library that's been loaded so far.
+   */
+  void* FindCodeForNativeMethod(mirror::ArtMethod* m)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void DumpForSigQuit(std::ostream& os)
+      LOCKS_EXCLUDED(Locks::jni_libraries_lock_, globals_lock_, weak_globals_lock_, pins_lock_);
+
+  void DumpReferenceTables(std::ostream& os)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  bool SetCheckJniEnabled(bool enabled);
+
+  void VisitRoots(RootCallback* callback, void* arg) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void DisallowNewWeakGlobals() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  jobject AddGlobalRef(Thread* self, mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  jweak AddWeakGlobalRef(Thread* self, mirror::Object* obj)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void DeleteGlobalRef(Thread* self, jobject obj);
+
+  void DeleteWeakGlobalRef(Thread* self, jweak obj);
+
+  void SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  mirror::Object* DecodeGlobal(Thread* self, IndirectRef ref)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void PinPrimitiveArray(Thread* self, mirror::Array* array)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(pins_lock_);
+
+  void UnpinPrimitiveArray(Thread* self, mirror::Array* array)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_)
+      LOCKS_EXCLUDED(pins_lock_);
+
+  const JNIInvokeInterface* GetUncheckedFunctions() const {
+    return unchecked_functions_;
+  }
+
+ private:
+  Runtime* const runtime_;
+
+  // Used for testing. By default, we'll LOG(FATAL) the reason.
+  void (*check_jni_abort_hook_)(void* data, const std::string& reason);
+  void* check_jni_abort_hook_data_;
+
+  // Extra checking.
+  bool check_jni_;
+  bool force_copy_;
+  const bool tracing_enabled_;
+
+  // Extra diagnostics.
+  const std::string trace_;
+
+  // Used to hold references to pinned primitive arrays.
+  Mutex pins_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  ReferenceTable pin_table_ GUARDED_BY(pins_lock_);
+
+  // JNI global references.
+  ReaderWriterMutex globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
+  IndirectReferenceTable globals_;
+
+  std::unique_ptr<Libraries> libraries_ GUARDED_BY(Locks::jni_libraries_lock_);
+
+  // Used by -Xcheck:jni.
+  const JNIInvokeInterface* const unchecked_functions_;
+
+  // JNI weak global references.
+  Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
+  // Since weak_globals_ contain weak roots, be careful not to
+  // directly access the object references in it. Use Get() with the
+  // read barrier enabled.
+  IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
+  bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
+  ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
+
+  DISALLOW_COPY_AND_ASSIGN(JavaVMExt);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_JAVA_VM_EXT_H_
similarity index 89%
rename from runtime/jni_internal-inl.h
rename to runtime/jni_env_ext-inl.h
index 6cf9a61..dc6a3e8 100644 (file)
  * limitations under the License.
  */
 
-#ifndef ART_RUNTIME_JNI_INTERNAL_INL_H_
-#define ART_RUNTIME_JNI_INTERNAL_INL_H_
+#ifndef ART_RUNTIME_JNI_ENV_EXT_INL_H_
+#define ART_RUNTIME_JNI_ENV_EXT_INL_H_
 
-#include "jni_internal.h"
+#include "jni_env_ext.h"
 
 #include "utils.h"
 
@@ -44,4 +44,4 @@ inline T JNIEnvExt::AddLocalReference(mirror::Object* obj) {
 
 }  // namespace art
 
-#endif  // ART_RUNTIME_JNI_INTERNAL_INL_H_
+#endif  // ART_RUNTIME_JNI_ENV_EXT_INL_H_
diff --git a/runtime/jni_env_ext.cc b/runtime/jni_env_ext.cc
new file mode 100644 (file)
index 0000000..180e3d7
--- /dev/null
@@ -0,0 +1,89 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#include "jni_env_ext.h"
+
+#include "check_jni.h"
+#include "indirect_reference_table.h"
+#include "java_vm_ext.h"
+#include "jni_internal.h"
+
+namespace art {
+
+static constexpr size_t kMonitorsInitial = 32;  // Arbitrary.
+static constexpr size_t kMonitorsMax = 4096;  // Arbitrary sanity check.
+
+static constexpr size_t kLocalsInitial = 64;  // Arbitrary.
+
+JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
+    : self(self),
+      vm(vm),
+      local_ref_cookie(IRT_FIRST_SEGMENT),
+      locals(kLocalsInitial, kLocalsMax, kLocal),
+      check_jni(false),
+      critical(0),
+      monitors("monitors", kMonitorsInitial, kMonitorsMax) {
+  functions = unchecked_functions = GetJniNativeInterface();
+  if (vm->IsCheckJniEnabled()) {
+    SetCheckJniEnabled(true);
+  }
+}
+
+JNIEnvExt::~JNIEnvExt() {
+}
+
+jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (obj == nullptr) {
+    return nullptr;
+  }
+  return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
+}
+
+void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (obj != nullptr) {
+    locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
+  }
+}
+
+void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
+  check_jni = enabled;
+  functions = enabled ? GetCheckJniNativeInterface() : GetJniNativeInterface();
+}
+
+void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
+  locals.Dump(os);
+  monitors.Dump(os);
+}
+
+void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  UNUSED(capacity);  // cpplint gets confused with (int) and thinks its a cast.
+  // TODO: take 'capacity' into account.
+  stacked_local_ref_cookies.push_back(local_ref_cookie);
+  local_ref_cookie = locals.GetSegmentState();
+}
+
+void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  locals.SetSegmentState(local_ref_cookie);
+  local_ref_cookie = stacked_local_ref_cookies.back();
+  stacked_local_ref_cookies.pop_back();
+}
+
+Offset JNIEnvExt::SegmentStateOffset() {
+  return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) +
+                IndirectReferenceTable::SegmentStateOffset().Int32Value());
+}
+
+}  // namespace art
diff --git a/runtime/jni_env_ext.h b/runtime/jni_env_ext.h
new file mode 100644 (file)
index 0000000..af87cb4
--- /dev/null
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2011 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#ifndef ART_RUNTIME_JNI_ENV_EXT_H_
+#define ART_RUNTIME_JNI_ENV_EXT_H_
+
+#include <jni.h>
+
+#include "base/macros.h"
+#include "base/mutex.h"
+#include "indirect_reference_table.h"
+#include "object_callbacks.h"
+#include "reference_table.h"
+
+namespace art {
+
+class JavaVMExt;
+
+// Maximum number of local references in the indirect reference table. The value is arbitrary but
+// low enough that it forces sanity checks.
+static constexpr size_t kLocalsMax = 512;
+
+struct JNIEnvExt : public JNIEnv {
+  JNIEnvExt(Thread* self, JavaVMExt* vm);
+  ~JNIEnvExt();
+
+  void DumpReferenceTables(std::ostream& os)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  void SetCheckJniEnabled(bool enabled);
+
+  void PushFrame(int capacity);
+  void PopFrame();
+
+  template<typename T>
+  T AddLocalReference(mirror::Object* obj)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  static Offset SegmentStateOffset();
+
+  static Offset LocalRefCookieOffset() {
+    return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
+  }
+
+  static Offset SelfOffset() {
+    return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
+  }
+
+  jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  Thread* const self;
+  JavaVMExt* const vm;
+
+  // Cookie used when using the local indirect reference table.
+  uint32_t local_ref_cookie;
+
+  // JNI local references.
+  IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
+
+  // Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
+  // TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
+  // to a native method.
+  std::vector<uint32_t> stacked_local_ref_cookies;
+
+  // Frequently-accessed fields cached from JavaVM.
+  bool check_jni;
+
+  // How many nested "critical" JNI calls are we in?
+  int critical;
+
+  // Entered JNI monitors, for bulk exit on thread detach.
+  ReferenceTable monitors;
+
+  // Used by -Xcheck:jni.
+  const JNINativeInterface* unchecked_functions;
+};
+
+// Used to save and restore the JNIEnvExt state when not going through code created by the JNI
+// compiler.
+class ScopedJniEnvLocalRefState {
+ public:
+  explicit ScopedJniEnvLocalRefState(JNIEnvExt* env) : env_(env) {
+    saved_local_ref_cookie_ = env->local_ref_cookie;
+    env->local_ref_cookie = env->locals.GetSegmentState();
+  }
+
+  ~ScopedJniEnvLocalRefState() {
+    env_->locals.SetSegmentState(env_->local_ref_cookie);
+    env_->local_ref_cookie = saved_local_ref_cookie_;
+  }
+
+ private:
+  JNIEnvExt* const env_;
+  uint32_t saved_local_ref_cookie_;
+
+  DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
+};
+
+}  // namespace art
+
+#endif  // ART_RUNTIME_JNI_ENV_EXT_H_
index afbd5cb..9196556 100644 (file)
@@ -34,7 +34,8 @@
 #include "gc/accounting/card_table-inl.h"
 #include "indirect_reference_table-inl.h"
 #include "interpreter/interpreter.h"
-#include "jni.h"
+#include "jni_env_ext.h"
+#include "java_vm_ext.h"
 #include "mirror/art_field-inl.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/class-inl.h"
 
 namespace art {
 
-static const size_t kMonitorsInitial = 32;  // Arbitrary.
-static const size_t kMonitorsMax = 4096;  // Arbitrary sanity check.
-
-static const size_t kLocalsInitial = 64;  // Arbitrary.
-static const size_t kLocalsMax = 512;  // Arbitrary sanity check.
-
-static const size_t kPinTableInitial = 16;  // Arbitrary.
-static const size_t kPinTableMax = 1024;  // Arbitrary sanity check.
-
-static size_t gGlobalsInitial = 512;  // Arbitrary.
-static size_t gGlobalsMax = 51200;  // Arbitrary sanity check. (Must fit in 16 bits.)
-
-static const size_t kWeakGlobalsInitial = 16;  // Arbitrary.
-static const size_t kWeakGlobalsMax = 51200;  // Arbitrary sanity check. (Must fit in 16 bits.)
-
-static jweak AddWeakGlobalReference(ScopedObjectAccess& soa, mirror::Object* obj)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return soa.Vm()->AddWeakGlobalReference(soa.Self(), obj);
-}
-
-static bool IsBadJniVersion(int version) {
-  // We don't support JNI_VERSION_1_1. These are the only other valid versions.
-  return version != JNI_VERSION_1_2 && version != JNI_VERSION_1_4 && version != JNI_VERSION_1_6;
-}
-
 // Section 12.3.2 of the JNI spec describes JNI class descriptors. They're
 // separated with slashes but aren't wrapped with "L;" like regular descriptors
 // (i.e. "a/b/C" rather than "La/b/C;"). Arrays of reference types are an
@@ -170,7 +146,7 @@ static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
   mirror::ArtMethod* method = soa.Self()->GetCurrentMethod(nullptr);
   // If we are running Runtime.nativeLoad, use the overriding ClassLoader it set.
   if (method == soa.DecodeMethod(WellKnownClasses::java_lang_Runtime_nativeLoad)) {
-    return soa.Self()->GetClassLoaderOverride();
+    return soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride());
   }
   // If we have a method, use its ClassLoader for context.
   if (method != nullptr) {
@@ -183,7 +159,7 @@ static mirror::ClassLoader* GetClassLoader(const ScopedObjectAccess& soa)
     return class_loader;
   }
   // See if the override ClassLoader is set for gtests.
-  class_loader = soa.Self()->GetClassLoaderOverride();
+  class_loader = soa.Decode<mirror::ClassLoader*>(soa.Self()->GetClassLoaderOverride());
   if (class_loader != nullptr) {
     // If so, CommonCompilerTest should have set UseCompileTimeClassPath.
     CHECK(Runtime::Current()->UseCompileTimeClassPath());
@@ -243,20 +219,6 @@ static jfieldID FindFieldID(const ScopedObjectAccess& soa, jclass jni_class, con
   return soa.EncodeField(field);
 }
 
-static void PinPrimitiveArray(const ScopedObjectAccess& soa, mirror::Array* array)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  JavaVMExt* vm = soa.Vm();
-  MutexLock mu(soa.Self(), vm->pins_lock);
-  vm->pin_table.Add(array);
-}
-
-static void UnpinPrimitiveArray(const ScopedObjectAccess& soa, mirror::Array* array)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  JavaVMExt* vm = soa.Vm();
-  MutexLock mu(soa.Self(), vm->pins_lock);
-  vm->pin_table.Remove(array);
-}
-
 static void ThrowAIOOBE(ScopedObjectAccess& soa, mirror::Array* array, jsize start,
                         jsize length, const char* identifier)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
@@ -319,255 +281,10 @@ int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobj
   return JNI_OK;
 }
 
-static jint JII_AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* raw_args, bool as_daemon) {
-  if (vm == nullptr || p_env == nullptr) {
-    return JNI_ERR;
-  }
-
-  // Return immediately if we're already attached.
-  Thread* self = Thread::Current();
-  if (self != nullptr) {
-    *p_env = self->GetJniEnv();
-    return JNI_OK;
-  }
-
-  Runtime* runtime = reinterpret_cast<JavaVMExt*>(vm)->runtime;
-
-  // No threads allowed in zygote mode.
-  if (runtime->IsZygote()) {
-    LOG(ERROR) << "Attempt to attach a thread in the zygote";
-    return JNI_ERR;
-  }
-
-  JavaVMAttachArgs* args = static_cast<JavaVMAttachArgs*>(raw_args);
-  const char* thread_name = nullptr;
-  jobject thread_group = nullptr;
-  if (args != nullptr) {
-    if (IsBadJniVersion(args->version)) {
-      LOG(ERROR) << "Bad JNI version passed to "
-                 << (as_daemon ? "AttachCurrentThreadAsDaemon" : "AttachCurrentThread") << ": "
-                 << args->version;
-      return JNI_EVERSION;
-    }
-    thread_name = args->name;
-    thread_group = args->group;
-  }
-
-  if (!runtime->AttachCurrentThread(thread_name, as_daemon, thread_group, !runtime->IsCompiler())) {
-    *p_env = nullptr;
-    return JNI_ERR;
-  } else {
-    *p_env = Thread::Current()->GetJniEnv();
-    return JNI_OK;
-  }
+static JavaVMExt* JavaVmExtFromEnv(JNIEnv* env) {
+  return reinterpret_cast<JNIEnvExt*>(env)->vm;
 }
 
-class SharedLibrary {
- public:
-  SharedLibrary(const std::string& path, void* handle, mirror::Object* class_loader)
-      : path_(path),
-        handle_(handle),
-        needs_native_bridge_(false),
-        class_loader_(GcRoot<mirror::Object>(class_loader)),
-        jni_on_load_lock_("JNI_OnLoad lock"),
-        jni_on_load_cond_("JNI_OnLoad condition variable", jni_on_load_lock_),
-        jni_on_load_thread_id_(Thread::Current()->GetThreadId()),
-        jni_on_load_result_(kPending) {
-  }
-
-  mirror::Object* GetClassLoader() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return class_loader_.Read();
-  }
-
-  std::string GetPath() {
-    return path_;
-  }
-
-  /*
-   * Check the result of an earlier call to JNI_OnLoad on this library.
-   * If the call has not yet finished in another thread, wait for it.
-   */
-  bool CheckOnLoadResult()
-      LOCKS_EXCLUDED(jni_on_load_lock_)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    Thread* self = Thread::Current();
-    self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad);
-    bool okay;
-    {
-      MutexLock mu(self, jni_on_load_lock_);
-
-      if (jni_on_load_thread_id_ == self->GetThreadId()) {
-        // Check this so we don't end up waiting for ourselves.  We need to return "true" so the
-        // caller can continue.
-        LOG(INFO) << *self << " recursive attempt to load library " << "\"" << path_ << "\"";
-        okay = true;
-      } else {
-        while (jni_on_load_result_ == kPending) {
-          VLOG(jni) << "[" << *self << " waiting for \"" << path_ << "\" " << "JNI_OnLoad...]";
-          jni_on_load_cond_.Wait(self);
-        }
-
-        okay = (jni_on_load_result_ == kOkay);
-        VLOG(jni) << "[Earlier JNI_OnLoad for \"" << path_ << "\" "
-            << (okay ? "succeeded" : "failed") << "]";
-      }
-    }
-    self->TransitionFromSuspendedToRunnable();
-    return okay;
-  }
-
-  void SetResult(bool result) LOCKS_EXCLUDED(jni_on_load_lock_) {
-    Thread* self = Thread::Current();
-    MutexLock mu(self, jni_on_load_lock_);
-
-    jni_on_load_result_ = result ? kOkay : kFailed;
-    jni_on_load_thread_id_ = 0;
-
-    // Broadcast a wakeup to anybody sleeping on the condition variable.
-    jni_on_load_cond_.Broadcast(self);
-  }
-
-  void SetNeedsNativeBridge() {
-    needs_native_bridge_ = true;
-  }
-
-  bool NeedsNativeBridge() const {
-    return needs_native_bridge_;
-  }
-
-  void* FindSymbol(const std::string& symbol_name) {
-    return dlsym(handle_, symbol_name.c_str());
-  }
-
-  void* FindSymbolWithNativeBridge(const std::string& symbol_name, mirror::ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    CHECK(NeedsNativeBridge());
-
-    uint32_t len = 0;
-    const char* shorty = nullptr;
-    if (m != nullptr) {
-      shorty = m->GetShorty(&len);
-    }
-    return android::NativeBridgeGetTrampoline(handle_, symbol_name.c_str(), shorty, len);
-  }
-
-  void VisitRoots(RootCallback* visitor, void* arg) {
-    if (!class_loader_.IsNull()) {
-      class_loader_.VisitRoot(visitor, arg, 0, kRootVMInternal);
-    }
-  }
-
- private:
-  enum JNI_OnLoadState {
-    kPending,
-    kFailed,
-    kOkay,
-  };
-
-  // Path to library "/system/lib/libjni.so".
-  std::string path_;
-
-  // The void* returned by dlopen(3).
-  void* handle_;
-
-  // True if a native bridge is required.
-  bool needs_native_bridge_;
-
-  // The ClassLoader this library is associated with.
-  GcRoot<mirror::Object> class_loader_;
-
-  // Guards remaining items.
-  Mutex jni_on_load_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  // Wait for JNI_OnLoad in other thread.
-  ConditionVariable jni_on_load_cond_ GUARDED_BY(jni_on_load_lock_);
-  // Recursive invocation guard.
-  uint32_t jni_on_load_thread_id_ GUARDED_BY(jni_on_load_lock_);
-  // Result of earlier JNI_OnLoad call.
-  JNI_OnLoadState jni_on_load_result_ GUARDED_BY(jni_on_load_lock_);
-};
-
-// This exists mainly to keep implementation details out of the header file.
-class Libraries {
- public:
-  Libraries() {
-  }
-
-  ~Libraries() {
-    STLDeleteValues(&libraries_);
-  }
-
-  void Dump(std::ostream& os) const {
-    bool first = true;
-    for (const auto& library : libraries_) {
-      if (!first) {
-        os << ' ';
-      }
-      first = false;
-      os << library.first;
-    }
-  }
-
-  size_t size() const {
-    return libraries_.size();
-  }
-
-  SharedLibrary* Get(const std::string& path) {
-    auto it = libraries_.find(path);
-    return (it == libraries_.end()) ? nullptr : it->second;
-  }
-
-  void Put(const std::string& path, SharedLibrary* library) {
-    libraries_.Put(path, library);
-  }
-
-  // See section 11.3 "Linking Native Methods" of the JNI spec.
-  void* FindNativeMethod(mirror::ArtMethod* m, std::string& detail)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    std::string jni_short_name(JniShortName(m));
-    std::string jni_long_name(JniLongName(m));
-    const mirror::ClassLoader* declaring_class_loader = m->GetDeclaringClass()->GetClassLoader();
-    for (const auto& lib : libraries_) {
-      SharedLibrary* library = lib.second;
-      if (library->GetClassLoader() != declaring_class_loader) {
-        // We only search libraries loaded by the appropriate ClassLoader.
-        continue;
-      }
-      // Try the short name then the long name...
-      void* fn = nullptr;
-      if (UNLIKELY(library->NeedsNativeBridge())) {
-        fn = library->FindSymbolWithNativeBridge(jni_short_name, m);
-        if (fn == nullptr) {
-          fn = library->FindSymbolWithNativeBridge(jni_long_name, m);
-        }
-      } else {
-        fn = library->FindSymbol(jni_short_name);
-        if (fn == nullptr) {
-          fn = library->FindSymbol(jni_long_name);
-        }
-      }
-      if (fn != nullptr) {
-        VLOG(jni) << "[Found native code for " << PrettyMethod(m)
-                  << " in \"" << library->GetPath() << "\"]";
-        return fn;
-      }
-    }
-    detail += "No implementation found for ";
-    detail += PrettyMethod(m);
-    detail += " (tried " + jni_short_name + " and " + jni_long_name + ")";
-    LOG(ERROR) << detail;
-    return nullptr;
-  }
-
-  void VisitRoots(RootCallback* callback, void* arg) {
-    for (auto& lib_pair : libraries_) {
-      lib_pair.second->VisitRoots(callback, arg);
-    }
-  }
-
- private:
-  AllocationTrackingSafeMap<std::string, SharedLibrary*, kAllocatorTagJNILibrarires> libraries_;
-};
-
 #define CHECK_NON_NULL_ARGUMENT(value) \
     CHECK_NON_NULL_ARGUMENT_FN_NAME(__FUNCTION__, value, nullptr)
 
@@ -582,13 +299,13 @@ class Libraries {
 
 #define CHECK_NON_NULL_ARGUMENT_FN_NAME(name, value, return_val) \
   if (UNLIKELY(value == nullptr)) { \
-    JniAbortF(name, #value " == null"); \
+    JavaVmExtFromEnv(env)->JniAbortF(name, #value " == null"); \
     return return_val; \
   }
 
 #define CHECK_NON_NULL_MEMCPY_ARGUMENT(length, value) \
   if (UNLIKELY(length != 0 && value == nullptr)) { \
-    JniAbortF(__FUNCTION__, #value " == null"); \
+    JavaVmExtFromEnv(env)->JniAbortF(__FUNCTION__, #value " == null"); \
     return; \
   }
 
@@ -789,10 +506,10 @@ class JNI {
   static jint PushLocalFrame(JNIEnv* env, jint capacity) {
     // TODO: SOA may not be necessary but I do it to please lock annotations.
     ScopedObjectAccess soa(env);
-    if (EnsureLocalCapacity(soa, capacity, "PushLocalFrame") != JNI_OK) {
+    if (EnsureLocalCapacityInternal(soa, capacity, "PushLocalFrame") != JNI_OK) {
       return JNI_ERR;
     }
-    static_cast<JNIEnvExt*>(env)->PushFrame(capacity);
+    down_cast<JNIEnvExt*>(env)->PushFrame(capacity);
     return JNI_OK;
   }
 
@@ -806,48 +523,31 @@ class JNI {
   static jint EnsureLocalCapacity(JNIEnv* env, jint desired_capacity) {
     // TODO: SOA may not be necessary but I do it to please lock annotations.
     ScopedObjectAccess soa(env);
-    return EnsureLocalCapacity(soa, desired_capacity, "EnsureLocalCapacity");
+    return EnsureLocalCapacityInternal(soa, desired_capacity, "EnsureLocalCapacity");
   }
 
   static jobject NewGlobalRef(JNIEnv* env, jobject obj) {
     ScopedObjectAccess soa(env);
     mirror::Object* decoded_obj = soa.Decode<mirror::Object*>(obj);
-    // Check for null after decoding the object to handle cleared weak globals.
-    if (decoded_obj == nullptr) {
-      return nullptr;
-    }
-    JavaVMExt* vm = soa.Vm();
-    IndirectReferenceTable& globals = vm->globals;
-    WriterMutexLock mu(soa.Self(), vm->globals_lock);
-    IndirectRef ref = globals.Add(IRT_FIRST_SEGMENT, decoded_obj);
-    return reinterpret_cast<jobject>(ref);
+    return soa.Vm()->AddGlobalRef(soa.Self(), decoded_obj);
   }
 
   static void DeleteGlobalRef(JNIEnv* env, jobject obj) {
-    if (obj == nullptr) {
-      return;
-    }
-    JavaVMExt* vm = reinterpret_cast<JNIEnvExt*>(env)->vm;
-    IndirectReferenceTable& globals = vm->globals;
-    Thread* self = reinterpret_cast<JNIEnvExt*>(env)->self;
-    WriterMutexLock mu(self, vm->globals_lock);
-
-    if (!globals.Remove(IRT_FIRST_SEGMENT, obj)) {
-      LOG(WARNING) << "JNI WARNING: DeleteGlobalRef(" << obj << ") "
-                   << "failed to find entry";
-    }
+    JavaVMExt* vm = down_cast<JNIEnvExt*>(env)->vm;
+    Thread* self = down_cast<JNIEnvExt*>(env)->self;
+    vm->DeleteGlobalRef(self, obj);
   }
 
   static jweak NewWeakGlobalRef(JNIEnv* env, jobject obj) {
     ScopedObjectAccess soa(env);
-    return AddWeakGlobalReference(soa, soa.Decode<mirror::Object*>(obj));
+    mirror::Object* decoded_obj = soa.Decode<mirror::Object*>(obj);
+    return soa.Vm()->AddWeakGlobalRef(soa.Self(), decoded_obj);
   }
 
   static void DeleteWeakGlobalRef(JNIEnv* env, jweak obj) {
-    if (obj != nullptr) {
-      ScopedObjectAccess soa(env);
-      soa.Vm()->DeleteWeakGlobalRef(soa.Self(), obj);
-    }
+    JavaVMExt* vm = down_cast<JNIEnvExt*>(env)->vm;
+    Thread* self = down_cast<JNIEnvExt*>(env)->self;
+    vm->DeleteWeakGlobalRef(self, obj);
   }
 
   static jobject NewLocalRef(JNIEnv* env, jobject obj) {
@@ -864,7 +564,6 @@ class JNI {
     if (obj == nullptr) {
       return;
     }
-    ScopedObjectAccess soa(env);
     IndirectReferenceTable& locals = reinterpret_cast<JNIEnvExt*>(env)->locals;
 
     uint32_t cookie = reinterpret_cast<JNIEnvExt*>(env)->local_ref_cookie;
@@ -1930,11 +1629,11 @@ class JNI {
 
   static jstring NewString(JNIEnv* env, const jchar* chars, jsize char_count) {
     if (UNLIKELY(char_count < 0)) {
-      JniAbortF("NewString", "char_count < 0: %d", char_count);
+      JavaVmExtFromEnv(env)->JniAbortF("NewString", "char_count < 0: %d", char_count);
       return nullptr;
     }
     if (UNLIKELY(chars == nullptr && char_count > 0)) {
-      JniAbortF("NewString", "chars == null && char_count > 0");
+      JavaVmExtFromEnv(env)->JniAbortF("NewString", "chars == null && char_count > 0");
       return nullptr;
     }
     ScopedObjectAccess soa(env);
@@ -1996,7 +1695,7 @@ class JNI {
     ScopedObjectAccess soa(env);
     mirror::String* s = soa.Decode<mirror::String*>(java_string);
     mirror::CharArray* chars = s->GetCharArray();
-    PinPrimitiveArray(soa, chars);
+    soa.Vm()->PinPrimitiveArray(soa.Self(), chars);
     gc::Heap* heap = Runtime::Current()->GetHeap();
     if (heap->IsMovableObject(chars)) {
       if (is_copy != nullptr) {
@@ -2025,7 +1724,7 @@ class JNI {
     if (chars != (s_chars->GetData() + s->GetOffset())) {
       delete[] chars;
     }
-    UnpinPrimitiveArray(soa, s->GetCharArray());
+    soa.Vm()->UnpinPrimitiveArray(soa.Self(), s->GetCharArray());
   }
 
   static const jchar* GetStringCritical(JNIEnv* env, jstring java_string, jboolean* is_copy) {
@@ -2034,7 +1733,7 @@ class JNI {
     mirror::String* s = soa.Decode<mirror::String*>(java_string);
     mirror::CharArray* chars = s->GetCharArray();
     int32_t offset = s->GetOffset();
-    PinPrimitiveArray(soa, chars);
+    soa.Vm()->PinPrimitiveArray(soa.Self(), chars);
     gc::Heap* heap = Runtime::Current()->GetHeap();
     if (heap->IsMovableObject(chars)) {
       StackHandleScope<1> hs(soa.Self());
@@ -2050,7 +1749,8 @@ class JNI {
   static void ReleaseStringCritical(JNIEnv* env, jstring java_string, const jchar* chars) {
     CHECK_NON_NULL_ARGUMENT_RETURN_VOID(java_string);
     ScopedObjectAccess soa(env);
-    UnpinPrimitiveArray(soa, soa.Decode<mirror::String*>(java_string)->GetCharArray());
+    soa.Vm()->UnpinPrimitiveArray(soa.Self(),
+                                  soa.Decode<mirror::String*>(java_string)->GetCharArray());
     gc::Heap* heap = Runtime::Current()->GetHeap();
     mirror::String* s = soa.Decode<mirror::String*>(java_string);
     mirror::CharArray* s_chars = s->GetCharArray();
@@ -2086,7 +1786,8 @@ class JNI {
     ScopedObjectAccess soa(env);
     mirror::Object* obj = soa.Decode<mirror::Object*>(java_array);
     if (UNLIKELY(!obj->IsArrayInstance())) {
-      JniAbortF("GetArrayLength", "not an array: %s", PrettyTypeOf(obj).c_str());
+      soa.Vm()->JniAbortF("GetArrayLength", "not an array: %s", PrettyTypeOf(obj).c_str());
+      return 0;
     }
     mirror::Array* array = obj->AsArray();
     return array->GetLength();
@@ -2141,7 +1842,7 @@ class JNI {
   static jobjectArray NewObjectArray(JNIEnv* env, jsize length, jclass element_jclass,
                                      jobject initial_element) {
     if (UNLIKELY(length < 0)) {
-      JniAbortF("NewObjectArray", "negative array length: %d", length);
+      JavaVmExtFromEnv(env)->JniAbortF("NewObjectArray", "negative array length: %d", length);
       return nullptr;
     }
     CHECK_NON_NULL_ARGUMENT(element_jclass);
@@ -2152,8 +1853,8 @@ class JNI {
     {
       mirror::Class* element_class = soa.Decode<mirror::Class*>(element_jclass);
       if (UNLIKELY(element_class->IsPrimitive())) {
-        JniAbortF("NewObjectArray", "not an object type: %s",
-                  PrettyDescriptor(element_class).c_str());
+        soa.Vm()->JniAbortF("NewObjectArray", "not an object type: %s",
+                            PrettyDescriptor(element_class).c_str());
         return nullptr;
       }
       ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
@@ -2171,10 +1872,11 @@ class JNI {
       if (initial_object != nullptr) {
         mirror::Class* element_class = result->GetClass()->GetComponentType();
         if (UNLIKELY(!element_class->IsAssignableFrom(initial_object->GetClass()))) {
-          JniAbortF("NewObjectArray", "cannot assign object of type '%s' to array with element "
-                    "type of '%s'", PrettyDescriptor(initial_object->GetClass()).c_str(),
-                    PrettyDescriptor(element_class).c_str());
-
+          soa.Vm()->JniAbortF("NewObjectArray", "cannot assign object of type '%s' to array with "
+                              "element type of '%s'",
+                              PrettyDescriptor(initial_object->GetClass()).c_str(),
+                              PrettyDescriptor(element_class).c_str());
+          return nullptr;
         } else {
           for (jsize i = 0; i < length; ++i) {
             result->SetWithoutChecks<false>(i, initial_object);
@@ -2194,8 +1896,8 @@ class JNI {
     ScopedObjectAccess soa(env);
     mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
     if (UNLIKELY(!array->GetClass()->IsPrimitiveArray())) {
-      JniAbortF("GetPrimitiveArrayCritical", "expected primitive array, given %s",
-                PrettyDescriptor(array->GetClass()).c_str());
+      soa.Vm()->JniAbortF("GetPrimitiveArrayCritical", "expected primitive array, given %s",
+                          PrettyDescriptor(array->GetClass()).c_str());
       return nullptr;
     }
     gc::Heap* heap = Runtime::Current()->GetHeap();
@@ -2204,7 +1906,7 @@ class JNI {
       // Re-decode in case the object moved since IncrementDisableGC waits for GC to complete.
       array = soa.Decode<mirror::Array*>(java_array);
     }
-    PinPrimitiveArray(soa, array);
+    soa.Vm()->PinPrimitiveArray(soa.Self(), array);
     if (is_copy != nullptr) {
       *is_copy = JNI_FALSE;
     }
@@ -2217,8 +1919,8 @@ class JNI {
     ScopedObjectAccess soa(env);
     mirror::Array* array = soa.Decode<mirror::Array*>(java_array);
     if (UNLIKELY(!array->GetClass()->IsPrimitiveArray())) {
-      JniAbortF("ReleasePrimitiveArrayCritical", "expected primitive array, given %s",
-                PrettyDescriptor(array->GetClass()).c_str());
+      soa.Vm()->JniAbortF("ReleasePrimitiveArrayCritical", "expected primitive array, given %s",
+                          PrettyDescriptor(array->GetClass()).c_str());
       return;
     }
     const size_t component_size = array->GetClass()->GetComponentSize();
@@ -2390,8 +2092,9 @@ class JNI {
   static jint RegisterNativeMethods(JNIEnv* env, jclass java_class, const JNINativeMethod* methods,
                                     jint method_count, bool return_errors) {
     if (UNLIKELY(method_count < 0)) {
-      JniAbortF("RegisterNatives", "negative method count: %d", method_count);
-      return JNI_ERR;  // Not reached.
+      JavaVmExtFromEnv(env)->JniAbortF("RegisterNatives", "negative method count: %d",
+                                       method_count);
+      return JNI_ERR;  // Not reached except in unit tests.
     }
     CHECK_NON_NULL_ARGUMENT_FN_NAME("RegisterNatives", java_class, JNI_ERR);
     ScopedObjectAccess soa(env);
@@ -2515,17 +2218,21 @@ class JNI {
 
   static jobject NewDirectByteBuffer(JNIEnv* env, void* address, jlong capacity) {
     if (capacity < 0) {
-      JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %" PRId64, capacity);
+      JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer", "negative buffer capacity: %" PRId64,
+                                       capacity);
       return nullptr;
     }
     if (address == nullptr && capacity != 0) {
-      JniAbortF("NewDirectByteBuffer", "non-zero capacity for nullptr pointer: %" PRId64, capacity);
+      JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer",
+                                       "non-zero capacity for nullptr pointer: %" PRId64, capacity);
       return nullptr;
     }
 
     // At the moment, the capacity of DirectByteBuffer is limited to a signed int.
     if (capacity > INT_MAX) {
-      JniAbortF("NewDirectByteBuffer", "buffer capacity greater than maximum jint: %" PRId64, capacity);
+      JavaVmExtFromEnv(env)->JniAbortF("NewDirectByteBuffer",
+                                       "buffer capacity greater than maximum jint: %" PRId64,
+                                       capacity);
       return nullptr;
     }
     jlong address_arg = reinterpret_cast<jlong>(address);
@@ -2579,8 +2286,9 @@ class JNI {
   }
 
  private:
-  static jint EnsureLocalCapacity(ScopedObjectAccess& soa, jint desired_capacity,
-                                  const char* caller) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  static jint EnsureLocalCapacityInternal(ScopedObjectAccess& soa, jint desired_capacity,
+                                          const char* caller)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     // TODO: we should try to expand the table if necessary.
     if (desired_capacity < 0 || desired_capacity > static_cast<jint>(kLocalsMax)) {
       LOG(ERROR) << "Invalid capacity given to " << caller << ": " << desired_capacity;
@@ -2597,11 +2305,11 @@ class JNI {
 
   template<typename JniT, typename ArtT>
   static JniT NewPrimitiveArray(JNIEnv* env, jsize length) {
+    ScopedObjectAccess soa(env);
     if (UNLIKELY(length < 0)) {
-      JniAbortF("NewPrimitiveArray", "negative array length: %d", length);
+      soa.Vm()->JniAbortF("NewPrimitiveArray", "negative array length: %d", length);
       return nullptr;
     }
-    ScopedObjectAccess soa(env);
     ArtT* result = ArtT::Alloc(soa.Self(), length);
     return soa.AddLocalReference<JniT>(result);
   }
@@ -2612,9 +2320,11 @@ class JNI {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     ArtArrayT* array = soa.Decode<ArtArrayT*>(java_array);
     if (UNLIKELY(ArtArrayT::GetArrayClass() != array->GetClass())) {
-      JniAbortF(fn_name, "attempt to %s %s primitive array elements with an object of type %s",
-                operation, PrettyDescriptor(ArtArrayT::GetArrayClass()->GetComponentType()).c_str(),
-                PrettyDescriptor(array->GetClass()).c_str());
+      soa.Vm()->JniAbortF(fn_name,
+                          "attempt to %s %s primitive array elements with an object of type %s",
+                          operation,
+                          PrettyDescriptor(ArtArrayT::GetArrayClass()->GetComponentType()).c_str(),
+                          PrettyDescriptor(array->GetClass()).c_str());
       return nullptr;
     }
     DCHECK_EQ(sizeof(ElementT), array->GetClass()->GetComponentSize());
@@ -2631,7 +2341,7 @@ class JNI {
     if (UNLIKELY(array == nullptr)) {
       return nullptr;
     }
-    PinPrimitiveArray(soa, array);
+    soa.Vm()->PinPrimitiveArray(soa.Self(), array);
     // Only make a copy if necessary.
     if (Runtime::Current()->GetHeap()->IsMovableObject(array)) {
       if (is_copy != nullptr) {
@@ -2677,8 +2387,9 @@ class JNI {
       // heap address. TODO: This might be slow to check, may be worth keeping track of which
       // copies we make?
       if (heap->IsNonDiscontinuousSpaceHeapAddress(reinterpret_cast<mirror::Object*>(elements))) {
-        JniAbortF("ReleaseArrayElements", "invalid element pointer %p, array elements are %p",
-                  reinterpret_cast<void*>(elements), array_data);
+        soa.Vm()->JniAbortF("ReleaseArrayElements",
+                            "invalid element pointer %p, array elements are %p",
+                            reinterpret_cast<void*>(elements), array_data);
         return;
       }
     }
@@ -2693,7 +2404,7 @@ class JNI {
         // Non copy to a movable object must means that we had disabled the moving GC.
         heap->DecrementDisableMovingGC(soa.Self());
       }
-      UnpinPrimitiveArray(soa, array);
+      soa.Vm()->UnpinPrimitiveArray(soa.Self(), array);
     }
   }
 
@@ -2974,487 +2685,8 @@ const JNINativeInterface gJniNativeInterface = {
   JNI::GetObjectRefType,
 };
 
-JNIEnvExt::JNIEnvExt(Thread* self, JavaVMExt* vm)
-    : self(self),
-      vm(vm),
-      local_ref_cookie(IRT_FIRST_SEGMENT),
-      locals(kLocalsInitial, kLocalsMax, kLocal),
-      check_jni(false),
-      critical(0),
-      monitors("monitors", kMonitorsInitial, kMonitorsMax) {
-  functions = unchecked_functions = &gJniNativeInterface;
-  if (vm->check_jni) {
-    SetCheckJniEnabled(true);
-  }
-}
-
-JNIEnvExt::~JNIEnvExt() {
-}
-
-jobject JNIEnvExt::NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  if (obj == nullptr) {
-    return nullptr;
-  }
-  return reinterpret_cast<jobject>(locals.Add(local_ref_cookie, obj));
-}
-
-void JNIEnvExt::DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  if (obj != nullptr) {
-    locals.Remove(local_ref_cookie, reinterpret_cast<IndirectRef>(obj));
-  }
-}
-void JNIEnvExt::SetCheckJniEnabled(bool enabled) {
-  check_jni = enabled;
-  functions = enabled ? GetCheckJniNativeInterface() : &gJniNativeInterface;
-}
-
-void JNIEnvExt::DumpReferenceTables(std::ostream& os) {
-  locals.Dump(os);
-  monitors.Dump(os);
-}
-
-void JNIEnvExt::PushFrame(int capacity) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  UNUSED(capacity);  // cpplint gets confused with (int) and thinks its a cast.
-  // TODO: take 'capacity' into account.
-  stacked_local_ref_cookies.push_back(local_ref_cookie);
-  local_ref_cookie = locals.GetSegmentState();
-}
-
-void JNIEnvExt::PopFrame() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  locals.SetSegmentState(local_ref_cookie);
-  local_ref_cookie = stacked_local_ref_cookies.back();
-  stacked_local_ref_cookies.pop_back();
-}
-
-Offset JNIEnvExt::SegmentStateOffset() {
-  return Offset(OFFSETOF_MEMBER(JNIEnvExt, locals) +
-                IndirectReferenceTable::SegmentStateOffset().Int32Value());
-}
-
-// JNI Invocation interface.
-
-extern "C" jint JNI_CreateJavaVM(JavaVM** p_vm, JNIEnv** p_env, void* vm_args) {
-  const JavaVMInitArgs* args = static_cast<JavaVMInitArgs*>(vm_args);
-  if (IsBadJniVersion(args->version)) {
-    LOG(ERROR) << "Bad JNI version passed to CreateJavaVM: " << args->version;
-    return JNI_EVERSION;
-  }
-  RuntimeOptions options;
-  for (int i = 0; i < args->nOptions; ++i) {
-    JavaVMOption* option = &args->options[i];
-    options.push_back(std::make_pair(std::string(option->optionString), option->extraInfo));
-  }
-  bool ignore_unrecognized = args->ignoreUnrecognized;
-  if (!Runtime::Create(options, ignore_unrecognized)) {
-    return JNI_ERR;
-  }
-  Runtime* runtime = Runtime::Current();
-  bool started = runtime->Start();
-  if (!started) {
-    delete Thread::Current()->GetJniEnv();
-    delete runtime->GetJavaVM();
-    LOG(WARNING) << "CreateJavaVM failed";
-    return JNI_ERR;
-  }
-  *p_env = Thread::Current()->GetJniEnv();
-  *p_vm = runtime->GetJavaVM();
-  return JNI_OK;
-}
-
-extern "C" jint JNI_GetCreatedJavaVMs(JavaVM** vms, jsize, jsize* vm_count) {
-  Runtime* runtime = Runtime::Current();
-  if (runtime == nullptr) {
-    *vm_count = 0;
-  } else {
-    *vm_count = 1;
-    vms[0] = runtime->GetJavaVM();
-  }
-  return JNI_OK;
-}
-
-// Historically unsupported.
-extern "C" jint JNI_GetDefaultJavaVMInitArgs(void* /*vm_args*/) {
-  return JNI_ERR;
-}
-
-class JII {
- public:
-  static jint DestroyJavaVM(JavaVM* vm) {
-    if (vm == nullptr) {
-      return JNI_ERR;
-    }
-    JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
-    delete raw_vm->runtime;
-    return JNI_OK;
-  }
-
-  static jint AttachCurrentThread(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
-    return JII_AttachCurrentThread(vm, p_env, thr_args, false);
-  }
-
-  static jint AttachCurrentThreadAsDaemon(JavaVM* vm, JNIEnv** p_env, void* thr_args) {
-    return JII_AttachCurrentThread(vm, p_env, thr_args, true);
-  }
-
-  static jint DetachCurrentThread(JavaVM* vm) {
-    if (vm == nullptr || Thread::Current() == nullptr) {
-      return JNI_ERR;
-    }
-    JavaVMExt* raw_vm = reinterpret_cast<JavaVMExt*>(vm);
-    Runtime* runtime = raw_vm->runtime;
-    runtime->DetachCurrentThread();
-    return JNI_OK;
-  }
-
-  static jint GetEnv(JavaVM* vm, void** env, jint version) {
-    // GetEnv always returns a JNIEnv* for the most current supported JNI version,
-    // and unlike other calls that take a JNI version doesn't care if you supply
-    // JNI_VERSION_1_1, which we don't otherwise support.
-    if (IsBadJniVersion(version) && version != JNI_VERSION_1_1) {
-      LOG(ERROR) << "Bad JNI version passed to GetEnv: " << version;
-      return JNI_EVERSION;
-    }
-    if (vm == nullptr || env == nullptr) {
-      return JNI_ERR;
-    }
-    Thread* thread = Thread::Current();
-    if (thread == nullptr) {
-      *env = nullptr;
-      return JNI_EDETACHED;
-    }
-    *env = thread->GetJniEnv();
-    return JNI_OK;
-  }
-};
-
-const JNIInvokeInterface gJniInvokeInterface = {
-  nullptr,  // reserved0
-  nullptr,  // reserved1
-  nullptr,  // reserved2
-  JII::DestroyJavaVM,
-  JII::AttachCurrentThread,
-  JII::DetachCurrentThread,
-  JII::GetEnv,
-  JII::AttachCurrentThreadAsDaemon
-};
-
-JavaVMExt::JavaVMExt(Runtime* runtime, ParsedOptions* options)
-    : runtime(runtime),
-      check_jni_abort_hook(nullptr),
-      check_jni_abort_hook_data(nullptr),
-      check_jni(false),
-      force_copy(false),  // TODO: add a way to enable this
-      trace(options->jni_trace_),
-      pins_lock("JNI pin table lock", kPinTableLock),
-      pin_table("pin table", kPinTableInitial, kPinTableMax),
-      globals_lock("JNI global reference table lock"),
-      globals(gGlobalsInitial, gGlobalsMax, kGlobal),
-      libraries_lock("JNI shared libraries map lock", kLoadLibraryLock),
-      libraries(new Libraries),
-      weak_globals_lock_("JNI weak global reference table lock"),
-      weak_globals_(kWeakGlobalsInitial, kWeakGlobalsMax, kWeakGlobal),
-      allow_new_weak_globals_(true),
-      weak_globals_add_condition_("weak globals add condition", weak_globals_lock_) {
-  functions = unchecked_functions = &gJniInvokeInterface;
-  if (options->check_jni_) {
-    SetCheckJniEnabled(true);
-  }
-}
-
-JavaVMExt::~JavaVMExt() {
-  delete libraries;
-}
-
-jweak JavaVMExt::AddWeakGlobalReference(Thread* self, mirror::Object* obj) {
-  if (obj == nullptr) {
-    return nullptr;
-  }
-  MutexLock mu(self, weak_globals_lock_);
-  while (UNLIKELY(!allow_new_weak_globals_)) {
-    weak_globals_add_condition_.WaitHoldingLocks(self);
-  }
-  IndirectRef ref = weak_globals_.Add(IRT_FIRST_SEGMENT, obj);
-  return reinterpret_cast<jweak>(ref);
-}
-
-void JavaVMExt::DeleteWeakGlobalRef(Thread* self, jweak obj) {
-  MutexLock mu(self, weak_globals_lock_);
-  if (!weak_globals_.Remove(IRT_FIRST_SEGMENT, obj)) {
-    LOG(WARNING) << "JNI WARNING: DeleteWeakGlobalRef(" << obj << ") "
-                 << "failed to find entry";
-  }
-}
-
-void JavaVMExt::SetCheckJniEnabled(bool enabled) {
-  check_jni = enabled;
-  functions = enabled ? GetCheckJniInvokeInterface() : &gJniInvokeInterface;
-}
-
-void JavaVMExt::DumpForSigQuit(std::ostream& os) {
-  os << "JNI: CheckJNI is " << (check_jni ? "on" : "off");
-  if (force_copy) {
-    os << " (with forcecopy)";
-  }
-  Thread* self = Thread::Current();
-  {
-    MutexLock mu(self, pins_lock);
-    os << "; pins=" << pin_table.Size();
-  }
-  {
-    ReaderMutexLock mu(self, globals_lock);
-    os << "; globals=" << globals.Capacity();
-  }
-  {
-    MutexLock mu(self, weak_globals_lock_);
-    if (weak_globals_.Capacity() > 0) {
-      os << " (plus " << weak_globals_.Capacity() << " weak)";
-    }
-  }
-  os << '\n';
-
-  {
-    MutexLock mu(self, libraries_lock);
-    os << "Libraries: " << Dumpable<Libraries>(*libraries) << " (" << libraries->size() << ")\n";
-  }
-}
-
-void JavaVMExt::DisallowNewWeakGlobals() {
-  MutexLock mu(Thread::Current(), weak_globals_lock_);
-  allow_new_weak_globals_ = false;
-}
-
-void JavaVMExt::AllowNewWeakGlobals() {
-  Thread* self = Thread::Current();
-  MutexLock mu(self, weak_globals_lock_);
-  allow_new_weak_globals_ = true;
-  weak_globals_add_condition_.Broadcast(self);
-}
-
-mirror::Object* JavaVMExt::DecodeWeakGlobal(Thread* self, IndirectRef ref) {
-  MutexLock mu(self, weak_globals_lock_);
-  while (UNLIKELY(!allow_new_weak_globals_)) {
-    weak_globals_add_condition_.WaitHoldingLocks(self);
-  }
-  return weak_globals_.Get(ref);
-}
-
-void JavaVMExt::DumpReferenceTables(std::ostream& os) {
-  Thread* self = Thread::Current();
-  {
-    ReaderMutexLock mu(self, globals_lock);
-    globals.Dump(os);
-  }
-  {
-    MutexLock mu(self, weak_globals_lock_);
-    weak_globals_.Dump(os);
-  }
-  {
-    MutexLock mu(self, pins_lock);
-    pin_table.Dump(os);
-  }
-}
-
-bool JavaVMExt::LoadNativeLibrary(const std::string& path,
-                                  Handle<mirror::ClassLoader> class_loader,
-                                  std::string* detail) {
-  detail->clear();
-
-  // See if we've already loaded this library.  If we have, and the class loader
-  // matches, return successfully without doing anything.
-  // TODO: for better results we should canonicalize the pathname (or even compare
-  // inodes). This implementation is fine if everybody is using System.loadLibrary.
-  SharedLibrary* library;
-  Thread* self = Thread::Current();
-  {
-    // TODO: move the locking (and more of this logic) into Libraries.
-    MutexLock mu(self, libraries_lock);
-    library = libraries->Get(path);
-  }
-  if (library != nullptr) {
-    if (library->GetClassLoader() != class_loader.Get()) {
-      // The library will be associated with class_loader. The JNI
-      // spec says we can't load the same library into more than one
-      // class loader.
-      StringAppendF(detail, "Shared library \"%s\" already opened by "
-          "ClassLoader %p; can't open in ClassLoader %p",
-          path.c_str(), library->GetClassLoader(), class_loader.Get());
-      LOG(WARNING) << detail;
-      return false;
-    }
-    VLOG(jni) << "[Shared library \"" << path << "\" already loaded in "
-              << "ClassLoader " << class_loader.Get() << "]";
-    if (!library->CheckOnLoadResult()) {
-      StringAppendF(detail, "JNI_OnLoad failed on a previous attempt "
-          "to load \"%s\"", path.c_str());
-      return false;
-    }
-    return true;
-  }
-
-  // Open the shared library.  Because we're using a full path, the system
-  // doesn't have to search through LD_LIBRARY_PATH.  (It may do so to
-  // resolve this library's dependencies though.)
-
-  // Failures here are expected when java.library.path has several entries
-  // and we have to hunt for the lib.
-
-  // Below we dlopen but there is no paired dlclose, this would be necessary if we supported
-  // class unloading. Libraries will only be unloaded when the reference count (incremented by
-  // dlopen) becomes zero from dlclose.
-
-  // This can execute slowly for a large library on a busy system, so we
-  // want to switch from kRunnable while it executes.  This allows the GC to ignore us.
-  self->TransitionFromRunnableToSuspended(kWaitingForJniOnLoad);
-  const char* path_str = path.empty() ? nullptr : path.c_str();
-  void* handle = dlopen(path_str, RTLD_LAZY);
-  bool needs_native_bridge = false;
-  if (handle == nullptr) {
-    if (android::NativeBridgeIsSupported(path_str)) {
-      handle = android::NativeBridgeLoadLibrary(path_str, RTLD_LAZY);
-      needs_native_bridge = true;
-    }
-  }
-  self->TransitionFromSuspendedToRunnable();
-
-  VLOG(jni) << "[Call to dlopen(\"" << path << "\", RTLD_LAZY) returned " << handle << "]";
-
-  if (handle == nullptr) {
-    *detail = dlerror();
-    LOG(ERROR) << "dlopen(\"" << path << "\", RTLD_LAZY) failed: " << *detail;
-    return false;
-  }
-
-  // Create a new entry.
-  // TODO: move the locking (and more of this logic) into Libraries.
-  bool created_library = false;
-  {
-    MutexLock mu(self, libraries_lock);
-    library = libraries->Get(path);
-    if (library == nullptr) {  // We won race to get libraries_lock
-      library = new SharedLibrary(path, handle, class_loader.Get());
-      libraries->Put(path, library);
-      created_library = true;
-    }
-  }
-  if (!created_library) {
-    LOG(INFO) << "WOW: we lost a race to add shared library: "
-        << "\"" << path << "\" ClassLoader=" << class_loader.Get();
-    return library->CheckOnLoadResult();
-  }
-
-  VLOG(jni) << "[Added shared library \"" << path << "\" for ClassLoader " << class_loader.Get()
-      << "]";
-
-  bool was_successful = false;
-  void* sym = nullptr;
-  if (UNLIKELY(needs_native_bridge)) {
-    library->SetNeedsNativeBridge();
-    sym = library->FindSymbolWithNativeBridge("JNI_OnLoad", nullptr);
-  } else {
-    sym = dlsym(handle, "JNI_OnLoad");
-  }
-
-  if (sym == nullptr) {
-    VLOG(jni) << "[No JNI_OnLoad found in \"" << path << "\"]";
-    was_successful = true;
-  } else {
-    // Call JNI_OnLoad.  We have to override the current class
-    // loader, which will always be "null" since the stuff at the
-    // top of the stack is around Runtime.loadLibrary().  (See
-    // the comments in the JNI FindClass function.)
-    typedef int (*JNI_OnLoadFn)(JavaVM*, void*);
-    JNI_OnLoadFn jni_on_load = reinterpret_cast<JNI_OnLoadFn>(sym);
-    StackHandleScope<1> hs(self);
-    Handle<mirror::ClassLoader> old_class_loader(hs.NewHandle(self->GetClassLoaderOverride()));
-    self->SetClassLoaderOverride(class_loader.Get());
-
-    int version = 0;
-    {
-      ScopedThreadStateChange tsc(self, kNative);
-      VLOG(jni) << "[Calling JNI_OnLoad in \"" << path << "\"]";
-      version = (*jni_on_load)(this, nullptr);
-    }
-
-    self->SetClassLoaderOverride(old_class_loader.Get());
-
-    if (version == JNI_ERR) {
-      StringAppendF(detail, "JNI_ERR returned from JNI_OnLoad in \"%s\"", path.c_str());
-    } else if (IsBadJniVersion(version)) {
-      StringAppendF(detail, "Bad JNI version returned from JNI_OnLoad in \"%s\": %d",
-                    path.c_str(), version);
-      // It's unwise to call dlclose() here, but we can mark it
-      // as bad and ensure that future load attempts will fail.
-      // We don't know how far JNI_OnLoad got, so there could
-      // be some partially-initialized stuff accessible through
-      // newly-registered native method calls.  We could try to
-      // unregister them, but that doesn't seem worthwhile.
-    } else {
-      was_successful = true;
-    }
-    VLOG(jni) << "[Returned " << (was_successful ? "successfully" : "failure")
-              << " from JNI_OnLoad in \"" << path << "\"]";
-  }
-
-  library->SetResult(was_successful);
-  return was_successful;
-}
-
-void* JavaVMExt::FindCodeForNativeMethod(mirror::ArtMethod* m) {
-  CHECK(m->IsNative());
-  mirror::Class* c = m->GetDeclaringClass();
-  // If this is a static method, it could be called before the class has been initialized.
-  if (m->IsStatic()) {
-    c = EnsureInitialized(Thread::Current(), c);
-    if (c == nullptr) {
-      return nullptr;
-    }
-  } else {
-    CHECK(c->IsInitializing()) << c->GetStatus() << " " << PrettyMethod(m);
-  }
-  std::string detail;
-  void* native_method;
-  Thread* self = Thread::Current();
-  {
-    MutexLock mu(self, libraries_lock);
-    native_method = libraries->FindNativeMethod(m, detail);
-  }
-  // Throwing can cause libraries_lock to be reacquired.
-  if (native_method == nullptr) {
-    ThrowLocation throw_location = self->GetCurrentLocationForThrow();
-    self->ThrowNewException(throw_location, "Ljava/lang/UnsatisfiedLinkError;", detail.c_str());
-  }
-  return native_method;
-}
-
-void JavaVMExt::SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg) {
-  MutexLock mu(Thread::Current(), weak_globals_lock_);
-  for (mirror::Object** entry : weak_globals_) {
-    // Since this is called by the GC, we don't need a read barrier.
-    mirror::Object* obj = *entry;
-    mirror::Object* new_obj = callback(obj, arg);
-    if (new_obj == nullptr) {
-      new_obj = kClearedJniWeakGlobal;
-    }
-    *entry = new_obj;
-  }
-}
-
-void JavaVMExt::VisitRoots(RootCallback* callback, void* arg) {
-  Thread* self = Thread::Current();
-  {
-    ReaderMutexLock mu(self, globals_lock);
-    globals.VisitRoots(callback, arg, 0, kRootJNIGlobal);
-  }
-  {
-    MutexLock mu(self, pins_lock);
-    pin_table.VisitRoots(callback, arg, 0, kRootVMInternal);
-  }
-  {
-    MutexLock mu(self, libraries_lock);
-    // Libraries contains shared libraries which hold a pointer to a class loader.
-    libraries->VisitRoots(callback, arg);
-  }
-  // The weak_globals table is visited by the GC itself (because it mutates the table).
+const JNINativeInterface* GetJniNativeInterface() {
+  return &gJniNativeInterface;
 }
 
 void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
index ac502e6..48b10f5 100644 (file)
 #ifndef ART_RUNTIME_JNI_INTERNAL_H_
 #define ART_RUNTIME_JNI_INTERNAL_H_
 
-#include "jni.h"
-
-#include "base/macros.h"
-#include "base/mutex.h"
-#include "indirect_reference_table.h"
-#include "object_callbacks.h"
-#include "reference_table.h"
-
+#include <jni.h>
 #include <iosfwd>
-#include <string>
 
 #ifndef NATIVE_METHOD
 #define NATIVE_METHOD(className, functionName, signature) \
   RegisterNativeMethods(env, jni_class_name, gMethods, arraysize(gMethods))
 
 namespace art {
-namespace mirror {
-  class ArtField;
-  class ArtMethod;
-  class ClassLoader;
-}  // namespace mirror
-union JValue;
-class Libraries;
-class ParsedOptions;
-class Runtime;
-class ScopedObjectAccess;
-template<class T> class Handle;
-class Thread;
 
-void JniAbortF(const char* jni_function_name, const char* fmt, ...)
-    __attribute__((__format__(__printf__, 2, 3)));
+const JNINativeInterface* GetJniNativeInterface();
+
+// Similar to RegisterNatives except its passed a descriptor for a class name and failures are
+// fatal.
 void RegisterNativeMethods(JNIEnv* env, const char* jni_class_name, const JNINativeMethod* methods,
                            jint method_count);
 
 int ThrowNewException(JNIEnv* env, jclass exception_class, const char* msg, jobject cause);
 
-class JavaVMExt : public JavaVM {
- public:
-  JavaVMExt(Runtime* runtime, ParsedOptions* options);
-  ~JavaVMExt();
-
-  /**
-   * Loads the given shared library. 'path' is an absolute pathname.
-   *
-   * Returns 'true' on success. On failure, sets 'detail' to a
-   * human-readable description of the error.
-   */
-  bool LoadNativeLibrary(const std::string& path, Handle<mirror::ClassLoader> class_loader,
-                         std::string* detail)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  /**
-   * Returns a pointer to the code for the native method 'm', found
-   * using dlsym(3) on every native library that's been loaded so far.
-   */
-  void* FindCodeForNativeMethod(mirror::ArtMethod* m)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  void DumpForSigQuit(std::ostream& os);
-
-  void DumpReferenceTables(std::ostream& os)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  void SetCheckJniEnabled(bool enabled);
-
-  void VisitRoots(RootCallback* callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  void DisallowNewWeakGlobals() EXCLUSIVE_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void AllowNewWeakGlobals() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  jweak AddWeakGlobalReference(Thread* self, mirror::Object* obj)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DeleteWeakGlobalRef(Thread* self, jweak obj)
-    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void SweepJniWeakGlobals(IsMarkedCallback* callback, void* arg)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  mirror::Object* DecodeWeakGlobal(Thread* self, IndirectRef ref)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  Runtime* runtime;
-
-  // Used for testing. By default, we'll LOG(FATAL) the reason.
-  void (*check_jni_abort_hook)(void* data, const std::string& reason);
-  void* check_jni_abort_hook_data;
-
-  // Extra checking.
-  bool check_jni;
-  bool force_copy;
-
-  // Extra diagnostics.
-  std::string trace;
-
-  // Used to hold references to pinned primitive arrays.
-  Mutex pins_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
-  ReferenceTable pin_table GUARDED_BY(pins_lock);
-
-  // JNI global references.
-  ReaderWriterMutex globals_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
-  // Not guarded by globals_lock since we sometimes use SynchronizedGet in Thread::DecodeJObject.
-  IndirectReferenceTable globals;
-
-  Mutex libraries_lock DEFAULT_MUTEX_ACQUIRED_AFTER;
-  Libraries* libraries GUARDED_BY(libraries_lock);
-
-  // Used by -Xcheck:jni.
-  const JNIInvokeInterface* unchecked_functions;
-
- private:
-  // TODO: Make the other members of this class also private.
-  // JNI weak global references.
-  Mutex weak_globals_lock_ DEFAULT_MUTEX_ACQUIRED_AFTER;
-  // Since weak_globals_ contain weak roots, be careful not to
-  // directly access the object references in it. Use Get() with the
-  // read barrier enabled.
-  IndirectReferenceTable weak_globals_ GUARDED_BY(weak_globals_lock_);
-  bool allow_new_weak_globals_ GUARDED_BY(weak_globals_lock_);
-  ConditionVariable weak_globals_add_condition_ GUARDED_BY(weak_globals_lock_);
-};
-
-struct JNIEnvExt : public JNIEnv {
-  JNIEnvExt(Thread* self, JavaVMExt* vm);
-  ~JNIEnvExt();
-
-  void DumpReferenceTables(std::ostream& os)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  void SetCheckJniEnabled(bool enabled);
-
-  void PushFrame(int capacity);
-  void PopFrame();
-
-  template<typename T>
-  T AddLocalReference(mirror::Object* obj)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  static Offset SegmentStateOffset();
-
-  static Offset LocalRefCookieOffset() {
-    return Offset(OFFSETOF_MEMBER(JNIEnvExt, local_ref_cookie));
-  }
-
-  static Offset SelfOffset() {
-    return Offset(OFFSETOF_MEMBER(JNIEnvExt, self));
-  }
-
-  jobject NewLocalRef(mirror::Object* obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  void DeleteLocalRef(jobject obj) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-
-  Thread* const self;
-  JavaVMExt* vm;
-
-  // Cookie used when using the local indirect reference table.
-  uint32_t local_ref_cookie;
-
-  // JNI local references.
-  IndirectReferenceTable locals GUARDED_BY(Locks::mutator_lock_);
-
-  // Stack of cookies corresponding to PushLocalFrame/PopLocalFrame calls.
-  // TODO: to avoid leaks (and bugs), we need to clear this vector on entry (or return)
-  // to a native method.
-  std::vector<uint32_t> stacked_local_ref_cookies;
-
-  // Frequently-accessed fields cached from JavaVM.
-  bool check_jni;
-
-  // How many nested "critical" JNI calls are we in?
-  int critical;
-
-  // Entered JNI monitors, for bulk exit on thread detach.
-  ReferenceTable monitors;
-
-  // Used by -Xcheck:jni.
-  const JNINativeInterface* unchecked_functions;
-};
-
-const JNINativeInterface* GetCheckJniNativeInterface();
-const JNIInvokeInterface* GetCheckJniInvokeInterface();
-
-// Used to save and restore the JNIEnvExt state when not going through code created by the JNI
-// compiler
-class ScopedJniEnvLocalRefState {
- public:
-  explicit ScopedJniEnvLocalRefState(JNIEnvExt* env) : env_(env) {
-    saved_local_ref_cookie_ = env->local_ref_cookie;
-    env->local_ref_cookie = env->locals.GetSegmentState();
-  }
-
-  ~ScopedJniEnvLocalRefState() {
-    env_->locals.SetSegmentState(env_->local_ref_cookie);
-    env_->local_ref_cookie = saved_local_ref_cookie_;
-  }
-
- private:
-  JNIEnvExt* env_;
-  uint32_t saved_local_ref_cookie_;
-  DISALLOW_COPY_AND_ASSIGN(ScopedJniEnvLocalRefState);
-};
-
 }  // namespace art
 
 std::ostream& operator<<(std::ostream& os, const jobjectRefType& rhs);
+
 #endif  // ART_RUNTIME_JNI_INTERNAL_H_
index bb46321..b236ede 100644 (file)
@@ -17,6 +17,7 @@
 #include "jni_internal.h"
 
 #include "common_compiler_test.h"
+#include "java_vm_ext.h"
 #include "mirror/art_method-inl.h"
 #include "mirror/string-inl.h"
 #include "scoped_thread_state_change.h"
@@ -53,24 +54,15 @@ class JniInternalTest : public CommonCompilerTest {
   }
 
   void ExpectException(jclass exception_class) {
-    EXPECT_TRUE(env_->ExceptionCheck());
+    ScopedObjectAccess soa(env_);
+    EXPECT_TRUE(env_->ExceptionCheck())
+        << PrettyDescriptor(soa.Decode<mirror::Class*>(exception_class));
     jthrowable exception = env_->ExceptionOccurred();
     EXPECT_NE(nullptr, exception);
     env_->ExceptionClear();
     EXPECT_TRUE(env_->IsInstanceOf(exception, exception_class));
   }
 
-  void ExpectClassFound(const char* name) {
-    EXPECT_NE(env_->FindClass(name), nullptr) << name;
-    EXPECT_FALSE(env_->ExceptionCheck()) << name;
-  }
-
-  void ExpectClassNotFound(const char* name) {
-    EXPECT_EQ(env_->FindClass(name), nullptr) << name;
-    EXPECT_TRUE(env_->ExceptionCheck()) << name;
-    env_->ExceptionClear();
-  }
-
   void CleanUpJniEnv() {
     if (aioobe_ != nullptr) {
       env_->DeleteGlobalRef(aioobe_);
@@ -98,6 +90,510 @@ class JniInternalTest : public CommonCompilerTest {
     return soa.AddLocalReference<jclass>(c);
   }
 
+  void ExpectClassFound(const char* name) {
+    EXPECT_NE(env_->FindClass(name), nullptr) << name;
+    EXPECT_FALSE(env_->ExceptionCheck()) << name;
+  }
+
+  void ExpectClassNotFound(const char* name, bool check_jni, const char* check_jni_msg,
+                           CheckJniAbortCatcher* abort_catcher) {
+    EXPECT_EQ(env_->FindClass(name), nullptr) << name;
+    if (!check_jni || check_jni_msg == nullptr) {
+      EXPECT_TRUE(env_->ExceptionCheck()) << name;
+      env_->ExceptionClear();
+    } else {
+      abort_catcher->Check(check_jni_msg);
+    }
+  }
+
+  void FindClassTest(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher check_jni_abort_catcher;
+
+    // Null argument is always an abort.
+    env_->FindClass(nullptr);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "name == null");
+
+    // Reference types...
+    ExpectClassFound("java/lang/String");
+    // ...for arrays too, where you must include "L;".
+    ExpectClassFound("[Ljava/lang/String;");
+    // Primitive arrays are okay too, if the primitive type is valid.
+    ExpectClassFound("[C");
+
+    // But primitive types aren't allowed...
+    ExpectClassNotFound("C", check_jni, nullptr, &check_jni_abort_catcher);
+    ExpectClassNotFound("V", check_jni, nullptr, &check_jni_abort_catcher);
+    ExpectClassNotFound("K", check_jni, nullptr, &check_jni_abort_catcher);
+
+    if (check_jni) {
+      // Check JNI will reject invalid class names as aborts but without pending exceptions.
+      EXPECT_EQ(env_->FindClass("java.lang.String"), nullptr);
+      EXPECT_FALSE(env_->ExceptionCheck());
+      check_jni_abort_catcher.Check("illegal class name 'java.lang.String'");
+
+      EXPECT_EQ(env_->FindClass("[Ljava.lang.String;"), nullptr);
+      EXPECT_FALSE(env_->ExceptionCheck());
+      check_jni_abort_catcher.Check("illegal class name '[Ljava.lang.String;'");
+    } else {
+      // Without check JNI we're tolerant and replace '.' with '/'.
+      ExpectClassFound("java.lang.String");
+      ExpectClassFound("[Ljava.lang.String;");
+    }
+
+    ExpectClassNotFound("Ljava.lang.String;", check_jni, "illegal class name 'Ljava.lang.String;'",
+                        &check_jni_abort_catcher);
+    ExpectClassNotFound("[java.lang.String", check_jni, "illegal class name '[java.lang.String'",
+                        &check_jni_abort_catcher);
+
+    // You can't include the "L;" in a JNI class descriptor.
+    ExpectClassNotFound("Ljava/lang/String;", check_jni, "illegal class name 'Ljava/lang/String;'",
+                        &check_jni_abort_catcher);
+
+    // But you must include it for an array of any reference type.
+    ExpectClassNotFound("[java/lang/String", check_jni, "illegal class name '[java/lang/String'",
+                        &check_jni_abort_catcher);
+
+    ExpectClassNotFound("[K", check_jni, "illegal class name '[K'", &check_jni_abort_catcher);
+
+    // Void arrays aren't allowed.
+    ExpectClassNotFound("[V", check_jni, "illegal class name '[V'", &check_jni_abort_catcher);
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void GetFieldIdBadArgumentTest(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher check_jni_abort_catcher;
+
+    jclass c = env_->FindClass("java/lang/String");
+    ASSERT_NE(c, nullptr);
+
+    jfieldID fid = env_->GetFieldID(nullptr, "count", "I");
+    EXPECT_EQ(nullptr, fid);
+    check_jni_abort_catcher.Check(check_jni ? "GetFieldID received NULL jclass"
+                                            : "java_class == null");
+    fid = env_->GetFieldID(c, nullptr, "I");
+    EXPECT_EQ(nullptr, fid);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "name == null");
+    fid = env_->GetFieldID(c, "count", nullptr);
+    EXPECT_EQ(nullptr, fid);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "sig == null");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void GetStaticFieldIdBadArgumentTest(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher check_jni_abort_catcher;
+
+    jclass c = env_->FindClass("java/lang/String");
+    ASSERT_NE(c, nullptr);
+
+    jfieldID fid = env_->GetStaticFieldID(nullptr, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
+    EXPECT_EQ(nullptr, fid);
+    check_jni_abort_catcher.Check(check_jni ? "GetStaticFieldID received NULL jclass"
+                                            : "java_class == null");
+    fid = env_->GetStaticFieldID(c, nullptr, "Ljava/util/Comparator;");
+    EXPECT_EQ(nullptr, fid);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "name == null");
+    fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", nullptr);
+    EXPECT_EQ(nullptr, fid);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "sig == null");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void GetMethodIdBadArgumentTest(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher check_jni_abort_catcher;
+
+    jmethodID method = env_->GetMethodID(nullptr, "<init>", "(Ljava/lang/String;)V");
+    EXPECT_EQ(nullptr, method);
+    check_jni_abort_catcher.Check(check_jni ? "GetMethodID received NULL jclass"
+                                            : "java_class == null");
+    jclass jlnsme = env_->FindClass("java/lang/NoSuchMethodError");
+    ASSERT_TRUE(jlnsme != nullptr);
+    method = env_->GetMethodID(jlnsme, nullptr, "(Ljava/lang/String;)V");
+    EXPECT_EQ(nullptr, method);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "name == null");
+    method = env_->GetMethodID(jlnsme, "<init>", nullptr);
+    EXPECT_EQ(nullptr, method);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "sig == null");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void GetStaticMethodIdBadArgumentTest(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher check_jni_abort_catcher;
+
+    jmethodID method = env_->GetStaticMethodID(nullptr, "valueOf", "(I)Ljava/lang/String;");
+    EXPECT_EQ(nullptr, method);
+    check_jni_abort_catcher.Check(check_jni ? "GetStaticMethodID received NULL jclass"
+                                            : "java_class == null");
+    jclass jlstring = env_->FindClass("java/lang/String");
+    method = env_->GetStaticMethodID(jlstring, nullptr, "(I)Ljava/lang/String;");
+    EXPECT_EQ(nullptr, method);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "name == null");
+    method = env_->GetStaticMethodID(jlstring, "valueOf", nullptr);
+    EXPECT_EQ(nullptr, method);
+    check_jni_abort_catcher.Check(check_jni ? "non-nullable const char* was NULL"
+                                            : "sig == null");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void GetFromReflectedField_ToReflectedFieldBadArgumentTest(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher check_jni_abort_catcher;
+
+    jclass c = env_->FindClass("java/lang/String");
+    ASSERT_NE(c, nullptr);
+    jfieldID fid = env_->GetFieldID(c, "count", "I");
+    ASSERT_NE(fid, nullptr);
+
+    // Check class argument for null argument, not checked in non-check JNI.
+    jobject field = env_->ToReflectedField(nullptr, fid, JNI_FALSE);
+    if (check_jni) {
+      EXPECT_EQ(field, nullptr);
+      check_jni_abort_catcher.Check("ToReflectedField received NULL jclass");
+    } else {
+      EXPECT_NE(field, nullptr);
+    }
+
+    field = env_->ToReflectedField(c, nullptr, JNI_FALSE);
+    EXPECT_EQ(field, nullptr);
+    check_jni_abort_catcher.Check(check_jni ? "jfieldID was NULL"
+                                            : "fid == null");
+
+    fid = env_->FromReflectedField(nullptr);
+    ASSERT_EQ(fid, nullptr);
+    check_jni_abort_catcher.Check(check_jni ? "expected non-null java.lang.reflect.Field"
+                                            : "jlr_field == null");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher check_jni_abort_catcher;
+
+    jclass c = env_->FindClass("java/lang/String");
+    ASSERT_NE(c, nullptr);
+    jmethodID mid = env_->GetMethodID(c, "<init>", "()V");
+    ASSERT_NE(mid, nullptr);
+
+    // Check class argument for null argument, not checked in non-check JNI.
+    jobject method = env_->ToReflectedMethod(nullptr, mid, JNI_FALSE);
+    if (check_jni) {
+      EXPECT_EQ(method, nullptr);
+      check_jni_abort_catcher.Check("ToReflectedMethod received NULL jclass");
+    } else {
+      EXPECT_NE(method, nullptr);
+    }
+
+    method = env_->ToReflectedMethod(c, nullptr, JNI_FALSE);
+    EXPECT_EQ(method, nullptr);
+    check_jni_abort_catcher.Check(check_jni ? "jmethodID was NULL"
+                                            : "mid == null");
+    mid = env_->FromReflectedMethod(method);
+    ASSERT_EQ(mid, nullptr);
+    check_jni_abort_catcher.Check(check_jni ? "expected non-null method" : "jlr_method == null");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void RegisterAndUnregisterNativesBadArguments(bool check_jni,
+                                                CheckJniAbortCatcher* check_jni_abort_catcher) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    // Passing a class of null is a failure.
+    {
+      JNINativeMethod methods[] = { };
+      EXPECT_EQ(env_->RegisterNatives(nullptr, methods, 0), JNI_ERR);
+      check_jni_abort_catcher->Check(check_jni ? "RegisterNatives received NULL jclass"
+                                               : "java_class == null");
+    }
+
+    // Passing methods as null is a failure.
+    jclass jlobject = env_->FindClass("java/lang/Object");
+    EXPECT_EQ(env_->RegisterNatives(jlobject, nullptr, 1), JNI_ERR);
+    check_jni_abort_catcher->Check("methods == null");
+
+    // Unregisters null is a failure.
+    EXPECT_EQ(env_->UnregisterNatives(nullptr), JNI_ERR);
+    check_jni_abort_catcher->Check(check_jni ? "UnregisterNatives received NULL jclass"
+                                             : "java_class == null");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+
+  void GetPrimitiveArrayElementsOfWrongType(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher jni_abort_catcher;
+
+    jbooleanArray array = env_->NewBooleanArray(10);
+    jboolean is_copy;
+    EXPECT_EQ(env_->GetByteArrayElements(reinterpret_cast<jbyteArray>(array), &is_copy), nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected byte[]"
+            : "attempt to get byte primitive array elements with an object of type boolean[]");
+    EXPECT_EQ(env_->GetShortArrayElements(reinterpret_cast<jshortArray>(array), &is_copy), nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected short[]"
+            : "attempt to get short primitive array elements with an object of type boolean[]");
+    EXPECT_EQ(env_->GetCharArrayElements(reinterpret_cast<jcharArray>(array), &is_copy), nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected char[]"
+            : "attempt to get char primitive array elements with an object of type boolean[]");
+    EXPECT_EQ(env_->GetIntArrayElements(reinterpret_cast<jintArray>(array), &is_copy), nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected int[]"
+            : "attempt to get int primitive array elements with an object of type boolean[]");
+    EXPECT_EQ(env_->GetLongArrayElements(reinterpret_cast<jlongArray>(array), &is_copy), nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected long[]"
+            : "attempt to get long primitive array elements with an object of type boolean[]");
+    EXPECT_EQ(env_->GetFloatArrayElements(reinterpret_cast<jfloatArray>(array), &is_copy), nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected float[]"
+            : "attempt to get float primitive array elements with an object of type boolean[]");
+    EXPECT_EQ(env_->GetDoubleArrayElements(reinterpret_cast<jdoubleArray>(array), &is_copy), nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected double[]"
+            : "attempt to get double primitive array elements with an object of type boolean[]");
+    jbyteArray array2 = env_->NewByteArray(10);
+    EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), &is_copy),
+              nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type byte[] expected boolean[]"
+            : "attempt to get boolean primitive array elements with an object of type byte[]");
+    jobject object = env_->NewStringUTF("Test String");
+    EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), &is_copy),
+              nullptr);
+    jni_abort_catcher.Check(
+        check_jni ? "jarray argument has non-array type: java.lang.String"
+        : "attempt to get boolean primitive array elements with an object of type java.lang.String");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void ReleasePrimitiveArrayElementsOfWrongType(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher jni_abort_catcher;
+
+    jbooleanArray array = env_->NewBooleanArray(10);
+    ASSERT_TRUE(array != nullptr);
+    jboolean is_copy;
+    jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
+    ASSERT_TRUE(elements != nullptr);
+    env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
+                                   reinterpret_cast<jbyte*>(elements), 0);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected byte[]"
+            : "attempt to release byte primitive array elements with an object of type boolean[]");
+    env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
+                                    reinterpret_cast<jshort*>(elements), 0);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected short[]"
+            : "attempt to release short primitive array elements with an object of type boolean[]");
+    env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
+                                   reinterpret_cast<jchar*>(elements), 0);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected char[]"
+            : "attempt to release char primitive array elements with an object of type boolean[]");
+    env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
+                                  reinterpret_cast<jint*>(elements), 0);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected int[]"
+            : "attempt to release int primitive array elements with an object of type boolean[]");
+    env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
+                                   reinterpret_cast<jlong*>(elements), 0);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected long[]"
+            : "attempt to release long primitive array elements with an object of type boolean[]");
+    env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
+                                    reinterpret_cast<jfloat*>(elements), 0);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected float[]"
+            : "attempt to release float primitive array elements with an object of type boolean[]");
+    env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
+                                     reinterpret_cast<jdouble*>(elements), 0);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected double[]"
+            : "attempt to release double primitive array elements with an object of type boolean[]");
+    jbyteArray array2 = env_->NewByteArray(10);
+    env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type byte[] expected boolean[]"
+            : "attempt to release boolean primitive array elements with an object of type byte[]");
+    jobject object = env_->NewStringUTF("Test String");
+    env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
+    jni_abort_catcher.Check(
+        check_jni ? "jarray argument has non-array type: java.lang.String"
+            : "attempt to release boolean primitive array elements with an object of type "
+              "java.lang.String");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void GetReleasePrimitiveArrayCriticalOfWrongType(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher jni_abort_catcher;
+
+    jobject object = env_->NewStringUTF("Test String");
+    jboolean is_copy;
+    void* elements = env_->GetPrimitiveArrayCritical(reinterpret_cast<jarray>(object), &is_copy);
+    jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+        : "expected primitive array, given java.lang.String");
+    env_->ReleasePrimitiveArrayCritical(reinterpret_cast<jarray>(object), elements, 0);
+    jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+        : "expected primitive array, given java.lang.String");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void GetPrimitiveArrayRegionElementsOfWrongType(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher jni_abort_catcher;
+    constexpr size_t kLength = 10;
+    jbooleanArray array = env_->NewBooleanArray(kLength);
+    ASSERT_TRUE(array != nullptr);
+    jboolean elements[kLength];
+    env_->GetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
+                             reinterpret_cast<jbyte*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected byte[]"
+            : "attempt to get region of byte primitive array elements with an object of type boolean[]");
+    env_->GetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
+                              reinterpret_cast<jshort*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected short[]"
+            : "attempt to get region of short primitive array elements with an object of type boolean[]");
+    env_->GetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
+                             reinterpret_cast<jchar*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected char[]"
+            : "attempt to get region of char primitive array elements with an object of type boolean[]");
+    env_->GetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
+                            reinterpret_cast<jint*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected int[]"
+            : "attempt to get region of int primitive array elements with an object of type boolean[]");
+    env_->GetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
+                             reinterpret_cast<jlong*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected long[]"
+            : "attempt to get region of long primitive array elements with an object of type boolean[]");
+    env_->GetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
+                              reinterpret_cast<jfloat*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected float[]"
+            : "attempt to get region of float primitive array elements with an object of type boolean[]");
+    env_->GetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
+                               reinterpret_cast<jdouble*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected double[]"
+            : "attempt to get region of double primitive array elements with an object of type boolean[]");
+    jbyteArray array2 = env_->NewByteArray(10);
+    env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
+                                reinterpret_cast<jboolean*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type byte[] expected boolean[]"
+            : "attempt to get region of boolean primitive array elements with an object of type byte[]");
+    jobject object = env_->NewStringUTF("Test String");
+    env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
+                                reinterpret_cast<jboolean*>(elements));
+    jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+        : "attempt to get region of boolean primitive array elements with an object of type "
+          "java.lang.String");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void SetPrimitiveArrayRegionElementsOfWrongType(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher jni_abort_catcher;
+    constexpr size_t kLength = 10;
+    jbooleanArray array = env_->NewBooleanArray(kLength);
+    ASSERT_TRUE(array != nullptr);
+    jboolean elements[kLength];
+    env_->SetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
+                             reinterpret_cast<jbyte*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected byte[]"
+            : "attempt to set region of byte primitive array elements with an object of type boolean[]");
+    env_->SetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
+                              reinterpret_cast<jshort*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected short[]"
+            : "attempt to set region of short primitive array elements with an object of type boolean[]");
+    env_->SetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
+                             reinterpret_cast<jchar*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected char[]"
+            : "attempt to set region of char primitive array elements with an object of type boolean[]");
+    env_->SetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
+                            reinterpret_cast<jint*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected int[]"
+            : "attempt to set region of int primitive array elements with an object of type boolean[]");
+    env_->SetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
+                             reinterpret_cast<jlong*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected long[]"
+            : "attempt to set region of long primitive array elements with an object of type boolean[]");
+    env_->SetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
+                              reinterpret_cast<jfloat*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected float[]"
+            : "attempt to set region of float primitive array elements with an object of type boolean[]");
+    env_->SetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
+                               reinterpret_cast<jdouble*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type boolean[] expected double[]"
+            : "attempt to set region of double primitive array elements with an object of type boolean[]");
+    jbyteArray array2 = env_->NewByteArray(10);
+    env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
+                                reinterpret_cast<jboolean*>(elements));
+    jni_abort_catcher.Check(
+        check_jni ? "incompatible array type byte[] expected boolean[]"
+            : "attempt to set region of boolean primitive array elements with an object of type byte[]");
+    jobject object = env_->NewStringUTF("Test String");
+    env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
+                                reinterpret_cast<jboolean*>(elements));
+    jni_abort_catcher.Check(check_jni ? "jarray argument has non-array type: java.lang.String"
+        : "attempt to set region of boolean primitive array elements with an object of type "
+          "java.lang.String");
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
+  void NewObjectArrayBadArguments(bool check_jni) {
+    bool old_check_jni = vm_->SetCheckJniEnabled(check_jni);
+    CheckJniAbortCatcher jni_abort_catcher;
+
+    jclass element_class = env_->FindClass("java/lang/String");
+    ASSERT_NE(element_class, nullptr);
+
+    env_->NewObjectArray(-1, element_class, nullptr);
+    jni_abort_catcher.Check(check_jni ? "negative jsize: -1" : "negative array length: -1");
+
+    env_->NewObjectArray(std::numeric_limits<jint>::min(), element_class, nullptr);
+    jni_abort_catcher.Check(check_jni ? "negative jsize: -2147483648"
+        : "negative array length: -2147483648");
+
+    EXPECT_EQ(check_jni, vm_->SetCheckJniEnabled(old_check_jni));
+  }
+
   JavaVMExt* vm_;
   JNIEnv* env_;
   jclass aioobe_;
@@ -125,48 +621,8 @@ TEST_F(JniInternalTest, GetVersion) {
 }
 
 TEST_F(JniInternalTest, FindClass) {
-  // Reference types...
-  ExpectClassFound("java/lang/String");
-  // ...for arrays too, where you must include "L;".
-  ExpectClassFound("[Ljava/lang/String;");
-  // Primitive arrays are okay too, if the primitive type is valid.
-  ExpectClassFound("[C");
-
-  {
-    CheckJniAbortCatcher check_jni_abort_catcher;
-    env_->FindClass(nullptr);
-    check_jni_abort_catcher.Check("name == null");
-
-    // We support . as well as / for compatibility, if -Xcheck:jni is off.
-    ExpectClassFound("java.lang.String");
-    check_jni_abort_catcher.Check("illegal class name 'java.lang.String'");
-    ExpectClassNotFound("Ljava.lang.String;");
-    check_jni_abort_catcher.Check("illegal class name 'Ljava.lang.String;'");
-    ExpectClassFound("[Ljava.lang.String;");
-    check_jni_abort_catcher.Check("illegal class name '[Ljava.lang.String;'");
-    ExpectClassNotFound("[java.lang.String");
-    check_jni_abort_catcher.Check("illegal class name '[java.lang.String'");
-
-    // You can't include the "L;" in a JNI class descriptor.
-    ExpectClassNotFound("Ljava/lang/String;");
-    check_jni_abort_catcher.Check("illegal class name 'Ljava/lang/String;'");
-
-    // But you must include it for an array of any reference type.
-    ExpectClassNotFound("[java/lang/String");
-    check_jni_abort_catcher.Check("illegal class name '[java/lang/String'");
-
-    ExpectClassNotFound("[K");
-    check_jni_abort_catcher.Check("illegal class name '[K'");
-
-    // Void arrays aren't allowed.
-    ExpectClassNotFound("[V");
-    check_jni_abort_catcher.Check("illegal class name '[V'");
-  }
-
-  // But primitive types aren't allowed...
-  ExpectClassNotFound("C");
-  ExpectClassNotFound("V");
-  ExpectClassNotFound("K");
+  FindClassTest(false);
+  FindClassTest(true);
 }
 
 TEST_F(JniInternalTest, GetFieldID) {
@@ -208,16 +664,8 @@ TEST_F(JniInternalTest, GetFieldID) {
   ExpectException(jlnsfe);
 
   // Bad arguments.
-  CheckJniAbortCatcher check_jni_abort_catcher;
-  fid = env_->GetFieldID(nullptr, "count", "I");
-  EXPECT_EQ(nullptr, fid);
-  check_jni_abort_catcher.Check("java_class == null");
-  fid = env_->GetFieldID(c, nullptr, "I");
-  EXPECT_EQ(nullptr, fid);
-  check_jni_abort_catcher.Check("name == null");
-  fid = env_->GetFieldID(c, "count", nullptr);
-  EXPECT_EQ(nullptr, fid);
-  check_jni_abort_catcher.Check("sig == null");
+  GetFieldIdBadArgumentTest(false);
+  GetFieldIdBadArgumentTest(true);
 }
 
 TEST_F(JniInternalTest, GetStaticFieldID) {
@@ -253,16 +701,8 @@ TEST_F(JniInternalTest, GetStaticFieldID) {
   ExpectException(jlnsfe);
 
   // Bad arguments.
-  CheckJniAbortCatcher check_jni_abort_catcher;
-  fid = env_->GetStaticFieldID(nullptr, "CASE_INSENSITIVE_ORDER", "Ljava/util/Comparator;");
-  EXPECT_EQ(nullptr, fid);
-  check_jni_abort_catcher.Check("java_class == null");
-  fid = env_->GetStaticFieldID(c, nullptr, "Ljava/util/Comparator;");
-  EXPECT_EQ(nullptr, fid);
-  check_jni_abort_catcher.Check("name == null");
-  fid = env_->GetStaticFieldID(c, "CASE_INSENSITIVE_ORDER", nullptr);
-  EXPECT_EQ(nullptr, fid);
-  check_jni_abort_catcher.Check("sig == null");
+  GetStaticFieldIdBadArgumentTest(false);
+  GetStaticFieldIdBadArgumentTest(true);
 }
 
 TEST_F(JniInternalTest, GetMethodID) {
@@ -302,16 +742,8 @@ TEST_F(JniInternalTest, GetMethodID) {
   EXPECT_FALSE(env_->ExceptionCheck());
 
   // Bad arguments.
-  CheckJniAbortCatcher check_jni_abort_catcher;
-  method = env_->GetMethodID(nullptr, "<init>", "(Ljava/lang/String;)V");
-  EXPECT_EQ(nullptr, method);
-  check_jni_abort_catcher.Check("java_class == null");
-  method = env_->GetMethodID(jlnsme, nullptr, "(Ljava/lang/String;)V");
-  EXPECT_EQ(nullptr, method);
-  check_jni_abort_catcher.Check("name == null");
-  method = env_->GetMethodID(jlnsme, "<init>", nullptr);
-  EXPECT_EQ(nullptr, method);
-  check_jni_abort_catcher.Check("sig == null");
+  GetMethodIdBadArgumentTest(false);
+  GetMethodIdBadArgumentTest(true);
 }
 
 TEST_F(JniInternalTest, CallVoidMethodNullReceiver) {
@@ -325,7 +757,6 @@ TEST_F(JniInternalTest, CallVoidMethodNullReceiver) {
 
   // Null object to CallVoidMethod.
   CheckJniAbortCatcher check_jni_abort_catcher;
-  method = env_->GetMethodID(nullptr, "<init>", "(Ljava/lang/String;)V");
   env_->CallVoidMethod(nullptr, method);
   check_jni_abort_catcher.Check("null");
 }
@@ -356,16 +787,8 @@ TEST_F(JniInternalTest, GetStaticMethodID) {
   EXPECT_FALSE(env_->ExceptionCheck());
 
   // Bad arguments.
-  CheckJniAbortCatcher check_jni_abort_catcher;
-  method = env_->GetStaticMethodID(nullptr, "valueOf", "(I)Ljava/lang/String;");
-  EXPECT_EQ(nullptr, method);
-  check_jni_abort_catcher.Check("java_class == null");
-  method = env_->GetStaticMethodID(jlstring, nullptr, "(I)Ljava/lang/String;");
-  EXPECT_EQ(nullptr, method);
-  check_jni_abort_catcher.Check("name == null");
-  method = env_->GetStaticMethodID(jlstring, "valueOf", nullptr);
-  EXPECT_EQ(nullptr, method);
-  check_jni_abort_catcher.Check("sig == null");
+  GetStaticMethodIdBadArgumentTest(false);
+  GetStaticMethodIdBadArgumentTest(true);
 }
 
 TEST_F(JniInternalTest, FromReflectedField_ToReflectedField) {
@@ -386,13 +809,8 @@ TEST_F(JniInternalTest, FromReflectedField_ToReflectedField) {
   ASSERT_EQ(4, env_->GetIntField(s, fid2));
 
   // Bad arguments.
-  CheckJniAbortCatcher check_jni_abort_catcher;
-  field = env_->ToReflectedField(c, nullptr, JNI_FALSE);
-  EXPECT_EQ(field, nullptr);
-  check_jni_abort_catcher.Check("fid == null");
-  fid2 = env_->FromReflectedField(nullptr);
-  ASSERT_EQ(fid2, nullptr);
-  check_jni_abort_catcher.Check("jlr_field == null");
+  GetFromReflectedField_ToReflectedFieldBadArgumentTest(false);
+  GetFromReflectedField_ToReflectedFieldBadArgumentTest(true);
 }
 
 TEST_F(JniInternalTest, FromReflectedMethod_ToReflectedMethod) {
@@ -433,13 +851,8 @@ TEST_F(JniInternalTest, FromReflectedMethod_ToReflectedMethod) {
   ASSERT_EQ(4, env_->CallIntMethod(s, mid2));
 
   // Bad arguments.
-  CheckJniAbortCatcher check_jni_abort_catcher;
-  method = env_->ToReflectedMethod(c, nullptr, JNI_FALSE);
-  EXPECT_EQ(method, nullptr);
-  check_jni_abort_catcher.Check("mid == null");
-  mid2 = env_->FromReflectedMethod(method);
-  ASSERT_EQ(mid2, nullptr);
-  check_jni_abort_catcher.Check("jlr_method == null");
+  GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(false);
+  GetFromReflectedMethod_ToReflectedMethodBadArgumentTest(true);
 }
 
 static void BogusMethod() {
@@ -514,23 +927,11 @@ TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
   }
   EXPECT_FALSE(env_->ExceptionCheck());
 
-  // Passing a class of null is a failure.
-  {
-    JNINativeMethod methods[] = { };
-    EXPECT_EQ(env_->RegisterNatives(nullptr, methods, 0), JNI_ERR);
-    check_jni_abort_catcher.Check("java_class == null");
-  }
-
-  // Passing methods as null is a failure.
-  EXPECT_EQ(env_->RegisterNatives(jlobject, nullptr, 1), JNI_ERR);
-  check_jni_abort_catcher.Check("methods == null");
-
-  // Unregisters null is a failure.
-  EXPECT_EQ(env_->UnregisterNatives(nullptr), JNI_ERR);
-  check_jni_abort_catcher.Check("java_class == null");
-
   // Unregistering a class with no natives is a warning.
   EXPECT_EQ(env_->UnregisterNatives(jlnsme), JNI_OK);
+
+  RegisterAndUnregisterNativesBadArguments(false, &check_jni_abort_catcher);
+  RegisterAndUnregisterNativesBadArguments(true, &check_jni_abort_catcher);
 }
 
 #define EXPECT_PRIMITIVE_ARRAY(new_fn, \
@@ -544,6 +945,7 @@ TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
   \
   { \
     CheckJniAbortCatcher jni_abort_catcher; \
+    down_cast<JNIEnvExt*>(env_)->SetCheckJniEnabled(false); \
     /* Allocate an negative sized array and check it has the right failure type. */ \
     EXPECT_EQ(env_->new_fn(-1), nullptr); \
     jni_abort_catcher.Check("negative array length: -1"); \
@@ -566,6 +968,7 @@ TEST_F(JniInternalTest, RegisterAndUnregisterNatives) {
     jni_abort_catcher.Check("buf == null"); \
     env_->set_region_fn(a, 0, size, nullptr); \
     jni_abort_catcher.Check("buf == null"); \
+    down_cast<JNIEnvExt*>(env_)->SetCheckJniEnabled(true); \
   } \
   /* Allocate an array and check it has the right type and length. */ \
   scalar_type ## Array a = env_->new_fn(size); \
@@ -670,189 +1073,28 @@ TEST_F(JniInternalTest, ShortArrays) {
 }
 
 TEST_F(JniInternalTest, GetPrimitiveArrayElementsOfWrongType) {
-  CheckJniAbortCatcher jni_abort_catcher;
-  jbooleanArray array = env_->NewBooleanArray(10);
-  jboolean is_copy;
-  EXPECT_EQ(env_->GetByteArrayElements(reinterpret_cast<jbyteArray>(array), &is_copy), nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get byte primitive array elements with an object of type boolean[]");
-  EXPECT_EQ(env_->GetShortArrayElements(reinterpret_cast<jshortArray>(array), &is_copy), nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get short primitive array elements with an object of type boolean[]");
-  EXPECT_EQ(env_->GetCharArrayElements(reinterpret_cast<jcharArray>(array), &is_copy), nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get char primitive array elements with an object of type boolean[]");
-  EXPECT_EQ(env_->GetIntArrayElements(reinterpret_cast<jintArray>(array), &is_copy), nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get int primitive array elements with an object of type boolean[]");
-  EXPECT_EQ(env_->GetLongArrayElements(reinterpret_cast<jlongArray>(array), &is_copy), nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get long primitive array elements with an object of type boolean[]");
-  EXPECT_EQ(env_->GetFloatArrayElements(reinterpret_cast<jfloatArray>(array), &is_copy), nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get float primitive array elements with an object of type boolean[]");
-  EXPECT_EQ(env_->GetDoubleArrayElements(reinterpret_cast<jdoubleArray>(array), &is_copy), nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get double primitive array elements with an object of type boolean[]");
-  jbyteArray array2 = env_->NewByteArray(10);
-  EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), &is_copy),
-            nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get boolean primitive array elements with an object of type byte[]");
-  jobject object = env_->NewStringUTF("Test String");
-  EXPECT_EQ(env_->GetBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), &is_copy),
-            nullptr);
-  jni_abort_catcher.Check(
-      "attempt to get boolean primitive array elements with an object of type java.lang.String");
+  GetPrimitiveArrayElementsOfWrongType(false);
+  GetPrimitiveArrayElementsOfWrongType(true);
 }
 
 TEST_F(JniInternalTest, ReleasePrimitiveArrayElementsOfWrongType) {
-  CheckJniAbortCatcher jni_abort_catcher;
-  jbooleanArray array = env_->NewBooleanArray(10);
-  ASSERT_TRUE(array != nullptr);
-  jboolean is_copy;
-  jboolean* elements = env_->GetBooleanArrayElements(array, &is_copy);
-  ASSERT_TRUE(elements != nullptr);
-  env_->ReleaseByteArrayElements(reinterpret_cast<jbyteArray>(array),
-                                 reinterpret_cast<jbyte*>(elements), 0);
-  jni_abort_catcher.Check(
-      "attempt to release byte primitive array elements with an object of type boolean[]");
-  env_->ReleaseShortArrayElements(reinterpret_cast<jshortArray>(array),
-                                  reinterpret_cast<jshort*>(elements), 0);
-  jni_abort_catcher.Check(
-      "attempt to release short primitive array elements with an object of type boolean[]");
-  env_->ReleaseCharArrayElements(reinterpret_cast<jcharArray>(array),
-                                 reinterpret_cast<jchar*>(elements), 0);
-  jni_abort_catcher.Check(
-      "attempt to release char primitive array elements with an object of type boolean[]");
-  env_->ReleaseIntArrayElements(reinterpret_cast<jintArray>(array),
-                                reinterpret_cast<jint*>(elements), 0);
-  jni_abort_catcher.Check(
-      "attempt to release int primitive array elements with an object of type boolean[]");
-  env_->ReleaseLongArrayElements(reinterpret_cast<jlongArray>(array),
-                                 reinterpret_cast<jlong*>(elements), 0);
-  jni_abort_catcher.Check(
-      "attempt to release long primitive array elements with an object of type boolean[]");
-  env_->ReleaseFloatArrayElements(reinterpret_cast<jfloatArray>(array),
-                                  reinterpret_cast<jfloat*>(elements), 0);
-  jni_abort_catcher.Check(
-      "attempt to release float primitive array elements with an object of type boolean[]");
-  env_->ReleaseDoubleArrayElements(reinterpret_cast<jdoubleArray>(array),
-                                  reinterpret_cast<jdouble*>(elements), 0);
-  jni_abort_catcher.Check(
-      "attempt to release double primitive array elements with an object of type boolean[]");
-  jbyteArray array2 = env_->NewByteArray(10);
-  env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(array2), elements, 0);
-  jni_abort_catcher.Check(
-      "attempt to release boolean primitive array elements with an object of type byte[]");
-  jobject object = env_->NewStringUTF("Test String");
-  env_->ReleaseBooleanArrayElements(reinterpret_cast<jbooleanArray>(object), elements, 0);
-  jni_abort_catcher.Check(
-      "attempt to release boolean primitive array elements with an object of type "
-      "java.lang.String");
+  ReleasePrimitiveArrayElementsOfWrongType(false);
+  ReleasePrimitiveArrayElementsOfWrongType(true);
 }
+
 TEST_F(JniInternalTest, GetReleasePrimitiveArrayCriticalOfWrongType) {
-  CheckJniAbortCatcher jni_abort_catcher;
-  jobject object = env_->NewStringUTF("Test String");
-  jboolean is_copy;
-  void* elements = env_->GetPrimitiveArrayCritical(reinterpret_cast<jarray>(object), &is_copy);
-  jni_abort_catcher.Check("expected primitive array, given java.lang.String");
-  env_->ReleasePrimitiveArrayCritical(reinterpret_cast<jarray>(object), elements, 0);
-  jni_abort_catcher.Check("expected primitive array, given java.lang.String");
+  GetReleasePrimitiveArrayCriticalOfWrongType(false);
+  GetReleasePrimitiveArrayCriticalOfWrongType(true);
 }
 
 TEST_F(JniInternalTest, GetPrimitiveArrayRegionElementsOfWrongType) {
-  CheckJniAbortCatcher jni_abort_catcher;
-  constexpr size_t kLength = 10;
-  jbooleanArray array = env_->NewBooleanArray(kLength);
-  ASSERT_TRUE(array != nullptr);
-  jboolean elements[kLength];
-  env_->GetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
-                           reinterpret_cast<jbyte*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of byte primitive array elements with an object of type boolean[]");
-  env_->GetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
-                            reinterpret_cast<jshort*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of short primitive array elements with an object of type boolean[]");
-  env_->GetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
-                           reinterpret_cast<jchar*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of char primitive array elements with an object of type boolean[]");
-  env_->GetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
-                          reinterpret_cast<jint*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of int primitive array elements with an object of type boolean[]");
-  env_->GetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
-                           reinterpret_cast<jlong*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of long primitive array elements with an object of type boolean[]");
-  env_->GetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
-                            reinterpret_cast<jfloat*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of float primitive array elements with an object of type boolean[]");
-  env_->GetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
-                           reinterpret_cast<jdouble*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of double primitive array elements with an object of type boolean[]");
-  jbyteArray array2 = env_->NewByteArray(10);
-  env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
-                              reinterpret_cast<jboolean*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of boolean primitive array elements with an object of type byte[]");
-  jobject object = env_->NewStringUTF("Test String");
-  env_->GetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
-                              reinterpret_cast<jboolean*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to get region of boolean primitive array elements with an object of type "
-      "java.lang.String");
+  GetPrimitiveArrayRegionElementsOfWrongType(false);
+  GetPrimitiveArrayRegionElementsOfWrongType(true);
 }
 
 TEST_F(JniInternalTest, SetPrimitiveArrayRegionElementsOfWrongType) {
-  CheckJniAbortCatcher jni_abort_catcher;
-  constexpr size_t kLength = 10;
-  jbooleanArray array = env_->NewBooleanArray(kLength);
-  ASSERT_TRUE(array != nullptr);
-  jboolean elements[kLength];
-  env_->SetByteArrayRegion(reinterpret_cast<jbyteArray>(array), 0, kLength,
-                           reinterpret_cast<jbyte*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of byte primitive array elements with an object of type boolean[]");
-  env_->SetShortArrayRegion(reinterpret_cast<jshortArray>(array), 0, kLength,
-                            reinterpret_cast<jshort*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of short primitive array elements with an object of type boolean[]");
-  env_->SetCharArrayRegion(reinterpret_cast<jcharArray>(array), 0, kLength,
-                           reinterpret_cast<jchar*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of char primitive array elements with an object of type boolean[]");
-  env_->SetIntArrayRegion(reinterpret_cast<jintArray>(array), 0, kLength,
-                          reinterpret_cast<jint*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of int primitive array elements with an object of type boolean[]");
-  env_->SetLongArrayRegion(reinterpret_cast<jlongArray>(array), 0, kLength,
-                           reinterpret_cast<jlong*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of long primitive array elements with an object of type boolean[]");
-  env_->SetFloatArrayRegion(reinterpret_cast<jfloatArray>(array), 0, kLength,
-                            reinterpret_cast<jfloat*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of float primitive array elements with an object of type boolean[]");
-  env_->SetDoubleArrayRegion(reinterpret_cast<jdoubleArray>(array), 0, kLength,
-                           reinterpret_cast<jdouble*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of double primitive array elements with an object of type boolean[]");
-  jbyteArray array2 = env_->NewByteArray(10);
-  env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(array2), 0, kLength,
-                              reinterpret_cast<jboolean*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of boolean primitive array elements with an object of type byte[]");
-  jobject object = env_->NewStringUTF("Test String");
-  env_->SetBooleanArrayRegion(reinterpret_cast<jbooleanArray>(object), 0, kLength,
-                              reinterpret_cast<jboolean*>(elements));
-  jni_abort_catcher.Check(
-      "attempt to set region of boolean primitive array elements with an object of type "
-      "java.lang.String");
+  SetPrimitiveArrayRegionElementsOfWrongType(false);
+  SetPrimitiveArrayRegionElementsOfWrongType(true);
 }
 
 TEST_F(JniInternalTest, NewObjectArray) {
@@ -873,12 +1115,8 @@ TEST_F(JniInternalTest, NewObjectArray) {
   EXPECT_TRUE(env_->IsSameObject(env_->GetObjectArrayElement(a, 0), nullptr));
 
   // Negative array length checks.
-  CheckJniAbortCatcher jni_abort_catcher;
-  env_->NewObjectArray(-1, element_class, nullptr);
-  jni_abort_catcher.Check("negative array length: -1");
-
-  env_->NewObjectArray(std::numeric_limits<jint>::min(), element_class, nullptr);
-  jni_abort_catcher.Check("negative array length: -2147483648");
+  NewObjectArrayBadArguments(false);
+  NewObjectArrayBadArguments(true);
 }
 
 TEST_F(JniInternalTest, NewObjectArrayWithPrimitiveClasses) {
@@ -888,6 +1126,7 @@ TEST_F(JniInternalTest, NewObjectArrayWithPrimitiveClasses) {
   };
   ASSERT_EQ(strlen(primitive_descriptors), arraysize(primitive_names));
 
+  bool old_check_jni = vm_->SetCheckJniEnabled(false);
   CheckJniAbortCatcher jni_abort_catcher;
   for (size_t i = 0; i < strlen(primitive_descriptors); ++i) {
     env_->NewObjectArray(0, nullptr, nullptr);
@@ -897,6 +1136,16 @@ TEST_F(JniInternalTest, NewObjectArrayWithPrimitiveClasses) {
     std::string error_msg(StringPrintf("not an object type: %s", primitive_names[i]));
     jni_abort_catcher.Check(error_msg.c_str());
   }
+  EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+  for (size_t i = 0; i < strlen(primitive_descriptors); ++i) {
+    env_->NewObjectArray(0, nullptr, nullptr);
+    jni_abort_catcher.Check("NewObjectArray received NULL jclass");
+    jclass primitive_class = GetPrimitiveClass(primitive_descriptors[i]);
+    env_->NewObjectArray(1, primitive_class, nullptr);
+    std::string error_msg(StringPrintf("not an object type: %s", primitive_names[i]));
+    jni_abort_catcher.Check(error_msg.c_str());
+  }
+  EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
 }
 
 TEST_F(JniInternalTest, NewObjectArrayWithInitialValue) {
@@ -956,8 +1205,13 @@ TEST_F(JniInternalTest, GetSuperclass) {
 
   // Null as class should fail.
   CheckJniAbortCatcher jni_abort_catcher;
+  bool old_check_jni = vm_->SetCheckJniEnabled(false);
   EXPECT_EQ(env_->GetSuperclass(nullptr), nullptr);
   jni_abort_catcher.Check("java_class == null");
+  EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+  EXPECT_EQ(env_->GetSuperclass(nullptr), nullptr);
+  jni_abort_catcher.Check("GetSuperclass received NULL jclass");
+  EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
 }
 
 TEST_F(JniInternalTest, IsAssignableFrom) {
@@ -991,10 +1245,17 @@ TEST_F(JniInternalTest, IsAssignableFrom) {
 
   // Null as either class should fail.
   CheckJniAbortCatcher jni_abort_catcher;
+  bool old_check_jni = vm_->SetCheckJniEnabled(false);
   EXPECT_EQ(env_->IsAssignableFrom(nullptr, string_class), JNI_FALSE);
   jni_abort_catcher.Check("java_class1 == null");
   EXPECT_EQ(env_->IsAssignableFrom(object_class, nullptr), JNI_FALSE);
   jni_abort_catcher.Check("java_class2 == null");
+  EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+  EXPECT_EQ(env_->IsAssignableFrom(nullptr, string_class), JNI_FALSE);
+  jni_abort_catcher.Check("IsAssignableFrom received NULL jclass");
+  EXPECT_EQ(env_->IsAssignableFrom(object_class, nullptr), JNI_FALSE);
+  jni_abort_catcher.Check("IsAssignableFrom received NULL jclass");
+  EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
 }
 
 TEST_F(JniInternalTest, GetObjectRefType) {
@@ -1079,10 +1340,17 @@ TEST_F(JniInternalTest, NewStringNullCharsNonzeroLength) {
 
 TEST_F(JniInternalTest, NewStringNegativeLength) {
   CheckJniAbortCatcher jni_abort_catcher;
+  bool old_check_jni = vm_->SetCheckJniEnabled(false);
   env_->NewString(nullptr, -1);
   jni_abort_catcher.Check("char_count < 0: -1");
   env_->NewString(nullptr, std::numeric_limits<jint>::min());
   jni_abort_catcher.Check("char_count < 0: -2147483648");
+  EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+  env_->NewString(nullptr, -1);
+  jni_abort_catcher.Check("negative jsize: -1");
+  env_->NewString(nullptr, std::numeric_limits<jint>::min());
+  jni_abort_catcher.Check("negative jsize: -2147483648");
+  EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
 }
 
 TEST_F(JniInternalTest, GetStringLength_GetStringUTFLength) {
@@ -1140,10 +1408,17 @@ TEST_F(JniInternalTest, GetStringRegion_GetStringUTFRegion) {
 
 TEST_F(JniInternalTest, GetStringUTFChars_ReleaseStringUTFChars) {
   // Passing in a nullptr jstring is ignored normally, but caught by -Xcheck:jni.
+  bool old_check_jni = vm_->SetCheckJniEnabled(false);
   {
     CheckJniAbortCatcher check_jni_abort_catcher;
     EXPECT_EQ(env_->GetStringUTFChars(nullptr, nullptr), nullptr);
-    check_jni_abort_catcher.Check("GetStringUTFChars received null jstring");
+  }
+  {
+    CheckJniAbortCatcher check_jni_abort_catcher;
+    EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+    EXPECT_EQ(env_->GetStringUTFChars(nullptr, nullptr), nullptr);
+    check_jni_abort_catcher.Check("GetStringUTFChars received NULL jstring");
+    EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
   }
 
   jstring s = env_->NewStringUTF("hello");
@@ -1238,10 +1513,17 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
 
   // Null as array should fail.
   CheckJniAbortCatcher jni_abort_catcher;
+  bool old_check_jni = vm_->SetCheckJniEnabled(false);
   EXPECT_EQ(nullptr, env_->GetObjectArrayElement(nullptr, 0));
   jni_abort_catcher.Check("java_array == null");
   env_->SetObjectArrayElement(nullptr, 0, nullptr);
   jni_abort_catcher.Check("java_array == null");
+  EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+  EXPECT_EQ(nullptr, env_->GetObjectArrayElement(nullptr, 0));
+  jni_abort_catcher.Check("jarray was NULL");
+  env_->SetObjectArrayElement(nullptr, 0, nullptr);
+  jni_abort_catcher.Check("jarray was NULL");
+  EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
 }
 
 #define EXPECT_STATIC_PRIMITIVE_FIELD(type, field_name, sig, value1, value2) \
@@ -1253,15 +1535,28 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
     env_->SetStatic ## type ## Field(c, fid, value2); \
     EXPECT_EQ(value2, env_->GetStatic ## type ## Field(c, fid)); \
     \
+    bool old_check_jni = vm_->SetCheckJniEnabled(false); \
+    { \
+      CheckJniAbortCatcher jni_abort_catcher; \
+      env_->GetStatic ## type ## Field(nullptr, fid); \
+      env_->SetStatic ## type ## Field(nullptr, fid, value1); \
+    } \
     CheckJniAbortCatcher jni_abort_catcher; \
-    env_->GetStatic ## type ## Field(nullptr, fid); \
-    jni_abort_catcher.Check("received null jclass"); \
-    env_->SetStatic ## type ## Field(nullptr, fid, value1); \
-    jni_abort_catcher.Check("received null jclass"); \
     env_->GetStatic ## type ## Field(c, nullptr); \
     jni_abort_catcher.Check("fid == null"); \
     env_->SetStatic ## type ## Field(c, nullptr, value1); \
     jni_abort_catcher.Check("fid == null"); \
+    \
+    EXPECT_FALSE(vm_->SetCheckJniEnabled(true)); \
+    env_->GetStatic ## type ## Field(nullptr, fid); \
+    jni_abort_catcher.Check("received NULL jclass"); \
+    env_->SetStatic ## type ## Field(nullptr, fid, value1); \
+    jni_abort_catcher.Check("received NULL jclass"); \
+    env_->GetStatic ## type ## Field(c, nullptr); \
+    jni_abort_catcher.Check("jfieldID was NULL"); \
+    env_->SetStatic ## type ## Field(c, nullptr, value1); \
+    jni_abort_catcher.Check("jfieldID was NULL"); \
+    EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni)); \
   } while (false)
 
 #define EXPECT_PRIMITIVE_FIELD(instance, type, field_name, sig, value1, value2) \
@@ -1273,6 +1568,7 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
     env_->Set ## type ## Field(instance, fid, value2); \
     EXPECT_EQ(value2, env_->Get ## type ## Field(instance, fid)); \
     \
+    bool old_check_jni = vm_->SetCheckJniEnabled(false); \
     CheckJniAbortCatcher jni_abort_catcher; \
     env_->Get ## type ## Field(nullptr, fid); \
     jni_abort_catcher.Check("obj == null"); \
@@ -1282,6 +1578,16 @@ TEST_F(JniInternalTest, GetObjectArrayElement_SetObjectArrayElement) {
     jni_abort_catcher.Check("fid == null"); \
     env_->Set ## type ## Field(instance, nullptr, value1); \
     jni_abort_catcher.Check("fid == null"); \
+    EXPECT_FALSE(vm_->SetCheckJniEnabled(true)); \
+    env_->Get ## type ## Field(nullptr, fid); \
+    jni_abort_catcher.Check("field operation on NULL object:"); \
+    env_->Set ## type ## Field(nullptr, fid, value1); \
+    jni_abort_catcher.Check("field operation on NULL object:"); \
+    env_->Get ## type ## Field(instance, nullptr); \
+    jni_abort_catcher.Check("jfieldID was NULL"); \
+    env_->Set ## type ## Field(instance, nullptr, value1); \
+    jni_abort_catcher.Check("jfieldID was NULL"); \
+    EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni)); \
   } while (false)
 
 
@@ -1373,12 +1679,17 @@ TEST_F(JniInternalTest, DeleteLocalRef) {
 
   // Currently, deleting an already-deleted reference is just a CheckJNI warning.
   {
+    bool old_check_jni = vm_->SetCheckJniEnabled(false);
+    {
+      CheckJniAbortCatcher check_jni_abort_catcher;
+      env_->DeleteLocalRef(s);
+    }
     CheckJniAbortCatcher check_jni_abort_catcher;
+    EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
     env_->DeleteLocalRef(s);
-
-    std::string expected(StringPrintf("native code passing in reference to "
-                                      "invalid local reference: %p", s));
+    std::string expected(StringPrintf("jobject is an invalid local reference: %p", s));
     check_jni_abort_catcher.Check(expected.c_str());
+    EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
   }
 
   s = env_->NewStringUTF("");
@@ -1469,12 +1780,17 @@ TEST_F(JniInternalTest, DeleteGlobalRef) {
 
   // Currently, deleting an already-deleted reference is just a CheckJNI warning.
   {
+    bool old_check_jni = vm_->SetCheckJniEnabled(false);
+    {
+      CheckJniAbortCatcher check_jni_abort_catcher;
+      env_->DeleteGlobalRef(o);
+    }
     CheckJniAbortCatcher check_jni_abort_catcher;
+    EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
     env_->DeleteGlobalRef(o);
-
-    std::string expected(StringPrintf("native code passing in reference to "
-                                      "invalid global reference: %p", o));
+    std::string expected(StringPrintf("jobject is an invalid global reference: %p", o));
     check_jni_abort_catcher.Check(expected.c_str());
+    EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
   }
 
   jobject o1 = env_->NewGlobalRef(s);
@@ -1514,12 +1830,17 @@ TEST_F(JniInternalTest, DeleteWeakGlobalRef) {
 
   // Currently, deleting an already-deleted reference is just a CheckJNI warning.
   {
+    bool old_check_jni = vm_->SetCheckJniEnabled(false);
+    {
+      CheckJniAbortCatcher check_jni_abort_catcher;
+      env_->DeleteWeakGlobalRef(o);
+    }
     CheckJniAbortCatcher check_jni_abort_catcher;
+    EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
     env_->DeleteWeakGlobalRef(o);
-
-    std::string expected(StringPrintf("native code passing in reference to "
-                                      "invalid weak global reference: %p", o));
+    std::string expected(StringPrintf("jobject is an invalid weak global reference: %p", o));
     check_jni_abort_catcher.Check(expected.c_str());
+    EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
   }
 
   jobject o1 = env_->NewWeakGlobalRef(s);
@@ -1538,8 +1859,6 @@ TEST_F(JniInternalTest, ExceptionDescribe) {
 }
 
 TEST_F(JniInternalTest, Throw) {
-  EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
-
   jclass exception_class = env_->FindClass("java/lang/RuntimeException");
   ASSERT_TRUE(exception_class != nullptr);
   jthrowable exception = reinterpret_cast<jthrowable>(env_->AllocObject(exception_class));
@@ -1550,11 +1869,18 @@ TEST_F(JniInternalTest, Throw) {
   jthrowable thrown_exception = env_->ExceptionOccurred();
   env_->ExceptionClear();
   EXPECT_TRUE(env_->IsSameObject(exception, thrown_exception));
-}
 
-TEST_F(JniInternalTest, ThrowNew) {
+  // Bad argument.
+  bool old_check_jni = vm_->SetCheckJniEnabled(false);
   EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
+  EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+  CheckJniAbortCatcher check_jni_abort_catcher;
+  EXPECT_EQ(JNI_ERR, env_->Throw(nullptr));
+  check_jni_abort_catcher.Check("Throw received NULL jthrowable");
+  EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
+}
 
+TEST_F(JniInternalTest, ThrowNew) {
   jclass exception_class = env_->FindClass("java/lang/RuntimeException");
   ASSERT_TRUE(exception_class != nullptr);
 
@@ -1571,6 +1897,16 @@ TEST_F(JniInternalTest, ThrowNew) {
   thrown_exception = env_->ExceptionOccurred();
   env_->ExceptionClear();
   EXPECT_TRUE(env_->IsInstanceOf(thrown_exception, exception_class));
+
+  // Bad argument.
+  bool old_check_jni = vm_->SetCheckJniEnabled(false);
+  CheckJniAbortCatcher check_jni_abort_catcher;
+  EXPECT_EQ(JNI_ERR, env_->ThrowNew(nullptr, nullptr));
+  check_jni_abort_catcher.Check("c == null");
+  EXPECT_FALSE(vm_->SetCheckJniEnabled(true));
+  EXPECT_EQ(JNI_ERR, env_->ThrowNew(nullptr, nullptr));
+  check_jni_abort_catcher.Check("ThrowNew received NULL jclass");
+  EXPECT_TRUE(vm_->SetCheckJniEnabled(old_check_jni));
 }
 
 TEST_F(JniInternalTest, NewDirectBuffer_GetDirectBufferAddress_GetDirectBufferCapacity) {
index 89de16e..dfb42b8 100644 (file)
@@ -137,25 +137,26 @@ static inline uint8_t* EncodeSignedLeb128(uint8_t* dest, int32_t value) {
   return dest;
 }
 
-// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
-class Leb128EncodingVector {
+// An encoder that pushed uint32_t data onto the given std::vector.
+class Leb128Encoder {
  public:
-  Leb128EncodingVector() {
+  explicit Leb128Encoder(std::vector<uint8_t>* data) : data_(data) {
+    DCHECK(data != nullptr);
   }
 
   void Reserve(uint32_t size) {
-    data_.reserve(size);
+    data_->reserve(size);
   }
 
   void PushBackUnsigned(uint32_t value) {
     uint8_t out = value & 0x7f;
     value >>= 7;
     while (value != 0) {
-      data_.push_back(out | 0x80);
+      data_->push_back(out | 0x80);
       out = value & 0x7f;
       value >>= 7;
     }
-    data_.push_back(out);
+    data_->push_back(out);
   }
 
   template<typename It>
@@ -169,12 +170,12 @@ class Leb128EncodingVector {
     uint32_t extra_bits = static_cast<uint32_t>(value ^ (value >> 31)) >> 6;
     uint8_t out = value & 0x7f;
     while (extra_bits != 0u) {
-      data_.push_back(out | 0x80);
+      data_->push_back(out | 0x80);
       value >>= 7;
       out = value & 0x7f;
       extra_bits >>= 7;
     }
-    data_.push_back(out);
+    data_->push_back(out);
   }
 
   template<typename It>
@@ -185,12 +186,23 @@ class Leb128EncodingVector {
   }
 
   const std::vector<uint8_t>& GetData() const {
-    return data_;
+    return *data_;
   }
 
+ protected:
+  std::vector<uint8_t>* const data_;
+
  private:
-  std::vector<uint8_t> data_;
+  DISALLOW_COPY_AND_ASSIGN(Leb128Encoder);
+};
+
+// An encoder with an API similar to vector<uint32_t> where the data is captured in ULEB128 format.
+class Leb128EncodingVector FINAL : private std::vector<uint8_t>, public Leb128Encoder {
+ public:
+  Leb128EncodingVector() : Leb128Encoder(this) {
+  }
 
+ private:
   DISALLOW_COPY_AND_ASSIGN(Leb128EncodingVector);
 };
 
index 414b3bb..cf6f83c 100644 (file)
@@ -50,6 +50,7 @@ inline LockWord::LockWord() : value_(0) {
 inline LockWord::LockWord(Monitor* mon)
     : value_(mon->GetMonitorId() | (kStateFat << kStateShift)) {
   DCHECK_EQ(FatLockMonitor(), mon);
+  DCHECK_LE(mon->GetMonitorId(), static_cast<uint32_t>(kMaxMonitorId));
 }
 
 inline int32_t LockWord::GetHashCode() const {
index e585412..13cc3b0 100644 (file)
@@ -63,6 +63,7 @@ class LockWord {
 
     kThinLockOwnerShift = 0,
     kThinLockOwnerMask = (1 << kThinLockOwnerSize) - 1,
+    kThinLockMaxOwner = kThinLockOwnerMask,
     // Count in higher bits.
     kThinLockCountShift = kThinLockOwnerSize + kThinLockOwnerShift,
     kThinLockCountMask = (1 << kThinLockCountSize) - 1,
@@ -80,10 +81,13 @@ class LockWord {
     kHashShift = 0,
     kHashSize = 32 - kStateSize,
     kHashMask = (1 << kHashSize) - 1,
+    kMaxHash = kHashMask,
+    kMaxMonitorId = kMaxHash
   };
 
   static LockWord FromThinLockId(uint32_t thread_id, uint32_t count) {
-    CHECK_LE(thread_id, static_cast<uint32_t>(kThinLockOwnerMask));
+    CHECK_LE(thread_id, static_cast<uint32_t>(kThinLockMaxOwner));
+    CHECK_LE(count, static_cast<uint32_t>(kThinLockMaxCount));
     return LockWord((thread_id << kThinLockOwnerShift) | (count << kThinLockCountShift) |
                      (kStateThinOrUnlocked << kStateShift));
   }
@@ -94,7 +98,7 @@ class LockWord {
   }
 
   static LockWord FromHashCode(uint32_t hash_code) {
-    CHECK_LE(hash_code, static_cast<uint32_t>(kHashMask));
+    CHECK_LE(hash_code, static_cast<uint32_t>(kMaxHash));
     return LockWord((hash_code << kHashShift) | (kStateHash << kStateShift));
   }
 
index f71d273..8150456 100644 (file)
@@ -41,6 +41,11 @@ class MethodHelper {
     return method_->GetInterfaceMethodIfProxy();
   }
 
+  // GetMethod() != Get() for proxy methods.
+  mirror::ArtMethod* Get() const SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return method_.Get();
+  }
+
   mirror::String* GetNameAsString(Thread* self) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   const char* GetShorty() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
index 2c0ea36..213dbc2 100644 (file)
@@ -29,7 +29,7 @@ namespace mirror {
 
 inline uint32_t Array::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
index 00bed92..d37fa41 100644 (file)
@@ -31,7 +31,7 @@ namespace mirror {
 
 inline uint32_t ArtField::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 6;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 inline Class* ArtField::GetDeclaringClass() {
@@ -120,50 +120,64 @@ inline void ArtField::SetObj(Object* object, Object* new_value) {
   }
 }
 
-inline bool ArtField::GetBoolean(Object* object) {
-  DCHECK_EQ(Primitive::kPrimBoolean, GetTypeAsPrimitiveType()) << PrettyField(this);
-  return Get32(object);
+#define FIELD_GET(object, type) \
+  DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(this); \
+  DCHECK(object != nullptr) << PrettyField(this); \
+  DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
+  if (UNLIKELY(IsVolatile())) { \
+    return object->GetField ## type ## Volatile(GetOffset()); \
+  } \
+  return object->GetField ## type(GetOffset());
+
+#define FIELD_SET(object, type, value) \
+  DCHECK_EQ(Primitive::kPrim ## type, GetTypeAsPrimitiveType()) << PrettyField(this); \
+  DCHECK(object != nullptr) << PrettyField(this); \
+  DCHECK(!IsStatic() || (object == GetDeclaringClass()) || !Runtime::Current()->IsStarted()); \
+  if (UNLIKELY(IsVolatile())) { \
+    object->SetField ## type ## Volatile<kTransactionActive>(GetOffset(), value); \
+  } else { \
+    object->SetField ## type<kTransactionActive>(GetOffset(), value); \
+  }
+
+inline uint8_t ArtField::GetBoolean(Object* object) {
+  FIELD_GET(object, Boolean);
 }
 
 template<bool kTransactionActive>
-inline void ArtField::SetBoolean(Object* object, bool z) {
-  DCHECK_EQ(Primitive::kPrimBoolean, GetTypeAsPrimitiveType()) << PrettyField(this);
-  Set32<kTransactionActive>(object, z);
+inline void ArtField::SetBoolean(Object* object, uint8_t z) {
+  FIELD_SET(object, Boolean, z);
 }
 
 inline int8_t ArtField::GetByte(Object* object) {
-  DCHECK_EQ(Primitive::kPrimByte, GetTypeAsPrimitiveType()) << PrettyField(this);
-  return Get32(object);
+  FIELD_GET(object, Byte);
 }
 
 template<bool kTransactionActive>
 inline void ArtField::SetByte(Object* object, int8_t b) {
-  DCHECK_EQ(Primitive::kPrimByte, GetTypeAsPrimitiveType()) << PrettyField(this);
-  Set32<kTransactionActive>(object, b);
+  FIELD_SET(object, Byte, b);
 }
 
 inline uint16_t ArtField::GetChar(Object* object) {
-  DCHECK_EQ(Primitive::kPrimChar, GetTypeAsPrimitiveType()) << PrettyField(this);
-  return Get32(object);
+  FIELD_GET(object, Char);
 }
 
 template<bool kTransactionActive>
 inline void ArtField::SetChar(Object* object, uint16_t c) {
-  DCHECK_EQ(Primitive::kPrimChar, GetTypeAsPrimitiveType()) << PrettyField(this);
-  Set32<kTransactionActive>(object, c);
+  FIELD_SET(object, Char, c);
 }
 
 inline int16_t ArtField::GetShort(Object* object) {
-  DCHECK_EQ(Primitive::kPrimShort, GetTypeAsPrimitiveType()) << PrettyField(this);
-  return Get32(object);
+  FIELD_GET(object, Short);
 }
 
 template<bool kTransactionActive>
 inline void ArtField::SetShort(Object* object, int16_t s) {
-  DCHECK_EQ(Primitive::kPrimShort, GetTypeAsPrimitiveType()) << PrettyField(this);
-  Set32<kTransactionActive>(object, s);
+  FIELD_SET(object, Short, s);
 }
 
+#undef FIELD_GET
+#undef FIELD_SET
+
 inline int32_t ArtField::GetInt(Object* object) {
   if (kIsDebugBuild) {
     Primitive::Type type = GetTypeAsPrimitiveType();
@@ -273,7 +287,7 @@ inline bool ArtField::IsPrimitiveType() SHARED_LOCKS_REQUIRED(Locks::mutator_loc
 }
 
 inline size_t ArtField::FieldSize() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-  return Primitive::FieldSize(GetTypeAsPrimitiveType());
+  return Primitive::ComponentSize(GetTypeAsPrimitiveType());
 }
 
 inline mirror::DexCache* ArtField::GetDexCache() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
index f3dfa15..885bcb0 100644 (file)
@@ -95,9 +95,9 @@ class MANAGED ArtField FINAL : public Object {
   void SetOffset(MemberOffset num_bytes) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   // field access, null object for static fields
-  bool GetBoolean(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  uint8_t GetBoolean(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<bool kTransactionActive>
-  void SetBoolean(Object* object, bool z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetBoolean(Object* object, uint8_t z) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   int8_t GetByte(Object* object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   template<bool kTransactionActive>
   void SetByte(Object* object, int8_t b) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
index 58321c7..84f204f 100644 (file)
@@ -38,7 +38,7 @@ namespace mirror {
 
 inline uint32_t ArtMethod::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 8;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 template<ReadBarrierOption kReadBarrierOption>
index 370bfb9..131f5d6 100644 (file)
@@ -281,6 +281,19 @@ uint32_t ArtMethod::FindCatchBlock(Handle<ArtMethod> h_this, Handle<Class> excep
   return found_dex_pc;
 }
 
+bool ArtMethod::IsEntrypointInterpreter() {
+  ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
+  const void* oat_quick_code = class_linker->GetOatMethodQuickCodeFor(this);
+  const void* oat_portable_code = class_linker->GetOatMethodPortableCodeFor(this);
+  if (!IsPortableCompiled()) {  // Quick.
+    return oat_quick_code == nullptr ||
+        oat_quick_code != GetEntryPointFromQuickCompiledCode();
+  } else {  // Portable.
+    return oat_portable_code == nullptr ||
+        oat_portable_code != GetEntryPointFromPortableCompiledCode();
+  }
+}
+
 void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue* result,
                        const char* shorty) {
   if (UNLIKELY(__builtin_frame_address(0) < self->GetStackEnd())) {
@@ -318,6 +331,13 @@ void ArtMethod::Invoke(Thread* self, uint32_t* args, uint32_t args_size, JValue*
                                   have_quick_code ? GetEntryPointFromQuickCompiledCode()
                                                   : GetEntryPointFromPortableCompiledCode());
       }
+
+      // Ensure that we won't be accidentally calling quick/portable compiled code when -Xint.
+      if (kIsDebugBuild && Runtime::Current()->GetInstrumentation()->IsForcedInterpretOnly()) {
+        CHECK(IsEntrypointInterpreter())
+            << "Don't call compiled code when -Xint " << PrettyMethod(this);
+      }
+
       if (!IsPortableCompiled()) {
 #ifdef __LP64__
         if (!IsStatic()) {
index fa592c2..ebd5bd5 100644 (file)
@@ -309,6 +309,11 @@ class MANAGED ArtMethod FINAL : public Object {
 
   void AssertPcIsWithinQuickCode(uintptr_t pc) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Returns true if the entrypoint points to the interpreter, as
+  // opposed to the compiled code, that is, this method will be
+  // interpretered on invocation.
+  bool IsEntrypointInterpreter() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   uint32_t GetQuickOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   uint32_t GetPortableOatCodeOffset() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
   void SetQuickOatCodeOffset(uint32_t code_offset) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
index b0ff7ea..726e928 100644 (file)
@@ -556,6 +556,8 @@ inline Object* Class::AllocNonMovableObject(Thread* self) {
 
 inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
                                         uint32_t num_vtable_entries,
+                                        uint32_t num_8bit_static_fields,
+                                        uint32_t num_16bit_static_fields,
                                         uint32_t num_32bit_static_fields,
                                         uint32_t num_64bit_static_fields,
                                         uint32_t num_ref_static_fields) {
@@ -569,18 +571,33 @@ inline uint32_t Class::ComputeClassSize(bool has_embedded_tables,
             sizeof(int32_t) /* vtable len */ +
             embedded_vtable_size;
   }
+
   // Space used by reference statics.
   size +=  num_ref_static_fields * sizeof(HeapReference<Object>);
-  // Possible pad for alignment.
-  if (((size & 7) != 0) && (num_64bit_static_fields > 0)) {
-    size += sizeof(uint32_t);
-    if (num_32bit_static_fields != 0) {
-      // Shuffle one 32 bit static field forward.
-      num_32bit_static_fields--;
+  if (!IsAligned<8>(size) && num_64bit_static_fields > 0) {
+    uint32_t gap = 8 - (size & 0x7);
+    size += gap;  // will be padded
+    // Shuffle 4-byte fields forward.
+    while (gap >= sizeof(uint32_t) && num_32bit_static_fields != 0) {
+      --num_32bit_static_fields;
+      gap -= sizeof(uint32_t);
+    }
+    // Shuffle 2-byte fields forward.
+    while (gap >= sizeof(uint16_t) && num_16bit_static_fields != 0) {
+      --num_16bit_static_fields;
+      gap -= sizeof(uint16_t);
+    }
+    // Shuffle byte fields forward.
+    while (gap >= sizeof(uint8_t) && num_8bit_static_fields != 0) {
+      --num_8bit_static_fields;
+      gap -= sizeof(uint8_t);
     }
   }
+  // Guaranteed to be at least 4 byte aligned. No need for further alignments.
   // Space used for primitive static fields.
-  size += (num_32bit_static_fields * sizeof(uint32_t)) +
+  size += (num_8bit_static_fields * sizeof(uint8_t)) +
+      (num_16bit_static_fields * sizeof(uint16_t)) +
+      (num_32bit_static_fields * sizeof(uint32_t)) +
       (num_64bit_static_fields * sizeof(uint64_t));
   return size;
 }
@@ -705,11 +722,11 @@ inline MemberOffset Class::GetSlowPathFlagOffset() {
 }
 
 inline bool Class::GetSlowPathEnabled() {
-  return GetField32(GetSlowPathFlagOffset());
+  return GetFieldBoolean(GetSlowPathFlagOffset());
 }
 
 inline void Class::SetSlowPath(bool enabled) {
-  SetField32<false>(GetSlowPathFlagOffset(), enabled);
+  SetFieldBoolean<false>(GetSlowPathFlagOffset(), enabled);
 }
 
 inline void Class::InitializeClassVisitor::operator()(
index 5b8eb82..760d54c 100644 (file)
@@ -772,7 +772,8 @@ uint16_t Class::GetDirectInterfaceTypeIdx(uint32_t idx) {
   return GetInterfaceTypeList()->GetTypeItem(idx).type_idx_;
 }
 
-mirror::Class* Class::GetDirectInterface(Thread* self, Handle<mirror::Class> klass, uint32_t idx) {
+mirror::Class* Class::GetDirectInterface(Thread* self, ConstHandle<mirror::Class> klass,
+                                         uint32_t idx) {
   DCHECK(klass.Get() != nullptr);
   DCHECK(!klass->IsPrimitive());
   if (klass->IsArrayClass()) {
index 13d0c80..0d30bc6 100644 (file)
@@ -65,6 +65,8 @@
 namespace art {
 
 struct ClassOffsets;
+template<class T> class ConstHandle;
+template<class T> class Handle;
 class Signature;
 class StringPiece;
 
@@ -449,8 +451,14 @@ class MANAGED Class FINAL : public Object {
   bool IsObjectClass() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     return !IsPrimitive() && GetSuperClass() == NULL;
   }
+
+  bool IsInstantiableNonArray() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+    return !IsPrimitive() && !IsInterface() && !IsAbstract() && !IsArrayClass();
+  }
+
   bool IsInstantiable() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
-    return (!IsPrimitive() && !IsInterface() && !IsAbstract()) || ((IsAbstract()) && IsArrayClass());
+    return (!IsPrimitive() && !IsInterface() && !IsAbstract()) ||
+        ((IsAbstract()) && IsArrayClass());
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
@@ -494,6 +502,8 @@ class MANAGED Class FINAL : public Object {
   // Compute how many bytes would be used a class with the given elements.
   static uint32_t ComputeClassSize(bool has_embedded_tables,
                                    uint32_t num_vtable_entries,
+                                   uint32_t num_8bit_static_fields,
+                                   uint32_t num_16bit_static_fields,
                                    uint32_t num_32bit_static_fields,
                                    uint32_t num_64bit_static_fields,
                                    uint32_t num_ref_static_fields);
@@ -502,12 +512,12 @@ class MANAGED Class FINAL : public Object {
   static uint32_t ClassClassSize() {
     // The number of vtable entries in java.lang.Class.
     uint32_t vtable_entries = Object::kVTableLength + 64;
-    return ComputeClassSize(true, vtable_entries, 0, 1, 0);
+    return ComputeClassSize(true, vtable_entries, 0, 0, 0, 1, 0);
   }
 
   // The size of a java.lang.Class representing a primitive such as int.class.
   static uint32_t PrimitiveClassSize() {
-    return ComputeClassSize(false, 0, 0, 0, 0);
+    return ComputeClassSize(false, 0, 0, 0, 0, 0, 0);
   }
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags,
@@ -975,7 +985,8 @@ class MANAGED Class FINAL : public Object {
 
   uint16_t GetDirectInterfaceTypeIdx(uint32_t idx) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
-  static mirror::Class* GetDirectInterface(Thread* self, Handle<mirror::Class> klass, uint32_t idx)
+  static mirror::Class* GetDirectInterface(Thread* self, ConstHandle<mirror::Class> klass,
+                                           uint32_t idx)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   const char* GetSourceFile() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
index d3fcb55..288e88e 100644 (file)
@@ -28,7 +28,7 @@ namespace mirror {
 
 inline uint32_t DexCache::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 1;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 inline ArtMethod* DexCache::GetResolvedMethod(uint32_t method_idx)
index 9dbfb56..8c1dc7d 100644 (file)
@@ -37,7 +37,7 @@ namespace mirror {
 
 inline uint32_t Object::ClassSize() {
   uint32_t vtable_entries = kVTableLength;
-  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 0, 0, 0, 0);
 }
 
 template<VerifyObjectFlags kVerifyFlags, ReadBarrierOption kReadBarrierOption>
@@ -408,17 +408,157 @@ inline size_t Object::SizeOf() {
 }
 
 template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
-inline int32_t Object::GetField32(MemberOffset field_offset) {
+inline uint8_t Object::GetFieldBoolean(MemberOffset field_offset) {
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
-  const int32_t* word_addr = reinterpret_cast<const int32_t*>(raw_addr);
-  if (UNLIKELY(kIsVolatile)) {
-    return reinterpret_cast<const Atomic<int32_t>*>(word_addr)->LoadSequentiallyConsistent();
-  } else {
-    return reinterpret_cast<const Atomic<int32_t>*>(word_addr)->LoadJavaData();
+  return GetField<uint8_t, kIsVolatile>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline int8_t Object::GetFieldByte(MemberOffset field_offset) {
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  return GetField<int8_t, kIsVolatile>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline uint8_t Object::GetFieldBooleanVolatile(MemberOffset field_offset) {
+  return GetFieldBoolean<kVerifyFlags, true>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline int8_t Object::GetFieldByteVolatile(MemberOffset field_offset) {
+  return GetFieldByte<kVerifyFlags, true>(field_offset);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+    bool kIsVolatile>
+inline void Object::SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldBoolean(this, field_offset,
+                                           GetFieldBoolean<kVerifyFlags, kIsVolatile>(field_offset),
+                                           kIsVolatile);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  SetField<uint8_t, kIsVolatile>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+    bool kIsVolatile>
+inline void Object::SetFieldByte(MemberOffset field_offset, int8_t new_value)
+    SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldByte(this, field_offset,
+                                           GetFieldByte<kVerifyFlags, kIsVolatile>(field_offset),
+                                           kIsVolatile);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  SetField<int8_t, kIsVolatile>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value) {
+  return SetFieldBoolean<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(
+      field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value) {
+  return SetFieldByte<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(
+      field_offset, new_value);
+}
+
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline uint16_t Object::GetFieldChar(MemberOffset field_offset) {
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  return GetField<uint16_t, kIsVolatile>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline int16_t Object::GetFieldShort(MemberOffset field_offset) {
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
   }
+  return GetField<int16_t, kIsVolatile>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline uint16_t Object::GetFieldCharVolatile(MemberOffset field_offset) {
+  return GetFieldChar<kVerifyFlags, true>(field_offset);
+}
+
+template<VerifyObjectFlags kVerifyFlags>
+inline int16_t Object::GetFieldShortVolatile(MemberOffset field_offset) {
+  return GetFieldShort<kVerifyFlags, true>(field_offset);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+    bool kIsVolatile>
+inline void Object::SetFieldChar(MemberOffset field_offset, uint16_t new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldChar(this, field_offset,
+                                           GetFieldChar<kVerifyFlags, kIsVolatile>(field_offset),
+                                           kIsVolatile);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  SetField<uint16_t, kIsVolatile>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags,
+    bool kIsVolatile>
+inline void Object::SetFieldShort(MemberOffset field_offset, int16_t new_value) {
+  if (kCheckTransaction) {
+    DCHECK_EQ(kTransactionActive, Runtime::Current()->IsActiveTransaction());
+  }
+  if (kTransactionActive) {
+    Runtime::Current()->RecordWriteFieldChar(this, field_offset,
+                                           GetFieldShort<kVerifyFlags, kIsVolatile>(field_offset),
+                                           kIsVolatile);
+  }
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  SetField<int16_t, kIsVolatile>(field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value) {
+  return SetFieldChar<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(
+      field_offset, new_value);
+}
+
+template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
+inline void Object::SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value) {
+  return SetFieldShort<kTransactionActive, kCheckTransaction, kVerifyFlags, true>(
+      field_offset, new_value);
+}
+
+template<VerifyObjectFlags kVerifyFlags, bool kIsVolatile>
+inline int32_t Object::GetField32(MemberOffset field_offset) {
+  if (kVerifyFlags & kVerifyThis) {
+    VerifyObject(this);
+  }
+  return GetField<int32_t, kIsVolatile>(field_offset);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
@@ -440,13 +580,7 @@ inline void Object::SetField32(MemberOffset field_offset, int32_t new_value) {
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
-  int32_t* word_addr = reinterpret_cast<int32_t*>(raw_addr);
-  if (kIsVolatile) {
-    reinterpret_cast<Atomic<int32_t>*>(word_addr)->StoreSequentiallyConsistent(new_value);
-  } else {
-    reinterpret_cast<Atomic<int32_t>*>(word_addr)->StoreJavaData(new_value);
-  }
+  SetField<int32_t, kIsVolatile>(field_offset, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -515,13 +649,7 @@ inline int64_t Object::GetField64(MemberOffset field_offset) {
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
-  const int64_t* addr = reinterpret_cast<const int64_t*>(raw_addr);
-  if (kIsVolatile) {
-    return reinterpret_cast<const Atomic<int64_t>*>(addr)->LoadSequentiallyConsistent();
-  } else {
-    return reinterpret_cast<const Atomic<int64_t>*>(addr)->LoadJavaData();
-  }
+  return GetField<int64_t, kIsVolatile>(field_offset);
 }
 
 template<VerifyObjectFlags kVerifyFlags>
@@ -543,13 +671,7 @@ inline void Object::SetField64(MemberOffset field_offset, int64_t new_value) {
   if (kVerifyFlags & kVerifyThis) {
     VerifyObject(this);
   }
-  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
-  int64_t* addr = reinterpret_cast<int64_t*>(raw_addr);
-  if (kIsVolatile) {
-    reinterpret_cast<Atomic<int64_t>*>(addr)->StoreSequentiallyConsistent(new_value);
-  } else {
-    reinterpret_cast<Atomic<int64_t>*>(addr)->StoreJavaData(new_value);
-  }
+  SetField<int64_t, kIsVolatile>(field_offset, new_value);
 }
 
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
@@ -558,6 +680,28 @@ inline void Object::SetField64Volatile(MemberOffset field_offset, int64_t new_va
                                                                                new_value);
 }
 
+template<typename kSize, bool kIsVolatile>
+inline void Object::SetField(MemberOffset field_offset, kSize new_value) {
+  byte* raw_addr = reinterpret_cast<byte*>(this) + field_offset.Int32Value();
+  kSize* addr = reinterpret_cast<kSize*>(raw_addr);
+  if (kIsVolatile) {
+    reinterpret_cast<Atomic<kSize>*>(addr)->StoreSequentiallyConsistent(new_value);
+  } else {
+    reinterpret_cast<Atomic<kSize>*>(addr)->StoreJavaData(new_value);
+  }
+}
+
+template<typename kSize, bool kIsVolatile>
+inline kSize Object::GetField(MemberOffset field_offset) {
+  const byte* raw_addr = reinterpret_cast<const byte*>(this) + field_offset.Int32Value();
+  const kSize* addr = reinterpret_cast<const kSize*>(raw_addr);
+  if (kIsVolatile) {
+    return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadSequentiallyConsistent();
+  } else {
+    return reinterpret_cast<const Atomic<kSize>*>(addr)->LoadJavaData();
+  }
+}
+
 template<bool kTransactionActive, bool kCheckTransaction, VerifyObjectFlags kVerifyFlags>
 inline bool Object::CasFieldWeakSequentiallyConsistent64(MemberOffset field_offset,
                                                          int64_t old_value, int64_t new_value) {
index a6b6227..6cd230b 100644 (file)
@@ -19,6 +19,7 @@
 
 #include "object_reference.h"
 #include "offsets.h"
+#include "runtime.h"
 #include "verify_object.h"
 
 namespace art {
@@ -247,6 +248,78 @@ class MANAGED LOCKABLE Object {
   HeapReference<Object>* GetFieldObjectReferenceAddr(MemberOffset field_offset);
 
   template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE uint8_t GetFieldBoolean(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE int8_t GetFieldByte(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE uint8_t GetFieldBooleanVolatile(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE int8_t GetFieldByteVolatile(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldBoolean(MemberOffset field_offset, uint8_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldByte(MemberOffset field_offset, int8_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldBooleanVolatile(MemberOffset field_offset, uint8_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldByteVolatile(MemberOffset field_offset, int8_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE uint16_t GetFieldChar(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE int16_t GetFieldShort(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE uint16_t GetFieldCharVolatile(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE int16_t GetFieldShortVolatile(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldChar(MemberOffset field_offset, uint16_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
+  ALWAYS_INLINE void SetFieldShort(MemberOffset field_offset, int16_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldCharVolatile(MemberOffset field_offset, uint16_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<bool kTransactionActive, bool kCheckTransaction = true,
+      VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags>
+  ALWAYS_INLINE void SetFieldShortVolatile(MemberOffset field_offset, int16_t new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
+  template<VerifyObjectFlags kVerifyFlags = kDefaultVerifyFlags, bool kIsVolatile = false>
   ALWAYS_INLINE int32_t GetField32(MemberOffset field_offset)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -356,6 +429,13 @@ class MANAGED LOCKABLE Object {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
  private:
+  template<typename kSize, bool kIsVolatile>
+  ALWAYS_INLINE void SetField(MemberOffset field_offset, kSize new_value)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  template<typename kSize, bool kIsVolatile>
+  ALWAYS_INLINE kSize GetField(MemberOffset field_offset)
+      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+
   // Verify the type correctness of stores to fields.
   // TODO: This can cause thread suspension and isn't moving GC safe.
   void CheckFieldAssignmentImpl(MemberOffset field_offset, Object* new_value)
index bc872e6..aa181ee 100644 (file)
@@ -74,7 +74,12 @@ class ObjectTest : public CommonRuntimeTest {
   }
 };
 
-// Keep the assembly code in sync
+// Keep constants in sync.
+TEST_F(ObjectTest, Constants) {
+  EXPECT_EQ(kObjectReferenceSize, sizeof(mirror::HeapReference<mirror::Object>));
+}
+
+// Keep the assembly code constats in sync.
 TEST_F(ObjectTest, AsmConstants) {
   EXPECT_EQ(CLASS_OFFSET, Object::ClassOffset().Int32Value());
   EXPECT_EQ(LOCK_WORD_OFFSET, Object::MonitorOffset().Int32Value());
index b353402..d1d2a3a 100644 (file)
@@ -24,7 +24,7 @@ namespace mirror {
 
 inline uint32_t Reference::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 5;
-  return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0);
+  return Class::ComputeClassSize(false, vtable_entries, 2, 0, 0, 0, 0);
 }
 
 inline bool Reference::IsEnqueuable() {
index d924141..14d7de2 100644 (file)
@@ -30,7 +30,7 @@ namespace mirror {
 
 inline uint32_t String::ClassSize() {
   uint32_t vtable_entries = Object::kVTableLength + 51;
-  return Class::ComputeClassSize(true, vtable_entries, 1, 1, 2);
+  return Class::ComputeClassSize(true, vtable_entries, 0, 1, 0, 1, 2);
 }
 
 inline CharArray* String::GetCharArray() {
index 0b6e3b2..a87d7c1 100644 (file)
@@ -176,10 +176,6 @@ Monitor::~Monitor() {
   // Deflated monitors have a null object.
 }
 
-/*
- * Links a thread into a monitor's wait set.  The monitor lock must be
- * held by the caller of this routine.
- */
 void Monitor::AppendToWaitSet(Thread* thread) {
   DCHECK(owner_ == Thread::Current());
   DCHECK(thread != NULL);
@@ -197,10 +193,6 @@ void Monitor::AppendToWaitSet(Thread* thread) {
   t->SetWaitNext(thread);
 }
 
-/*
- * Unlinks a thread from a monitor's wait set.  The monitor lock must
- * be held by the caller of this routine.
- */
 void Monitor::RemoveFromWaitSet(Thread *thread) {
   DCHECK(owner_ == Thread::Current());
   DCHECK(thread != NULL);
@@ -395,29 +387,6 @@ bool Monitor::Unlock(Thread* self) {
   return true;
 }
 
-/*
- * Wait on a monitor until timeout, interrupt, or notification.  Used for
- * Object.wait() and (somewhat indirectly) Thread.sleep() and Thread.join().
- *
- * If another thread calls Thread.interrupt(), we throw InterruptedException
- * and return immediately if one of the following are true:
- *  - blocked in wait(), wait(long), or wait(long, int) methods of Object
- *  - blocked in join(), join(long), or join(long, int) methods of Thread
- *  - blocked in sleep(long), or sleep(long, int) methods of Thread
- * Otherwise, we set the "interrupted" flag.
- *
- * Checks to make sure that "ns" is in the range 0-999999
- * (i.e. fractions of a millisecond) and throws the appropriate
- * exception if it isn't.
- *
- * The spec allows "spurious wakeups", and recommends that all code using
- * Object.wait() do so in a loop.  This appears to derive from concerns
- * about pthread_cond_wait() on multiprocessor systems.  Some commentary
- * on the web casts doubt on whether these can/should occur.
- *
- * Since we're allowed to wake up "early", we clamp extremely long durations
- * to return at the end of the 32-bit time epoch.
- */
 void Monitor::Wait(Thread* self, int64_t ms, int32_t ns,
                    bool interruptShouldThrow, ThreadState why) {
   DCHECK(self != NULL);
@@ -818,9 +787,6 @@ bool Monitor::MonitorExit(Thread* self, mirror::Object* obj) {
   }
 }
 
-/*
- * Object.wait().  Also called for class init.
- */
 void Monitor::Wait(Thread* self, mirror::Object *obj, int64_t ms, int32_t ns,
                    bool interruptShouldThrow, ThreadState why) {
   DCHECK(self != nullptr);
index be9e6f9..8f97a40 100644 (file)
@@ -75,6 +75,8 @@ class Monitor {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
     DoNotify(self, obj, true);
   }
+
+  // Object.wait().  Also called for class init.
   static void Wait(Thread* self, mirror::Object* obj, int64_t ms, int32_t ns,
                    bool interruptShouldThrow, ThreadState why)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -139,15 +141,18 @@ class Monitor {
       LOCKS_EXCLUDED(monitor_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
+  // Links a thread into a monitor's wait set.  The monitor lock must be held by the caller of this
+  // routine.
   void AppendToWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
+
+  // Unlinks a thread from a monitor's wait set.  The monitor lock must be held by the caller of
+  // this routine.
   void RemoveFromWaitSet(Thread* thread) EXCLUSIVE_LOCKS_REQUIRED(monitor_lock_);
 
-  /*
-   * Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
-   * calling thread must own the lock or the owner must be suspended. There's a race with other
-   * threads inflating the lock, installing hash codes and spurious failures. The caller should
-   * re-read the lock word following the call.
-   */
+  // Changes the shape of a monitor from thin to fat, preserving the internal lock state. The
+  // calling thread must own the lock or the owner must be suspended. There's a race with other
+  // threads inflating the lock, installing hash codes and spurious failures. The caller should
+  // re-read the lock word following the call.
   static void Inflate(Thread* self, Thread* owner, mirror::Object* obj, int32_t hash_code)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -178,6 +183,25 @@ class Monitor {
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
 
+  // Wait on a monitor until timeout, interrupt, or notification.  Used for Object.wait() and
+  // (somewhat indirectly) Thread.sleep() and Thread.join().
+  //
+  // If another thread calls Thread.interrupt(), we throw InterruptedException and return
+  // immediately if one of the following are true:
+  //  - blocked in wait(), wait(long), or wait(long, int) methods of Object
+  //  - blocked in join(), join(long), or join(long, int) methods of Thread
+  //  - blocked in sleep(long), or sleep(long, int) methods of Thread
+  // Otherwise, we set the "interrupted" flag.
+  //
+  // Checks to make sure that "ns" is in the range 0-999999 (i.e. fractions of a millisecond) and
+  // throws the appropriate exception if it isn't.
+  //
+  // The spec allows "spurious wakeups", and recommends that all code using Object.wait() do so in
+  // a loop.  This appears to derive from concerns about pthread_cond_wait() on multiprocessor
+  // systems.  Some commentary on the web casts doubt on whether these can/should occur.
+  //
+  // Since we're allowed to wake up "early", we clamp extremely long durations to return at the end
+  // of the 32-bit time epoch.
   void Wait(Thread* self, int64_t msec, int32_t nsec, bool interruptShouldThrow, ThreadState why)
       LOCKS_EXCLUDED(monitor_lock_)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
index b0b64aa..a9ef8fc 100644 (file)
@@ -167,7 +167,7 @@ static jboolean VMRuntime_is64Bit(JNIEnv* env, jobject) {
 }
 
 static jboolean VMRuntime_isCheckJniEnabled(JNIEnv* env, jobject) {
-  return Runtime::Current()->GetJavaVM()->check_jni ? JNI_TRUE : JNI_FALSE;
+  return Runtime::Current()->GetJavaVM()->IsCheckJniEnabled() ? JNI_TRUE : JNI_FALSE;
 }
 
 static void VMRuntime_setTargetSdkVersionNative(JNIEnv*, jobject, jint target_sdk_version) {
index 820bd04..df6055d 100644 (file)
@@ -17,6 +17,7 @@
 #include <stdlib.h>
 
 #include "debugger.h"
+#include "java_vm_ext.h"
 #include "jni_internal.h"
 #include "JNIHelp.h"
 #include "thread-inl.h"
@@ -47,7 +48,7 @@ static void EnableDebugger() {
 }
 
 static void EnableDebugFeatures(uint32_t debug_flags) {
-  // Must match values in dalvik.system.Zygote.
+  // Must match values in com.android.internal.os.Zygote.
   enum {
     DEBUG_ENABLE_DEBUGGER           = 1,
     DEBUG_ENABLE_CHECKJNI           = 1 << 1,
@@ -59,7 +60,7 @@ static void EnableDebugFeatures(uint32_t debug_flags) {
   if ((debug_flags & DEBUG_ENABLE_CHECKJNI) != 0) {
     Runtime* runtime = Runtime::Current();
     JavaVMExt* vm = runtime->GetJavaVM();
-    if (!vm->check_jni) {
+    if (!vm->IsCheckJniEnabled()) {
       LOG(INFO) << "Late-enabling -Xcheck:jni";
       vm->SetCheckJniEnabled(true);
       // There's only one thread running at this point, so only one JNIEnv to fix up.
index 51cd5b8..c1c6c26 100644 (file)
@@ -15,6 +15,7 @@
  */
 
 #include "dex_file.h"
+#include "jni_internal.h"
 #include "mirror/dex_cache.h"
 #include "mirror/object-inl.h"
 #include "scoped_fast_native_object_access.h"
index fb708a2..a85eec7 100644 (file)
@@ -43,7 +43,10 @@ static void Runtime_nativeExit(JNIEnv*, jclass, jint status) {
   exit(status);
 }
 
-static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader, jstring javaLdLibraryPath) {
+static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, jobject javaLoader,
+                                  jstring javaLdLibraryPath) {
+  // TODO: returns NULL on success or an error message describing the failure on failure. This
+  // should be refactored in terms of suppressed exceptions.
   ScopedUtfChars filename(env, javaFilename);
   if (filename.c_str() == NULL) {
     return NULL;
@@ -64,14 +67,10 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job
     }
   }
 
-  std::string detail;
+  std::string error_msg;
   {
-    ScopedObjectAccess soa(env);
-    StackHandleScope<1> hs(soa.Self());
-    Handle<mirror::ClassLoader> classLoader(
-        hs.NewHandle(soa.Decode<mirror::ClassLoader*>(javaLoader)));
     JavaVMExt* vm = Runtime::Current()->GetJavaVM();
-    bool success = vm->LoadNativeLibrary(filename.c_str(), classLoader, &detail);
+    bool success = vm->LoadNativeLibrary(env, filename.c_str(), javaLoader, &error_msg);
     if (success) {
       return nullptr;
     }
@@ -79,7 +78,7 @@ static jstring Runtime_nativeLoad(JNIEnv* env, jclass, jstring javaFilename, job
 
   // Don't let a pending exception from JNI_OnLoad cause a CheckJNI issue with NewStringUTF.
   env->ExceptionClear();
-  return env->NewStringUTF(detail.c_str());
+  return env->NewStringUTF(error_msg.c_str());
 }
 
 static jlong Runtime_maxMemory(JNIEnv*, jclass) {
index 163ae20..8b2aecb 100644 (file)
@@ -16,6 +16,7 @@
 
 #include "base/logging.h"
 #include "debugger.h"
+#include "jni_internal.h"
 #include "scoped_fast_native_object_access.h"
 #include "ScopedPrimitiveArray.h"
 
index ede108c..43173ca 100644 (file)
@@ -23,7 +23,7 @@
 namespace art {
 
 const uint8_t OatHeader::kOatMagic[] = { 'o', 'a', 't', '\n' };
-const uint8_t OatHeader::kOatVersion[] = { '0', '3', '9', '\0' };
+const uint8_t OatHeader::kOatVersion[] = { '0', '4', '0', '\0' };
 
 static size_t ComputeOatHeaderSize(const SafeMap<std::string, std::string>* variable_data) {
   size_t estimate = 0U;
index 2ebdebd..58df150 100644 (file)
@@ -473,15 +473,17 @@ OatFile::OatClass OatFile::OatDexFile::GetOatClass(uint16_t class_def_index) con
   uint32_t bitmap_size = 0;
   const byte* bitmap_pointer = nullptr;
   const byte* methods_pointer = nullptr;
-  if (type == kOatClassSomeCompiled) {
-    bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(after_type_pointer));
-    bitmap_pointer = after_type_pointer + sizeof(bitmap_size);
-    CHECK_LE(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
-    methods_pointer = bitmap_pointer + bitmap_size;
-  } else {
-    methods_pointer = after_type_pointer;
+  if (type != kOatClassNoneCompiled) {
+    if (type == kOatClassSomeCompiled) {
+      bitmap_size = static_cast<uint32_t>(*reinterpret_cast<const uint32_t*>(after_type_pointer));
+      bitmap_pointer = after_type_pointer + sizeof(bitmap_size);
+      CHECK_LE(bitmap_pointer, oat_file_->End()) << oat_file_->GetLocation();
+      methods_pointer = bitmap_pointer + bitmap_size;
+    } else {
+      methods_pointer = after_type_pointer;
+    }
+    CHECK_LE(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
   }
-  CHECK_LE(methods_pointer, oat_file_->End()) << oat_file_->GetLocation();
 
   return OatClass(oat_file_,
                   status,
@@ -499,22 +501,23 @@ OatFile::OatClass::OatClass(const OatFile* oat_file,
                             const OatMethodOffsets* methods_pointer)
     : oat_file_(oat_file), status_(status), type_(type),
       bitmap_(bitmap_pointer), methods_pointer_(methods_pointer) {
-    CHECK(methods_pointer != nullptr);
     switch (type_) {
       case kOatClassAllCompiled: {
         CHECK_EQ(0U, bitmap_size);
         CHECK(bitmap_pointer == nullptr);
+        CHECK(methods_pointer != nullptr);
         break;
       }
       case kOatClassSomeCompiled: {
         CHECK_NE(0U, bitmap_size);
         CHECK(bitmap_pointer != nullptr);
+        CHECK(methods_pointer != nullptr);
         break;
       }
       case kOatClassNoneCompiled: {
         CHECK_EQ(0U, bitmap_size);
         CHECK(bitmap_pointer == nullptr);
-        methods_pointer_ = nullptr;
+        CHECK(methods_pointer_ == nullptr);
         break;
       }
       case kOatClassMax: {
@@ -556,16 +559,6 @@ const OatFile::OatMethod OatFile::OatClass::GetOatMethod(uint32_t method_index)
   }
 }
 
-OatFile::OatMethod::OatMethod(const byte* base,
-                              const uint32_t code_offset,
-                              const uint32_t gc_map_offset)
-  : begin_(base),
-    code_offset_(code_offset),
-    native_gc_map_offset_(gc_map_offset) {
-}
-
-OatFile::OatMethod::~OatMethod() {}
-
 
 uint32_t OatFile::OatMethod::GetQuickCodeSize() const {
   uintptr_t code = reinterpret_cast<uintptr_t>(GetQuickCode());
index 0bf2b7b..8cb47e2 100644 (file)
@@ -134,14 +134,19 @@ class OatFile {
     const uint8_t* GetMappingTable() const;
     const uint8_t* GetVmapTable() const;
 
-    ~OatMethod();
-
     // Create an OatMethod with offsets relative to the given base address
-    OatMethod(const byte* base,
-              const uint32_t code_offset,
-              const uint32_t gc_map_offset);
+    OatMethod(const byte* base, const uint32_t code_offset, const uint32_t gc_map_offset)
+      : begin_(base),
+        code_offset_(code_offset),
+        native_gc_map_offset_(gc_map_offset) {
+    }
+    ~OatMethod() {}
 
-    OatMethod() {}
+    // A representation of an invalid OatMethod, used when an OatMethod or OatClass can't be found.
+    // See ClassLinker::FindOatMethodFor.
+    static const OatMethod Invalid() {
+      return OatMethod(nullptr, -1, -1);
+    }
 
    private:
     template<class T>
@@ -152,10 +157,10 @@ class OatFile {
       return reinterpret_cast<T>(begin_ + offset);
     }
 
-    const byte* begin_;
+    const byte* const begin_;
 
-    uint32_t code_offset_;
-    uint32_t native_gc_map_offset_;
+    const uint32_t code_offset_;
+    const uint32_t native_gc_map_offset_;
 
     friend class OatClass;
   };
@@ -176,7 +181,12 @@ class OatFile {
     // methods are not included.
     const OatMethod GetOatMethod(uint32_t method_index) const;
 
-    OatClass() {}
+    // A representation of an invalid OatClass, used when an OatClass can't be found.
+    // See ClassLinker::FindOatClass.
+    static OatClass Invalid() {
+      return OatClass(nullptr, mirror::Class::kStatusError, kOatClassNoneCompiled, 0, nullptr,
+                      nullptr);
+    }
 
    private:
     OatClass(const OatFile* oat_file,
@@ -186,15 +196,15 @@ class OatFile {
              const uint32_t* bitmap_pointer,
              const OatMethodOffsets* methods_pointer);
 
-    const OatFile* oat_file_;
+    const OatFile* const oat_file_;
 
-    mirror::Class::Status status_;
+    const mirror::Class::Status status_;
 
-    OatClassType type_;
+    const OatClassType type_;
 
-    const uint32_t* bitmap_;
+    const uint32_t* const bitmap_;
 
-    const OatMethodOffsets* methods_pointer_;
+    const OatMethodOffsets* const methods_pointer_;
 
     friend class OatDexFile;
   };
index f7accc0..a2668ec 100644 (file)
@@ -22,7 +22,7 @@
 namespace art {
 
 template <typename T>
-ObjectLock<T>::ObjectLock(Thread* self, Handle<T> object) : self_(self), obj_(object) {
+ObjectLock<T>::ObjectLock(Thread* self, ConstHandle<T> object) : self_(self), obj_(object) {
   CHECK(object.Get() != nullptr);
   obj_->MonitorEnter(self_);
 }
index acddc03..38690bc 100644 (file)
@@ -28,7 +28,7 @@ class Thread;
 template <typename T>
 class ObjectLock {
  public:
-  ObjectLock(Thread* self, Handle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  ObjectLock(Thread* self, ConstHandle<T> object) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   ~ObjectLock() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -40,7 +40,7 @@ class ObjectLock {
 
  private:
   Thread* const self_;
-  Handle<T> const obj_;
+  ConstHandle<T> const obj_;
 
   DISALLOW_COPY_AND_ASSIGN(ObjectLock);
 };
index bb2ad44..02ee117 100644 (file)
 
 namespace art {
 
+ParsedOptions::ParsedOptions()
+    :
+    boot_class_path_(nullptr),
+    check_jni_(kIsDebugBuild),                      // -Xcheck:jni is off by default for regular
+                                                    // builds but on by default in debug builds.
+    force_copy_(false),
+    compiler_callbacks_(nullptr),
+    is_zygote_(false),
+    must_relocate_(kDefaultMustRelocate),
+    dex2oat_enabled_(true),
+    image_dex2oat_enabled_(true),
+    interpreter_only_(kPoisonHeapReferences),       // kPoisonHeapReferences currently works with
+                                                    // the interpreter only.
+                                                    // TODO: make it work with the compiler.
+    is_explicit_gc_disabled_(false),
+    use_tlab_(false),
+    verify_pre_gc_heap_(false),
+    verify_pre_sweeping_heap_(kIsDebugBuild),       // Pre sweeping is the one that usually fails
+                                                    // if the GC corrupted the heap.
+    verify_post_gc_heap_(false),
+    verify_pre_gc_rosalloc_(kIsDebugBuild),
+    verify_pre_sweeping_rosalloc_(false),
+    verify_post_gc_rosalloc_(false),
+    long_pause_log_threshold_(gc::Heap::kDefaultLongPauseLogThreshold),
+    long_gc_log_threshold_(gc::Heap::kDefaultLongGCLogThreshold),
+    dump_gc_performance_on_shutdown_(false),
+    ignore_max_footprint_(false),
+    heap_initial_size_(gc::Heap::kDefaultInitialSize),
+    heap_maximum_size_(gc::Heap::kDefaultMaximumSize),
+    heap_growth_limit_(0),                          // 0 means no growth limit.
+    heap_min_free_(gc::Heap::kDefaultMinFree),
+    heap_max_free_(gc::Heap::kDefaultMaxFree),
+    heap_non_moving_space_capacity_(gc::Heap::kDefaultNonMovingSpaceCapacity),
+    heap_target_utilization_(gc::Heap::kDefaultTargetUtilization),
+    foreground_heap_growth_multiplier_(gc::Heap::kDefaultHeapGrowthMultiplier),
+    parallel_gc_threads_(1),
+    conc_gc_threads_(0),                            // Only the main GC thread, no workers.
+    collector_type_(                                // The default GC type is set in makefiles.
+#if ART_DEFAULT_GC_TYPE_IS_CMS
+        gc::kCollectorTypeCMS),
+#elif ART_DEFAULT_GC_TYPE_IS_SS
+        gc::kCollectorTypeSS),
+#elif ART_DEFAULT_GC_TYPE_IS_GSS
+    gc::kCollectorTypeGSS),
+#else
+    gc::kCollectorTypeCMS),
+#error "ART default GC type must be set"
+#endif
+    background_collector_type_(
+#ifdef ART_USE_BACKGROUND_COMPACT
+        gc::kCollectorTypeSS),
+#elif defined(ART_USE_HSPACE_COMPACT)
+        gc::kCollectorTypeHomogeneousSpaceCompact),
+#else
+    gc::kCollectorTypeNone),
+#endif
+                                                    // If background_collector_type_ is
+                                                    // kCollectorTypeNone, it defaults to the
+                                                    // collector_type_ after parsing options. If
+                                                    // you set this to kCollectorTypeHSpaceCompact
+                                                    // then we will do an hspace compaction when
+                                                    // we transition to background instead of a
+                                                    // normal collector transition.
+    stack_size_(0),                                 // 0 means default.
+    max_spins_before_thin_lock_inflation_(Monitor::kDefaultMaxSpinsBeforeThinLockInflation),
+    low_memory_mode_(false),
+    lock_profiling_threshold_(0),
+    method_trace_(false),
+    method_trace_file_("/data/method-trace-file.bin"),
+    method_trace_file_size_(10 * MB),
+    hook_is_sensitive_thread_(nullptr),
+    hook_vfprintf_(vfprintf),
+    hook_exit_(exit),
+    hook_abort_(nullptr),                           // We don't call abort(3) by default; see
+                                                    // Runtime::Abort.
+    profile_clock_source_(kDefaultTraceClockSource),
+    verify_(true),
+    image_isa_(kRuntimeISA),
+    use_homogeneous_space_compaction_for_oom_(false),  // If we are using homogeneous space
+                                                       // compaction then default background
+                                                       // compaction to off since homogeneous
+                                                       // space compactions when we transition
+                                                       // to not jank perceptible.
+    min_interval_homogeneous_space_compaction_by_oom_(MsToNs(100 * 1000))  // 100s.
+    {}
+
 ParsedOptions* ParsedOptions::Create(const RuntimeOptions& options, bool ignore_unrecognized) {
   std::unique_ptr<ParsedOptions> parsed(new ParsedOptions());
   if (parsed->Parse(options, ignore_unrecognized)) {
@@ -175,82 +261,9 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
   if (class_path_string != NULL) {
     class_path_string_ = class_path_string;
   }
-  // -Xcheck:jni is off by default for regular builds but on by default in debug builds.
-  check_jni_ = kIsDebugBuild;
-
-  heap_initial_size_ = gc::Heap::kDefaultInitialSize;
-  heap_maximum_size_ = gc::Heap::kDefaultMaximumSize;
-  heap_min_free_ = gc::Heap::kDefaultMinFree;
-  heap_max_free_ = gc::Heap::kDefaultMaxFree;
-  heap_non_moving_space_capacity_ = gc::Heap::kDefaultNonMovingSpaceCapacity;
-  heap_target_utilization_ = gc::Heap::kDefaultTargetUtilization;
-  foreground_heap_growth_multiplier_ = gc::Heap::kDefaultHeapGrowthMultiplier;
-  heap_growth_limit_ = 0;  // 0 means no growth limit .
+
   // Default to number of processors minus one since the main GC thread also does work.
   parallel_gc_threads_ = sysconf(_SC_NPROCESSORS_CONF) - 1;
-  // Only the main GC thread, no workers.
-  conc_gc_threads_ = 0;
-  // The default GC type is set in makefiles.
-#if ART_DEFAULT_GC_TYPE_IS_CMS
-  collector_type_ = gc::kCollectorTypeCMS;
-#elif ART_DEFAULT_GC_TYPE_IS_SS
-  collector_type_ = gc::kCollectorTypeSS;
-#elif ART_DEFAULT_GC_TYPE_IS_GSS
-  collector_type_ = gc::kCollectorTypeGSS;
-#else
-#error "ART default GC type must be set"
-#endif
-  // If we are using homogeneous space compaction then default background compaction to off since
-  // homogeneous space compactions when we transition to not jank perceptible.
-  use_homogeneous_space_compaction_for_oom_ = false;
-  // If background_collector_type_ is kCollectorTypeNone, it defaults to the collector_type_ after
-  // parsing options. If you set this to kCollectorTypeHSpaceCompact then we will do an hspace
-  // compaction when we transition to background instead of a normal collector transition.
-  background_collector_type_ = gc::kCollectorTypeNone;
-#ifdef ART_USE_HSPACE_COMPACT
-  background_collector_type_ = gc::kCollectorTypeHomogeneousSpaceCompact;
-#endif
-#ifdef ART_USE_BACKGROUND_COMPACT
-  background_collector_type_ = gc::kCollectorTypeSS;
-#endif
-  stack_size_ = 0;  // 0 means default.
-  max_spins_before_thin_lock_inflation_ = Monitor::kDefaultMaxSpinsBeforeThinLockInflation;
-  low_memory_mode_ = false;
-  use_tlab_ = false;
-  min_interval_homogeneous_space_compaction_by_oom_ = MsToNs(100 * 1000);  // 100s.
-  verify_pre_gc_heap_ = false;
-  // Pre sweeping is the one that usually fails if the GC corrupted the heap.
-  verify_pre_sweeping_heap_ = kIsDebugBuild;
-  verify_post_gc_heap_ = false;
-  verify_pre_gc_rosalloc_ = kIsDebugBuild;
-  verify_pre_sweeping_rosalloc_ = false;
-  verify_post_gc_rosalloc_ = false;
-
-  compiler_callbacks_ = nullptr;
-  is_zygote_ = false;
-  must_relocate_ = kDefaultMustRelocate;
-  dex2oat_enabled_ = true;
-  image_dex2oat_enabled_ = true;
-  if (kPoisonHeapReferences) {
-    // kPoisonHeapReferences currently works only with the interpreter only.
-    // TODO: make it work with the compiler.
-    interpreter_only_ = true;
-  } else {
-    interpreter_only_ = false;
-  }
-  is_explicit_gc_disabled_ = false;
-
-  long_pause_log_threshold_ = gc::Heap::kDefaultLongPauseLogThreshold;
-  long_gc_log_threshold_ = gc::Heap::kDefaultLongGCLogThreshold;
-  dump_gc_performance_on_shutdown_ = false;
-  ignore_max_footprint_ = false;
-
-  lock_profiling_threshold_ = 0;
-  hook_is_sensitive_thread_ = NULL;
-
-  hook_vfprintf_ = vfprintf;
-  hook_exit_ = exit;
-  hook_abort_ = NULL;  // We don't call abort(3) by default; see Runtime::Abort.
 
 //  gLogVerbosity.class_linker = true;  // TODO: don't check this in!
 //  gLogVerbosity.compiler = true;  // TODO: don't check this in!
@@ -266,15 +279,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
 //  gLogVerbosity.threads = true;  // TODO: don't check this in!
 //  gLogVerbosity.verifier = true;  // TODO: don't check this in!
 
-  method_trace_ = false;
-  method_trace_file_ = "/data/method-trace-file.bin";
-  method_trace_file_size_ = 10 * MB;
-
-  profile_clock_source_ = kDefaultTraceClockSource;
-
-  verify_ = true;
-  image_isa_ = kRuntimeISA;
-
   for (size_t i = 0; i < options.size(); ++i) {
     if (true && options[0].first == "-Xzygote") {
       LOG(INFO) << "option[" << i << "]=" << options[i].first;
@@ -309,6 +313,8 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
       }
     } else if (StartsWith(option, "-Xcheck:jni")) {
       check_jni_ = true;
+    } else if (StartsWith(option, "-Xjniopts:forcecopy")) {
+      force_copy_ = true;
     } else if (StartsWith(option, "-Xrunjdwp:") || StartsWith(option, "-agentlib:jdwp=")) {
       std::string tail(option.substr(option[1] == 'X' ? 10 : 15));
       // TODO: move parsing logic out of Dbg
@@ -642,7 +648,6 @@ bool ParsedOptions::Parse(const RuntimeOptions& options, bool ignore_unrecognize
                StartsWith(option, "-Xint:") ||
                StartsWith(option, "-Xdexopt:") ||
                (option == "-Xnoquithandler") ||
-               StartsWith(option, "-Xjniopts:") ||
                StartsWith(option, "-Xjnigreflimit:") ||
                (option == "-Xgenregmap") ||
                (option == "-Xnogenregmap") ||
index 86056c5..3839e19 100644 (file)
@@ -44,6 +44,7 @@ class ParsedOptions {
   std::string class_path_string_;
   std::string image_;
   bool check_jni_;
+  bool force_copy_;
   std::string jni_trace_;
   std::string native_bridge_library_filename_;
   CompilerCallbacks* compiler_callbacks_;
@@ -107,7 +108,7 @@ class ParsedOptions {
   uint64_t min_interval_homogeneous_space_compaction_by_oom_;
 
  private:
-  ParsedOptions() {}
+  ParsedOptions();
 
   void Usage(const char* fmt, ...);
   void UsageMessage(FILE* stream, const char* fmt, ...);
index 16ca0fe..a639f93 100644 (file)
@@ -30,6 +30,7 @@ static const char* kTypeNames[] = {
   "PrimDouble",
   "PrimVoid",
 };
+
 std::ostream& operator<<(std::ostream& os, const Primitive::Type& type) {
   int32_t int_type = static_cast<int32_t>(type);
   if (type >= Primitive::kPrimNot && type <= Primitive::kPrimVoid) {
index b436bd2..36ad662 100644 (file)
 
 #include "base/logging.h"
 #include "base/macros.h"
-#include "mirror/object_reference.h"
 
 namespace art {
-namespace mirror {
-class Object;
-}  // namespace mirror
+
+static constexpr size_t kObjectReferenceSize = 4;
 
 class Primitive {
  public:
@@ -79,17 +77,13 @@ class Primitive {
       case kPrimFloat:   return 4;
       case kPrimLong:
       case kPrimDouble:  return 8;
-      case kPrimNot:     return sizeof(mirror::HeapReference<mirror::Object>);
+      case kPrimNot:     return kObjectReferenceSize;
       default:
         LOG(FATAL) << "Invalid type " << static_cast<int>(type);
         return 0;
     }
   }
 
-  static size_t FieldSize(Type type) {
-    return ComponentSize(type) <= 4 ? 4 : 8;
-  }
-
   static const char* Descriptor(Type type) {
     switch (type) {
       case kPrimBoolean:
index 23b9aed..c4d51cb 100644 (file)
@@ -53,7 +53,7 @@ enum InlineMethodOpcode : uint16_t {
   kIntrinsicRint,
   kIntrinsicRoundFloat,
   kIntrinsicRoundDouble,
-  kIntrinsicGet,
+  kIntrinsicReferenceGet,
   kIntrinsicCharAt,
   kIntrinsicCompareTo,
   kIntrinsicIsEmptyOrLength,
index 98eeda7..1ec488e 100644 (file)
@@ -206,13 +206,14 @@ class DeoptimizeStackVisitor FINAL : public StackVisitor {
     const Instruction* inst = Instruction::At(code_item->insns_ + dex_pc);
     uint32_t new_dex_pc = dex_pc + inst->SizeInCodeUnits();
     ShadowFrame* new_frame = ShadowFrame::Create(num_regs, nullptr, m, new_dex_pc);
-    StackHandleScope<2> hs(self_);
+    StackHandleScope<3> hs(self_);
     mirror::Class* declaring_class = m->GetDeclaringClass();
     Handle<mirror::DexCache> h_dex_cache(hs.NewHandle(declaring_class->GetDexCache()));
     Handle<mirror::ClassLoader> h_class_loader(hs.NewHandle(declaring_class->GetClassLoader()));
-    verifier::MethodVerifier verifier(h_dex_cache->GetDexFile(), &h_dex_cache, &h_class_loader,
-                                      &m->GetClassDef(), code_item, m->GetDexMethodIndex(), m,
-                                      m->GetAccessFlags(), false, true, true);
+    Handle<mirror::ArtMethod> h_method(hs.NewHandle(m));
+    verifier::MethodVerifier verifier(h_dex_cache->GetDexFile(), h_dex_cache, h_class_loader,
+                                      &m->GetClassDef(), code_item, m->GetDexMethodIndex(),
+                                      h_method, m->GetAccessFlags(), false, true, true);
     verifier.Verify();
     const std::vector<int32_t> kinds(verifier.DescribeVRegs(dex_pc));
     for (uint16_t reg = 0; reg < num_regs; ++reg) {
index 4cd61a5..7da450c 100644 (file)
@@ -348,7 +348,7 @@ class ArgArray {
   std::unique_ptr<uint32_t[]> large_arg_array_;
 };
 
-static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
+static void CheckMethodArguments(JavaVMExt* vm, mirror::ArtMethod* m, uint32_t* args)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   const DexFile::TypeList* params = m->GetParameterTypeList();
   if (params == nullptr) {
@@ -376,11 +376,11 @@ static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
       self->ClearException();
       ++error_count;
     } else if (!param_type->IsPrimitive()) {
-      // TODO: check primitives are in range.
       // TODO: There is a compaction bug here since GetClassFromTypeIdx can cause thread suspension,
       // this is a hard to fix problem since the args can contain Object*, we need to save and
       // restore them by using a visitor similar to the ones used in the trampoline entrypoints.
-      mirror::Object* argument = reinterpret_cast<mirror::Object*>(args[i + offset]);
+      mirror::Object* argument =
+          (reinterpret_cast<StackReference<mirror::Object>*>(&args[i + offset]))->AsMirrorPtr();
       if (argument != nullptr && !argument->InstanceOf(param_type)) {
         LOG(ERROR) << "JNI ERROR (app bug): attempt to pass an instance of "
                    << PrettyTypeOf(argument) << " as argument " << (i + 1)
@@ -389,13 +389,40 @@ static void CheckMethodArguments(mirror::ArtMethod* m, uint32_t* args)
       }
     } else if (param_type->IsPrimitiveLong() || param_type->IsPrimitiveDouble()) {
       offset++;
+    } else {
+      int32_t arg = static_cast<int32_t>(args[i + offset]);
+      if (param_type->IsPrimitiveBoolean()) {
+        if (arg != JNI_TRUE && arg != JNI_FALSE) {
+          LOG(ERROR) << "JNI ERROR (app bug): expected jboolean (0/1) but got value of "
+              << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+          ++error_count;
+        }
+      } else if (param_type->IsPrimitiveByte()) {
+        if (arg < -128 || arg > 127) {
+          LOG(ERROR) << "JNI ERROR (app bug): expected jbyte but got value of "
+              << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+          ++error_count;
+        }
+      } else if (param_type->IsPrimitiveChar()) {
+        if (args[i + offset] > 0xFFFF) {
+          LOG(ERROR) << "JNI ERROR (app bug): expected jchar but got value of "
+              << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+          ++error_count;
+        }
+      } else if (param_type->IsPrimitiveShort()) {
+        if (arg < -32768 || arg > 0x7FFF) {
+          LOG(ERROR) << "JNI ERROR (app bug): expected jshort but got value of "
+              << arg << " as argument " << (i + 1) << " to " << PrettyMethod(h_m.Get());
+          ++error_count;
+        }
+      }
     }
   }
-  if (error_count > 0) {
+  if (UNLIKELY(error_count > 0)) {
     // TODO: pass the JNI function name (such as "CallVoidMethodV") through so we can call JniAbort
     // with an argument.
-    JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
-              PrettyMethod(h_m.Get()).c_str());
+    vm->JniAbortF(nullptr, "bad arguments passed to %s (see above for details)",
+                  PrettyMethod(h_m.Get()).c_str());
   }
 }
 
@@ -412,7 +439,7 @@ static void InvokeWithArgArray(const ScopedObjectAccessAlreadyRunnable& soa,
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
   uint32_t* args = arg_array->GetArray();
   if (UNLIKELY(soa.Env()->check_jni)) {
-    CheckMethodArguments(method, args);
+    CheckMethodArguments(soa.Vm(), method, args);
   }
   method->Invoke(soa.Self(), args, arg_array->GetNumBytes(), result, shorty);
 }
@@ -617,10 +644,6 @@ bool VerifyObjectIsClass(mirror::Object* o, mirror::Class* c) {
   return true;
 }
 
-static std::string PrettyDescriptor(Primitive::Type type) {
-  return PrettyDescriptor(Primitive::Descriptor(type));
-}
-
 bool ConvertPrimitiveValue(const ThrowLocation* throw_location, bool unbox_for_result,
                            Primitive::Type srcType, Primitive::Type dstType,
                            const JValue& src, JValue* dst) {
index 2c54c06..61370c6 100644 (file)
@@ -17,6 +17,7 @@
 #ifndef ART_RUNTIME_REFLECTION_H_
 #define ART_RUNTIME_REFLECTION_H_
 
+#include "base/mutex.h"
 #include "jni.h"
 #include "primitive.h"
 
index c2912be..c962c14 100644 (file)
@@ -347,7 +347,7 @@ jobject CreateSystemClassLoader() {
   ScopedObjectAccess soa(Thread::Current());
   ClassLinker* cl = Runtime::Current()->GetClassLinker();
 
-  StackHandleScope<3> hs(soa.Self());
+  StackHandleScope<2> hs(soa.Self());
   Handle<mirror::Class> class_loader_class(
       hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_ClassLoader)));
   CHECK(cl->EnsureInitialized(class_loader_class, true, true));
@@ -357,15 +357,12 @@ jobject CreateSystemClassLoader() {
   CHECK(getSystemClassLoader != NULL);
 
   JValue result = InvokeWithJValues(soa, nullptr, soa.EncodeMethod(getSystemClassLoader), nullptr);
-  Handle<mirror::ClassLoader> class_loader(
-      hs.NewHandle(down_cast<mirror::ClassLoader*>(result.GetL())));
-  CHECK(class_loader.Get() != nullptr);
   JNIEnv* env = soa.Self()->GetJniEnv();
   ScopedLocalRef<jobject> system_class_loader(env,
-                                              soa.AddLocalReference<jobject>(class_loader.Get()));
+                                              soa.AddLocalReference<jobject>(result.GetL()));
   CHECK(system_class_loader.get() != nullptr);
 
-  soa.Self()->SetClassLoaderOverride(class_loader.Get());
+  soa.Self()->SetClassLoaderOverride(system_class_loader.get());
 
   Handle<mirror::Class> thread_class(
       hs.NewHandle(soa.Decode<mirror::Class*>(WellKnownClasses::java_lang_Thread)));
@@ -376,7 +373,8 @@ jobject CreateSystemClassLoader() {
   CHECK(contextClassLoader != NULL);
 
   // We can't run in a transaction yet.
-  contextClassLoader->SetObject<false>(soa.Self()->GetPeer(), class_loader.Get());
+  contextClassLoader->SetObject<false>(soa.Self()->GetPeer(),
+                                       soa.Decode<mirror::ClassLoader*>(system_class_loader.get()));
 
   return env->NewGlobalRef(system_class_loader.get());
 }
@@ -863,13 +861,9 @@ void Runtime::InitNativeMethods() {
   {
     std::string mapped_name(StringPrintf(OS_SHARED_LIB_FORMAT_STR, "javacore"));
     std::string reason;
-    self->TransitionFromSuspendedToRunnable();
-    StackHandleScope<1> hs(self);
-    auto class_loader(hs.NewHandle<mirror::ClassLoader>(nullptr));
-    if (!instance_->java_vm_->LoadNativeLibrary(mapped_name, class_loader, &reason)) {
+    if (!instance_->java_vm_->LoadNativeLibrary(env, mapped_name, nullptr, &reason)) {
       LOG(FATAL) << "LoadNativeLibrary failed for \"" << mapped_name << "\": " << reason;
     }
-    self->TransitionFromRunnableToSuspended(kNative);
   }
 
   // Initialize well known classes that may invoke runtime native methods.
@@ -1300,6 +1294,34 @@ void Runtime::ExitTransactionMode() {
   preinitialization_transaction_ = nullptr;
 }
 
+void Runtime::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
+                                      uint8_t value, bool is_volatile) const {
+  DCHECK(IsCompiler());
+  DCHECK(IsActiveTransaction());
+  preinitialization_transaction_->RecordWriteFieldBoolean(obj, field_offset, value, is_volatile);
+}
+
+void Runtime::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
+                                   int8_t value, bool is_volatile) const {
+  DCHECK(IsCompiler());
+  DCHECK(IsActiveTransaction());
+  preinitialization_transaction_->RecordWriteFieldByte(obj, field_offset, value, is_volatile);
+}
+
+void Runtime::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
+                                   uint16_t value, bool is_volatile) const {
+  DCHECK(IsCompiler());
+  DCHECK(IsActiveTransaction());
+  preinitialization_transaction_->RecordWriteFieldChar(obj, field_offset, value, is_volatile);
+}
+
+void Runtime::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
+                                    int16_t value, bool is_volatile) const {
+  DCHECK(IsCompiler());
+  DCHECK(IsActiveTransaction());
+  preinitialization_transaction_->RecordWriteFieldShort(obj, field_offset, value, is_volatile);
+}
+
 void Runtime::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset,
                                  uint32_t value, bool is_volatile) const {
   DCHECK(IsCompiler());
index db7b476..79d4554 100644 (file)
@@ -419,6 +419,14 @@ class Runtime {
   }
   void EnterTransactionMode(Transaction* transaction);
   void ExitTransactionMode();
+  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
+                               bool is_volatile) const;
+  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
+                            bool is_volatile) const;
+  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
+                            bool is_volatile) const;
+  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
+                          bool is_volatile) const;
   void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                           bool is_volatile) const;
   void RecordWriteField64(mirror::Object* obj, MemberOffset field_offset, uint64_t value,
index 23aca45..ae3eaf2 100644 (file)
@@ -18,7 +18,8 @@
 #define ART_RUNTIME_SCOPED_THREAD_STATE_CHANGE_H_
 
 #include "base/casts.h"
-#include "jni_internal-inl.h"
+#include "java_vm_ext.h"
+#include "jni_env_ext-inl.h"
 #include "read_barrier.h"
 #include "thread-inl.h"
 #include "verify_object.h"
@@ -114,6 +115,10 @@ class ScopedObjectAccessAlreadyRunnable {
     return vm_;
   }
 
+  bool ForceCopy() const {
+    return vm_->ForceCopy();
+  }
+
   /*
    * Add a local reference for an object to the indirect reference table associated with the
    * current stack frame.  When the native function returns, the reference will be discarded.
index e58caee..8e5da35 100644 (file)
@@ -54,50 +54,6 @@ enum VRegKind {
   kUndefined,
 };
 
-/**
- * @brief Represents the virtual register numbers that denote special meaning.
- * @details This is used to make some virtual register numbers to have specific
- * semantic meaning. This is done so that the compiler can treat all virtual
- * registers the same way and only special case when needed. For example,
- * calculating SSA does not care whether a virtual register is a normal one or
- * a compiler temporary, so it can deal with them in a consistent manner. But,
- * for example if backend cares about temporaries because it has custom spill
- * location, then it can special case them only then.
- */
-enum VRegBaseRegNum : int {
-  /**
-   * @brief Virtual registers originating from dex have number >= 0.
-   */
-  kVRegBaseReg = 0,
-
-  /**
-   * @brief Invalid virtual register number.
-   */
-  kVRegInvalid = -1,
-
-  /**
-   * @brief Used to denote the base register for compiler temporaries.
-   * @details Compiler temporaries are virtual registers not originating
-   * from dex but that are created by compiler.  All virtual register numbers
-   * that are <= kVRegTempBaseReg are categorized as compiler temporaries.
-   */
-  kVRegTempBaseReg = -2,
-
-  /**
-   * @brief Base register of temporary that holds the method pointer.
-   * @details This is a special compiler temporary because it has a specific
-   * location on stack.
-   */
-  kVRegMethodPtrBaseReg = kVRegTempBaseReg,
-
-  /**
-   * @brief Base register of non-special compiler temporary.
-   * @details A non-special compiler temporary is one whose spill location
-   * is flexible.
-   */
-  kVRegNonSpecialTempBaseReg = -3,
-};
-
 // A reference from the shadow stack to a MirrorType object within the Java heap.
 template<class MirrorType>
 class MANAGED StackReference : public mirror::ObjectReference<false, MirrorType> {
@@ -612,75 +568,76 @@ class StackVisitor {
   /*
    * Return sp-relative offset for a Dalvik virtual register, compiler
    * spill or Method* in bytes using Method*.
-   * Note that (reg >= 0) refers to a Dalvik register, (reg == -1)
-   * denotes an invalid Dalvik register, (reg == -2) denotes Method*
-   * and (reg <= -3) denotes a compiler temporary. A compiler temporary
-   * can be thought of as a virtual register that does not exist in the
-   * dex but holds intermediate values to help optimizations and code
-   * generation. A special compiler temporary is one whose location
-   * in frame is well known while non-special ones do not have a requirement
-   * on location in frame as long as code generator itself knows how
-   * to access them.
+   * Note that (reg == -1) denotes an invalid Dalvik register. For the
+   * positive values, the Dalvik registers come first, followed by the
+   * Method*, followed by other special temporaries if any, followed by
+   * regular compiler temporary. As of now we only have the Method* as
+   * as a special compiler temporary.
+   * A compiler temporary can be thought of as a virtual register that
+   * does not exist in the dex but holds intermediate values to help
+   * optimizations and code generation. A special compiler temporary is
+   * one whose location in frame is well known while non-special ones
+   * do not have a requirement on location in frame as long as code
+   * generator itself knows how to access them.
    *
-   *     +---------------------------+
-   *     | IN[ins-1]                 |  {Note: resides in caller's frame}
-   *     |       .                   |
-   *     | IN[0]                     |
-   *     | caller's ArtMethod        |  ... StackReference<ArtMethod>
-   *     +===========================+  {Note: start of callee's frame}
-   *     | core callee-save spill    |  {variable sized}
-   *     +---------------------------+
-   *     | fp callee-save spill      |
-   *     +---------------------------+
-   *     | filler word               |  {For compatibility, if V[locals-1] used as wide
-   *     +---------------------------+
-   *     | V[locals-1]               |
-   *     | V[locals-2]               |
-   *     |      .                    |
-   *     |      .                    |  ... (reg == 2)
-   *     | V[1]                      |  ... (reg == 1)
-   *     | V[0]                      |  ... (reg == 0) <---- "locals_start"
-   *     +---------------------------+
-   *     | Compiler temp region      |  ... (reg <= -3)
-   *     |                           |
-   *     |                           |
-   *     +---------------------------+
-   *     | stack alignment padding   |  {0 to (kStackAlignWords-1) of padding}
-   *     +---------------------------+
-   *     | OUT[outs-1]               |
-   *     | OUT[outs-2]               |
-   *     |       .                   |
-   *     | OUT[0]                    |
-   *     | StackReference<ArtMethod> |  ... (reg == -2) <<== sp, 16-byte aligned
-   *     +===========================+
+   *     +-------------------------------+
+   *     | IN[ins-1]                     |  {Note: resides in caller's frame}
+   *     |       .                       |
+   *     | IN[0]                         |
+   *     | caller's ArtMethod            |  ... StackReference<ArtMethod>
+   *     +===============================+  {Note: start of callee's frame}
+   *     | core callee-save spill        |  {variable sized}
+   *     +-------------------------------+
+   *     | fp callee-save spill          |
+   *     +-------------------------------+
+   *     | filler word                   |  {For compatibility, if V[locals-1] used as wide
+   *     +-------------------------------+
+   *     | V[locals-1]                   |
+   *     | V[locals-2]                   |
+   *     |      .                        |
+   *     |      .                        |  ... (reg == 2)
+   *     | V[1]                          |  ... (reg == 1)
+   *     | V[0]                          |  ... (reg == 0) <---- "locals_start"
+   *     +-------------------------------+
+   *     | stack alignment padding       |  {0 to (kStackAlignWords-1) of padding}
+   *     +-------------------------------+
+   *     | Compiler temp region          |  ... (reg >= max_num_special_temps)
+   *     |      .                        |
+   *     |      .                        |
+   *     | V[max_num_special_temps + 1] |
+   *     | V[max_num_special_temps + 0] |
+   *     +-------------------------------+
+   *     | OUT[outs-1]                   |
+   *     | OUT[outs-2]                   |
+   *     |       .                       |
+   *     | OUT[0]                        |
+   *     | StackReference<ArtMethod>     |  ... (reg == num_total_code_regs == special_temp_value) <<== sp, 16-byte aligned
+   *     +===============================+
    */
   static int GetVRegOffset(const DexFile::CodeItem* code_item,
                            uint32_t core_spills, uint32_t fp_spills,
                            size_t frame_size, int reg, InstructionSet isa) {
     DCHECK_EQ(frame_size & (kStackAlignment - 1), 0U);
-    DCHECK_NE(reg, static_cast<int>(kVRegInvalid));
+    DCHECK_NE(reg, -1);
     int spill_size = POPCOUNT(core_spills) * GetBytesPerGprSpillLocation(isa)
         + POPCOUNT(fp_spills) * GetBytesPerFprSpillLocation(isa)
         + sizeof(uint32_t);  // Filler.
-    int num_ins = code_item->ins_size_;
-    int num_regs = code_item->registers_size_ - num_ins;
-    int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
-    if (reg == static_cast<int>(kVRegMethodPtrBaseReg)) {
+    int num_regs = code_item->registers_size_ - code_item->ins_size_;
+    int temp_threshold = code_item->registers_size_;
+    const int max_num_special_temps = 1;
+    if (reg == temp_threshold) {
       // The current method pointer corresponds to special location on stack.
       return 0;
-    } else if (reg <= static_cast<int>(kVRegNonSpecialTempBaseReg)) {
+    } else if (reg >= temp_threshold + max_num_special_temps) {
       /*
        * Special temporaries may have custom locations and the logic above deals with that.
-       * However, non-special temporaries are placed relative to the locals. Since the
-       * virtual register numbers for temporaries "grow" in negative direction, reg number
-       * will always be <= to the temp base reg. Thus, the logic ensures that the first
-       * temp is at offset -4 bytes from locals, the second is at -8 bytes from locals,
-       * and so on.
+       * However, non-special temporaries are placed relative to the outs.
        */
-      int relative_offset =
-          (reg + std::abs(static_cast<int>(kVRegNonSpecialTempBaseReg)) - 1) * sizeof(uint32_t);
-      return locals_start + relative_offset;
+      int temps_start = sizeof(StackReference<mirror::ArtMethod>) + code_item->outs_size_ * sizeof(uint32_t);
+      int relative_offset = (reg - (temp_threshold + max_num_special_temps)) * sizeof(uint32_t);
+      return temps_start + relative_offset;
     }  else if (reg < num_regs) {
+      int locals_start = frame_size - spill_size - num_regs * sizeof(uint32_t);
       return locals_start + (reg * sizeof(uint32_t));
     } else {
       // Handle ins.
index a5caa07..bd399e7 100644 (file)
@@ -24,7 +24,7 @@
 #include "base/casts.h"
 #include "base/mutex-inl.h"
 #include "gc/heap.h"
-#include "jni_internal.h"
+#include "jni_env_ext.h"
 
 namespace art {
 
index 10688ff..e0d67d6 100644 (file)
@@ -507,39 +507,8 @@ void Thread::InitStackHwm() {
     + 4 * KB;
   if (read_stack_size <= min_stack) {
     LOG(FATAL) << "Attempt to attach a thread with a too-small stack (" << read_stack_size
-        << " bytes)";
-  }
-
-  // TODO: move this into the Linux GetThreadStack implementation.
-#if !defined(__APPLE__)
-  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
-  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
-  // will be broken because we'll die long before we get close to 2GB.
-  bool is_main_thread = (::art::GetTid() == getpid());
-  if (is_main_thread) {
-    rlimit stack_limit;
-    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
-      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
-    }
-    if (stack_limit.rlim_cur == RLIM_INFINITY) {
-      // Find the default stack size for new threads...
-      pthread_attr_t default_attributes;
-      size_t default_stack_size;
-      CHECK_PTHREAD_CALL(pthread_attr_init, (&default_attributes), "default stack size query");
-      CHECK_PTHREAD_CALL(pthread_attr_getstacksize, (&default_attributes, &default_stack_size),
-                         "default stack size query");
-      CHECK_PTHREAD_CALL(pthread_attr_destroy, (&default_attributes), "default stack size query");
-
-      // ...and use that as our limit.
-      size_t old_stack_size = read_stack_size;
-      tlsPtr_.stack_size = default_stack_size;
-      tlsPtr_.stack_begin += (old_stack_size - default_stack_size);
-      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
-                    << " to " << PrettySize(default_stack_size)
-                    << " with base " << reinterpret_cast<void*>(tlsPtr_.stack_begin);
-    }
+               << " bytes)";
   }
-#endif
 
   // Set stack_end_ to the bottom of the stack saving space of stack overflows
 
@@ -1092,6 +1061,12 @@ bool Thread::IsStillStarting() const {
       (*tlsPtr_.name == kThreadNameDuringStartup);
 }
 
+void Thread::AssertPendingException() const {
+  if (UNLIKELY(!IsExceptionPending())) {
+    LOG(FATAL) << "Pending exception expected.";
+  }
+}
+
 void Thread::AssertNoPendingException() const {
   if (UNLIKELY(IsExceptionPending())) {
     ScopedObjectAccess soa(Thread::Current());
@@ -1127,6 +1102,21 @@ void Thread::Destroy() {
   Thread* self = this;
   DCHECK_EQ(self, Thread::Current());
 
+  if (tlsPtr_.jni_env != nullptr) {
+    // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
+    tlsPtr_.jni_env->monitors.VisitRoots(MonitorExitVisitor, self, 0, kRootVMInternal);
+    // Release locally held global references which releasing may require the mutator lock.
+    if (tlsPtr_.jpeer != nullptr) {
+      // If pthread_create fails we don't have a jni env here.
+      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
+      tlsPtr_.jpeer = nullptr;
+    }
+    if (tlsPtr_.class_loader_override != nullptr) {
+      tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.class_loader_override);
+      tlsPtr_.class_loader_override = nullptr;
+    }
+  }
+
   if (tlsPtr_.opeer != nullptr) {
     ScopedObjectAccess soa(self);
     // We may need to call user-supplied managed code, do this before final clean-up.
@@ -1154,22 +1144,16 @@ void Thread::Destroy() {
       ObjectLock<mirror::Object> locker(self, h_obj);
       locker.NotifyAll();
     }
+    tlsPtr_.opeer = nullptr;
   }
 
-  // On thread detach, all monitors entered with JNI MonitorEnter are automatically exited.
-  if (tlsPtr_.jni_env != nullptr) {
-    tlsPtr_.jni_env->monitors.VisitRoots(MonitorExitVisitor, self, 0, kRootVMInternal);
-  }
+  Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
 }
 
 Thread::~Thread() {
-  if (tlsPtr_.jni_env != nullptr && tlsPtr_.jpeer != nullptr) {
-    // If pthread_create fails we don't have a jni env here.
-    tlsPtr_.jni_env->DeleteGlobalRef(tlsPtr_.jpeer);
-    tlsPtr_.jpeer = nullptr;
-  }
-  tlsPtr_.opeer = nullptr;
-
+  CHECK(tlsPtr_.class_loader_override == nullptr);
+  CHECK(tlsPtr_.jpeer == nullptr);
+  CHECK(tlsPtr_.opeer == nullptr);
   bool initialized = (tlsPtr_.jni_env != nullptr);  // Did Thread::Init run?
   if (initialized) {
     delete tlsPtr_.jni_env;
@@ -1202,7 +1186,7 @@ Thread::~Thread() {
   delete tlsPtr_.stack_trace_sample;
   free(tlsPtr_.nested_signal_state);
 
-  Runtime::Current()->GetHeap()->RevokeThreadLocalBuffers(this);
+  Runtime::Current()->GetHeap()->AssertThreadLocalBuffersAreRevoked(this);
 
   TearDownAlternateSignalStack();
 }
@@ -1312,11 +1296,10 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
       result = kInvalidIndirectRefObject;
     }
   } else if (kind == kGlobal) {
-    JavaVMExt* const vm = Runtime::Current()->GetJavaVM();
-    result = vm->globals.SynchronizedGet(const_cast<Thread*>(this), &vm->globals_lock, ref);
+    result = tlsPtr_.jni_env->vm->DecodeGlobal(const_cast<Thread*>(this), ref);
   } else {
     DCHECK_EQ(kind, kWeakGlobal);
-    result = Runtime::Current()->GetJavaVM()->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
+    result = tlsPtr_.jni_env->vm->DecodeWeakGlobal(const_cast<Thread*>(this), ref);
     if (result == kClearedJniWeakGlobal) {
       // This is a special case where it's okay to return nullptr.
       return nullptr;
@@ -1324,7 +1307,8 @@ mirror::Object* Thread::DecodeJObject(jobject obj) const {
   }
 
   if (UNLIKELY(result == nullptr)) {
-    JniAbortF(nullptr, "use of deleted %s %p", ToStr<IndirectRefKind>(kind).c_str(), obj);
+    tlsPtr_.jni_env->vm->JniAbortF(nullptr, "use of deleted %s %p",
+                                   ToStr<IndirectRefKind>(kind).c_str(), obj);
   }
   return result;
 }
@@ -1364,6 +1348,13 @@ void Thread::NotifyLocked(Thread* self) {
   }
 }
 
+void Thread::SetClassLoaderOverride(jobject class_loader_override) {
+  if (tlsPtr_.class_loader_override != nullptr) {
+    GetJniEnv()->DeleteGlobalRef(tlsPtr_.class_loader_override);
+  }
+  tlsPtr_.class_loader_override = GetJniEnv()->NewGlobalRef(class_loader_override);
+}
+
 class CountStackDepthVisitor : public StackVisitor {
  public:
   explicit CountStackDepthVisitor(Thread* thread)
@@ -1836,12 +1827,24 @@ void Thread::DumpThreadOffset(std::ostream& os, uint32_t offset) {
   QUICK_ENTRY_POINT_INFO(pInitializeTypeAndVerifyAccess)
   QUICK_ENTRY_POINT_INFO(pInitializeType)
   QUICK_ENTRY_POINT_INFO(pResolveString)
+  QUICK_ENTRY_POINT_INFO(pSet8Instance)
+  QUICK_ENTRY_POINT_INFO(pSet8Static)
+  QUICK_ENTRY_POINT_INFO(pSet16Instance)
+  QUICK_ENTRY_POINT_INFO(pSet16Static)
   QUICK_ENTRY_POINT_INFO(pSet32Instance)
   QUICK_ENTRY_POINT_INFO(pSet32Static)
   QUICK_ENTRY_POINT_INFO(pSet64Instance)
   QUICK_ENTRY_POINT_INFO(pSet64Static)
   QUICK_ENTRY_POINT_INFO(pSetObjInstance)
   QUICK_ENTRY_POINT_INFO(pSetObjStatic)
+  QUICK_ENTRY_POINT_INFO(pGetByteInstance)
+  QUICK_ENTRY_POINT_INFO(pGetBooleanInstance)
+  QUICK_ENTRY_POINT_INFO(pGetByteStatic)
+  QUICK_ENTRY_POINT_INFO(pGetBooleanStatic)
+  QUICK_ENTRY_POINT_INFO(pGetShortInstance)
+  QUICK_ENTRY_POINT_INFO(pGetCharInstance)
+  QUICK_ENTRY_POINT_INFO(pGetShortStatic)
+  QUICK_ENTRY_POINT_INFO(pGetCharStatic)
   QUICK_ENTRY_POINT_INFO(pGet32Instance)
   QUICK_ENTRY_POINT_INFO(pGet32Static)
   QUICK_ENTRY_POINT_INFO(pGet64Instance)
@@ -2138,11 +2141,6 @@ class RootCallbackVisitor {
   const uint32_t tid_;
 };
 
-void Thread::SetClassLoaderOverride(mirror::ClassLoader* class_loader_override) {
-  VerifyObject(class_loader_override);
-  tlsPtr_.class_loader_override = class_loader_override;
-}
-
 void Thread::VisitRoots(RootCallback* visitor, void* arg) {
   uint32_t thread_id = GetThreadId();
   if (tlsPtr_.opeer != nullptr) {
@@ -2152,10 +2150,6 @@ void Thread::VisitRoots(RootCallback* visitor, void* arg) {
     visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.exception), arg, thread_id, kRootNativeStack);
   }
   tlsPtr_.throw_location.VisitRoots(visitor, arg);
-  if (tlsPtr_.class_loader_override != nullptr) {
-    visitor(reinterpret_cast<mirror::Object**>(&tlsPtr_.class_loader_override), arg, thread_id,
-            kRootNativeStack);
-  }
   if (tlsPtr_.monitor_enter_object != nullptr) {
     visitor(&tlsPtr_.monitor_enter_object, arg, thread_id, kRootNativeStack);
   }
index 0c64f1f..aca4069 100644 (file)
@@ -322,6 +322,7 @@ class Thread {
     return tlsPtr_.exception;
   }
 
+  void AssertPendingException() const;
   void AssertNoPendingException() const;
   void AssertNoPendingExceptionForNewException(const char* msg) const;
 
@@ -457,12 +458,11 @@ class Thread {
     tlsPtr_.wait_next = next;
   }
 
-  mirror::ClassLoader* GetClassLoaderOverride() SHARED_LOCKS_REQUIRED(Locks::mutator_lock_) {
+  jobject GetClassLoaderOverride() {
     return tlsPtr_.class_loader_override;
   }
 
-  void SetClassLoaderOverride(mirror::ClassLoader* class_loader_override)
-      SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+  void SetClassLoaderOverride(jobject class_loader_override);
 
   // Create the internal representation of a stack trace, that is more time
   // and space efficient to compute than the StackTraceElement[].
@@ -1085,7 +1085,7 @@ class Thread {
 
     // Needed to get the right ClassLoader in JNI_OnLoad, but also
     // useful for testing.
-    mirror::ClassLoader* class_loader_override;
+    jobject class_loader_override;
 
     // Thread local, lazily allocated, long jump context. Used to deliver exceptions.
     Context* long_jump_context;
index 9aacb30..1254056 100644 (file)
@@ -33,8 +33,11 @@ static void SigAltStack(stack_t* new_stack, stack_t* old_stack) {
 }
 
 // The default SIGSTKSZ on linux is 8K.  If we do any logging in a signal
-// handler this is too small.  We allocate 16K instead.
-static constexpr int kHostAltSigStackSize = 16*1024;    // 16K signal stack.
+// handler this is too small.  We allocate 16K instead or the minimum signal
+// stack size.
+// TODO: We shouldn't do logging (with locks) in signal handlers.
+static constexpr int kHostAltSigStackSize =
+    16 * KB < MINSIGSTKSZ ? MINSIGSTKSZ : 16 * KB;
 
 void Thread::SetUpAlternateSignalStack() {
   // Create and set an alternate signal stack.
index fc687dc..afb98ca 100644 (file)
@@ -820,6 +820,8 @@ void ThreadList::Register(Thread* self) {
 
 void ThreadList::Unregister(Thread* self) {
   DCHECK_EQ(self, Thread::Current());
+  CHECK_NE(self->GetState(), kRunnable);
+  Locks::mutator_lock_->AssertNotHeld(self);
 
   VLOG(threads) << "ThreadList::Unregister() " << *self;
 
@@ -834,14 +836,18 @@ void ThreadList::Unregister(Thread* self) {
     // Note: deliberately not using MutexLock that could hold a stale self pointer.
     Locks::thread_list_lock_->ExclusiveLock(self);
     CHECK(Contains(self));
-    // Note: we don't take the thread_suspend_count_lock_ here as to be suspending a thread other
-    // than yourself you need to hold the thread_list_lock_ (see Thread::ModifySuspendCount).
+    Locks::thread_suspend_count_lock_->ExclusiveLock(self);
+    bool removed = false;
     if (!self->IsSuspended()) {
       list_.remove(self);
+      removed = true;
+    }
+    Locks::thread_suspend_count_lock_->ExclusiveUnlock(self);
+    Locks::thread_list_lock_->ExclusiveUnlock(self);
+    if (removed) {
       delete self;
       self = nullptr;
     }
-    Locks::thread_list_lock_->ExclusiveUnlock(self);
   }
   // Release the thread ID after the thread is finished and deleted to avoid cases where we can
   // temporarily have multiple threads with the same thread id. When this occurs, it causes
index 50f1eca..b496f25 100644 (file)
@@ -57,6 +57,40 @@ Transaction::~Transaction() {
   }
 }
 
+void Transaction::RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset,
+                                          uint8_t value, bool is_volatile) {
+  DCHECK(obj != nullptr);
+  MutexLock mu(Thread::Current(), log_lock_);
+  ObjectLog& object_log = object_logs_[obj];
+  object_log.LogBooleanValue(field_offset, value, is_volatile);
+}
+
+void Transaction::RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset,
+                                       int8_t value, bool is_volatile) {
+  DCHECK(obj != nullptr);
+  MutexLock mu(Thread::Current(), log_lock_);
+  ObjectLog& object_log = object_logs_[obj];
+  object_log.LogByteValue(field_offset, value, is_volatile);
+}
+
+void Transaction::RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset,
+                                       uint16_t value, bool is_volatile) {
+  DCHECK(obj != nullptr);
+  MutexLock mu(Thread::Current(), log_lock_);
+  ObjectLog& object_log = object_logs_[obj];
+  object_log.LogCharValue(field_offset, value, is_volatile);
+}
+
+
+void Transaction::RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset,
+                                        int16_t value, bool is_volatile) {
+  DCHECK(obj != nullptr);
+  MutexLock mu(Thread::Current(), log_lock_);
+  ObjectLog& object_log = object_logs_[obj];
+  object_log.LogShortValue(field_offset, value, is_volatile);
+}
+
+
 void Transaction::RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                                      bool is_volatile) {
   DCHECK(obj != nullptr);
@@ -223,35 +257,42 @@ void Transaction::VisitStringLogs(RootCallback* callback, void* arg) {
   }
 }
 
+void Transaction::ObjectLog::LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile) {
+  LogValue(ObjectLog::kBoolean, offset, value, is_volatile);
+}
+
+void Transaction::ObjectLog::LogByteValue(MemberOffset offset, int8_t value, bool is_volatile) {
+  LogValue(ObjectLog::kByte, offset, value, is_volatile);
+}
+
+void Transaction::ObjectLog::LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile) {
+  LogValue(ObjectLog::kChar, offset, value, is_volatile);
+}
+
+void Transaction::ObjectLog::LogShortValue(MemberOffset offset, int16_t value, bool is_volatile) {
+  LogValue(ObjectLog::kShort, offset, value, is_volatile);
+}
+
 void Transaction::ObjectLog::Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile) {
-  auto it = field_values_.find(offset.Uint32Value());
-  if (it == field_values_.end()) {
-    ObjectLog::FieldValue field_value;
-    field_value.value = value;
-    field_value.is_volatile = is_volatile;
-    field_value.kind = ObjectLog::k32Bits;
-    field_values_.insert(std::make_pair(offset.Uint32Value(), field_value));
-  }
+  LogValue(ObjectLog::k32Bits, offset, value, is_volatile);
 }
 
 void Transaction::ObjectLog::Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile) {
-  auto it = field_values_.find(offset.Uint32Value());
-  if (it == field_values_.end()) {
-    ObjectLog::FieldValue field_value;
-    field_value.value = value;
-    field_value.is_volatile = is_volatile;
-    field_value.kind = ObjectLog::k64Bits;
-    field_values_.insert(std::make_pair(offset.Uint32Value(), field_value));
-  }
+  LogValue(ObjectLog::k64Bits, offset, value, is_volatile);
 }
 
 void Transaction::ObjectLog::LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile) {
+  LogValue(ObjectLog::kReference, offset, reinterpret_cast<uintptr_t>(obj), is_volatile);
+}
+
+void Transaction::ObjectLog::LogValue(ObjectLog::FieldValueKind kind,
+                                      MemberOffset offset, uint64_t value, bool is_volatile) {
   auto it = field_values_.find(offset.Uint32Value());
   if (it == field_values_.end()) {
     ObjectLog::FieldValue field_value;
-    field_value.value = reinterpret_cast<uintptr_t>(obj);
+    field_value.value = value;
     field_value.is_volatile = is_volatile;
-    field_value.kind = ObjectLog::kReference;
+    field_value.kind = kind;
     field_values_.insert(std::make_pair(offset.Uint32Value(), field_value));
   }
 }
@@ -281,6 +322,42 @@ void Transaction::ObjectLog::UndoFieldWrite(mirror::Object* obj, MemberOffset fi
   // we'd need to disable the check.
   constexpr bool kCheckTransaction = true;
   switch (field_value.kind) {
+    case kBoolean:
+      if (UNLIKELY(field_value.is_volatile)) {
+        obj->SetFieldBooleanVolatile<false, kCheckTransaction>(field_offset,
+                                                         static_cast<bool>(field_value.value));
+      } else {
+        obj->SetFieldBoolean<false, kCheckTransaction>(field_offset,
+                                                 static_cast<bool>(field_value.value));
+      }
+      break;
+    case kByte:
+      if (UNLIKELY(field_value.is_volatile)) {
+        obj->SetFieldByteVolatile<false, kCheckTransaction>(field_offset,
+                                                         static_cast<int8_t>(field_value.value));
+      } else {
+        obj->SetFieldByte<false, kCheckTransaction>(field_offset,
+                                                 static_cast<int8_t>(field_value.value));
+      }
+      break;
+    case kChar:
+      if (UNLIKELY(field_value.is_volatile)) {
+        obj->SetFieldCharVolatile<false, kCheckTransaction>(field_offset,
+                                                          static_cast<uint16_t>(field_value.value));
+      } else {
+        obj->SetFieldChar<false, kCheckTransaction>(field_offset,
+                                                  static_cast<uint16_t>(field_value.value));
+      }
+      break;
+    case kShort:
+      if (UNLIKELY(field_value.is_volatile)) {
+        obj->SetFieldShortVolatile<false, kCheckTransaction>(field_offset,
+                                                          static_cast<int16_t>(field_value.value));
+      } else {
+        obj->SetFieldShort<false, kCheckTransaction>(field_offset,
+                                                  static_cast<int16_t>(field_value.value));
+      }
+      break;
     case k32Bits:
       if (UNLIKELY(field_value.is_volatile)) {
         obj->SetField32Volatile<false, kCheckTransaction>(field_offset,
index 6625390..21d3c98 100644 (file)
@@ -41,6 +41,18 @@ class Transaction {
   ~Transaction();
 
   // Record object field changes.
+  void RecordWriteFieldBoolean(mirror::Object* obj, MemberOffset field_offset, uint8_t value,
+                               bool is_volatile)
+      LOCKS_EXCLUDED(log_lock_);
+  void RecordWriteFieldByte(mirror::Object* obj, MemberOffset field_offset, int8_t value,
+                               bool is_volatile)
+      LOCKS_EXCLUDED(log_lock_);
+  void RecordWriteFieldChar(mirror::Object* obj, MemberOffset field_offset, uint16_t value,
+                            bool is_volatile)
+      LOCKS_EXCLUDED(log_lock_);
+  void RecordWriteFieldShort(mirror::Object* obj, MemberOffset field_offset, int16_t value,
+                             bool is_volatile)
+      LOCKS_EXCLUDED(log_lock_);
   void RecordWriteField32(mirror::Object* obj, MemberOffset field_offset, uint32_t value,
                           bool is_volatile)
       LOCKS_EXCLUDED(log_lock_);
@@ -82,6 +94,10 @@ class Transaction {
  private:
   class ObjectLog {
    public:
+    void LogBooleanValue(MemberOffset offset, uint8_t value, bool is_volatile);
+    void LogByteValue(MemberOffset offset, int8_t value, bool is_volatile);
+    void LogCharValue(MemberOffset offset, uint16_t value, bool is_volatile);
+    void LogShortValue(MemberOffset offset, int16_t value, bool is_volatile);
     void Log32BitsValue(MemberOffset offset, uint32_t value, bool is_volatile);
     void Log64BitsValue(MemberOffset offset, uint64_t value, bool is_volatile);
     void LogReferenceValue(MemberOffset offset, mirror::Object* obj, bool is_volatile);
@@ -95,6 +111,10 @@ class Transaction {
 
    private:
     enum FieldValueKind {
+      kBoolean,
+      kByte,
+      kChar,
+      kShort,
       k32Bits,
       k64Bits,
       kReference
@@ -106,6 +126,7 @@ class Transaction {
       bool is_volatile;
     };
 
+    void LogValue(FieldValueKind kind, MemberOffset offset, uint64_t value, bool is_volatile);
     void UndoFieldWrite(mirror::Object* obj, MemberOffset field_offset,
                         const FieldValue& field_value) SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
index c4e84f3..d15a09a 100644 (file)
@@ -66,8 +66,9 @@ pid_t GetTid() {
   uint64_t owner;
   CHECK_PTHREAD_CALL(pthread_threadid_np, (NULL, &owner), __FUNCTION__);  // Requires Mac OS 10.6
   return owner;
+#elif defined(__BIONIC__)
+  return gettid();
 #else
-  // Neither bionic nor glibc exposes gettid(2).
   return syscall(__NR_gettid);
 #endif
 }
@@ -107,6 +108,31 @@ void GetThreadStack(pthread_t thread, void** stack_base, size_t* stack_size, siz
   CHECK_PTHREAD_CALL(pthread_attr_getstack, (&attributes, stack_base, stack_size), __FUNCTION__);
   CHECK_PTHREAD_CALL(pthread_attr_getguardsize, (&attributes, guard_size), __FUNCTION__);
   CHECK_PTHREAD_CALL(pthread_attr_destroy, (&attributes), __FUNCTION__);
+
+#if defined(__GLIBC__)
+  // If we're the main thread, check whether we were run with an unlimited stack. In that case,
+  // glibc will have reported a 2GB stack for our 32-bit process, and our stack overflow detection
+  // will be broken because we'll die long before we get close to 2GB.
+  bool is_main_thread = (::art::GetTid() == getpid());
+  if (is_main_thread) {
+    rlimit stack_limit;
+    if (getrlimit(RLIMIT_STACK, &stack_limit) == -1) {
+      PLOG(FATAL) << "getrlimit(RLIMIT_STACK) failed";
+    }
+    if (stack_limit.rlim_cur == RLIM_INFINITY) {
+      size_t old_stack_size = *stack_size;
+
+      // Use the kernel default limit as our size, and adjust the base to match.
+      *stack_size = 8 * MB;
+      *stack_base = reinterpret_cast<uint8_t*>(*stack_base) + (old_stack_size - *stack_size);
+
+      VLOG(threads) << "Limiting unlimited stack (reported as " << PrettySize(old_stack_size) << ")"
+                    << " to " << PrettySize(*stack_size)
+                    << " with base " << *stack_base;
+    }
+  }
+#endif
+
 #endif
 }
 
@@ -289,6 +315,10 @@ std::string PrettyDescriptor(const char* descriptor) {
   return result;
 }
 
+std::string PrettyDescriptor(Primitive::Type type) {
+  return PrettyDescriptor(Primitive::Descriptor(type));
+}
+
 std::string PrettyField(mirror::ArtField* f, bool with_type) {
   if (f == NULL) {
     return "null";
@@ -1002,7 +1032,7 @@ void SetThreadName(const char* thread_name) {
   } else {
     s = thread_name + len - 15;
   }
-#if defined(HAVE_ANDROID_PTHREAD_SETNAME_NP)
+#if defined(__BIONIC__)
   // pthread_setname_np fails rather than truncating long strings.
   char buf[16];       // MAX_TASK_COMM_LEN=16 is hard-coded into bionic
   strncpy(buf, s, sizeof(buf)-1);
@@ -1064,10 +1094,6 @@ std::string GetSchedulerGroupName(pid_t tid) {
 
 void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
     mirror::ArtMethod* current_method) {
-  // We may be called from contexts where current_method is not null, so we must assert this.
-  if (current_method != nullptr) {
-    Locks::mutator_lock_->AssertSharedHeld(Thread::Current());
-  }
 #ifdef __linux__
   std::unique_ptr<Backtrace> backtrace(Backtrace::Create(BACKTRACE_CURRENT_PROCESS, tid));
   if (!backtrace->Unwind(0)) {
@@ -1099,7 +1125,9 @@ void DumpNativeStack(std::ostream& os, pid_t tid, const char* prefix,
         if (it->func_offset != 0) {
           os << "+" << it->func_offset;
         }
-      } else if (current_method != nullptr && current_method->IsWithinQuickCode(it->pc)) {
+      } else if (current_method != nullptr &&
+                 Locks::mutator_lock_->IsSharedHeld(Thread::Current()) &&
+                 current_method->IsWithinQuickCode(it->pc)) {
         const void* start_of_code = current_method->GetEntryPointFromQuickCompiledCode();
         os << JniLongName(current_method) << "+"
            << (it->pc - reinterpret_cast<uintptr_t>(start_of_code));
@@ -1374,4 +1402,19 @@ bool Exec(std::vector<std::string>& arg_vector, std::string* error_msg) {
   return true;
 }
 
+void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* dst) {
+  Leb128Encoder(dst).PushBackUnsigned(data);
+}
+
+void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* dst) {
+  Leb128Encoder(dst).PushBackSigned(data);
+}
+
+void PushWord(std::vector<uint8_t>* buf, int data) {
+  buf->push_back(data & 0xff);
+  buf->push_back((data >> 8) & 0xff);
+  buf->push_back((data >> 16) & 0xff);
+  buf->push_back((data >> 24) & 0xff);
+}
+
 }  // namespace art
index a34a01d..7fb5bbd 100644 (file)
 #include <vector>
 
 #include "base/logging.h"
+#include "base/mutex.h"
 #include "globals.h"
 #include "instruction_set.h"
-#include "base/mutex.h"
-
-#ifdef HAVE_ANDROID_OS
-#include "cutils/properties.h"
-#endif
+#include "primitive.h"
 
 namespace art {
 
@@ -279,6 +276,7 @@ std::string PrettyDescriptor(mirror::String* descriptor)
 std::string PrettyDescriptor(const char* descriptor);
 std::string PrettyDescriptor(mirror::Class* klass)
     SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
+std::string PrettyDescriptor(Primitive::Type type);
 
 // Returns a human-readable signature for 'f'. Something like "a.b.C.f" or
 // "int a.b.C.f" (depending on the value of 'with_type').
@@ -499,6 +497,11 @@ class VoidFunctor {
   }
 };
 
+void PushWord(std::vector<uint8_t>* buf, int32_t data);
+
+void EncodeUnsignedLeb128(uint32_t data, std::vector<uint8_t>* buf);
+void EncodeSignedLeb128(int32_t data, std::vector<uint8_t>* buf);
+
 }  // namespace art
 
 #endif  // ART_RUNTIME_UTILS_H_
index d4fe106..0f9aeb5 100644 (file)
@@ -39,11 +39,11 @@ inline const InstructionFlags& MethodVerifier::GetInstructionFlags(size_t index)
 }
 
 inline mirror::ClassLoader* MethodVerifier::GetClassLoader() {
-  return class_loader_->Get();
+  return class_loader_.Get();
 }
 
 inline mirror::DexCache* MethodVerifier::GetDexCache() {
-  return dex_cache_->Get();
+  return dex_cache_.Get();
 }
 
 inline MethodReference MethodVerifier::GetMethodReference() const {
index 0ee4414..ef6b343 100644 (file)
@@ -124,8 +124,8 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(mirror::Class* klass,
 }
 
 MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
-                                                        Handle<mirror::DexCache> dex_cache,
-                                                        Handle<mirror::ClassLoader> class_loader,
+                                                        ConstHandle<mirror::DexCache> dex_cache,
+                                                        ConstHandle<mirror::ClassLoader> class_loader,
                                                         const DexFile::ClassDef* class_def,
                                                         bool allow_soft_failures,
                                                         std::string* error) {
@@ -139,6 +139,7 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
   while (it.HasNextStaticField() || it.HasNextInstanceField()) {
     it.Next();
   }
+  Thread* self = Thread::Current();
   size_t error_count = 0;
   bool hard_fail = false;
   ClassLinker* linker = Runtime::Current()->GetClassLinker();
@@ -157,17 +158,19 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
         linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
                               NullHandle<mirror::ArtMethod>(), type);
     if (method == NULL) {
-      DCHECK(Thread::Current()->IsExceptionPending());
+      DCHECK(self->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
-      Thread::Current()->ClearException();
+      self->ClearException();
     }
+    StackHandleScope<1> hs(self);
+    Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
     MethodVerifier::FailureKind result = VerifyMethod(method_idx,
                                                       dex_file,
                                                       dex_cache,
                                                       class_loader,
                                                       class_def,
                                                       it.GetMethodCodeItem(),
-                                                      method,
+                                                      h_method,
                                                       it.GetMemberAccessFlags(),
                                                       allow_soft_failures,
                                                       false);
@@ -201,17 +204,19 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
         linker->ResolveMethod(*dex_file, method_idx, dex_cache, class_loader,
                               NullHandle<mirror::ArtMethod>(), type);
     if (method == NULL) {
-      DCHECK(Thread::Current()->IsExceptionPending());
+      DCHECK(self->IsExceptionPending());
       // We couldn't resolve the method, but continue regardless.
-      Thread::Current()->ClearException();
+      self->ClearException();
     }
+    StackHandleScope<1> hs(self);
+    Handle<mirror::ArtMethod> h_method(hs.NewHandle(method));
     MethodVerifier::FailureKind result = VerifyMethod(method_idx,
                                                       dex_file,
                                                       dex_cache,
                                                       class_loader,
                                                       class_def,
                                                       it.GetMethodCodeItem(),
-                                                      method,
+                                                      h_method,
                                                       it.GetMemberAccessFlags(),
                                                       allow_soft_failures,
                                                       false);
@@ -239,20 +244,20 @@ MethodVerifier::FailureKind MethodVerifier::VerifyClass(const DexFile* dex_file,
 
 MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
                                                          const DexFile* dex_file,
-                                                         Handle<mirror::DexCache> dex_cache,
-                                                         Handle<mirror::ClassLoader> class_loader,
+                                                         ConstHandle<mirror::DexCache> dex_cache,
+                                                         ConstHandle<mirror::ClassLoader> class_loader,
                                                          const DexFile::ClassDef* class_def,
                                                          const DexFile::CodeItem* code_item,
-                                                         mirror::ArtMethod* method,
+                                                         ConstHandle<mirror::ArtMethod> method,
                                                          uint32_t method_access_flags,
                                                          bool allow_soft_failures,
                                                          bool need_precise_constants) {
   MethodVerifier::FailureKind result = kNoFailure;
   uint64_t start_ns = kTimeVerifyMethod ? NanoTime() : 0;
 
-  MethodVerifier verifier(dex_file, &dex_cache, &class_loader, class_def, code_item,
-                           method_idx, method, method_access_flags, true, allow_soft_failures,
-                           need_precise_constants);
+  MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def, code_item,
+                          method_idx, method, method_access_flags, true, allow_soft_failures,
+                          need_precise_constants);
   if (verifier.Verify()) {
     // Verification completed, however failures may be pending that didn't cause the verification
     // to hard fail.
@@ -288,13 +293,13 @@ MethodVerifier::FailureKind MethodVerifier::VerifyMethod(uint32_t method_idx,
 
 void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_idx,
                                          const DexFile* dex_file,
-                                         Handle<mirror::DexCache> dex_cache,
-                                         Handle<mirror::ClassLoader> class_loader,
+                                         ConstHandle<mirror::DexCache> dex_cache,
+                                         ConstHandle<mirror::ClassLoader> class_loader,
                                          const DexFile::ClassDef* class_def,
                                          const DexFile::CodeItem* code_item,
-                                         mirror::ArtMethod* method,
+                                         ConstHandle<mirror::ArtMethod> method,
                                          uint32_t method_access_flags) {
-  MethodVerifier verifier(dex_file, &dex_cache, &class_loader, class_def, code_item,
+  MethodVerifier verifier(dex_file, dex_cache, class_loader, class_def, code_item,
                           dex_method_idx, method, method_access_flags, true, true, true);
   verifier.Verify();
   verifier.DumpFailures(os);
@@ -302,11 +307,11 @@ void MethodVerifier::VerifyMethodAndDump(std::ostream& os, uint32_t dex_method_i
   verifier.Dump(os);
 }
 
-MethodVerifier::MethodVerifier(const DexFile* dex_file, Handle<mirror::DexCache>* dex_cache,
-                               Handle<mirror::ClassLoader>* class_loader,
+MethodVerifier::MethodVerifier(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
+                               ConstHandle<mirror::ClassLoader> class_loader,
                                const DexFile::ClassDef* class_def,
                                const DexFile::CodeItem* code_item, uint32_t dex_method_idx,
-                               mirror::ArtMethod* method, uint32_t method_access_flags,
+                               ConstHandle<mirror::ArtMethod> method, uint32_t method_access_flags,
                                bool can_load_classes, bool allow_soft_failures,
                                bool need_precise_constants)
     : reg_types_(can_load_classes),
@@ -343,12 +348,13 @@ MethodVerifier::~MethodVerifier() {
 
 void MethodVerifier::FindLocksAtDexPc(mirror::ArtMethod* m, uint32_t dex_pc,
                                       std::vector<uint32_t>* monitor_enter_dex_pcs) {
-  StackHandleScope<2> hs(Thread::Current());
+  StackHandleScope<3> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
-  MethodVerifier verifier(m->GetDexFile(), &dex_cache, &class_loader, &m->GetClassDef(),
-                          m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), false,
-                          true, false);
+  Handle<mirror::ArtMethod> method(hs.NewHandle(m));
+  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+                          m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
+                          false, true, false);
   verifier.interesting_dex_pc_ = dex_pc;
   verifier.monitor_enter_dex_pcs_ = monitor_enter_dex_pcs;
   verifier.FindLocksAtDexPc();
@@ -367,12 +373,13 @@ void MethodVerifier::FindLocksAtDexPc() {
 
 mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(mirror::ArtMethod* m,
                                                            uint32_t dex_pc) {
-  StackHandleScope<2> hs(Thread::Current());
+  StackHandleScope<3> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
-  MethodVerifier verifier(m->GetDexFile(), &dex_cache, &class_loader, &m->GetClassDef(),
-                          m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
-                          true, false);
+  Handle<mirror::ArtMethod> method(hs.NewHandle(m));
+  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+                          m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
+                          true, true, false);
   return verifier.FindAccessedFieldAtDexPc(dex_pc);
 }
 
@@ -397,12 +404,13 @@ mirror::ArtField* MethodVerifier::FindAccessedFieldAtDexPc(uint32_t dex_pc) {
 
 mirror::ArtMethod* MethodVerifier::FindInvokedMethodAtDexPc(mirror::ArtMethod* m,
                                                             uint32_t dex_pc) {
-  StackHandleScope<2> hs(Thread::Current());
+  StackHandleScope<3> hs(Thread::Current());
   Handle<mirror::DexCache> dex_cache(hs.NewHandle(m->GetDexCache()));
   Handle<mirror::ClassLoader> class_loader(hs.NewHandle(m->GetClassLoader()));
-  MethodVerifier verifier(m->GetDexFile(), &dex_cache, &class_loader, &m->GetClassDef(),
-                          m->GetCodeItem(), m->GetDexMethodIndex(), m, m->GetAccessFlags(), true,
-                          true, false);
+  Handle<mirror::ArtMethod> method(hs.NewHandle(m));
+  MethodVerifier verifier(m->GetDexFile(), dex_cache, class_loader, &m->GetClassDef(),
+                          m->GetCodeItem(), m->GetDexMethodIndex(), method, m->GetAccessFlags(),
+                          true, true, false);
   return verifier.FindInvokedMethodAtDexPc(dex_pc);
 }
 
@@ -502,9 +510,9 @@ std::ostream& MethodVerifier::Fail(VerifyError error) {
     }
   }
   failures_.push_back(error);
-  std::string location(StringPrintf("%s: [0x%X]", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
+  std::string location(StringPrintf("%s: [0x%X] ", PrettyMethod(dex_method_idx_, *dex_file_).c_str(),
                                     work_insn_idx_));
-  std::ostringstream* failure_message = new std::ostringstream(location);
+  std::ostringstream* failure_message = new std::ostringstream(location, std::ostringstream::ate);
   failure_messages_.push_back(failure_message);
   return *failure_message;
 }
@@ -519,7 +527,7 @@ void MethodVerifier::PrependToLastFailMessage(std::string prepend) {
   DCHECK_NE(failure_num, 0U);
   std::ostringstream* last_fail_message = failure_messages_[failure_num - 1];
   prepend += last_fail_message->str();
-  failure_messages_[failure_num - 1] = new std::ostringstream(prepend);
+  failure_messages_[failure_num - 1] = new std::ostringstream(prepend, std::ostringstream::ate);
   delete last_fail_message;
 }
 
@@ -623,7 +631,7 @@ bool MethodVerifier::ScanTryCatchBlocks() {
       if (iterator.GetHandlerTypeIndex() != DexFile::kDexNoIndex16) {
         mirror::Class* exception_type = linker->ResolveType(*dex_file_,
                                                             iterator.GetHandlerTypeIndex(),
-                                                            *dex_cache_, *class_loader_);
+                                                            dex_cache_, class_loader_);
         if (exception_type == NULL) {
           DCHECK(Thread::Current()->IsExceptionPending());
           Thread::Current()->ClearException();
@@ -1731,7 +1739,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
       RegType& res_type = ResolveClassAndCheckAccess(type_idx);
       if (res_type.IsConflict()) {
         // If this is a primitive type, fail HARD.
-        mirror::Class* klass = (*dex_cache_)->GetResolvedType(type_idx);
+        mirror::Class* klass = dex_cache_->GetResolvedType(type_idx);
         if (klass != nullptr && klass->IsPrimitive()) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "using primitive type "
               << dex_file_->StringByTypeIdx(type_idx) << " in instanceof in "
@@ -1876,8 +1884,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
           Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with array type "
                                             << array_type;
         } else {
-          RegType& component_type = reg_types_.GetComponentType(array_type,
-                                                                      class_loader_->Get());
+          RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
           DCHECK(!component_type.IsConflict());
           if (component_type.IsNonZeroReferenceTypes()) {
             Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "invalid fill-array-data with component type "
@@ -2206,7 +2213,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
         const DexFile::MethodId& method_id = dex_file_->GetMethodId(method_idx);
         uint32_t return_type_idx = dex_file_->GetProtoId(method_id.proto_idx_).return_type_idx_;
         const char* descriptor = dex_file_->StringByTypeIdx(return_type_idx);
-        return_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+        return_type = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
       }
       if (!return_type->IsLowHalf()) {
         work_line_->SetResultRegisterType(*return_type);
@@ -2288,8 +2295,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
         work_line_->MarkRefsAsInitialized(this_type);
       }
       if (return_type == nullptr) {
-        return_type = &reg_types_.FromDescriptor(class_loader_->Get(),
-                                                 return_type_descriptor, false);
+        return_type = &reg_types_.FromDescriptor(GetClassLoader(), return_type_descriptor,
+                                                 false);
       }
       if (!return_type->IsLowHalf()) {
         work_line_->SetResultRegisterType(*return_type);
@@ -2315,8 +2322,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
         } else {
           descriptor = called_method->GetReturnTypeDescriptor();
         }
-        RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
-                                                               false);
+        RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
         if (!return_type.IsLowHalf()) {
           work_line_->SetResultRegisterType(return_type);
         } else {
@@ -2373,8 +2379,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
       } else {
         descriptor = abs_method->GetReturnTypeDescriptor();
       }
-      RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
-                                                             false);
+      RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
       if (!return_type.IsLowHalf()) {
         work_line_->SetResultRegisterType(return_type);
       } else {
@@ -2627,6 +2632,18 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
     case Instruction::IPUT_QUICK:
       VerifyIPutQuick(inst, reg_types_.Integer(), true);
       break;
+    case Instruction::IPUT_BOOLEAN_QUICK:
+        VerifyIPutQuick(inst, reg_types_.Boolean(), true);
+      break;
+    case Instruction::IPUT_BYTE_QUICK:
+        VerifyIPutQuick(inst, reg_types_.Byte(), true);
+      break;
+    case Instruction::IPUT_CHAR_QUICK:
+        VerifyIPutQuick(inst, reg_types_.Char(), true);
+      break;
+    case Instruction::IPUT_SHORT_QUICK:
+        VerifyIPutQuick(inst, reg_types_.Short(), true);
+      break;
     case Instruction::IPUT_WIDE_QUICK:
       VerifyIPutQuick(inst, reg_types_.LongLo(), true);
       break;
@@ -2639,8 +2656,7 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
       mirror::ArtMethod* called_method = VerifyInvokeVirtualQuickArgs(inst, is_range);
       if (called_method != NULL) {
         const char* descriptor = called_method->GetReturnTypeDescriptor();
-        RegType& return_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor,
-                                                               false);
+        RegType& return_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
         if (!return_type.IsLowHalf()) {
           work_line_->SetResultRegisterType(return_type);
         } else {
@@ -2660,10 +2676,6 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
     case Instruction::UNUSED_43:
     case Instruction::UNUSED_79:
     case Instruction::UNUSED_7A:
-    case Instruction::UNUSED_EB:
-    case Instruction::UNUSED_EC:
-    case Instruction::UNUSED_ED:
-    case Instruction::UNUSED_EE:
     case Instruction::UNUSED_EF:
     case Instruction::UNUSED_F0:
     case Instruction::UNUSED_F1:
@@ -2805,8 +2817,8 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
         has_catch_all_handler = true;
       } else {
         // It is also a catch-all if it is java.lang.Throwable.
-        mirror::Class* klass = linker->ResolveType(*dex_file_, handler_type_idx, *dex_cache_,
-                                                   *class_loader_);
+        mirror::Class* klass = linker->ResolveType(*dex_file_, handler_type_idx, dex_cache_,
+                                                   class_loader_);
         if (klass != nullptr) {
           if (klass == mirror::Throwable::GetJavaLangThrowable()) {
             has_catch_all_handler = true;
@@ -2926,18 +2938,17 @@ bool MethodVerifier::CodeFlowVerifyInstruction(uint32_t* start_guess) {
 RegType& MethodVerifier::ResolveClassAndCheckAccess(uint32_t class_idx) {
   const char* descriptor = dex_file_->StringByTypeIdx(class_idx);
   RegType& referrer = GetDeclaringClass();
-  mirror::Class* klass = (*dex_cache_)->GetResolvedType(class_idx);
-  RegType& result =
-      klass != NULL ? reg_types_.FromClass(descriptor, klass,
-                                           klass->CannotBeAssignedFromOtherTypes())
-                    : reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+  mirror::Class* klass = dex_cache_->GetResolvedType(class_idx);
+  RegType& result = klass != NULL ?
+      reg_types_.FromClass(descriptor, klass, klass->CannotBeAssignedFromOtherTypes()) :
+      reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   if (result.IsConflict()) {
     Fail(VERIFY_ERROR_BAD_CLASS_SOFT) << "accessing broken descriptor '" << descriptor
         << "' in " << referrer;
     return result;
   }
   if (klass == NULL && !result.IsUnresolvedTypes()) {
-    (*dex_cache_)->SetResolvedType(class_idx, result.GetClass());
+    dex_cache_->SetResolvedType(class_idx, result.GetClass());
   }
   // Check if access is allowed. Unresolved types use xxxWithAccessCheck to
   // check at runtime if access is allowed and so pass here. If result is
@@ -3009,7 +3020,7 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
   }
   mirror::Class* klass = klass_type.GetClass();
   RegType& referrer = GetDeclaringClass();
-  mirror::ArtMethod* res_method = (*dex_cache_)->GetResolvedMethod(dex_method_idx);
+  mirror::ArtMethod* res_method = dex_cache_->GetResolvedMethod(dex_method_idx);
   if (res_method == NULL) {
     const char* name = dex_file_->GetMethodName(method_id);
     const Signature signature = dex_file_->GetMethodSignature(method_id);
@@ -3022,7 +3033,7 @@ mirror::ArtMethod* MethodVerifier::ResolveMethodAndCheckAccess(uint32_t dex_meth
       res_method = klass->FindVirtualMethod(name, signature);
     }
     if (res_method != NULL) {
-      (*dex_cache_)->SetResolvedMethod(dex_method_idx, res_method);
+      dex_cache_->SetResolvedMethod(dex_method_idx, res_method);
     } else {
       // If a virtual or interface method wasn't found with the expected type, look in
       // the direct methods. This can happen when the wrong invoke type is used or when
@@ -3145,7 +3156,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(T* it, const
       } else {
         const uint32_t method_idx = (is_range) ? inst->VRegB_3rc() : inst->VRegB_35c();
         const uint16_t class_idx = dex_file_->GetMethodId(method_idx).class_idx_;
-        res_method_class = &reg_types_.FromDescriptor(class_loader_->Get(),
+        res_method_class = &reg_types_.FromDescriptor(GetClassLoader(),
                                                       dex_file_->StringByTypeIdx(class_idx),
                                                       false);
       }
@@ -3178,8 +3189,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvocationArgsFromIterator(T* it, const
       return nullptr;
     }
 
-    RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), param_descriptor,
-                                                        false);
+    RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), param_descriptor, false);
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + static_cast<uint32_t>(sig_registers) :
         arg[sig_registers];
     if (reg_type.IsIntegralTypes()) {
@@ -3393,7 +3403,7 @@ mirror::ArtMethod* MethodVerifier::VerifyInvokeVirtualQuickArgs(const Instructio
                                         << " missing signature component";
       return NULL;
     }
-    RegType& reg_type = reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+    RegType& reg_type = reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
     uint32_t get_reg = is_range ? inst->VRegC_3rc() + actual_args : arg[actual_args];
     if (!work_line_->VerifyRegisterType(get_reg, reg_type)) {
       return res_method;
@@ -3437,7 +3447,7 @@ void MethodVerifier::VerifyNewArray(const Instruction* inst, bool is_filled, boo
     } else {
       // Verify each register. If "arg_count" is bad, VerifyRegisterType() will run off the end of
       // the list and fail. It's legal, if silly, for arg_count to be zero.
-      RegType& expected_type = reg_types_.GetComponentType(res_type, class_loader_->Get());
+      RegType& expected_type = reg_types_.GetComponentType(res_type, GetClassLoader());
       uint32_t arg_count = (is_range) ? inst->VRegA_3rc() : inst->VRegA_35c();
       uint32_t arg[5];
       if (!is_range) {
@@ -3479,7 +3489,7 @@ void MethodVerifier::VerifyAGet(const Instruction* inst,
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aget";
     } else {
       /* verify the class */
-      RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+      RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
       if (!component_type.IsReferenceTypes() && !is_primitive) {
         Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "primitive array type " << array_type
             << " source for aget-object";
@@ -3558,7 +3568,7 @@ void MethodVerifier::VerifyAPut(const Instruction* inst,
     } else if (!array_type.IsArrayTypes()) {
       Fail(VERIFY_ERROR_BAD_CLASS_HARD) << "not array type " << array_type << " with aput";
     } else {
-      RegType& component_type = reg_types_.GetComponentType(array_type, class_loader_->Get());
+      RegType& component_type = reg_types_.GetComponentType(array_type, GetClassLoader());
       const uint32_t vregA = inst->VRegA_23x();
       if (is_primitive) {
         VerifyPrimitivePut(component_type, insn_type, vregA);
@@ -3591,8 +3601,8 @@ mirror::ArtField* MethodVerifier::GetStaticField(int field_idx) {
     return NULL;  // Can't resolve Class so no more to do here, will do checking at runtime.
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, *dex_cache_,
-                                                          *class_loader_);
+  mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
+                                                          class_loader_);
   if (field == NULL) {
     VLOG(verifier) << "Unable to resolve static field " << field_idx << " ("
               << dex_file_->GetFieldName(field_id) << ") in "
@@ -3626,8 +3636,8 @@ mirror::ArtField* MethodVerifier::GetInstanceField(RegType& obj_type, int field_
     return NULL;  // Can't resolve Class so no more to do here
   }
   ClassLinker* class_linker = Runtime::Current()->GetClassLinker();
-  mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, *dex_cache_,
-                                                          *class_loader_);
+  mirror::ArtField* field = class_linker->ResolveFieldJLS(*dex_file_, field_idx, dex_cache_,
+                                                          class_loader_);
   if (field == NULL) {
     VLOG(verifier) << "Unable to resolve instance field " << field_idx << " ("
               << dex_file_->GetFieldName(field_id) << ") in "
@@ -3709,7 +3719,7 @@ void MethodVerifier::VerifyISGet(const Instruction* inst, RegType& insn_type,
   if (field_type == nullptr) {
     const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
     const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
-    field_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+    field_type = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   }
   DCHECK(field_type != nullptr);
   const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
@@ -3781,7 +3791,7 @@ void MethodVerifier::VerifyISPut(const Instruction* inst, RegType& insn_type,
   if (field_type == nullptr) {
     const DexFile::FieldId& field_id = dex_file_->GetFieldId(field_idx);
     const char* descriptor = dex_file_->GetFieldTypeDescriptor(field_id);
-    field_type = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+    field_type = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
   }
   DCHECK(field_type != nullptr);
   const uint32_t vregA = (is_static) ? inst->VRegA_21c() : inst->VRegA_22c();
@@ -4025,14 +4035,11 @@ InstructionFlags* MethodVerifier::CurrentInsnFlags() {
 
 RegType& MethodVerifier::GetMethodReturnType() {
   if (return_type_ == nullptr) {
-    if (mirror_method_ != NULL) {
+    if (mirror_method_.Get() != nullptr) {
       Thread* self = Thread::Current();
       StackHandleScope<1> hs(self);
-      mirror::Class* return_type_class;
-      {
-        HandleWrapper<mirror::ArtMethod> h_mirror_method(hs.NewHandleWrapper(&mirror_method_));
-        return_type_class = MethodHelper(h_mirror_method).GetReturnType(can_load_classes_);
-      }
+      mirror::Class* return_type_class =
+          MethodHelper(hs.NewHandle(mirror_method_.Get())).GetReturnType(can_load_classes_);
       if (return_type_class != nullptr) {
         return_type_ = &reg_types_.FromClass(mirror_method_->GetReturnTypeDescriptor(),
                                              return_type_class,
@@ -4047,7 +4054,7 @@ RegType& MethodVerifier::GetMethodReturnType() {
       const DexFile::ProtoId& proto_id = dex_file_->GetMethodPrototype(method_id);
       uint16_t return_type_idx = proto_id.return_type_idx_;
       const char* descriptor = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(return_type_idx));
-      return_type_ = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+      return_type_ = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
     }
   }
   return *return_type_;
@@ -4058,12 +4065,12 @@ RegType& MethodVerifier::GetDeclaringClass() {
     const DexFile::MethodId& method_id = dex_file_->GetMethodId(dex_method_idx_);
     const char* descriptor
         = dex_file_->GetTypeDescriptor(dex_file_->GetTypeId(method_id.class_idx_));
-    if (mirror_method_ != NULL) {
+    if (mirror_method_.Get() != nullptr) {
       mirror::Class* klass = mirror_method_->GetDeclaringClass();
       declaring_class_ = &reg_types_.FromClass(descriptor, klass,
                                                klass->CannotBeAssignedFromOtherTypes());
     } else {
-      declaring_class_ = &reg_types_.FromDescriptor(class_loader_->Get(), descriptor, false);
+      declaring_class_ = &reg_types_.FromDescriptor(GetClassLoader(), descriptor, false);
     }
   }
   return *declaring_class_;
index e63a90c..af33414 100644 (file)
@@ -27,6 +27,7 @@
 #include "class_reference.h"
 #include "dex_file.h"
 #include "dex_instruction.h"
+#include "handle.h"
 #include "instruction_flags.h"
 #include "method_reference.h"
 #include "reg_type.h"
@@ -141,18 +142,18 @@ class MethodVerifier {
   /* Verify a class. Returns "kNoFailure" on success. */
   static FailureKind VerifyClass(mirror::Class* klass, bool allow_soft_failures, std::string* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
-  static FailureKind VerifyClass(const DexFile* dex_file, Handle<mirror::DexCache> dex_cache,
-                                 Handle<mirror::ClassLoader> class_loader,
+  static FailureKind VerifyClass(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
+                                 ConstHandle<mirror::ClassLoader> class_loader,
                                  const DexFile::ClassDef* class_def,
                                  bool allow_soft_failures, std::string* error)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   static void VerifyMethodAndDump(std::ostream& os, uint32_t method_idx, const DexFile* dex_file,
-                                  Handle<mirror::DexCache> dex_cache,
-                                  Handle<mirror::ClassLoader> class_loader,
+                                  ConstHandle<mirror::DexCache> dex_cache,
+                                  ConstHandle<mirror::ClassLoader> class_loader,
                                   const DexFile::ClassDef* class_def,
                                   const DexFile::CodeItem* code_item,
-                                  mirror::ArtMethod* method, uint32_t method_access_flags)
+                                  ConstHandle<mirror::ArtMethod> method, uint32_t method_access_flags)
       SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
   uint8_t EncodePcToReferenceMapData() const;
@@ -201,9 +202,10 @@ class MethodVerifier {
     return can_load_classes_;
   }
 
-  MethodVerifier(const DexFile* dex_file, Handle<mirror::DexCache>* dex_cache,
-                 Handle<mirror::ClassLoader>* class_loader, const DexFile::ClassDef* class_def,
-                 const DexFile::CodeItem* code_item, uint32_t method_idx, mirror::ArtMethod* method,
+  MethodVerifier(const DexFile* dex_file, ConstHandle<mirror::DexCache> dex_cache,
+                 ConstHandle<mirror::ClassLoader> class_loader, const DexFile::ClassDef* class_def,
+                 const DexFile::CodeItem* code_item, uint32_t method_idx,
+                 ConstHandle<mirror::ArtMethod> method,
                  uint32_t access_flags, bool can_load_classes, bool allow_soft_failures,
                  bool need_precise_constants)
           SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
@@ -252,11 +254,11 @@ class MethodVerifier {
    *      for code flow problems.
    */
   static FailureKind VerifyMethod(uint32_t method_idx, const DexFile* dex_file,
-                                  Handle<mirror::DexCache> dex_cache,
-                                  Handle<mirror::ClassLoader> class_loader,
+                                  ConstHandle<mirror::DexCache> dex_cache,
+                                  ConstHandle<mirror::ClassLoader> class_loader,
                                   const DexFile::ClassDef* class_def_idx,
                                   const DexFile::CodeItem* code_item,
-                                  mirror::ArtMethod* method, uint32_t method_access_flags,
+                                  ConstHandle<mirror::ArtMethod> method, uint32_t method_access_flags,
                                   bool allow_soft_failures, bool need_precise_constants)
           SHARED_LOCKS_REQUIRED(Locks::mutator_lock_);
 
@@ -639,14 +641,14 @@ class MethodVerifier {
 
   const uint32_t dex_method_idx_;  // The method we're working on.
   // Its object representation if known.
-  mirror::ArtMethod* mirror_method_ GUARDED_BY(Locks::mutator_lock_);
+  ConstHandle<mirror::ArtMethod> mirror_method_ GUARDED_BY(Locks::mutator_lock_);
   const uint32_t method_access_flags_;  // Method's access flags.
   RegType* return_type_;  // Lazily computed return type of the method.
   const DexFile* const dex_file_;  // The dex file containing the method.
   // The dex_cache for the declaring class of the method.
-  Handle<mirror::DexCache>* dex_cache_ GUARDED_BY(Locks::mutator_lock_);
+  ConstHandle<mirror::DexCache> dex_cache_ GUARDED_BY(Locks::mutator_lock_);
   // The class loader for the declaring class of the method.
-  Handle<mirror::ClassLoader>* class_loader_ GUARDED_BY(Locks::mutator_lock_);
+  ConstHandle<mirror::ClassLoader> class_loader_ GUARDED_BY(Locks::mutator_lock_);
   const DexFile::ClassDef* const class_def_;  // The class def of the declaring class of the method.
   const DexFile::CodeItem* const code_item_;  // The code item containing the code for the method.
   RegType* declaring_class_;  // Lazily computed reg type of the method's declaring class.
index d508fb5..1682d4e 100644 (file)
@@ -25,6 +25,7 @@
 #include "jni.h"
 
 #include "base/macros.h"
+#include "base/mutex.h"
 #include "gc_root.h"
 #include "globals.h"
 #include "object_callbacks.h"
index c02f310..63bfc44 100644 (file)
@@ -123,7 +123,7 @@ ZipEntry* ZipArchive::Find(const char* name, std::string* error_msg) const {
 
   // Resist the urge to delete the space. <: is a bigraph sequence.
   std::unique_ptr< ::ZipEntry> zip_entry(new ::ZipEntry);
-  const int32_t error = FindEntry(handle_, name, zip_entry.get());
+  const int32_t error = FindEntry(handle_, ZipEntryName(name), zip_entry.get());
   if (error) {
     *error_msg = std::string(ErrorCodeString(error));
     return nullptr;
index c655226..7539990 100644 (file)
 
 #if defined(__APPLE__)
 #define _NSIG NSIG
+#define sighandler_t sig_t
 #endif
 
 namespace art {
 
 class SignalAction {
  public:
-  SignalAction() : claimed_(false) {
+  SignalAction() : claimed_(false), uses_old_style_(false) {
   }
 
   // Claim the signal and keep the action specified.
@@ -60,13 +61,22 @@ class SignalAction {
   }
 
   // Change the recorded action to that specified.
-  void SetAction(const struct sigaction& action) {
+  // If oldstyle is true then this action is from an older style signal()
+  // call as opposed to sigaction().  In this case the sa_handler is
+  // used when invoking the user's handler.
+  void SetAction(const struct sigaction& action, bool oldstyle) {
     action_ = action;
+    uses_old_style_ = oldstyle;
+  }
+
+  bool OldStyle() const {
+    return uses_old_style_;
   }
 
  private:
   struct sigaction action_;     // Action to be performed.
   bool claimed_;                // Whether signal is claimed or not.
+  bool uses_old_style_;         // Action is created using signal().  Use sa_handler.
 };
 
 // User's signal handlers
@@ -115,7 +125,7 @@ void InvokeUserSignalHandler(int sig, siginfo_t* info, void* context) {
   }
 
   const struct sigaction& action = user_sigactions[sig].GetAction();
-  if ((action.sa_flags & SA_SIGINFO) == 0) {
+  if (user_sigactions[sig].OldStyle()) {
     if (action.sa_handler != NULL) {
       action.sa_handler(sig);
     } else {
@@ -145,7 +155,7 @@ int sigaction(int signal, const struct sigaction* new_action, struct sigaction*
       *old_action = user_sigactions[signal].GetAction();
     }
     if (new_action != NULL) {
-      user_sigactions[signal].SetAction(*new_action);
+      user_sigactions[signal].SetAction(*new_action, false);
     }
     return 0;
   }
@@ -168,6 +178,45 @@ int sigaction(int signal, const struct sigaction* new_action, struct sigaction*
   return linked_sigaction(signal, new_action, old_action);
 }
 
+sighandler_t signal(int signal, sighandler_t handler) {
+  struct sigaction sa;
+  sigemptyset(&sa.sa_mask);
+  sa.sa_handler = handler;
+  sa.sa_flags = SA_RESTART;
+  sighandler_t oldhandler;
+
+  // If this signal has been claimed as a signal chain, record the user's
+  // action but don't pass it on to the kernel.
+  // Note that we check that the signal number is in range here.  An out of range signal
+  // number should behave exactly as the libc sigaction.
+  if (signal > 0 && signal < _NSIG && user_sigactions[signal].IsClaimed()) {
+    oldhandler = reinterpret_cast<sighandler_t>(user_sigactions[signal].GetAction().sa_handler);
+    user_sigactions[signal].SetAction(sa, true);
+    return oldhandler;
+  }
+
+  // Will only get here if the signal chain has not been claimed.  We want
+  // to pass the sigaction on to the kernel via the real sigaction in libc.
+
+  void* linked_sigaction_sym = dlsym(RTLD_NEXT, "sigaction");
+  if (linked_sigaction_sym == nullptr) {
+    linked_sigaction_sym = dlsym(RTLD_DEFAULT, "sigaction");
+    if (linked_sigaction_sym == nullptr ||
+        linked_sigaction_sym == reinterpret_cast<void*>(sigaction)) {
+      log("Unable to find next sigaction in signal chain");
+      abort();
+    }
+  }
+
+  typedef int (*SigAction)(int, const struct sigaction*, struct sigaction*);
+  SigAction linked_sigaction = reinterpret_cast<SigAction>(linked_sigaction_sym);
+  if (linked_sigaction(signal, &sa, &sa) == -1) {
+    return SIG_ERR;
+  }
+
+  return reinterpret_cast<sighandler_t>(sa.sa_handler);
+}
+
 int sigprocmask(int how, const sigset_t* bionic_new_set, sigset_t* bionic_old_set) {
   const sigset_t* new_set_ptr = bionic_new_set;
   sigset_t tmpset;
index f09fc26..a6d9b66 100644 (file)
@@ -87,12 +87,12 @@ extern "C" JNIEXPORT void JNICALL Java_Main_terminateSignalTest(JNIEnv*, jclass)
 
 // Prevent the compiler being a smart-alec and optimizing out the assignment
 // to nullptr.
-char *p = nullptr;
+char *go_away_compiler = nullptr;
 
 extern "C" JNIEXPORT jint JNICALL Java_Main_testSignal(JNIEnv*, jclass) {
 #if defined(__arm__) || defined(__i386__) || defined(__x86_64__) || defined(__aarch64__)
   // On supported architectures we cause a real SEGV.
-  *p = 'a';
+  *go_away_compiler = 'a';
 #else
   // On other architectures we simulate SEGV.
   kill(getpid(), SIGSEGV);
index 364e646..3dcc23d 100644 (file)
@@ -5,287 +5,292 @@ package junit.framework;
  */
 
 public class Assert {
-    /**
-     * Protect constructor since it is a static only class
-     */
-    protected Assert() {
-    }
+       /**
+        * Protect constructor since it is a static only class
+        */
+       protected Assert() {
+       }
 
-    /**
-     * Asserts that a condition is true. If it isn't it throws
-     * an AssertionFailedError with the given message.
-     */
-    static public void assertTrue(String message, boolean condition) {
-        if (!condition)
-            fail(message);
-    }
-    /**
-     * Asserts that a condition is true. If it isn't it throws
-     * an AssertionFailedError.
-     */
-    static public void assertTrue(boolean condition) {
-        assertTrue(null, condition);
-    }
-    /**
-     * Asserts that a condition is false. If it isn't it throws
-     * an AssertionFailedError with the given message.
-     */
-    static public void assertFalse(String message, boolean condition) {
-        assertTrue(message, !condition);
-    }
-    /**
-     * Asserts that a condition is false. If it isn't it throws
-     * an AssertionFailedError.
-     */
-    static public void assertFalse(boolean condition) {
-        assertFalse(null, condition);
-    }
-    /**
-     * Fails a test with the given message.
-     */
-    static public void fail(String message) {
-        throw new AssertionFailedError(message);
-    }
-    /**
-     * Fails a test with no message.
-     */
-    static public void fail() {
-        fail(null);
-    }
-    /**
-     * Asserts that two objects are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertEquals(String message, Object expected, Object actual) {
-        if (expected == null && actual == null)
-            return;
-        if (expected != null && expected.equals(actual))
-            return;
-        failNotEquals(message, expected, actual);
-    }
-    /**
-     * Asserts that two objects are equal. If they are not
-     * an AssertionFailedError is thrown.
-     */
-    static public void assertEquals(Object expected, Object actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two Strings are equal.
-     */
-    static public void assertEquals(String message, String expected, String actual) {
-        if (expected == null && actual == null)
-            return;
-        if (expected != null && expected.equals(actual))
-            return;
-        throw new ComparisonFailure(message, expected, actual);
-    }
-    /**
-     * Asserts that two Strings are equal.
-     */
-    static public void assertEquals(String expected, String actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two doubles are equal concerning a delta.  If they are not
-     * an AssertionFailedError is thrown with the given message.  If the expected
-     * value is infinity then the delta value is ignored.
-     */
-    static public void assertEquals(String message, double expected, double actual, double delta) {
-        // handle infinity specially since subtracting to infinite values gives NaN and the
-        // the following test fails
-        if (Double.isInfinite(expected)) {
-            if (!(expected == actual))
-                failNotEquals(message, new Double(expected), new Double(actual));
-        } else if (!(Math.abs(expected-actual) <= delta)) // Because comparison with NaN always returns false
-            failNotEquals(message, new Double(expected), new Double(actual));
-    }
-    /**
-     * Asserts that two doubles are equal concerning a delta. If the expected
-     * value is infinity then the delta value is ignored.
-     */
-    static public void assertEquals(double expected, double actual, double delta) {
-        assertEquals(null, expected, actual, delta);
-    }
-    /**
-     * Asserts that two floats are equal concerning a delta. If they are not
-     * an AssertionFailedError is thrown with the given message.  If the expected
-     * value is infinity then the delta value is ignored.
-     */
-    static public void assertEquals(String message, float expected, float actual, float delta) {
-         // handle infinity specially since subtracting to infinite values gives NaN and the
-        // the following test fails
-        if (Float.isInfinite(expected)) {
-            if (!(expected == actual))
-                failNotEquals(message, new Float(expected), new Float(actual));
-        } else if (!(Math.abs(expected-actual) <= delta))
-              failNotEquals(message, new Float(expected), new Float(actual));
-    }
-    /**
-     * Asserts that two floats are equal concerning a delta. If the expected
-     * value is infinity then the delta value is ignored.
-     */
-    static public void assertEquals(float expected, float actual, float delta) {
-        assertEquals(null, expected, actual, delta);
-    }
-    /**
-     * Asserts that two longs are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertEquals(String message, long expected, long actual) {
-        assertEquals(message, new Long(expected), new Long(actual));
-    }
-    /**
-     * Asserts that two longs are equal.
-     */
-    static public void assertEquals(long expected, long actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two booleans are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertEquals(String message, boolean expected, boolean actual) {
-            assertEquals(message, new Boolean(expected), new Boolean(actual));
-      }
-    /**
-     * Asserts that two booleans are equal.
-      */
-    static public void assertEquals(boolean expected, boolean actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two bytes are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-      static public void assertEquals(String message, byte expected, byte actual) {
-        assertEquals(message, new Byte(expected), new Byte(actual));
-    }
-    /**
-        * Asserts that two bytes are equal.
-     */
-    static public void assertEquals(byte expected, byte actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two chars are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-      static public void assertEquals(String message, char expected, char actual) {
-            assertEquals(message, new Character(expected), new Character(actual));
-      }
-    /**
-     * Asserts that two chars are equal.
-     */
-      static public void assertEquals(char expected, char actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two shorts are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertEquals(String message, short expected, short actual) {
-            assertEquals(message, new Short(expected), new Short(actual));
-    }
-      /**
-     * Asserts that two shorts are equal.
-     */
-    static public void assertEquals(short expected, short actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two ints are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-      static public void assertEquals(String message, int expected, int actual) {
-        assertEquals(message, new Integer(expected), new Integer(actual));
-      }
-      /**
-        * Asserts that two ints are equal.
-     */
-      static public void assertEquals(int expected, int actual) {
-          assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that an object isn't null.
-     */
-    static public void assertNotNull(Object object) {
-        assertNotNull(null, object);
-    }
-    /**
-     * Asserts that an object isn't null. If it is
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertNotNull(String message, Object object) {
-        assertTrue(message, object != null);
-    }
-    /**
-     * Asserts that an object is null.
-     */
-    static public void assertNull(Object object) {
-        assertNull(null, object);
-    }
-    /**
-     * Asserts that an object is null.  If it is not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertNull(String message, Object object) {
-        assertTrue(message, object == null);
-    }
-    /**
-     * Asserts that two objects refer to the same object. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertSame(String message, Object expected, Object actual) {
-        if (expected == actual)
-            return;
-        failNotSame(message, expected, actual);
-    }
-    /**
-     * Asserts that two objects refer to the same object. If they are not
-     * the same an AssertionFailedError is thrown.
-     */
-    static public void assertSame(Object expected, Object actual) {
-        assertSame(null, expected, actual);
-    }
-     /**
-      * Asserts that two objects refer to the same object. If they are not
-      * an AssertionFailedError is thrown with the given message.
-      */
-    static public void assertNotSame(String message, Object expected, Object actual) {
-        if (expected == actual)
-            failSame(message);
-    }
-    /**
-     * Asserts that two objects refer to the same object. If they are not
-     * the same an AssertionFailedError is thrown.
-     */
-    static public void assertNotSame(Object expected, Object actual) {
-        assertNotSame(null, expected, actual);
-    }
+       /**
+        * Asserts that a condition is true. If it isn't it throws
+        * an AssertionFailedError with the given message.
+        */
+       static public void assertTrue(String message, boolean condition) {
+               if (!condition)
+                       fail(message);
+       }
+       /**
+        * Asserts that a condition is true. If it isn't it throws
+        * an AssertionFailedError.
+        */
+       static public void assertTrue(boolean condition) {
+               assertTrue(null, condition);
+       }
+       /**
+        * Asserts that a condition is false. If it isn't it throws
+        * an AssertionFailedError with the given message.
+        */
+       static public void assertFalse(String message, boolean condition) {
+               assertTrue(message, !condition);
+       }
+       /**
+        * Asserts that a condition is false. If it isn't it throws
+        * an AssertionFailedError.
+        */
+       static public void assertFalse(boolean condition) {
+               assertFalse(null, condition);
+       }
+       /**
+        * Fails a test with the given message.
+        */
+       static public void fail(String message) {
+               if (message == null) {
+                       throw new AssertionFailedError();
+               }
+               throw new AssertionFailedError(message);
+       }
+       /**
+        * Fails a test with no message.
+        */
+       static public void fail() {
+               fail(null);
+       }
+       /**
+        * Asserts that two objects are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, Object expected, Object actual) {
+               if (expected == null && actual == null)
+                       return;
+               if (expected != null && expected.equals(actual))
+                       return;
+               failNotEquals(message, expected, actual);
+       }
+       /**
+        * Asserts that two objects are equal. If they are not
+        * an AssertionFailedError is thrown.
+        */
+       static public void assertEquals(Object expected, Object actual) {
+           assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two Strings are equal. 
+        */
+       static public void assertEquals(String message, String expected, String actual) {
+               if (expected == null && actual == null)
+                       return;
+               if (expected != null && expected.equals(actual))
+                       return;
+               String cleanMessage= message == null ? "" : message;
+               throw new ComparisonFailure(cleanMessage, expected, actual);
+       }
+       /**
+        * Asserts that two Strings are equal. 
+        */
+       static public void assertEquals(String expected, String actual) {
+           assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two doubles are equal concerning a delta.  If they are not
+        * an AssertionFailedError is thrown with the given message.  If the expected
+        * value is infinity then the delta value is ignored.
+        */
+       static public void assertEquals(String message, double expected, double actual, double delta) {
+               if (Double.compare(expected, actual) == 0)
+                       return;
+               if (!(Math.abs(expected-actual) <= delta))
+                       failNotEquals(message, new Double(expected), new Double(actual));
+       }
+       /**
+        * Asserts that two doubles are equal concerning a delta. If the expected
+        * value is infinity then the delta value is ignored.
+        */
+       static public void assertEquals(double expected, double actual, double delta) {
+           assertEquals(null, expected, actual, delta);
+       }
+       /**
+        * Asserts that two floats are equal concerning a positive delta. If they
+        * are not an AssertionFailedError is thrown with the given message. If the
+        * expected value is infinity then the delta value is ignored.
+        */
+       static public void assertEquals(String message, float expected, float actual, float delta) {
+               if (Float.compare(expected, actual) == 0)
+                       return;
+               if (!(Math.abs(expected - actual) <= delta))
+                               failNotEquals(message, new Float(expected), new Float(actual));
+       }
+       /**
+        * Asserts that two floats are equal concerning a delta. If the expected
+        * value is infinity then the delta value is ignored.
+        */
+       static public void assertEquals(float expected, float actual, float delta) {
+               assertEquals(null, expected, actual, delta);
+       }
+       /**
+        * Asserts that two longs are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, long expected, long actual) {
+           assertEquals(message, new Long(expected), new Long(actual));
+       }
+       /**
+        * Asserts that two longs are equal.
+        */
+       static public void assertEquals(long expected, long actual) {
+           assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two booleans are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, boolean expected, boolean actual) {
+               assertEquals(message, Boolean.valueOf(expected), Boolean.valueOf(actual));
+       }
+       /**
+        * Asserts that two booleans are equal.
+        */
+       static public void assertEquals(boolean expected, boolean actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two bytes are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, byte expected, byte actual) {
+               assertEquals(message, new Byte(expected), new Byte(actual));
+       }
+       /**
+        * Asserts that two bytes are equal.
+        */
+       static public void assertEquals(byte expected, byte actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two chars are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, char expected, char actual) {
+               assertEquals(message, new Character(expected), new Character(actual));
+       }
+       /**
+        * Asserts that two chars are equal.
+        */
+       static public void assertEquals(char expected, char actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two shorts are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, short expected, short actual) {
+               assertEquals(message, new Short(expected), new Short(actual));
+       }
+       /**
+        * Asserts that two shorts are equal.
+        */
+       static public void assertEquals(short expected, short actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two ints are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, int expected, int actual) {
+               assertEquals(message, new Integer(expected), new Integer(actual));
+       }
+       /**
+        * Asserts that two ints are equal.
+        */
+       static public void assertEquals(int expected, int actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that an object isn't null.
+        */
+       static public void assertNotNull(Object object) {
+               assertNotNull(null, object);
+       }
+       /**
+        * Asserts that an object isn't null. If it is
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertNotNull(String message, Object object) {
+               assertTrue(message, object != null);
+       }
+       /**
+        * Asserts that an object is null. If it isn't an {@link AssertionError} is
+        * thrown.
+        * Message contains: Expected: <null> but was: object
+        * 
+        * @param object
+        *            Object to check or <code>null</code>
+        */
+       static public void assertNull(Object object) {
+               String message = "Expected: <null> but was: " + String.valueOf(object);
+               assertNull(message, object);
+       }
+       /**
+        * Asserts that an object is null.  If it is not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertNull(String message, Object object) {
+               assertTrue(message, object == null);
+       }
+       /**
+        * Asserts that two objects refer to the same object. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertSame(String message, Object expected, Object actual) {
+               if (expected == actual)
+                       return;
+               failNotSame(message, expected, actual);
+       }
+       /**
+        * Asserts that two objects refer to the same object. If they are not
+        * the same an AssertionFailedError is thrown.
+        */
+       static public void assertSame(Object expected, Object actual) {
+           assertSame(null, expected, actual);
+       }
+       /**
+        * Asserts that two objects do not refer to the same object. If they do
+        * refer to the same object an AssertionFailedError is thrown with the
+        * given message.
+        */
+       static public void assertNotSame(String message, Object expected, Object actual) {
+               if (expected == actual)
+                       failSame(message);
+       }
+       /**
+        * Asserts that two objects do not refer to the same object. If they do
+        * refer to the same object an AssertionFailedError is thrown.
+        */
+       static public void assertNotSame(Object expected, Object actual) {
+               assertNotSame(null, expected, actual);
+       }
 
-    static private void failSame(String message) {
-        String formatted= "";
-         if (message != null)
-             formatted= message+" ";
-         fail(formatted+"expected not same");
-    }
+       static public void failSame(String message) {
+               String formatted= "";
+               if (message != null)
+                       formatted= message+" ";
+               fail(formatted+"expected not same");
+       }
 
-    static private void failNotSame(String message, Object expected, Object actual) {
-        String formatted= "";
-        if (message != null)
-            formatted= message+" ";
-        fail(formatted+"expected same:<"+expected+"> was not:<"+actual+">");
-    }
+       static public void failNotSame(String message, Object expected, Object actual) {
+               String formatted= "";
+               if (message != null)
+                       formatted= message+" ";
+               fail(formatted+"expected same:<"+expected+"> was not:<"+actual+">");
+       }
 
-    static private void failNotEquals(String message, Object expected, Object actual) {
-        fail(format(message, expected, actual));
-    }
+       static public void failNotEquals(String message, Object expected, Object actual) {
+               fail(format(message, expected, actual));
+       }
 
-    static String format(String message, Object expected, Object actual) {
-        String formatted= "";
-        if (message != null)
-            formatted= message+" ";
-        return formatted+"expected:<"+expected+"> but was:<"+actual+">";
-    }
+       public static String format(String message, Object expected, Object actual) {
+               String formatted= "";
+               if (message != null && message.length() > 0)
+                       formatted= message+" ";
+               return formatted+"expected:<"+expected+"> but was:<"+actual+">";
+       }
 }
index e9cb3a3..0d7802c 100644 (file)
@@ -3,11 +3,18 @@ package junit.framework;
 /**
  * Thrown when an assertion failed.
  */
-public class AssertionFailedError extends Error {
-
-    public AssertionFailedError () {
-    }
-    public AssertionFailedError (String message) {
-        super (message);
-    }
-}
+public class AssertionFailedError extends AssertionError {
+
+       private static final long serialVersionUID= 1L;
+
+       public AssertionFailedError() {
+       }
+
+       public AssertionFailedError(String message) {
+               super(defaultString(message));
+       }
+
+       private static String defaultString(String message) {
+               return message == null ? "" : message;
+       }
+}
\ No newline at end of file
diff --git a/test/021-string2/src/junit/framework/ComparisonCompactor.java b/test/021-string2/src/junit/framework/ComparisonCompactor.java
new file mode 100644 (file)
index 0000000..e540f03
--- /dev/null
@@ -0,0 +1,87 @@
+package junit.framework;
+
+// android-changed add @hide
+/**
+ * @hide not needed for public API
+ */
+public class ComparisonCompactor {
+
+       private static final String ELLIPSIS= "...";
+       private static final String DELTA_END= "]";
+       private static final String DELTA_START= "[";
+       
+       private int fContextLength;
+       private String fExpected;
+       private String fActual;
+       private int fPrefix;
+       private int fSuffix;
+
+       public ComparisonCompactor(int contextLength, String expected, String actual) {
+               fContextLength= contextLength;
+               fExpected= expected;
+               fActual= actual;
+       }
+
+       public String compact(String message) {
+               if (fExpected == null || fActual == null || areStringsEqual()) {
+                       // android-changed use local method instead of Assert.format, since
+                       // the later is not part of Android API till API 16
+                       return format(message, fExpected, fActual);
+               }
+               findCommonPrefix();
+               findCommonSuffix();
+               String expected= compactString(fExpected);
+               String actual= compactString(fActual);
+               // android-changed use local format method
+               return format(message, expected, actual);
+       }
+
+       private String compactString(String source) {
+               String result= DELTA_START + source.substring(fPrefix, source.length() - fSuffix + 1) + DELTA_END;
+               if (fPrefix > 0)
+                       result= computeCommonPrefix() + result;
+               if (fSuffix > 0)
+                       result= result + computeCommonSuffix();
+               return result;
+       }
+
+       private void findCommonPrefix() {
+               fPrefix= 0;
+               int end= Math.min(fExpected.length(), fActual.length());
+               for (; fPrefix < end; fPrefix++) {
+                       if (fExpected.charAt(fPrefix) != fActual.charAt(fPrefix))
+                               break;
+               }
+       }
+
+       private void findCommonSuffix() {
+               int expectedSuffix= fExpected.length() - 1;
+               int actualSuffix= fActual.length() - 1;
+               for (; actualSuffix >= fPrefix && expectedSuffix >= fPrefix; actualSuffix--, expectedSuffix--) {
+                       if (fExpected.charAt(expectedSuffix) != fActual.charAt(actualSuffix))
+                               break;
+               }
+               fSuffix=  fExpected.length() - expectedSuffix;
+       }
+
+       private String computeCommonPrefix() {
+               return (fPrefix > fContextLength ? ELLIPSIS : "") + fExpected.substring(Math.max(0, fPrefix - fContextLength), fPrefix);
+       }
+
+       private String computeCommonSuffix() {
+               int end= Math.min(fExpected.length() - fSuffix + 1 + fContextLength, fExpected.length());
+               return fExpected.substring(fExpected.length() - fSuffix + 1, end) + (fExpected.length() - fSuffix + 1 < fExpected.length() - fContextLength ? ELLIPSIS : "");
+       }
+
+       private boolean areStringsEqual() {
+               return fExpected.equals(fActual);
+       }
+
+       // android-changed copy of Assert.format for reasons described above
+       private static String format(String message, Object expected, Object actual) {
+               String formatted= "";
+               if (message != null && message.length() > 0)
+                       formatted= message+" ";
+               return formatted+"expected:<"+expected+"> but was:<"+actual+">";
+       }
+}
index ccd476b..5077993 100644 (file)
@@ -2,67 +2,51 @@ package junit.framework;
 
 /**
  * Thrown when an assert equals for Strings failed.
- *
+ * 
  * Inspired by a patch from Alex Chaffee mailto:alex@purpletech.com
  */
 public class ComparisonFailure extends AssertionFailedError {
-    private String fExpected;
-    private String fActual;
+       private static final int MAX_CONTEXT_LENGTH= 20;
+       private static final long serialVersionUID= 1L;
+       
+       private String fExpected;
+       private String fActual;
 
-    /**
-     * Constructs a comparison failure.
-     * @param message the identifying message or null
-     * @param expected the expected string value
-     * @param actual the actual string value
-     */
-    public ComparisonFailure (String message, String expected, String actual) {
-        super (message);
-        fExpected= expected;
-        fActual= actual;
-    }
-
-    /**
-     * Returns "..." in place of common prefix and "..." in
-     * place of common suffix between expected and actual.
-     *
-     * @see java.lang.Throwable#getMessage()
-     */
-    public String getMessage() {
-        if (fExpected == null || fActual == null)
-            return Assert.format(super.getMessage(), fExpected, fActual);
-
-        int end= Math.min(fExpected.length(), fActual.length());
-
-        int i= 0;
-        for (; i < end; i++) {
-            if (fExpected.charAt(i) != fActual.charAt(i))
-                break;
-        }
-        int j= fExpected.length()-1;
-        int k= fActual.length()-1;
-        for (; k >= i && j >= i; k--,j--) {
-            if (fExpected.charAt(j) != fActual.charAt(k))
-                break;
-        }
-        String actual, expected;
-
-        // equal strings
-        if (j < i && k < i) {
-            expected= fExpected;
-            actual= fActual;
-        } else {
-            expected= fExpected.substring(i, j+1);
-            actual= fActual.substring(i, k+1);
-            if (i <= end && i > 0) {
-                expected= "..."+expected;
-                actual= "..."+actual;
-            }
-
-            if (j < fExpected.length()-1)
-                expected= expected+"...";
-            if (k < fActual.length()-1)
-                actual= actual+"...";
-        }
-        return Assert.format(super.getMessage(), expected, actual);
-    }
-}
+       /**
+        * Constructs a comparison failure.
+        * @param message the identifying message or null
+        * @param expected the expected string value
+        * @param actual the actual string value
+        */
+       public ComparisonFailure (String message, String expected, String actual) {
+               super (message);
+               fExpected= expected;
+               fActual= actual;
+       }
+       
+       /**
+        * Returns "..." in place of common prefix and "..." in
+        * place of common suffix between expected and actual.
+        * 
+        * @see Throwable#getMessage()
+        */
+       @Override
+       public String getMessage() {
+               return new ComparisonCompactor(MAX_CONTEXT_LENGTH, fExpected, fActual).compact(super.getMessage());
+       }
+       
+       /**
+        * Gets the actual string value
+        * @return the actual string value
+        */
+       public String getActual() {
+               return fActual;
+       }
+       /**
+        * Gets the expected string value
+        * @return the expected string value
+        */
+       public String getExpected() {
+               return fExpected;
+       }
+}
\ No newline at end of file
index ecb3599..fa053fb 100644 (file)
@@ -123,3 +123,17 @@ fields are unique
 fields are .equals
 methods are unique
 methods are .equals
+type1 is a ParameterizedType
+type2 is a ParameterizedType
+type3 is a ParameterizedType
+type1(java.util.Set<java.lang.String>) equals type2(java.util.Set<java.lang.String>)
+type1(java.util.Set<java.lang.String>) equals type3(java.util.Set<java.lang.String>)
+type1(java.util.Set<java.lang.String>) hashCode equals type2(java.util.Set<java.lang.String>) hashCode
+type1(java.util.Set<java.lang.String>) hashCode equals type3(java.util.Set<java.lang.String>) hashCode
+type1 is a GenericArrayType
+type2 is a GenericArrayType
+type3 is a GenericArrayType
+type1(T[]) equals type2(T[])
+type1(T[]) equals type3(T[])
+type1(T[]) hashCode equals type2(T[]) hashCode
+type1(T[]) hashCode equals type3(T[]) hashCode
index 3e6d700..3fe3881 100644 (file)
@@ -18,8 +18,10 @@ import java.lang.reflect.*;
 import java.io.IOException;
 import java.util.Collections;
 import java.util.ArrayList;
+import java.util.Arrays;
 import java.util.List;
 import java.util.Map;
+import java.util.Set;
 
 /**
  * Reflection test.
@@ -579,16 +581,155 @@ public class Main {
         }
     }
 
+    public static void checkParametrizedTypeEqualsAndHashCode() {
+        Method method1;
+        Method method2;
+        Method method3;
+        try {
+            method1 = ParametrizedTypeTest.class.getDeclaredMethod("aMethod", Set.class);
+            method2 = ParametrizedTypeTest.class.getDeclaredMethod("aMethod", Set.class);
+            method3 = ParametrizedTypeTest.class.getDeclaredMethod("aMethodIdentical", Set.class);
+        } catch (NoSuchMethodException nsme) {
+            throw new RuntimeException(nsme);
+        }
+
+        List<Type> types1 = Arrays.asList(method1.getGenericParameterTypes());
+        List<Type> types2 = Arrays.asList(method2.getGenericParameterTypes());
+        List<Type> types3 = Arrays.asList(method3.getGenericParameterTypes());
+
+        Type type1 = types1.get(0);
+        Type type2 = types2.get(0);
+        Type type3 = types3.get(0);
+
+        if (type1 instanceof ParameterizedType) {
+            System.out.println("type1 is a ParameterizedType");
+        }
+        if (type2 instanceof ParameterizedType) {
+            System.out.println("type2 is a ParameterizedType");
+        }
+        if (type3 instanceof ParameterizedType) {
+            System.out.println("type3 is a ParameterizedType");
+        }
+
+        if (type1.equals(type2)) {
+            System.out.println("type1("+type1+") equals type2("+type2+")");
+        } else {
+            System.out.println("type1("+type1+") does not equal type2("+type2+")");
+        }
+
+        if (type1.equals(type3)) {
+            System.out.println("type1("+type1+") equals type3("+type3+")");
+        } else {
+            System.out.println("type1("+type1+") does not equal type3("+type3+")");
+        }
+        if (type1.hashCode() == type2.hashCode()) {
+            System.out.println("type1("+type1+") hashCode equals type2("+type2+") hashCode");
+        } else {
+            System.out.println(
+                   "type1("+type1+") hashCode does not equal type2("+type2+") hashCode");
+        }
+
+        if (type1.hashCode() == type3.hashCode()) {
+            System.out.println("type1("+type1+") hashCode equals type3("+type3+") hashCode");
+        } else {
+            System.out.println(
+                    "type1("+type1+") hashCode does not equal type3("+type3+") hashCode");
+        }
+    }
+
+    public static void checkGenericArrayTypeEqualsAndHashCode() {
+        Method method1;
+        Method method2;
+        Method method3;
+        try {
+            method1 = GenericArrayTypeTest.class.getDeclaredMethod("aMethod", Object[].class);
+            method2 = GenericArrayTypeTest.class.getDeclaredMethod("aMethod", Object[].class);
+            method3 = GenericArrayTypeTest.class.getDeclaredMethod("aMethodIdentical", Object[].class);
+        } catch (NoSuchMethodException nsme) {
+            throw new RuntimeException(nsme);
+        }
+
+        List<Type> types1 = Arrays.asList(method1.getGenericParameterTypes());
+        List<Type> types2 = Arrays.asList(method2.getGenericParameterTypes());
+        List<Type> types3 = Arrays.asList(method3.getGenericParameterTypes());
+
+        Type type1 = types1.get(0);
+        Type type2 = types2.get(0);
+        Type type3 = types3.get(0);
+
+        if (type1 instanceof GenericArrayType) {
+            System.out.println("type1 is a GenericArrayType");
+        }
+        if (type2 instanceof GenericArrayType) {
+            System.out.println("type2 is a GenericArrayType");
+        }
+        if (type3 instanceof GenericArrayType) {
+            System.out.println("type3 is a GenericArrayType");
+        }
+
+        if (type1.equals(type2)) {
+            System.out.println("type1("+type1+") equals type2("+type2+")");
+        } else {
+            System.out.println("type1("+type1+") does not equal type2("+type2+")");
+        }
+
+        if (type1.equals(type3)) {
+            System.out.println("type1("+type1+") equals type3("+type3+")");
+        } else {
+            System.out.println("type1("+type1+") does not equal type3("+type3+")");
+        }
+        if (type1.hashCode() == type2.hashCode()) {
+            System.out.println("type1("+type1+") hashCode equals type2("+type2+") hashCode");
+        } else {
+            System.out.println(
+                   "type1("+type1+") hashCode does not equal type2("+type2+") hashCode");
+        }
+
+        if (type1.hashCode() == type3.hashCode()) {
+            System.out.println("type1("+type1+") hashCode equals type3("+type3+") hashCode");
+        } else {
+            System.out.println(
+                    "type1("+type1+") hashCode does not equal type3("+type3+") hashCode");
+        }
+    }
+
+    private static void checkGetDeclaredConstructor() {
+        try {
+            Method.class.getDeclaredConstructor().setAccessible(true);
+            System.out.print("Didn't get an exception from method getDeclaredConstructor");
+        } catch (NoSuchMethodException e) {
+        } catch (Exception e) {
+            System.out.print(e);
+        }
+        try {
+            Field.class.getDeclaredConstructor().setAccessible(true);
+            System.out.print("Didn't get an exception from field getDeclaredConstructor");
+        } catch (NoSuchMethodException e) {
+        } catch (Exception e) {
+            System.out.print(e);
+        }
+        try {
+            Class.class.getDeclaredConstructor().setAccessible(true);
+            System.out.print("Didn't get an exception from class getDeclaredConstructor()");
+        } catch (SecurityException e) {
+        } catch (Exception e) {
+            System.out.print(e);
+        }
+    }
+
     public static void main(String[] args) throws Exception {
         Main test = new Main();
         test.run();
 
+        checkGetDeclaredConstructor();
         checkAccess();
         checkType();
         checkClinitForFields();
         checkClinitForMethods();
         checkGeneric();
         checkUnique();
+        checkParametrizedTypeEqualsAndHashCode();
+        checkGenericArrayTypeEqualsAndHashCode();
     }
 }
 
@@ -696,3 +837,13 @@ class Thrower {
     throw new UnsupportedOperationException();
   }
 }
+
+class ParametrizedTypeTest {
+    public void aMethod(Set<String> names) {}
+    public void aMethodIdentical(Set<String> names) {}
+}
+
+class GenericArrayTypeTest<T> {
+    public void aMethod(T[] names) {}
+    public void aMethodIdentical(T[] names) {}
+}
index 035690f..c93f8bb 100644 (file)
@@ -31,6 +31,7 @@ public class Main {
 
     static class InstanceMemEater {
         static boolean sawOome;
+        static InstanceMemEater hook;
 
         InstanceMemEater next;
         double d1, d2, d3, d4, d5, d6, d7, d8; // Bloat this object so we fill the heap faster.
@@ -45,6 +46,7 @@ public class Main {
         }
 
         static void confuseCompilerOptimization(InstanceMemEater instance) {
+          hook = instance;
         }
     }
 
@@ -61,6 +63,7 @@ public class Main {
             lastMemEater = lastMemEater.next;
         } while (lastMemEater != null);
         memEater.confuseCompilerOptimization(memEater);
+        InstanceMemEater.hook = null;
         return InstanceMemEater.sawOome;
     }
 
index 364e646..3dcc23d 100644 (file)
@@ -5,287 +5,292 @@ package junit.framework;
  */
 
 public class Assert {
-    /**
-     * Protect constructor since it is a static only class
-     */
-    protected Assert() {
-    }
+       /**
+        * Protect constructor since it is a static only class
+        */
+       protected Assert() {
+       }
 
-    /**
-     * Asserts that a condition is true. If it isn't it throws
-     * an AssertionFailedError with the given message.
-     */
-    static public void assertTrue(String message, boolean condition) {
-        if (!condition)
-            fail(message);
-    }
-    /**
-     * Asserts that a condition is true. If it isn't it throws
-     * an AssertionFailedError.
-     */
-    static public void assertTrue(boolean condition) {
-        assertTrue(null, condition);
-    }
-    /**
-     * Asserts that a condition is false. If it isn't it throws
-     * an AssertionFailedError with the given message.
-     */
-    static public void assertFalse(String message, boolean condition) {
-        assertTrue(message, !condition);
-    }
-    /**
-     * Asserts that a condition is false. If it isn't it throws
-     * an AssertionFailedError.
-     */
-    static public void assertFalse(boolean condition) {
-        assertFalse(null, condition);
-    }
-    /**
-     * Fails a test with the given message.
-     */
-    static public void fail(String message) {
-        throw new AssertionFailedError(message);
-    }
-    /**
-     * Fails a test with no message.
-     */
-    static public void fail() {
-        fail(null);
-    }
-    /**
-     * Asserts that two objects are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertEquals(String message, Object expected, Object actual) {
-        if (expected == null && actual == null)
-            return;
-        if (expected != null && expected.equals(actual))
-            return;
-        failNotEquals(message, expected, actual);
-    }
-    /**
-     * Asserts that two objects are equal. If they are not
-     * an AssertionFailedError is thrown.
-     */
-    static public void assertEquals(Object expected, Object actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two Strings are equal.
-     */
-    static public void assertEquals(String message, String expected, String actual) {
-        if (expected == null && actual == null)
-            return;
-        if (expected != null && expected.equals(actual))
-            return;
-        throw new ComparisonFailure(message, expected, actual);
-    }
-    /**
-     * Asserts that two Strings are equal.
-     */
-    static public void assertEquals(String expected, String actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two doubles are equal concerning a delta.  If they are not
-     * an AssertionFailedError is thrown with the given message.  If the expected
-     * value is infinity then the delta value is ignored.
-     */
-    static public void assertEquals(String message, double expected, double actual, double delta) {
-        // handle infinity specially since subtracting to infinite values gives NaN and the
-        // the following test fails
-        if (Double.isInfinite(expected)) {
-            if (!(expected == actual))
-                failNotEquals(message, new Double(expected), new Double(actual));
-        } else if (!(Math.abs(expected-actual) <= delta)) // Because comparison with NaN always returns false
-            failNotEquals(message, new Double(expected), new Double(actual));
-    }
-    /**
-     * Asserts that two doubles are equal concerning a delta. If the expected
-     * value is infinity then the delta value is ignored.
-     */
-    static public void assertEquals(double expected, double actual, double delta) {
-        assertEquals(null, expected, actual, delta);
-    }
-    /**
-     * Asserts that two floats are equal concerning a delta. If they are not
-     * an AssertionFailedError is thrown with the given message.  If the expected
-     * value is infinity then the delta value is ignored.
-     */
-    static public void assertEquals(String message, float expected, float actual, float delta) {
-         // handle infinity specially since subtracting to infinite values gives NaN and the
-        // the following test fails
-        if (Float.isInfinite(expected)) {
-            if (!(expected == actual))
-                failNotEquals(message, new Float(expected), new Float(actual));
-        } else if (!(Math.abs(expected-actual) <= delta))
-              failNotEquals(message, new Float(expected), new Float(actual));
-    }
-    /**
-     * Asserts that two floats are equal concerning a delta. If the expected
-     * value is infinity then the delta value is ignored.
-     */
-    static public void assertEquals(float expected, float actual, float delta) {
-        assertEquals(null, expected, actual, delta);
-    }
-    /**
-     * Asserts that two longs are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertEquals(String message, long expected, long actual) {
-        assertEquals(message, new Long(expected), new Long(actual));
-    }
-    /**
-     * Asserts that two longs are equal.
-     */
-    static public void assertEquals(long expected, long actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two booleans are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertEquals(String message, boolean expected, boolean actual) {
-            assertEquals(message, new Boolean(expected), new Boolean(actual));
-      }
-    /**
-     * Asserts that two booleans are equal.
-      */
-    static public void assertEquals(boolean expected, boolean actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two bytes are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-      static public void assertEquals(String message, byte expected, byte actual) {
-        assertEquals(message, new Byte(expected), new Byte(actual));
-    }
-    /**
-        * Asserts that two bytes are equal.
-     */
-    static public void assertEquals(byte expected, byte actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two chars are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-      static public void assertEquals(String message, char expected, char actual) {
-            assertEquals(message, new Character(expected), new Character(actual));
-      }
-    /**
-     * Asserts that two chars are equal.
-     */
-      static public void assertEquals(char expected, char actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two shorts are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertEquals(String message, short expected, short actual) {
-            assertEquals(message, new Short(expected), new Short(actual));
-    }
-      /**
-     * Asserts that two shorts are equal.
-     */
-    static public void assertEquals(short expected, short actual) {
-        assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that two ints are equal. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-      static public void assertEquals(String message, int expected, int actual) {
-        assertEquals(message, new Integer(expected), new Integer(actual));
-      }
-      /**
-        * Asserts that two ints are equal.
-     */
-      static public void assertEquals(int expected, int actual) {
-          assertEquals(null, expected, actual);
-    }
-    /**
-     * Asserts that an object isn't null.
-     */
-    static public void assertNotNull(Object object) {
-        assertNotNull(null, object);
-    }
-    /**
-     * Asserts that an object isn't null. If it is
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertNotNull(String message, Object object) {
-        assertTrue(message, object != null);
-    }
-    /**
-     * Asserts that an object is null.
-     */
-    static public void assertNull(Object object) {
-        assertNull(null, object);
-    }
-    /**
-     * Asserts that an object is null.  If it is not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertNull(String message, Object object) {
-        assertTrue(message, object == null);
-    }
-    /**
-     * Asserts that two objects refer to the same object. If they are not
-     * an AssertionFailedError is thrown with the given message.
-     */
-    static public void assertSame(String message, Object expected, Object actual) {
-        if (expected == actual)
-            return;
-        failNotSame(message, expected, actual);
-    }
-    /**
-     * Asserts that two objects refer to the same object. If they are not
-     * the same an AssertionFailedError is thrown.
-     */
-    static public void assertSame(Object expected, Object actual) {
-        assertSame(null, expected, actual);
-    }
-     /**
-      * Asserts that two objects refer to the same object. If they are not
-      * an AssertionFailedError is thrown with the given message.
-      */
-    static public void assertNotSame(String message, Object expected, Object actual) {
-        if (expected == actual)
-            failSame(message);
-    }
-    /**
-     * Asserts that two objects refer to the same object. If they are not
-     * the same an AssertionFailedError is thrown.
-     */
-    static public void assertNotSame(Object expected, Object actual) {
-        assertNotSame(null, expected, actual);
-    }
+       /**
+        * Asserts that a condition is true. If it isn't it throws
+        * an AssertionFailedError with the given message.
+        */
+       static public void assertTrue(String message, boolean condition) {
+               if (!condition)
+                       fail(message);
+       }
+       /**
+        * Asserts that a condition is true. If it isn't it throws
+        * an AssertionFailedError.
+        */
+       static public void assertTrue(boolean condition) {
+               assertTrue(null, condition);
+       }
+       /**
+        * Asserts that a condition is false. If it isn't it throws
+        * an AssertionFailedError with the given message.
+        */
+       static public void assertFalse(String message, boolean condition) {
+               assertTrue(message, !condition);
+       }
+       /**
+        * Asserts that a condition is false. If it isn't it throws
+        * an AssertionFailedError.
+        */
+       static public void assertFalse(boolean condition) {
+               assertFalse(null, condition);
+       }
+       /**
+        * Fails a test with the given message.
+        */
+       static public void fail(String message) {
+               if (message == null) {
+                       throw new AssertionFailedError();
+               }
+               throw new AssertionFailedError(message);
+       }
+       /**
+        * Fails a test with no message.
+        */
+       static public void fail() {
+               fail(null);
+       }
+       /**
+        * Asserts that two objects are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, Object expected, Object actual) {
+               if (expected == null && actual == null)
+                       return;
+               if (expected != null && expected.equals(actual))
+                       return;
+               failNotEquals(message, expected, actual);
+       }
+       /**
+        * Asserts that two objects are equal. If they are not
+        * an AssertionFailedError is thrown.
+        */
+       static public void assertEquals(Object expected, Object actual) {
+           assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two Strings are equal. 
+        */
+       static public void assertEquals(String message, String expected, String actual) {
+               if (expected == null && actual == null)
+                       return;
+               if (expected != null && expected.equals(actual))
+                       return;
+               String cleanMessage= message == null ? "" : message;
+               throw new ComparisonFailure(cleanMessage, expected, actual);
+       }
+       /**
+        * Asserts that two Strings are equal. 
+        */
+       static public void assertEquals(String expected, String actual) {
+           assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two doubles are equal concerning a delta.  If they are not
+        * an AssertionFailedError is thrown with the given message.  If the expected
+        * value is infinity then the delta value is ignored.
+        */
+       static public void assertEquals(String message, double expected, double actual, double delta) {
+               if (Double.compare(expected, actual) == 0)
+                       return;
+               if (!(Math.abs(expected-actual) <= delta))
+                       failNotEquals(message, new Double(expected), new Double(actual));
+       }
+       /**
+        * Asserts that two doubles are equal concerning a delta. If the expected
+        * value is infinity then the delta value is ignored.
+        */
+       static public void assertEquals(double expected, double actual, double delta) {
+           assertEquals(null, expected, actual, delta);
+       }
+       /**
+        * Asserts that two floats are equal concerning a positive delta. If they
+        * are not an AssertionFailedError is thrown with the given message. If the
+        * expected value is infinity then the delta value is ignored.
+        */
+       static public void assertEquals(String message, float expected, float actual, float delta) {
+               if (Float.compare(expected, actual) == 0)
+                       return;
+               if (!(Math.abs(expected - actual) <= delta))
+                               failNotEquals(message, new Float(expected), new Float(actual));
+       }
+       /**
+        * Asserts that two floats are equal concerning a delta. If the expected
+        * value is infinity then the delta value is ignored.
+        */
+       static public void assertEquals(float expected, float actual, float delta) {
+               assertEquals(null, expected, actual, delta);
+       }
+       /**
+        * Asserts that two longs are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, long expected, long actual) {
+           assertEquals(message, new Long(expected), new Long(actual));
+       }
+       /**
+        * Asserts that two longs are equal.
+        */
+       static public void assertEquals(long expected, long actual) {
+           assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two booleans are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, boolean expected, boolean actual) {
+               assertEquals(message, Boolean.valueOf(expected), Boolean.valueOf(actual));
+       }
+       /**
+        * Asserts that two booleans are equal.
+        */
+       static public void assertEquals(boolean expected, boolean actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two bytes are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, byte expected, byte actual) {
+               assertEquals(message, new Byte(expected), new Byte(actual));
+       }
+       /**
+        * Asserts that two bytes are equal.
+        */
+       static public void assertEquals(byte expected, byte actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two chars are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, char expected, char actual) {
+               assertEquals(message, new Character(expected), new Character(actual));
+       }
+       /**
+        * Asserts that two chars are equal.
+        */
+       static public void assertEquals(char expected, char actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two shorts are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, short expected, short actual) {
+               assertEquals(message, new Short(expected), new Short(actual));
+       }
+       /**
+        * Asserts that two shorts are equal.
+        */
+       static public void assertEquals(short expected, short actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that two ints are equal. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertEquals(String message, int expected, int actual) {
+               assertEquals(message, new Integer(expected), new Integer(actual));
+       }
+       /**
+        * Asserts that two ints are equal.
+        */
+       static public void assertEquals(int expected, int actual) {
+               assertEquals(null, expected, actual);
+       }
+       /**
+        * Asserts that an object isn't null.
+        */
+       static public void assertNotNull(Object object) {
+               assertNotNull(null, object);
+       }
+       /**
+        * Asserts that an object isn't null. If it is
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertNotNull(String message, Object object) {
+               assertTrue(message, object != null);
+       }
+       /**
+        * Asserts that an object is null. If it isn't an {@link AssertionError} is
+        * thrown.
+        * Message contains: Expected: <null> but was: object
+        * 
+        * @param object
+        *            Object to check or <code>null</code>
+        */
+       static public void assertNull(Object object) {
+               String message = "Expected: <null> but was: " + String.valueOf(object);
+               assertNull(message, object);
+       }
+       /**
+        * Asserts that an object is null.  If it is not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertNull(String message, Object object) {
+               assertTrue(message, object == null);
+       }
+       /**
+        * Asserts that two objects refer to the same object. If they are not
+        * an AssertionFailedError is thrown with the given message.
+        */
+       static public void assertSame(String message, Object expected, Object actual) {
+               if (expected == actual)
+                       return;
+               failNotSame(message, expected, actual);
+       }
+       /**
+        * Asserts that two objects refer to the same object. If they are not
+        * the same an AssertionFailedError is thrown.
+        */
+       static public void assertSame(Object expected, Object actual) {
+           assertSame(null, expected, actual);
+       }
+       /**
+        * Asserts that two objects do not refer to the same object. If they do
+        * refer to the same object an AssertionFailedError is thrown with the
+        * given message.
+        */
+       static public void assertNotSame(String message, Object expected, Object actual) {
+               if (expected == actual)
+                       failSame(message);
+       }
+       /**
+        * Asserts that two objects do not refer to the same object. If they do
+        * refer to the same object an AssertionFailedError is thrown.
+        */
+       static public void assertNotSame(Object expected, Object actual) {
+               assertNotSame(null, expected, actual);
+       }
 
-    static private void failSame(String message) {
-        String formatted= "";
-         if (message != null)
-             formatted= message+" ";
-         fail(formatted+"expected not same");
-    }
+       static public void failSame(String message) {
+               String formatted= "";
+               if (message != null)
+                       formatted= message+" ";
+               fail(formatted+"expected not same");
+       }
 
-    static private void failNotSame(String message, Object expected, Object actual) {
-        String formatted= "";
-        if (message != null)
-            formatted= message+" ";
-        fail(formatted+"expected same:<"+expected+"> was not:<"+actual+">");
-    }
+       static public void failNotSame(String message, Object expected, Object actual) {
+               String formatted= "";
+               if (message != null)
+                       formatted= message+" ";
+               fail(formatted+"expected same:<"+expected+"> was not:<"+actual+">");
+       }
 
-    static private void failNotEquals(String message, Object expected, Object actual) {
-        fail(format(message, expected, actual));
-    }
+       static public void failNotEquals(String message, Object expected, Object actual) {
+               fail(format(message, expected, actual));
+       }
 
-    static String format(String message, Object expected, Object actual) {
-        String formatted= "";
-        if (message != null)
-            formatted= message+" ";
-        return formatted+"expected:<"+expected+"> but was:<"+actual+">";
-    }
+       public static String format(String message, Object expected, Object actual) {
+               String formatted= "";
+               if (message != null && message.length() > 0)
+                       formatted= message+" ";
+               return formatted+"expected:<"+expected+"> but was:<"+actual+">";
+       }
 }
index e9cb3a3..0d7802c 100644 (file)
@@ -3,11 +3,18 @@ package junit.framework;
 /**
  * Thrown when an assertion failed.
  */
-public class AssertionFailedError extends Error {
-
-    public AssertionFailedError () {
-    }
-    public AssertionFailedError (String message) {
-        super (message);
-    }
-}
+public class AssertionFailedError extends AssertionError {
+
+       private static final long serialVersionUID= 1L;
+
+       public AssertionFailedError() {
+       }
+
+       public AssertionFailedError(String message) {
+               super(defaultString(message));
+       }
+
+       private static String defaultString(String message) {
+               return message == null ? "" : message;
+       }
+}
\ No newline at end of file
diff --git a/test/082-inline-execute/src/junit/framework/ComparisonCompactor.java b/test/082-inline-execute/src/junit/framework/ComparisonCompactor.java
new file mode 100644 (file)
index 0000000..e540f03
--- /dev/null
@@ -0,0 +1,87 @@
+package junit.framework;
+
+// android-changed add @hide
+/**
+ * @hide not needed for public API
+ */
+public class ComparisonCompactor {
+
+       private static final String ELLIPSIS= "...";
+       private static final String DELTA_END= "]";
+       private static final String DELTA_START= "[";
+       
+       private int fContextLength;
+       private String fExpected;
+       private String fActual;
+       private int fPrefix;
+       private int fSuffix;
+
+       public ComparisonCompactor(int contextLength, String expected, String actual) {
+               fContextLength= contextLength;
+               fExpected= expected;
+               fActual= actual;
+       }
+
+       public String compact(String message) {
+               if (fExpected == null || fActual == null || areStringsEqual()) {
+                       // android-changed use local method instead of Assert.format, since
+                       // the later is not part of Android API till API 16
+                       return format(message, fExpected, fActual);
+               }
+               findCommonPrefix();
+               findCommonSuffix();
+               String expected= compactString(fExpected);
+               String actual= compactString(fActual);
+               // android-changed use local format method
+               return format(message, expected, actual);
+       }
+
+       private String compactString(String source) {
+               String result= DELTA_START + source.substring(fPrefix, source.length() - fSuffix + 1) + DELTA_END;
+               if (fPrefix > 0)
+                       result= computeCommonPrefix() + result;
+               if (fSuffix > 0)
+                       result= result + computeCommonSuffix();
+               return result;
+       }
+
+       private void findCommonPrefix() {
+               fPrefix= 0;
+               int end= Math.min(fExpected.length(), fActual.length());
+               for (; fPrefix < end; fPrefix++) {
+                       if (fExpected.charAt(fPrefix) != fActual.charAt(fPrefix))
+                               break;
+               }
+       }
+
+       private void findCommonSuffix() {
+               int expectedSuffix= fExpected.length() - 1;
+               int actualSuffix= fActual.length() - 1;
+               for (; actualSuffix >= fPrefix && expectedSuffix >= fPrefix; actualSuffix--, expectedSuffix--) {
+                       if (fExpected.charAt(expectedSuffix) != fActual.charAt(actualSuffix))
+                               break;
+               }
+               fSuffix=  fExpected.length() - expectedSuffix;
+       }
+
+       private String computeCommonPrefix() {
+               return (fPrefix > fContextLength ? ELLIPSIS : "") + fExpected.substring(Math.max(0, fPrefix - fContextLength), fPrefix);
+       }
+
+       private String computeCommonSuffix() {
+               int end= Math.min(fExpected.length() - fSuffix + 1 + fContextLength, fExpected.length());
+               return fExpected.substring(fExpected.length() - fSuffix + 1, end) + (fExpected.length() - fSuffix + 1 < fExpected.length() - fContextLength ? ELLIPSIS : "");
+       }
+
+       private boolean areStringsEqual() {
+               return fExpected.equals(fActual);
+       }
+
+       // android-changed copy of Assert.format for reasons described above
+       private static String format(String message, Object expected, Object actual) {
+               String formatted= "";
+               if (message != null && message.length() > 0)
+                       formatted= message+" ";
+               return formatted+"expected:<"+expected+"> but was:<"+actual+">";
+       }
+}
index ccd476b..5077993 100644 (file)
@@ -2,67 +2,51 @@ package junit.framework;
 
 /**
  * Thrown when an assert equals for Strings failed.
- *
+ * 
  * Inspired by a patch from Alex Chaffee mailto:alex@purpletech.com
  */
 public class ComparisonFailure extends AssertionFailedError {
-    private String fExpected;
-    private String fActual;
+       private static final int MAX_CONTEXT_LENGTH= 20;
+       private static final long serialVersionUID= 1L;
+       
+       private String fExpected;
+       private String fActual;
 
-    /**
-     * Constructs a comparison failure.
-     * @param message the identifying message or null
-     * @param expected the expected string value
-     * @param actual the actual string value
-     */
-    public ComparisonFailure (String message, String expected, String actual) {
-        super (message);
-        fExpected= expected;
-        fActual= actual;
-    }
-
-    /**
-     * Returns "..." in place of common prefix and "..." in
-     * place of common suffix between expected and actual.
-     *
-     * @see java.lang.Throwable#getMessage()
-     */
-    public String getMessage() {
-        if (fExpected == null || fActual == null)
-            return Assert.format(super.getMessage(), fExpected, fActual);
-
-        int end= Math.min(fExpected.length(), fActual.length());
-
-        int i= 0;
-        for (; i < end; i++) {
-            if (fExpected.charAt(i) != fActual.charAt(i))
-                break;
-        }
-        int j= fExpected.length()-1;
-        int k= fActual.length()-1;
-        for (; k >= i && j >= i; k--,j--) {
-            if (fExpected.charAt(j) != fActual.charAt(k))
-                break;
-        }
-        String actual, expected;
-
-        // equal strings
-        if (j < i && k < i) {
-            expected= fExpected;
-            actual= fActual;
-        } else {
-            expected= fExpected.substring(i, j+1);
-            actual= fActual.substring(i, k+1);
-            if (i <= end && i > 0) {
-                expected= "..."+expected;
-                actual= "..."+actual;
-            }
-
-            if (j < fExpected.length()-1)
-                expected= expected+"...";
-            if (k < fActual.length()-1)
-                actual= actual+"...";
-        }
-        return Assert.format(super.getMessage(), expected, actual);
-    }
-}
+       /**
+        * Constructs a comparison failure.
+        * @param message the identifying message or null
+        * @param expected the expected string value
+        * @param actual the actual string value
+        */
+       public ComparisonFailure (String message, String expected, String actual) {
+               super (message);
+               fExpected= expected;
+               fActual= actual;
+       }
+       
+       /**
+        * Returns "..." in place of common prefix and "..." in
+        * place of common suffix between expected and actual.
+        * 
+        * @see Throwable#getMessage()
+        */
+       @Override
+       public String getMessage() {
+               return new ComparisonCompactor(MAX_CONTEXT_LENGTH, fExpected, fActual).compact(super.getMessage());
+       }
+       
+       /**
+        * Gets the actual string value
+        * @return the actual string value
+        */
+       public String getActual() {
+               return fActual;
+       }
+       /**
+        * Gets the expected string value
+        * @return the expected string value
+        */
+       public String getExpected() {
+               return fExpected;
+       }
+}
\ No newline at end of file
index 911abdf..92a4ec2 100644 (file)
@@ -21,21 +21,30 @@ flags="${@/--no-relocate/--relocate}"
 RUN="${RUN/push-and-run-prebuilt-test-jar/push-and-run-test-jar}"
 
 if [ $(basename $RUN) == 'host-run-test-jar' ]; then
-  BPATH="--runtime-option -Xbootclasspath:$ANDROID_HOST_OUT/../common/obj/JAVA_LIBRARIES/core-libart-hostdex_intermediates/javalib.jar"
-  # Remove prebuild from the flags, this test is for testing not having oat files.
-  flags="${flags/--prebuild/}"
+    framework="${ANDROID_HOST_OUT}/framework"
+    bpath_suffix="-hostdex"
+    # Remove prebuild from the flags, this test is for testing not having oat files.
+    flags="${flags/--prebuild/}"
 else
-  BPATH="--runtime-option -Xbootclasspath:/system/framework/core-libart.jar"
+    framework="/system/framework"
+    bpath_suffix=""
 fi
+bpath="${framework}/core-libart${bpath_suffix}.jar"
+bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
+bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
+bpath="${bpath}:${framework}/core-junit${bpath_suffix}.jar"
+bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
+bpath_arg="--runtime-option -Xbootclasspath:${bpath}"
+
 
 # Make sure we can run without an oat file,
 echo "Run -Xnoimage-dex2oat"
-${RUN} ${flags} ${BPATH} --runtime-option -Xnoimage-dex2oat --runtime-option -Xnodex2oat
+${RUN} ${flags} ${bpath_arg} --runtime-option -Xnoimage-dex2oat --runtime-option -Xnodex2oat
 
 # Make sure we can run with the oat file.
 echo "Run -Ximage-dex2oat"
-${RUN} ${flags} ${BPATH} --runtime-option -Ximage-dex2oat
+${RUN} ${flags} ${bpath_arg} --runtime-option -Ximage-dex2oat
 
 # Make sure we can run with the default settings.
 echo "Run default"
-${RUN} ${flags} ${BPATH}
+${RUN} ${flags} ${bpath_arg}
diff --git a/test/702-LargeBranchOffset/build b/test/702-LargeBranchOffset/build
new file mode 100644 (file)
index 0000000..eacf730
--- /dev/null
@@ -0,0 +1,27 @@
+#!/bin/bash
+#
+# Copyright (C) 2014 The Android Open Source Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# Stop if something fails.
+set -e
+
+# Write out a bunch of source files.
+cpp -P src/Main.java.in src/Main.java
+
+mkdir classes
+${JAVAC} -d classes src/*.java
+
+${DX} --debug --dex --output=classes.dex classes
+zip $TEST_NAME.jar classes.dex
diff --git a/test/702-LargeBranchOffset/expected.txt b/test/702-LargeBranchOffset/expected.txt
new file mode 100644 (file)
index 0000000..130678f
--- /dev/null
@@ -0,0 +1,5 @@
+0
+0
+2
+1
+512
diff --git a/test/702-LargeBranchOffset/info.txt b/test/702-LargeBranchOffset/info.txt
new file mode 100644 (file)
index 0000000..747263e
--- /dev/null
@@ -0,0 +1 @@
+Simple test to check if large branch offset works correctly.
diff --git a/test/702-LargeBranchOffset/src/Main.java.in b/test/702-LargeBranchOffset/src/Main.java.in
new file mode 100644 (file)
index 0000000..270d766
--- /dev/null
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 The Android Open Source Project
+ *
+ * Licensed under the Apache License, Version 2.0 (the "License");
+ * you may not use this file except in compliance with the License.
+ * You may obtain a copy of the License at
+ *
+ *      http://www.apache.org/licenses/LICENSE-2.0
+ *
+ * Unless required by applicable law or agreed to in writing, software
+ * distributed under the License is distributed on an "AS IS" BASIS,
+ * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ * See the License for the specific language governing permissions and
+ * limitations under the License.
+ */
+
+#define DO_2_TIMES(x) x x
+#define DO_4_TIMES(x) DO_2_TIMES(DO_2_TIMES(x))
+#define DO_16_TIMES(x) DO_4_TIMES(DO_4_TIMES(x))
+#define DO_256_TIMES(x) DO_16_TIMES(DO_16_TIMES(x))
+#define DO_512_TIMES(x) DO_256_TIMES(DO_2_TIMES(x))
+
+
+public class Main {
+  public static void main(String[] args) {
+    Main m = new Main();
+    System.out.println(m.foo(-1, -1));
+    System.out.println(m.foo(-1, +1));
+    System.out.println(m.foo(+1, -1));
+    System.out.println(m.foo(+1, +1));
+    System.out.println(m.value);
+  }
+
+  public int foo(int a, int b) {
+    if ( a >= 0 ) {
+      if ( b < 0 ) {
+        DO_512_TIMES( synchronized(lock) { value++; } )
+        return 2;
+      }
+      return 1;
+    }
+    return 0;
+  }
+
+  Object lock = new Object();
+  int value = 0;
+}
index da94458..7da57dd 100644 (file)
@@ -21,144 +21,8 @@ include art/build/Android.common_test.mk
 TEST_ART_RUN_TESTS := $(wildcard $(LOCAL_PATH)/[0-9]*)
 TEST_ART_RUN_TESTS := $(subst $(LOCAL_PATH)/,, $(TEST_ART_RUN_TESTS))
 
-# List all the test names for host and target and compiler variants.
-# $(1): test name, e.g. 003-omnibus-opcodes
-# $(2): undefined, -trace, -gcverify or -gcstress
-# $(3): -relocate, -norelocate, -no-prebuild, or undefined.
-define all-run-test-names
-  test-art-host-run-test$(2)-default$(3)-$(1)32 \
-  test-art-host-run-test$(2)-optimizing$(3)-$(1)32 \
-  test-art-host-run-test$(2)-interpreter$(3)-$(1)32 \
-  test-art-host-run-test$(2)-default$(3)-$(1)64 \
-  test-art-host-run-test$(2)-optimizing$(3)-$(1)64 \
-  test-art-host-run-test$(2)-interpreter$(3)-$(1)64 \
-  test-art-target-run-test$(2)-default$(3)-$(1)32 \
-  test-art-target-run-test$(2)-optimizing$(3)-$(1)32 \
-  test-art-target-run-test$(2)-interpreter$(3)-$(1)32 \
-  test-art-target-run-test$(2)-default$(3)-$(1)64 \
-  test-art-target-run-test$(2)-optimizing$(3)-$(1)64 \
-  test-art-target-run-test$(2)-interpreter$(3)-$(1)64
-endef  # all-run-test-names
-
-# Subset of the above for target only.
-define all-run-test-target-names
-  test-art-target-run-test$(2)-default$(3)-$(1)32 \
-  test-art-target-run-test$(2)-optimizing$(3)-$(1)32 \
-  test-art-target-run-test$(2)-interpreter$(3)-$(1)32 \
-  test-art-target-run-test$(2)-default$(3)-$(1)64 \
-  test-art-target-run-test$(2)-optimizing$(3)-$(1)64 \
-  test-art-target-run-test$(2)-interpreter$(3)-$(1)64
-endef  # all-run-test-target-names
-
-# Tests that are timing sensitive and flaky on heavily loaded systems.
-TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
-  053-wait-some \
-  055-enum-performance
-
- # disable timing sensitive tests on "dist" builds.
-ifdef dist_goal
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,-relocate))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-relocate))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,-relocate))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-relocate))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,-norelocate))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-norelocate))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,-norelocate))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-norelocate))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,-prebuild))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-prebuild))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,-prebuild))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-prebuild))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),,-no-prebuild))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-no-prebuild))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcverify,-no-prebuild))
-  ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-no-prebuild))
-endif
-
-# Tests that are broken in --trace mode.
-TEST_ART_BROKEN_TRACE_RUN_TESTS := \
-  004-SignalTest \
-  018-stack-overflow \
-  097-duplicate-method \
-  107-int-math2
-
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-relocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-no-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,-norelocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(call all-run-test-names,$(test),-trace,))
-
-# Tests that need more than 2MB of RAM or are running into other corner cases in GC stress related
-# to OOMEs.
-TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
-
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-relocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-no-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,-norelocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(call all-run-test-names,$(test),-gcstress,))
-
-# 115-native-bridge setup is complicated. Need to implement it correctly for the target.
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,-relocate)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,-relocate)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-relocate)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-relocate)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,-norelocate)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,-norelocate)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-norelocate)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-norelocate)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,-prebuild)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,-prebuild)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-prebuild)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-prebuild)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,,-no-prebuild)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-trace,-no-prebuild)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcverify,-no-prebuild)
-ART_TEST_KNOWN_BROKEN += $(call all-run-test-target-names,115-native-bridge,-gcstress,-no-prebuild)
-
-# NB 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
-# On host this is patched around by changing a run flag but we cannot do this on the target due to
-# a different run-script.
-TEST_ART_TARGET_BROKEN_PREBUILD_RUN_TESTS := \
-  116-nodex2oat
-
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),,-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-trace,-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-gcverify,-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_TARGET_PREBUILD_RUN_TESTS), $(call all-run-test-target-names,$(test),-gcstress,-prebuild))
-
-# NB 117-nopatchoat is not broken per-se it just doesn't work (and isn't meant to) without --prebuild --relocate
-TEST_ART_BROKEN_RELOCATE_TESTS := \
-  117-nopatchoat
-
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),,-relocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-trace,-relocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-gcverify,-relocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_RELOCATE_TESTS), $(call all-run-test-names,$(test),-gcstress,-relocate))
-
-TEST_ART_BROKEN_NORELOCATE_TESTS := \
-  117-nopatchoat
-
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),,-norelocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-trace,-norelocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-gcverify,-norelocate))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NORELOCATE_TESTS), $(call all-run-test-names,$(test),-gcstress,-norelocate))
-
-TEST_ART_BROKEN_NO_PREBUILD_TESTS := \
-  117-nopatchoat
-
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),,-no-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-trace,-no-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-gcverify,-no-prebuild))
-ART_TEST_KNOWN_BROKEN += $(foreach test, $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(call all-run-test-names,$(test),-gcstress,-no-prebuild))
+########################################################################
+# The art-run-tests module, used to build all run-tests into an image.
 
 # The path where build only targets will be output, e.g.
 # out/target/product/generic_x86_64/obj/PACKAGING/art-run-tests_intermediates/DATA
@@ -193,132 +57,233 @@ LOCAL_PICKUP_FILES := $(art_run_tests_dir)
 include $(BUILD_PHONY_PACKAGE)
 
 # Clear temp vars.
-all-run-test-names :=
 art_run_tests_dir :=
 define-build-art-run-test :=
 TEST_ART_RUN_TEST_BUILD_RULES :=
+
+########################################################################
+# General rules to build and run a run-test.
+
+# Test rule names or of the form:
+# test-art-{1: host or target}-run-test-{2: prebuild no-prebuild no-dex2oat}-
+#    {3: interpreter default optimizing}-{4: relocate no-relocate relocate-no-patchoat}-
+#    {5: trace or no-trace}-{6: gcstress gcverify cms}-{7: forcecopy checkjni jni}-
+#    {8: no-image or image}-{9: test name}{10: 32 or 64}
+TARGET_TYPES := host target
+PREBUILD_TYPES := prebuild
+ifeq ($(ART_TEST_RUN_TEST_NO_PREBUILD),true)
+  PREBUILD_TYPES += no-prebuild
+endif
+ifeq ($(ART_TEST_RUN_TEST_NO_DEX2OAT),true)
+  PREBUILD_TYPES += no-dex2oat
+endif
+COMPILER_TYPES :=
+ifeq ($(ART_TEST_DEFAULT_COMPILER),true)
+  COMPILER_TYPES += default
+endif
+ifeq ($(ART_TEST_INTERPRETER),true)
+  COMPILER_TYPES += interpreter
+endif
+ifeq ($(ART_TEST_OPTIMIZING),true)
+  COMPILER_TYPES += optimizing
+endif
+RELOCATE_TYPES := relocate
+ifeq ($(ART_TEST_RUN_TEST_NO_RELOCATE),true)
+  RELOCATE_TYPES += no-relocate
+endif
+ifeq ($(ART_TEST_RUN_TEST_RELOCATE_NO_PATCHOAT),true)
+  RELOCATE_TYPES := relocate-no-patchoat
+endif
+TRACE_TYPES := no-trace
+ifeq ($(ART_TEST_TRACE),true)
+  TRACE_TYPES += trace
+endif
+GC_TYPES := cms
+ifeq ($(ART_TEST_GC_STRESS),true)
+  GC_TYPES += gcstress
+endif
+ifeq ($(ART_TEST_GC_VERIFY),true)
+  GC_TYPES += gcverify
+endif
+JNI_TYPES := checkjni
+ifeq ($(ART_TEST_JNI_FORCECOPY),true)
+  JNI_TYPES += forcecopy
+endif
+IMAGE_TYPES := image
+ifeq ($(ART_TEST_RUN_TEST_NO_IMAGE),true)
+  IMAGE_TYPES += no-image
+endif
+ADDRESS_SIZES_TARGET := $(ART_PHONY_TEST_TARGET_SUFFIX) $(2ND_ART_PHONY_TEST_TARGET_SUFFIX)
+ADDRESS_SIZES_HOST := $(ART_PHONY_TEST_HOST_SUFFIX) $(2ND_ART_PHONY_TEST_HOST_SUFFIX)
+ALL_ADDRESS_SIZES := 64 32
+
+# List all run test names with number arguments agreeing with the comment above.
+define all-run-test-names
+  $(foreach target, $(1), \
+    $(foreach prebuild, $(2), \
+      $(foreach compiler, $(3), \
+        $(foreach relocate, $(4), \
+          $(foreach trace, $(5), \
+            $(foreach gc, $(6), \
+              $(foreach jni, $(7), \
+                $(foreach image, $(8), \
+                  $(foreach test, $(9), \
+                    $(foreach address_size, $(10), \
+                      test-art-$(target)-run-test-$(prebuild)-$(compiler)-$(relocate)-$(trace)-$(gc)-$(jni)-$(image)-$(test)$(address_size) \
+                  ))))))))))
+endef  # all-run-test-names
+
+# To generate a full list or tests:
+# $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES),$(COMPILER_TYPES), \
+#        $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
+#        $(TEST_ART_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+
+# Convert's a rule name to the form used in variables, e.g. no-relocate to NO_RELOCATE
+define name-to-var
+$(shell echo $(1) | tr '[:lower:]' '[:upper:]' | tr '-' '_')
+endef  # name-to-var
+
+# Tests that are timing sensitive and flaky on heavily loaded systems.
+TEST_ART_TIMING_SENSITIVE_RUN_TESTS := \
+  053-wait-some \
+  055-enum-performance
+
+ # disable timing sensitive tests on "dist" builds.
+ifdef dist_goal
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+        $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+        $(IMAGE_TYPES), $(TEST_ART_TIMING_SENSITIVE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
 TEST_ART_TIMING_SENSITIVE_RUN_TESTS :=
+
+# Note 116-nodex2oat is not broken per-se it just doesn't (and isn't meant to) work with --prebuild.
+TEST_ART_BROKEN_PREBUILD_RUN_TESTS := \
+  116-nodex2oat
+
+ifneq (,$(filter prebuild,$(PREBUILD_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),prebuild, \
+      $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+      $(IMAGE_TYPES), $(TEST_ART_BROKEN_PREBUILD_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_PREBUILD_RUN_TESTS :=
+
+TEST_ART_BROKEN_NO_PREBUILD_TESTS := \
+  117-nopatchoat
+
+ifneq (,$(filter no-prebuild,$(PREBUILD_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),no-prebuild, \
+      $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+      $(IMAGE_TYPES), $(TEST_ART_BROKEN_NO_PREBUILD_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_NO_PREBUILD_TESTS :=
+
+# Note 117-nopatchoat is not broken per-se it just doesn't work (and isn't meant to) without
+# --prebuild --relocate
+TEST_ART_BROKEN_NO_RELOCATE_TESTS := \
+  117-nopatchoat
+
+ifneq (,$(filter no-relocate,$(RELOCATE_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+      $(COMPILER_TYPES), no-relocate,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+      $(IMAGE_TYPES), $(TEST_ART_BROKEN_NO_RELOCATE_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
+TEST_ART_BROKEN_NO_RELOCATE_TESTS :=
+
+# Tests that are broken with tracing.
+TEST_ART_BROKEN_TRACE_RUN_TESTS := \
+  004-SignalTest \
+  018-stack-overflow \
+  097-duplicate-method \
+  107-int-math2
+
+ifneq (,$(filter trace,$(TRACE_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+      $(COMPILER_TYPES),$(RELOCATE_TYPES),trace,$(GC_TYPES),$(JNI_TYPES), \
+      $(IMAGE_TYPES), $(TEST_ART_BROKEN_TRACE_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
 TEST_ART_BROKEN_TRACE_RUN_TESTS :=
+
+# Tests that are broken with GC stress.
+TEST_ART_BROKEN_GCSTRESS_RUN_TESTS := \
+  004-SignalTest
+
+ifneq (,$(filter gcstress,$(GC_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+      $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),gcstress,$(JNI_TYPES), \
+      $(IMAGE_TYPES), $(TEST_ART_BROKEN_GCSTRESS_RUN_TESTS), $(ALL_ADDRESS_SIZES))
+endif
+
 TEST_ART_BROKEN_GCSTRESS_RUN_TESTS :=
 
-########################################################################
+# 115-native-bridge setup is complicated. Need to implement it correctly for the target.
+ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,target,$(PREBUILD_TYPES),$(COMPILER_TYPES), \
+    $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES),115-native-bridge, \
+    $(ALL_ADDRESS_SIZES))
+
+# All these tests check that we have sane behavior if we don't have a patchoat or dex2oat.
+# Therefore we shouldn't run them in situations where we actually don't have these since they
+# explicitly test for them. These all also assume we have an image.
+TEST_ART_BROKEN_FALLBACK_RUN_TESTS := \
+  116-nodex2oat \
+  117-nopatchoat \
+  118-noimage-dex2oat \
+  119-noimage-patchoat
+
+ifneq (,$(filter no-dex2oat,$(PREBUILD_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),no-dex2oat, \
+      $(COMPILER_TYPES),$(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),$(IMAGE_TYPES), \
+      $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+
+ifneq (,$(filter no-image,$(IMAGE_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+      $(COMPILER_TYPES), $(RELOCATE_TYPES),$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES),no-image, \
+      $(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
+
+ifneq (,$(filter relocate-no-patchoat,$(RELOCATE_TYPES)))
+  ART_TEST_KNOWN_BROKEN += $(call all-run-test-names,$(TARGET_TYPES),$(PREBUILD_TYPES), \
+      $(COMPILER_TYPES), relocate-no-patchoat,$(TRACE_TYPES),$(GC_TYPES),$(JNI_TYPES), \
+      $(IMAGE_TYPES),$(TEST_ART_BROKEN_FALLBACK_RUN_TESTS),$(ALL_ADDRESS_SIZES))
+endif
 
-ART_TEST_TARGET_RUN_TEST_ALL_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RULES :=
-ART_TEST_TARGET_RUN_TEST_RELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_NORELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_NO_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_ALL$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_ALL_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES :=
-ART_TEST_HOST_RUN_TEST_RELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_NORELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_NO_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
+TEST_ART_BROKEN_FALLBACK_RUN_TESTS :=
+
+# Clear variables ahead of appending to them when defining tests.
+$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach prebuild, $(PREBUILD_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(prebuild))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach compiler, $(COMPILER_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(compiler))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach relocate, $(RELOCATE_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(relocate))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach trace, $(TRACE_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(trace))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach gc, $(GC_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(gc))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach jni, $(JNI_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach image, $(IMAGE_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach test, $(TEST_ART_RUN_TESTS), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(test))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach address_size, $(ALL_ADDRESS_SIZES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(address_size))_RULES :=)))
 
 # We need dex2oat and dalvikvm on the target as well as the core image.
 TEST_ART_TARGET_SYNC_DEPS += $(ART_TARGET_EXECUTABLES) $(TARGET_CORE_IMG_OUT) $(2ND_TARGET_CORE_IMG_OUT)
@@ -351,659 +316,265 @@ ART_TEST_HOST_RUN_TEST_DEPENDENCIES += \
   $(2ND_HOST_CORE_IMG_OUT)
 endif
 
-# For a given test create all the combinations of host/target, compiler and suffix such as:
-# test-art-host-run-test-optimizing-003-omnibus-opcodes32
-# $(1): test name, e.g. 003-omnibus-opcodes
-# $(2): host or target
-# $(3): default, optimizing or interpreter
-# $(4): 32 or 64
-# $(5): run tests with tracing or GC verification enabled or not: trace, gcverify or undefined
-# $(6): relocate, norelocate, no-prebuild or undefined.
+# Create a rule to build and run a tests following the form:
+# test-art-{1: host or target}-run-test-{2: prebuild no-prebuild no-dex2oat}-
+#    {3: interpreter default optimizing}-{4: relocate no-relocate relocate-no-patchoat}-
+#    {5: trace or no-trace}-{6: gcstress gcverify cms}-{7: forcecopy checkjni jni}-
+#    {8: no-image image}-{9: test name}{10: 32 or 64}
 define define-test-art-run-test
   run_test_options := $(addprefix --runtime-option ,$(DALVIKVM_FLAGS))
-  run_test_rule_name :=
-  uc_host_or_target :=
   prereq_rule :=
-  skip_test := false
-  uc_reloc_type :=
+  test_groups :=
+  uc_host_or_target :=
   ifeq ($(ART_TEST_RUN_TEST_ALWAYS_CLEAN),true)
     run_test_options += --always-clean
   endif
-  ifeq ($(2),host)
+  ifeq ($(1),host)
     uc_host_or_target := HOST
+    test_groups := ART_RUN_TEST_HOST_RULES
     run_test_options += --host
     prereq_rule := $(ART_TEST_HOST_RUN_TEST_DEPENDENCIES)
   else
-    ifeq ($(2),target)
+    ifeq ($(1),target)
       uc_host_or_target := TARGET
+      test_groups := ART_RUN_TEST_TARGET_RULES
       prereq_rule := test-art-target-sync
     else
-      $$(error found $(2) expected host or target)
+      $$(error found $(1) expected $(TARGET_TYPES))
     endif
   endif
-  ifeq ($(6),relocate)
-    uc_reloc_type := RELOCATE
-    run_test_options += --relocate --no-prebuild
-    ifneq ($(ART_TEST_RUN_TEST_RELOCATE),true)
-      skip_test := true
-    endif
+  ifeq ($(2),prebuild)
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_PREBUILD_RULES
+    run_test_options += --prebuild
   else
-    ifeq ($(6),no-prebuild)
-      uc_reloc_type := NO_PREBUILD
-      run_test_options += --no-relocate --no-prebuild
-      ifneq ($(ART_TEST_RUN_TEST_NO_PREBUILD),true)
-        skip_test := true
-      endif
+    ifeq ($(2),no-prebuild)
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_PREBUILD_RULES
+      run_test_options += --no-prebuild
     else
-      ifeq ($(6),norelocate)
-        uc_reloc_type := NORELOCATE
-        run_test_options += --no-relocate --prebuild
-        ifneq ($(ART_TEST_RUN_TEST_NO_RELOCATE),true)
-          skip_test := true
-        endif
+      ifeq ($(2),no-dex2oat)
+        test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_DEX2OAT_RULES
+        run_test_options += --no-prebuild --no-dex2oat
       else
-        uc_reloc_type := PREBUILD
-        run_test_options += --relocate --prebuild
-        ifneq ($(ART_TEST_RUN_TEST_PREBUILD),true)
-          skip_test := true
-        endif
+        $$(error found $(2) expected $(PREBUILD_TYPES))
       endif
     endif
   endif
-  uc_compiler :=
   ifeq ($(3),optimizing)
-    uc_compiler := OPTIMIZING
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_OPTIMIZING_RULES
     run_test_options += -Xcompiler-option --compiler-backend=Optimizing
-    ifneq ($$(ART_TEST_OPTIMIZING),true)
-      skip_test := true
-    endif
   else
     ifeq ($(3),interpreter)
-      uc_compiler := INTERPRETER
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_INTERPRETER_RULES
       run_test_options += --interpreter
     else
       ifeq ($(3),default)
-        uc_compiler := DEFAULT
+        test_groups += ART_RUN_TEST_$$(uc_host_or_target)_DEFAULT_RULES
       else
-        $$(error found $(3) expected optimizing, interpreter or default)
+        $$(error found $(3) expected $(COMPILER_TYPES))
       endif
     endif
   endif
-  ifeq ($(4),64)
-    run_test_options += --64
+  ifeq ($(4),relocate)
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELOCATE_RULES
+    run_test_options += --relocate
   else
-    ifneq ($(4),32)
-      $$(error found $(4) expected 32 or 64)
+    ifeq ($(4),no-relocate)
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_RELOCATE_RULES
+      run_test_options += --no-relocate
+    else
+      ifeq ($(4),relocate-no-patchoat)
+        test_groups += ART_RUN_TEST_$$(uc_host_or_target)_RELOCATE_NO_PATCHOAT_RULES
+        run_test_options += --relocate --no-patchoat
+      else
+        $$(error found $(4) expected $(RELOCATE_TYPES))
+      endif
     endif
   endif
   ifeq ($(5),trace)
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_TRACE_RULES
     run_test_options += --trace
-    run_test_rule_name := test-art-$(2)-run-test-trace-$(3)-$(6)-$(1)$(4)
-    ifneq ($$(ART_TEST_TRACE),true)
-      skip_test := true
+  else
+    ifeq ($(5),no-trace)
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_TRACE_RULES
+    else
+      $$(error found $(5) expected $(TRACE_TYPES))
     endif
+  endif
+  ifeq ($(6),gcverify)
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_GCVERIFY_RULES
+    run_test_options += --gcverify
   else
-    ifeq ($(5),gcverify)
-      run_test_options += --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify \
-        --runtime-option -Xgc:preverify_rosalloc --runtime-option -Xgc:postverify_rosalloc
-      run_test_rule_name := test-art-$(2)-run-test-gcverify-$(3)-$(6)-$(1)$(4)
-      ifneq ($$(ART_TEST_GC_VERIFY),true)
-        skip_test := true
+    ifeq ($(6),gcstress)
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_GCSTRESS_RULES
+      run_test_options += --gcstress
+    else
+      ifeq ($(6),cms)
+        test_groups += ART_RUN_TEST_$$(uc_host_or_target)_CMS_RULES
+      else
+        $$(error found $(6) expected $(GC_TYPES))
       endif
+    endif
+  endif
+  ifeq ($(7),forcecopy)
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_FORCECOPY_RULES
+    run_test_options += --runtime-option -Xjniopts:forcecopy
+    ifneq ($$(ART_TEST_JNI_FORCECOPY),true)
+      skip_test := true
+    endif
+  else
+    ifeq ($(7),checkjni)
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_CHECKJNI_RULES
+      run_test_options += --runtime-option -Xcheck:jni
     else
-      ifeq ($(5),gcstress)
-        run_test_options += --runtime-option -Xgc:SS --runtime-option -Xms2m \
-          --runtime-option -Xmx2m --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify
-        run_test_rule_name := test-art-$(2)-run-test-gcstress-$(3)-$(6)-$(1)$(4)
-        ifneq ($$(ART_TEST_GC_STRESS),true)
-          skip_test := true
-        endif
+      ifeq ($(7),jni)
+        test_groups += ART_RUN_TEST_$$(uc_host_or_target)_JNI_RULES
       else
-        ifneq (,$(5))
-          $$(error found $(5) expected undefined or gcverify, gcstress or trace)
-        endif
-        run_test_rule_name := test-art-$(2)-run-test-$(3)-$(6)-$(1)$(4)
+        $$(error found $(7) expected $(JNI_TYPES))
       endif
     endif
   endif
-  ifeq ($$(skip_test),false)
-    run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
+  ifeq ($(8),no-image)
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_NO_IMAGE_RULES
+    run_test_options += --no-image
+  else
+    ifeq ($(8),image)
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_IMAGE_RULES
+    else
+      $$(error found $(8) expected $(IMAGE_TYPES))
+    endif
+  endif
+  # $(9) is the test name
+  test_groups += ART_RUN_TEST_$$(uc_host_or_target)_$(call name-to-var,$(9))_RULES
+  ifeq ($(10),64)
+    test_groups += ART_RUN_TEST_$$(uc_host_or_target)_64_RULES
+    run_test_options += --64
+  else
+    ifeq ($(10),32)
+      test_groups += ART_RUN_TEST_$$(uc_host_or_target)_32_RULES
+    else
+      $$(error found $(10) expected $(ALL_ADDRESS_SIZES))
+    endif
+  endif
+  run_test_rule_name := test-art-$(1)-run-test-$(2)-$(3)-$(4)-$(5)-$(6)-$(7)-$(8)-$(9)$(10)
+  run_test_options := --output-path $(ART_HOST_TEST_DIR)/run-test-output/$$(run_test_rule_name) \
       $$(run_test_options)
 $$(run_test_rule_name): PRIVATE_RUN_TEST_OPTIONS := $$(run_test_options)
 .PHONY: $$(run_test_rule_name)
 $$(run_test_rule_name): $(DX) $(HOST_OUT_EXECUTABLES)/jasmin $$(prereq_rule)
        $(hide) $$(call ART_TEST_SKIP,$$@) && \
          DX=$(abspath $(DX)) JASMIN=$(abspath $(HOST_OUT_EXECUTABLES)/jasmin) \
-           art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(1) \
+           art/test/run-test $$(PRIVATE_RUN_TEST_OPTIONS) $(9) \
              && $$(call ART_TEST_PASSED,$$@) || $$(call ART_TEST_FAILED,$$@)
        $$(hide) (echo $(MAKECMDGOALS) | grep -q $$@ && \
          echo "run-test run as top-level target, removing test directory $(ART_HOST_TEST_DIR)" && \
          rm -r $(ART_HOST_TEST_DIR)) || true
-  else
-    .PHONY: $$(run_test_rule_name)
-$$(run_test_rule_name):
-  endif
 
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)$(4)_RULES += $$(run_test_rule_name)
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)_RULES += $$(run_test_rule_name)
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)_$(1)_RULES += $$(run_test_rule_name)
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_compiler)_$$(uc_reloc_type)_RULES += $$(run_test_rule_name)
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_$(1)_RULES += $$(run_test_rule_name)
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_$(1)$(4)_RULES += $$(run_test_rule_name)
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_ALL_RULES += $$(run_test_rule_name)
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_$$(uc_reloc_type)_RULES += $$(run_test_rule_name)
-  ART_TEST_$$(uc_host_or_target)_RUN_TEST_ALL$(4)_RULES += $$(run_test_rule_name)
+  $$(foreach test_group,$$(test_groups), $$(eval $$(value test_group) += $$(run_test_rule_name)))
 
   # Clear locally defined variables.
-  skip_test :=
+  uc_host_or_target :=
+  test_groups :=
   run_test_options :=
   run_test_rule_name :=
-  uc_host_or_target :=
   prereq_rule :=
-  uc_reloc_type :=
-  uc_compiler :=
 endef  # define-test-art-run-test
 
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach test, $(TEST_ART_RUN_TESTS), \
+    $(foreach address_size, $(ADDRESS_SIZES_$(call name-to-var,$(target))), \
+      $(foreach prebuild, $(PREBUILD_TYPES), \
+        $(foreach compiler, $(COMPILER_TYPES), \
+          $(foreach relocate, $(RELOCATE_TYPES), \
+            $(foreach trace, $(TRACE_TYPES), \
+              $(foreach gc, $(GC_TYPES), \
+                $(foreach jni, $(JNI_TYPES), \
+                  $(foreach image, $(IMAGE_TYPES), \
+                    $(eval $(call define-test-art-run-test,$(target),$(prebuild),$(compiler),$(relocate),$(trace),$(gc),$(jni),$(image),$(test),$(address_size))) \
+                ))))))))))
+define-test-art-run-test :=
+
 # Define a phony rule whose purpose is to test its prerequisites.
-# $(1): rule name, e.g. test-art-host-run-test32
+# $(1): host or target
 # $(2): list of prerequisites
-define define-test-art-run-test-group-rule
+define define-test-art-run-test-group
 .PHONY: $(1)
 $(1): $(2)
        $(hide) $$(call ART_TEST_PREREQ_FINISHED,$$@)
 
-endef  # define-test-art-run-test-group-rule
-
-# Create rules for a group of run tests.
-# $(1): test name, e.g. 003-omnibus-opcodes
-# $(2): host or target
-# $(3): relocate, norelocate or no-prebuild, or prebuild.
-define define-test-art-run-test-group-type
-  group_uc_host_or_target :=
-  ifeq ($(2),host)
-    group_uc_host_or_target := HOST
-  else
-    ifeq ($(2),target)
-      group_uc_host_or_target := TARGET
-    else
-      $$(error found $(2) expected host or target)
-    endif
-  endif
-
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
-  $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
-  do_second := false
-  ifeq ($(2),host)
-    ifneq ($$(HOST_PREFER_32_BIT),true)
-      do_second := true
-    endif
-  else
-    ifdef TARGET_2ND_ARCH
-      do_second := true
-    endif
-  endif
-  ifeq (true,$$(do_second))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),trace,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcverify,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),default,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),interpreter,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
-    $$(eval $$(call define-test-art-run-test,$(1),$(2),optimizing,$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX),gcstress,$(3)))
-  endif
-endef  # define-test-art-run-test-group-type
-
-# Create rules for a group of run tests.
-# $(1): test name, e.g. 003-omnibus-opcodes
-# $(2): host or target
-define define-test-art-run-test-group
-  group_uc_host_or_target :=
-  ifeq ($(2),host)
-    group_uc_host_or_target := HOST
-  else
-    ifeq ($(2),target)
-      group_uc_host_or_target := TARGET
-    else
-      $$(error found $(2) expected host or target)
-    endif
-  endif
-  do_second := false
-  ifeq ($(2),host)
-    ifneq ($$(HOST_PREFER_32_BIT),true)
-      do_second := true
-    endif
-  else
-    ifdef TARGET_2ND_ARCH
-      do_second := true
-    endif
-  endif
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_DEFAULT_$(1)_RULES :=
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_INTERPRETER_$(1)_RULES :=
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_OPTIMIZING_$(1)_RULES :=
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)_RULES :=
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES :=
-  ifeq ($$(do_second),true)
-    ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES :=
-  endif
-  $$(eval $$(call define-test-art-run-test-group-type,$(1),$(2),prebuild))
-  $$(eval $$(call define-test-art-run-test-group-type,$(1),$(2),norelocate))
-  $$(eval $$(call define-test-art-run-test-group-type,$(1),$(2),relocate))
-  $$(eval $$(call define-test-art-run-test-group-type,$(1),$(2),no-prebuild))
-  $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-default-$(1), \
-    $$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_DEFAULT_$(1)_RULES)))
-  $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-interpreter-$(1), \
-    $$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_INTERPRETER_$(1)_RULES)))
-  $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-optimizing-$(1), \
-    $$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_OPTIMIZING_$(1)_RULES)))
-  $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-$(1), \
-    $$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)_RULES)))
-  $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-$(1)$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX), \
-    $$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES)))
-  ifeq ($$(do_second),true)
-    $$(eval $$(call define-test-art-run-test-group-rule,test-art-$(2)-run-test-$(1)$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX), \
-      $$(ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES)))
-  endif
-
-  # Clear locally defined variables.
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_DEFAULT_$(1)_RULES :=
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_INTERPRETER_$(1)_RULES :=
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_OPTIMIZING_$(1)_RULES :=
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)_RULES :=
-  ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES :=
-  ifeq ($$(do_second),true)
-    ART_TEST_$$(group_uc_host_or_target)_RUN_TEST_$(1)$$(2ND_ART_PHONY_TEST_$$(group_uc_host_or_target)_SUFFIX)_RULES :=
-  endif
-  group_uc_host_or_target :=
-  do_second :=
 endef  # define-test-art-run-test-group
 
-$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-test-art-run-test-group,$(test),target)))
-$(foreach test, $(TEST_ART_RUN_TESTS), $(eval $(call define-test-art-run-test-group,$(test),host)))
-
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-no-prebuild, \
-  $(ART_TEST_TARGET_RUN_TEST_NO_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-prebuild, \
-  $(ART_TEST_TARGET_RUN_TEST_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-norelocate, \
-  $(ART_TEST_TARGET_RUN_TEST_NORELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-relocate, \
-  $(ART_TEST_TARGET_RUN_TEST_RELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test, \
-  $(ART_TEST_TARGET_RUN_TEST_ALL_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default, \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter, \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing, \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-no-prebuild, \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-prebuild, \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-no-prebuild, \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-prebuild, \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-no-prebuild, \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-prebuild, \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-norelocate, \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-norelocate, \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-norelocate, \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-relocate, \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-relocate, \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-relocate, \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_ALL$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-no-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-norelocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-relocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-no-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-no-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-no-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-prebuild$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-norelocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-norelocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-norelocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-relocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-relocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-relocate$(ART_PHONY_TEST_TARGET_SUFFIX), \
-  $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-ifdef TARGET_2ND_ARCH
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-no-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-norelocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-relocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-no-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-no-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-no-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-prebuild$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-norelocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-norelocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-norelocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-default-relocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-interpreter-relocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-target-run-test-optimizing-relocate$(2ND_ART_PHONY_TEST_TARGET_SUFFIX), \
-    $(ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES)))
-endif
 
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-no-prebuild, \
-  $(ART_TEST_HOST_RUN_TEST_NO_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-prebuild, \
-  $(ART_TEST_HOST_RUN_TEST_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-norelocate, \
-  $(ART_TEST_HOST_RUN_TEST_NORELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-relocate, \
-  $(ART_TEST_HOST_RUN_TEST_RELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test, \
-  $(ART_TEST_HOST_RUN_TEST_ALL_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default, \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter, \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing, \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-no-prebuild, \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-prebuild, \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-no-prebuild, \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-prebuild, \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-no-prebuild, \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-prebuild, \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-norelocate, \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-norelocate, \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-norelocate, \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-relocate, \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-relocate, \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-relocate, \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-no-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-norelocate$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-relocate$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-no-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-no-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-no-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-prebuild$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-norelocate$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-norelocate$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-norelocate$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-relocate$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-relocate$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-$(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-relocate$(ART_PHONY_TEST_HOST_SUFFIX), \
-  $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-ifneq ($(HOST_PREFER_32_BIT),true)
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-no-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-norelocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-relocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-no-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-no-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-no-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-prebuild$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-norelocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-norelocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-norelocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-default-relocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-interpreter-relocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-  $(eval $(call define-test-art-run-test-group-rule,test-art-host-run-test-optimizing-relocate$(2ND_ART_PHONY_TEST_HOST_SUFFIX), \
-    $(ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES)))
-endif
+$(foreach target, $(TARGET_TYPES), $(eval \
+  $(call define-test-art-run-test-group,test-art-$(target)-run-test,$(ART_RUN_TEST_$(call name-to-var,$(target))_RULES))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach prebuild, $(PREBUILD_TYPES), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(prebuild),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(prebuild))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach compiler, $(COMPILER_TYPES), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(compiler),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(compiler))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach relocate, $(RELOCATE_TYPES), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(relocate),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(relocate))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach trace, $(TRACE_TYPES), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(trace),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(trace))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach gc, $(GC_TYPES), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(gc),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(gc))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach jni, $(JNI_TYPES), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(jni),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach image, $(IMAGE_TYPES), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(image),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach test, $(TEST_ART_RUN_TESTS), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(test),$(ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(test))_RULES)))))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach address_size, $(ADDRESS_SIZES_$(call name-to-var,$(target))), $(eval \
+    $(call define-test-art-run-test-group,test-art-$(target)-run-test-$(address_size),$(ART_RUN_TEST_$(address_size)_RULES)))))
+
+# Clear variables now we're finished with them.
+$(foreach target, $(TARGET_TYPES), $(eval ART_RUN_TEST_$(call name-to-var,$(target))_RULES :=))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach prebuild, $(PREBUILD_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(prebuild))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach compiler, $(COMPILER_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(compiler))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach relocate, $(RELOCATE_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(relocate))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach trace, $(TRACE_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(trace))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach gc, $(GC_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(gc))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach jni, $(JNI_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(jni))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach image, $(IMAGE_TYPES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(image))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach test, $(TEST_ART_RUN_TESTS), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(test))_RULES :=)))
+$(foreach target, $(TARGET_TYPES), \
+  $(foreach address_size, $(ALL_ADDRESS_SIZES), \
+    $(eval ART_RUN_TEST_$(call name-to-var,$(target))_$(call name-to-var,$(address_size))_RULES :=)))
+define-test-art-run-test-group :=
+TARGET_TYPES :=
+PREBUILD_TYPES :=
+COMPILER_TYPES :=
+RELOCATE_TYPES :=
+TRACE_TYPES :=
+GC_TYPES :=
+JNI_TYPES :=
+IMAGE_TYPES :=
+ADDRESS_SIZES_TARGET :=
+ADDRESS_SIZES_HOST :=
+ALL_ADDRESS_SIZES :=
 
-# include libarttest build rules.
 include $(LOCAL_PATH)/Android.libarttest.mk
-
-# Include libnativebridgetest build rules.
 include art/test/Android.libnativebridgetest.mk
-
-define-test-art-run-test :=
-define-test-art-run-test-group-rule :=
-define-test-art-run-test-group :=
-TEST_ART_RUN_TESTS :=
-ART_TEST_TARGET_RUN_TEST_ALL_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RULES :=
-ART_TEST_TARGET_RUN_TEST_RELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_NORELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE_RULES :=
-ART_TEST_TARGET_RUN_TEST_NO_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
-ART_TEST_TARGET_RUN_TEST_ALL$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_TARGET_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_TARGET_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_ALL_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_RULES :=
-ART_TEST_HOST_RUN_TEST_RELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_NORELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE_RULES :=
-ART_TEST_HOST_RUN_TEST_NO_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD_RULES :=
-ART_TEST_HOST_RUN_TEST_ALL$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_ALL$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_RELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NORELOCATE$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_DEFAULT_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_INTERPRETER_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_NO_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
-ART_TEST_HOST_RUN_TEST_OPTIMIZING_PREBUILD$(2ND_ART_PHONY_TEST_HOST_SUFFIX)_RULES :=
index 2241f85..54a0865 100755 (executable)
@@ -23,6 +23,10 @@ QUIET="n"
 FLAGS=""
 COMPILER_FLAGS=""
 BUILD_BOOT_OPT=""
+PATCHOAT=""
+DEX2OAT=""
+FALSE_BIN="/bin/false"
+HAVE_IMAGE="y"
 exe="${ANDROID_HOST_OUT}/bin/dalvikvm32"
 main="Main"
 
@@ -33,6 +37,12 @@ while true; do
     elif [ "x$1" = "x--prebuild" ]; then
         PREBUILD="y"
         shift
+    elif [ "x$1" = "x--no-dex2oat" ]; then
+        DEX2OAT="-Xcompiler:${FALSE_BIN}"
+        shift
+    elif [ "x$1" = "x--no-patchoat" ]; then
+        PATCHOAT="-Xpatchoat:${FALSE_BIN}"
+        shift
     elif [ "x$1" = "x--lib" ]; then
         shift
         if [ "x$1" = "x" ]; then
@@ -44,6 +54,9 @@ while true; do
             LIB=${LIB/%so/dylib}
         fi
         shift
+    elif [ "x$1" = "x--no-image" ]; then
+        HAVE_IMAGE="n"
+        shift
     elif [ "x$1" = "x--boot" ]; then
         shift
         option="$1"
@@ -76,7 +89,7 @@ while true; do
         INTERPRETER="y"
         shift
     elif [ "x$1" = "x--64" ]; then
-        ISA="x64"
+        ISA="x86_64"
         exe="${ANDROID_HOST_OUT}/bin/dalvikvm64"
         shift
     elif [ "x$1" = "x--no-verify" ]; then
@@ -157,6 +170,11 @@ if [ "$INTERPRETER" = "y" ]; then
     COMPILER_FLAGS="${COMPILER_FLAGS} --compiler-filter=interpret-only"
 fi
 
+if [ "$HAVE_IMAGE" = "n" ]; then
+    # Set image to a place were there isn't one.
+    BOOT_OPT="-Ximage:/system/non-existant/core.art"
+fi
+
 if [ "$RELOCATE" = "y" ]; then
   FLAGS="${FLAGS} -Xrelocate"
   COMPILER_FLAGS="${COMPILER_FLAGS} --runtime-arg -Xnorelocate --include-patch-information"
@@ -178,7 +196,7 @@ else
 fi
 
 JNI_OPTS="-Xjnigreflimit:512 -Xcheck:jni"
-cmdline="$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB $JNI_OPTS $FLAGS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar $main"
+cmdline="$INVOKE_WITH $gdb $exe $gdbargs -XXlib:$LIB $PATCHOAT $DEX2OAT $JNI_OPTS $FLAGS $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar $main"
 if [ "$DEV_MODE" = "y" ]; then
   if [ "$PREBUILD" = "y" ]; then
     echo "$mkdir_cmd && $prebuild_cmd && $cmdline"
@@ -190,4 +208,7 @@ if [ "$DEV_MODE" = "y" ]; then
 fi
 
 cd $ANDROID_BUILD_TOP
-$mkdir_cmd && $prebuild_cmd && LD_PRELOAD=libsigchain.so $cmdline "$@"
+# If we are execing /bin/false we might not be on the same ISA as libsigchain.so
+# ld.so will helpfully warn us of this. Unfortunately this messes up our error
+# checking so we will just filter out the error with a grep.
+$mkdir_cmd && $prebuild_cmd && LD_PRELOAD=libsigchain.so $cmdline "$@" 2>&1 | grep -v -E "^ERROR: ld\.so: object '.+\.so' from LD_PRELOAD cannot be preloaded: ignored\.$"
index ad23edf..91b8a0f 100755 (executable)
@@ -26,6 +26,10 @@ FLAGS=""
 TARGET_SUFFIX="32"
 GDB_TARGET_SUFFIX=""
 COMPILE_FLAGS=""
+FALSE_BIN="/system/bin/false"
+PATCHOAT=""
+DEX2OAT=""
+HAVE_IMAGE="y"
 
 while true; do
     if [ "x$1" = "x--quiet" ]; then
@@ -55,12 +59,21 @@ while true; do
         BOOT_OPT="$1"
         BUILD_BOOT_OPT="--boot-image=${1#-Ximage:}"
         shift
+    elif [ "x$1" = "x--no-dex2oat" ]; then
+        DEX2OAT="-Xcompiler:${FALSE_BIN}"
+        shift
+    elif [ "x$1" = "x--no-patchoat" ]; then
+        PATCHOAT="-Xpatchoat:${FALSE_BIN}"
+        shift
     elif [ "x$1" = "x--relocate" ]; then
         RELOCATE="y"
         shift
     elif [ "x$1" = "x--no-relocate" ]; then
         RELOCATE="n"
         shift
+    elif [ "x$1" = "x--no-image" ]; then
+        HAVE_IMAGE="n"
+        shift
     elif [ "x$1" = "x--debug" ]; then
         DEBUGGER="y"
         shift
@@ -136,6 +149,10 @@ fi
 
 msg "------------------------------"
 
+if [ "$HAVE_IMAGE" = "n" ]; then
+    BOOT_OPT="-Ximage:/system/non-existant/core.art"
+fi
+
 ARCH=$(adb shell ls -F /data/dalvik-cache | grep -Ewo "${ARCHITECTURES_PATTERN}")
 if [ x"$ARCH" = "x" ]; then
   echo "Unable to determine architecture"
@@ -194,7 +211,7 @@ fi
 cmdline="cd $DEX_LOCATION && export ANDROID_DATA=$DEX_LOCATION && export DEX_LOCATION=$DEX_LOCATION && \
     mkdir -p $DEX_LOCATION/dalvik-cache/$ARCH/ && \
     $INVOKE_WITH /system/bin/dex2oatd $COMPILE_FLAGS $BUILD_BOOT_OPT $BUILD_RELOCATE_OPT  --runtime-arg -classpath --runtime-arg $DEX_LOCATION/$TEST_NAME.jar --dex-file=$DEX_LOCATION/$TEST_NAME.jar --oat-file=$DEX_LOCATION/dalvik-cache/$ARCH/$(echo $DEX_LOCATION/$TEST_NAME.jar/classes.dex | cut -d/ -f 2- | sed "s:/:@:g") --instruction-set=$ARCH && \
-    $INVOKE_WITH $gdb /system/bin/dalvikvm$TARGET_SUFFIX $FLAGS $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $RELOCATE_OPT $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main $@"
+    $INVOKE_WITH $gdb /system/bin/dalvikvm$TARGET_SUFFIX $FLAGS $gdbargs -XXlib:$LIB $PATCHOAT $DEX2OAT $ZYGOTE $JNI_OPTS $RELOCATE_OPT $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main $@"
 cmdfile=$(tempfile -p "cmd-" -s "-$TEST_NAME")
 echo "$cmdline" > $cmdfile
 
index 06075c2..e398b5d 100755 (executable)
@@ -22,6 +22,10 @@ INVOKE_WITH=""
 FLAGS=""
 TARGET_SUFFIX="32"
 GDB_TARGET_SUFFIX=""
+FALSE_BIN="/system/bin/false"
+PATCHOAT=""
+DEX2OAT=""
+HAVE_IMAGE="y"
 
 while true; do
     if [ "x$1" = "x--quiet" ]; then
@@ -40,6 +44,15 @@ while true; do
         option="$1"
         FLAGS="${FLAGS} -Xcompiler-option $option"
         shift
+    elif [ "x$1" = "x--no-image" ]; then
+        HAVE_IMAGE="n"
+        shift
+    elif [ "x$1" = "x--no-dex2oat" ]; then
+        DEX2OAT="-Xcompiler:${FALSE_BIN}"
+        shift
+    elif [ "x$1" = "x--no-patchoat" ]; then
+        PATCHOAT="-Xpatchoat:${FALSE_BIN}"
+        shift
     elif [ "x$1" = "x--runtime-option" ]; then
         shift
         option="$1"
@@ -129,6 +142,10 @@ fi
 
 msg "------------------------------"
 
+if [ "$HAVE_IMAGE" = "n" ]; then
+    BOOT_OPT="-Ximage:/system/non-existant/core.art"
+fi
+
 if [ "$QUIET" = "n" ]; then
   adb shell rm -r $DEX_LOCATION
   adb shell mkdir -p $DEX_LOCATION
@@ -172,7 +189,7 @@ else
 fi
 
 cmdline="cd $DEX_LOCATION && export ANDROID_DATA=$DEX_LOCATION && export DEX_LOCATION=$DEX_LOCATION && \
-    $INVOKE_WITH $gdb /system/bin/dalvikvm$TARGET_SUFFIX $FLAGS $gdbargs -XXlib:$LIB $ZYGOTE $JNI_OPTS $RELOCATE_OPT $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main"
+    $INVOKE_WITH $gdb /system/bin/dalvikvm$TARGET_SUFFIX $FLAGS $gdbargs -XXlib:$LIB $PATCHOAT $DEX2OAT $ZYGOTE $JNI_OPTS $RELOCATE_OPT $INT_OPTS $DEBUGGER_OPTS $BOOT_OPT -cp $DEX_LOCATION/$TEST_NAME.jar Main"
 if [ "$DEV_MODE" = "y" ]; then
   echo $cmdline "$@"
 fi
index 284cca0..318a0de 100755 (executable)
@@ -80,6 +80,12 @@ while true; do
     elif [ "x$1" = "x--64" ]; then
         run_args="${run_args} --64"
         shift
+    elif [ "x$1" = "x--gcstress" ]; then
+        run_args="${run_args} --gcstress"
+        shift
+    elif [ "x$1" = "x--gcverify" ]; then
+        run_args="${run_args} --gcverify"
+        shift
     elif [ "x$1" = "x--trace" ]; then
         run_args="${run_args} --trace"
         shift
@@ -95,6 +101,12 @@ while true; do
     elif [ "x$1" = "x--prebuild" ]; then
         run_args="${run_args} --prebuild"
         shift;
+    elif [ "x$1" = "x--no-dex2oat" ]; then
+        run_args="${run_args} --no-dex2oat"
+        shift;
+    elif [ "x$1" = "x--no-patchoat" ]; then
+        run_args="${run_args} --no-patchoat"
+        shift;
     elif [ "x$1" = "x--always-clean" ]; then
         run_args="${run_args} --always-clean"
     elif expr "x$1" : "x--" >/dev/null 2>&1; then
@@ -116,7 +128,8 @@ if [ "$usage" = "yes" ]; then
              "further documentation:"
         echo "    --debug --dev --host --interpreter --jvm --no-optimize"
         echo "    --no-verify -O --update --valgrind --zygote --64 --relocate"
-        echo "    --prebuild --always-clean"
+        echo "    --prebuild --always-clean --gcstress --gcverify --trace"
+        echo "    --no-patchoat --no-dex2oat"
         echo "  Specific Runtime Options:"
         echo "    --seq                Run tests one-by-one, avoiding failures caused by busy CPU"
     ) 1>&2
index 02b62b0..eed28a5 100755 (executable)
@@ -77,7 +77,13 @@ usage="no"
 build_only="no"
 suffix64=""
 trace="false"
+basic_verify="false"
+gc_verify="false"
+gc_stress="false"
 always_clean="no"
+have_dex2oat="yes"
+have_patchoat="yes"
+have_image="yes"
 
 while true; do
     if [ "x$1" = "x--host" ]; then
@@ -96,6 +102,15 @@ while true; do
         lib="libdvm.so"
         runtime="dalvik"
         shift
+    elif [ "x$1" = "x--no-dex2oat" ]; then
+        have_dex2oat="no"
+        shift
+    elif [ "x$1" = "x--no-patchoat" ]; then
+        have_patchoat="no"
+        shift
+    elif [ "x$1" = "x--no-image" ]; then
+        have_image="no"
+        shift
     elif [ "x$1" = "x--relocate" ]; then
         relocate="yes"
         shift
@@ -108,6 +123,14 @@ while true; do
     elif [ "x$1" = "x--no-prebuild" ]; then
         prebuild_mode="no"
         shift;
+    elif [ "x$1" = "x--gcverify" ]; then
+        basic_verify="true"
+        gc_verify="true"
+        shift
+    elif [ "x$1" = "x--gcstress" ]; then
+        basic_verify="true"
+        gc_stress="true"
+        shift
     elif [ "x$1" = "x--image" ]; then
         shift
         image="$1"
@@ -201,10 +224,20 @@ done
 # Cannot use realpath, as it does not exist on Mac.
 # Cannot us a simple "cd", as the path might not be created yet.
 # Use -m option of readlink: canonicalizes, but allows non-existing components.
+noncanonical_tmp_dir=$tmp_dir
 tmp_dir="`cd $oldwd ; readlink -m $tmp_dir`"
 
 mkdir -p $tmp_dir
 
+if [ "$basic_verify" = "true" ]; then
+  run_args="${run_args} --runtime-option -Xgc:preverify --runtime-option -Xgc:postverify"
+fi
+if [ "$gc_verify" = "true" ]; then
+  run_args="${run_args} --runtime-option -Xgc:preverify_rosalloc --runtime-option -Xgc:postverify_rosalloc"
+fi
+if [ "$gc_stress" = "true" ]; then
+  run_args="${run_args} --runtime-option -Xgc:SS --runtime-option -Xms2m --runtime-option -Xmx2m"
+fi
 if [ "$trace" = "true" ]; then
     run_args="${run_args} --runtime-option -Xmethod-trace --runtime-option -Xmethod-trace-file:${DEX_LOCATION}/trace.bin --runtime-option -Xmethod-trace-file-size:2000000"
 fi
@@ -240,6 +273,14 @@ else
     fi
 fi
 
+if [ "$have_patchoat" = "no" ]; then
+  run_args="${run_args} --no-patchoat"
+fi
+
+if [ "$have_dex2oat" = "no" ]; then
+  run_args="${run_args} --no-dex2oat"
+fi
+
 if [ ! "$runtime" = "jvm" ]; then
   run_args="${run_args} --lib $lib"
 fi
@@ -275,6 +316,30 @@ elif [ "$runtime" = "art" ]; then
     fi
 fi
 
+if [ "$have_image" = "no" ]; then
+    if [ "$runtime" != "art" ]; then
+        echo "--no-image is only supported on the art runtime"
+        exit 1
+    fi
+    if [ "$target_mode" = "no" ]; then
+        framework="${ANDROID_HOST_OUT}/framework"
+        bpath_suffix="-hostdex"
+    else
+        framework="/system/framework"
+        bpath_suffix=""
+    fi
+    # TODO If the target was compiled WITH_DEXPREOPT=true then these tests will
+    # fail since these jar files will be stripped.
+    bpath="${framework}/core-libart${bpath_suffix}.jar"
+    bpath="${bpath}:${framework}/conscrypt${bpath_suffix}.jar"
+    bpath="${bpath}:${framework}/okhttp${bpath_suffix}.jar"
+    bpath="${bpath}:${framework}/core-junit${bpath_suffix}.jar"
+    bpath="${bpath}:${framework}/bouncycastle${bpath_suffix}.jar"
+    # Pass down the bootclasspath
+    run_args="${run_args} --runtime-option -Xbootclasspath:${bpath}"
+    run_args="${run_args} --no-image"
+fi
+
 if [ "$dev_mode" = "yes" -a "$update_mode" = "yes" ]; then
     echo "--dev and --update are mutually exclusive" 1>&2
     usage="yes"
@@ -326,6 +391,8 @@ if [ "$usage" = "yes" ]; then
         echo "    --zygote             Spawn the process from the Zygote." \
              "If used, then the"
         echo "                         other runtime options are ignored."
+        echo "    --no-dex2oat         Run as though dex2oat was failing."
+        echo "    --no-patchoat        Run as though patchoat was failing."
         echo "    --prebuild           Run dex2oat on the files before starting test. (default)"
         echo "    --no-prebuild        Do not run dex2oat on the files before starting"
         echo "                         the test."
@@ -341,6 +408,8 @@ if [ "$usage" = "yes" ]; then
              "files."
         echo "    --64                 Run the test in 64-bit mode"
         echo "    --trace              Run with method tracing"
+        echo "    --gcstress           Run with gc stress testing"
+        echo "    --gcverify           Run with gc verification"
         echo "    --always-clean       Delete the test files even if the test fails."
     ) 1>&2
     exit 1
@@ -452,6 +521,8 @@ else
         "./${run}" $run_args "$@" >"$output" 2>&1
     else
         cp "$build_output" "$output"
+        echo "Failed to build in tmpdir=${tmp_dir} from oldwd=${oldwd} and cwd=`pwd`"
+        echo "Non-canonical tmpdir was ${noncanonical_tmp_dir}"
         echo "build exit status: $build_exit" >>"$output"
     fi
     ./$check_cmd "$expected" "$output"
old mode 100755 (executable)
new mode 100644 (file)